universal-mcp-agents 0.1.20rc1__tar.gz → 0.1.22__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of universal-mcp-agents might be problematic. Click here for more details.

Files changed (66)
  1. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/PKG-INFO +1 -1
  2. universal_mcp_agents-0.1.22/bech.py +38 -0
  3. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/pyproject.toml +1 -1
  4. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/base.py +29 -28
  5. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/agent.py +100 -57
  6. universal_mcp_agents-0.1.22/src/universal_mcp/agents/codeact0/llm_tool.py +25 -0
  7. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/prompts.py +93 -104
  8. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/state.py +20 -2
  9. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/tools.py +18 -28
  10. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/utils.py +56 -11
  11. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/applications/llm/app.py +2 -2
  12. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/uv.lock +529 -502
  13. universal_mcp_agents-0.1.20rc1/src/universal_mcp/agents/codeact0/llm_tool.py +0 -277
  14. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/.github/workflows/evals.yml +0 -0
  15. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/.github/workflows/lint.yml +0 -0
  16. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/.github/workflows/release-please.yml +0 -0
  17. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/.github/workflows/tests.yml +0 -0
  18. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/.gitignore +0 -0
  19. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/.pre-commit-config.yaml +0 -0
  20. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/GEMINI.md +0 -0
  21. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/PROMPTS.md +0 -0
  22. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/README.md +0 -0
  23. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/bump_and_release.sh +0 -0
  24. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/evals/__init__.py +0 -0
  25. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/evals/dataset.py +0 -0
  26. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/evals/datasets/codeact.jsonl +0 -0
  27. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/evals/datasets/exact.jsonl +0 -0
  28. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/evals/datasets/tasks.jsonl +0 -0
  29. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/evals/evaluators.py +0 -0
  30. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/evals/prompts.py +0 -0
  31. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/evals/run.py +0 -0
  32. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/evals/utils.py +0 -0
  33. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/tests/test_agents.py +0 -0
  34. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/__init__.py +0 -0
  35. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/bigtool/__init__.py +0 -0
  36. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/bigtool/__main__.py +0 -0
  37. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/bigtool/agent.py +0 -0
  38. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/bigtool/context.py +0 -0
  39. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/bigtool/graph.py +0 -0
  40. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/bigtool/prompts.py +0 -0
  41. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/bigtool/state.py +0 -0
  42. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/bigtool/tools.py +0 -0
  43. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/builder/__main__.py +0 -0
  44. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/builder/builder.py +0 -0
  45. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/builder/helper.py +0 -0
  46. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/builder/prompts.py +0 -0
  47. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/builder/state.py +0 -0
  48. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/cli.py +0 -0
  49. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/__init__.py +0 -0
  50. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/__main__.py +0 -0
  51. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/config.py +0 -0
  52. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/langgraph_agent.py +0 -0
  53. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/codeact0/sandbox.py +0 -0
  54. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/hil.py +0 -0
  55. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/llm.py +0 -0
  56. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/react.py +0 -0
  57. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/sandbox.py +0 -0
  58. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/shared/__main__.py +0 -0
  59. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/shared/prompts.py +0 -0
  60. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/shared/tool_node.py +0 -0
  61. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/simple.py +0 -0
  62. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/agents/utils.py +0 -0
  63. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/applications/filesystem/__init__.py +0 -0
  64. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/applications/filesystem/app.py +0 -0
  65. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/applications/llm/__init__.py +0 -0
  66. {universal_mcp_agents-0.1.20rc1 → universal_mcp_agents-0.1.22}/src/universal_mcp/applications/ui/app.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: universal-mcp-agents
3
- Version: 0.1.20rc1
3
+ Version: 0.1.22
4
4
  Summary: Add your description here
5
5
  Project-URL: Homepage, https://github.com/universal-mcp/applications
6
6
  Project-URL: Repository, https://github.com/universal-mcp/applications
@@ -0,0 +1,38 @@
1
from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents import get_agent
from langgraph.checkpoint.memory import MemorySaver
from universal_mcp.agents.utils import messages_to_list
import time
from loguru import logger


async def main():
    """Benchmark agent startup latency.

    Logs elapsed wall-clock time after each setup stage (checkpointer,
    agent-class lookup, registry init, agent construction, agent init)
    and the time to the first streamed event for a trivial prompt.
    """
    start_time = time.time()
    memory = MemorySaver()
    logger.info(f"Checkpointer: Time consumed: {time.time() - start_time}")
    agent_cls = get_agent("codeact-repl")
    logger.info(f"Get class: Time consumed: {time.time() - start_time}")
    registry = AgentrRegistry()
    logger.info(f"Init Registry: Time consumed: {time.time() - start_time}")
    agent = agent_cls(
        name="CodeAct Agent",
        instructions="Be very concise in your answers.",
        model="anthropic:claude-4-sonnet-20250514",
        tools={},
        registry=registry,
        memory=memory,
    )
    logger.info(f"Create agent: Time consumed: {time.time() - start_time}")
    print("Init agent...")
    await agent.ainit()
    logger.info(f"Init Agent: Time consumed: {time.time() - start_time}")
    print("Starting agent...")
    # BUG FIX: the "First token" timing was previously logged on every
    # streamed event, mislabeling all subsequent tokens as the first.
    # Log it exactly once, on the first event only.
    first_token_seen = False
    async for e in agent.stream(user_input="hi"):
        if not first_token_seen:
            logger.info(f"First token: Time consumed: {time.time() - start_time}")
            first_token_seen = True
        print(e)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
@@ -6,7 +6,7 @@ build-backend = "hatchling.build"
6
6
 
7
7
  [project]
8
8
  name = "universal-mcp-agents"
9
- version = "0.1.20-rc1"
9
+ version = "0.1.22"
10
10
  description = "Add your description here"
11
11
  readme = "README.md"
12
12
  authors = [
@@ -56,40 +56,41 @@ class BaseAgent:
56
56
  "run_name": self.name,
57
57
  }
58
58
 
59
+ last_ai_chunk = None
59
60
  async for event, meta in self._graph.astream(
60
61
  {"messages": [{"role": "user", "content": user_input}]},
61
62
  config=run_config,
62
63
  context={"system_prompt": self.instructions, "model": self.model},
63
- stream_mode="messages",
64
+ stream_mode=["messages", "custom"],
64
65
  stream_usage=True,
65
66
  ):
66
- # Only forward assistant token chunks that are not tool-related.
67
- type_ = type(event)
68
- tags = meta.get("tags", []) if isinstance(meta, dict) else []
69
- is_quiet = isinstance(tags, list) and ("quiet" in tags)
70
- if is_quiet:
71
- continue
72
- # Handle different types of messages
73
- if type_ == AIMessageChunk:
74
- # Accumulate billing and aggregate message
75
- aggregate = event if aggregate is None else aggregate + event
76
- # Ignore intermeddite finish messages
77
- if "finish_reason" in event.response_metadata:
78
- # Got LLM finish reason ignore it
79
- logger.debug(
80
- f"Finish event: {event}, reason: {event.response_metadata['finish_reason']}, Metadata: {meta}"
81
- )
82
- pass
83
- else:
84
- logger.debug(f"Event: {event}, Metadata: {meta}")
85
- yield event
86
- # Send a final finished message
87
- # The last event would be finish
88
- event = cast(AIMessageChunk, event)
89
- event.usage_metadata = aggregate.usage_metadata
90
- logger.debug(f"Usage metadata: {event.usage_metadata}")
91
- event.content = "" # Clear the message since it would have already been streamed above
92
- yield event
67
+ if event == "messages" and isinstance(meta, (tuple, list)) and len(meta) == 2:
68
+ payload, meta_dict = meta
69
+ is_playbook = isinstance(meta_dict, dict) and meta_dict.get("langgraph_node") == "playbook"
70
+ additional_kwargs = getattr(payload, "additional_kwargs", {}) or {}
71
+ if is_playbook and not additional_kwargs.get("stream"):
72
+ continue
73
+ if isinstance(payload, AIMessageChunk):
74
+ last_ai_chunk = payload
75
+ aggregate = payload if aggregate is None else aggregate + payload
76
+ if "finish_reason" in payload.response_metadata:
77
+ logger.debug(
78
+ f"Finish event: {payload}, reason: {payload.response_metadata['finish_reason']}, Metadata: {meta_dict}"
79
+ )
80
+ pass
81
+ logger.debug(f"Event: {payload}, Metadata: {meta_dict}")
82
+ yield payload
83
+
84
+ if event == "custom":
85
+ yield meta
86
+
87
+ # Send a final finished message if we saw any AI chunks (to carry usage)
88
+ if last_ai_chunk is not None and aggregate is not None:
89
+ event = cast(AIMessageChunk, last_ai_chunk)
90
+ event.usage_metadata = aggregate.usage_metadata
91
+ logger.debug(f"Usage metadata: {event.usage_metadata}")
92
+ event.content = "" # Clear the message since it would have already been streamed above
93
+ yield event
93
94
 
94
95
  async def stream_interactive(self, thread_id: str, user_input: str):
95
96
  await self.ainit()
@@ -1,33 +1,35 @@
1
1
  import json
2
2
  import re
3
- from collections.abc import Callable
4
3
  from typing import Literal, cast
4
+ import uuid
5
5
 
6
6
  from langchain_core.messages import AIMessage, ToolMessage
7
7
  from langchain_core.tools import StructuredTool
8
8
  from langgraph.checkpoint.base import BaseCheckpointSaver
9
9
  from langgraph.graph import START, StateGraph
10
- from langgraph.types import Command, RetryPolicy
10
+ from langgraph.types import Command, RetryPolicy, StreamWriter
11
11
  from universal_mcp.tools.registry import ToolRegistry
12
12
  from universal_mcp.types import ToolConfig, ToolFormat
13
13
 
14
14
  from universal_mcp.agents.base import BaseAgent
15
- from universal_mcp.agents.codeact0.llm_tool import ai_classify, call_llm, data_extractor, smart_print
15
+ from universal_mcp.agents.codeact0.llm_tool import smart_print
16
16
  from universal_mcp.agents.codeact0.prompts import (
17
- PLAYBOOK_CONFIRMING_PROMPT,
18
17
  PLAYBOOK_GENERATING_PROMPT,
19
18
  PLAYBOOK_PLANNING_PROMPT,
19
+ PLAYBOOK_META_PROMPT,
20
20
  create_default_prompt,
21
21
  )
22
22
  from universal_mcp.agents.codeact0.sandbox import eval_unsafe, execute_ipython_cell, handle_execute_ipython_cell
23
- from universal_mcp.agents.codeact0.state import CodeActState
23
+ from universal_mcp.agents.codeact0.state import CodeActState, PlaybookCode, PlaybookPlan, PlaybookMeta
24
24
  from universal_mcp.agents.codeact0.tools import (
25
25
  create_meta_tools,
26
26
  enter_playbook_mode,
27
27
  get_valid_tools,
28
28
  )
29
+ from universal_mcp.agents.codeact0.utils import add_tools
29
30
  from universal_mcp.agents.llm import load_chat_model
30
31
  from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
32
+ from universal_mcp.agents.codeact0.utils import get_connected_apps_string
31
33
 
32
34
 
33
35
  class CodeActPlaybookAgent(BaseAgent):
@@ -51,16 +53,22 @@ class CodeActPlaybookAgent(BaseAgent):
51
53
  **kwargs,
52
54
  )
53
55
  self.model_instance = load_chat_model(model)
54
- self.tools_config = tools or []
56
+ self.playbook_model_instance = load_chat_model("azure/gpt-4.1")
57
+ self.tools_config = tools or {}
55
58
  self.registry = registry
56
59
  self.playbook_registry = playbook_registry
60
+ self.playbook = playbook_registry.get_agent() if playbook_registry else None
57
61
  self.eval_fn = eval_unsafe
58
62
  self.sandbox_timeout = sandbox_timeout
59
- self.processed_tools: list[StructuredTool | Callable] = []
63
+ self.default_tools = {
64
+ "llm": ["generate_text", "classify_data", "extract_data", "call_llm"],
65
+ }
66
+ add_tools(self.tools_config, self.default_tools)
67
+
60
68
 
61
69
  async def _build_graph(self):
62
70
  meta_tools = create_meta_tools(self.registry)
63
- additional_tools = [smart_print, data_extractor, ai_classify, call_llm, meta_tools["web_search"]]
71
+ additional_tools = [smart_print, meta_tools["web_search"]]
64
72
  self.additional_tools = [
65
73
  t if isinstance(t, StructuredTool) else StructuredTool.from_function(t) for t in additional_tools
66
74
  ]
@@ -145,7 +153,7 @@ class CodeActPlaybookAgent(BaseAgent):
145
153
  else:
146
154
  raise Exception(
147
155
  f"Unexpected tool call: {tool_call['name']}. "
148
- "tool calls must be one of 'enter_playbook_mode', 'execute_ipython_cell', 'load_functions', or 'search_functions'"
156
+ "tool calls must be one of 'enter_playbook_mode', 'execute_ipython_cell', 'load_functions', or 'search_functions'. For using functions, call them in code using 'execute_ipython_cell'."
149
157
  )
150
158
  except Exception as e:
151
159
  tool_result = str(e)
@@ -161,7 +169,7 @@ class CodeActPlaybookAgent(BaseAgent):
161
169
  self.tools_config.extend(new_tool_ids)
162
170
  self.exported_tools = await self.registry.export_tools(new_tool_ids, ToolFormat.LANGCHAIN)
163
171
  self.final_instructions, self.tools_context = create_default_prompt(
164
- self.exported_tools, self.additional_tools, self.instructions
172
+ self.exported_tools, self.additional_tools, self.instructions, await get_connected_apps_string(self.registry)
165
173
  )
166
174
  if ask_user:
167
175
  tool_messages.append(AIMessage(content=ai_msg))
@@ -184,41 +192,80 @@ class CodeActPlaybookAgent(BaseAgent):
184
192
  },
185
193
  )
186
194
 
187
- def playbook(state: CodeActState) -> Command[Literal["call_model"]]:
195
+ def playbook(state: CodeActState, writer: StreamWriter) -> Command[Literal["call_model"]]:
188
196
  playbook_mode = state.get("playbook_mode")
189
197
  if playbook_mode == "planning":
198
+ plan_id = str(uuid.uuid4())
199
+ writer({
200
+ "type": "custom",
201
+ id: plan_id,
202
+ "name": "planning",
203
+ "data": {"update": bool(self.playbook)}
204
+ })
190
205
  planning_instructions = self.instructions + PLAYBOOK_PLANNING_PROMPT
191
206
  messages = [{"role": "system", "content": planning_instructions}] + state["messages"]
192
207
 
193
- response = self.model_instance.invoke(messages)
194
- response = cast(AIMessage, response)
195
- response_text = get_message_text(response)
196
- # Extract plan from response text between triple backticks
197
- plan_match = re.search(r"```(.*?)```", response_text, re.DOTALL)
198
- if plan_match:
199
- plan = plan_match.group(1).strip()
200
- else:
201
- plan = response_text.strip()
202
- return Command(update={"messages": [response], "playbook_mode": "confirming", "plan": plan})
208
+ model_with_structured_output = self.playbook_model_instance.with_structured_output(PlaybookPlan)
209
+ response = model_with_structured_output.invoke(messages)
210
+ plan = cast(PlaybookPlan, response)
211
+
212
+ writer({"type": "custom", id: plan_id, "name": "planning", "data": {"plan": plan.steps}})
213
+ return Command(update={"messages": [AIMessage(content=json.dumps(plan.dict()), additional_kwargs={"type": "planning", "plan": plan.steps, "update": bool(self.playbook)})], "playbook_mode": "confirming", "plan": plan.steps})
203
214
 
204
215
  elif playbook_mode == "confirming":
205
- confirmation_instructions = self.instructions + PLAYBOOK_CONFIRMING_PROMPT
206
- messages = [{"role": "system", "content": confirmation_instructions}] + state["messages"]
207
- response = self.model_instance.invoke(messages, stream=False)
208
- response = get_message_text(response)
209
- if "true" in response.lower():
210
- return Command(goto="playbook", update={"playbook_mode": "generating"})
211
- else:
212
- return Command(goto="playbook", update={"playbook_mode": "planning"})
216
+ # Deterministic routing based on three exact button inputs from UI
217
+ user_text = ""
218
+ for m in reversed(state["messages"]):
219
+ try:
220
+ if getattr(m, "type", "") in {"human", "user"}:
221
+ user_text = (get_message_text(m) or "").strip()
222
+ if user_text:
223
+ break
224
+ except Exception:
225
+ continue
226
+
227
+ t = user_text.lower()
228
+ if t == "yes, this is great":
229
+ # Generate playbook metadata (name and description) before moving to generation
230
+ meta_id = str(uuid.uuid4())
231
+ writer({
232
+ "type": "custom",
233
+ id: meta_id,
234
+ "name": "metadata",
235
+ "data": {"update": bool(self.playbook)}
236
+ })
237
+ meta_instructions = self.instructions + PLAYBOOK_META_PROMPT
238
+ messages = [{"role": "system", "content": meta_instructions}] + state["messages"]
239
+
240
+ model_with_structured_output = self.playbook_model_instance.with_structured_output(PlaybookMeta)
241
+ meta_response = model_with_structured_output.invoke(messages)
242
+ meta = cast(PlaybookMeta, meta_response)
243
+
244
+ writer({"type": "custom", id: meta_id, "name": "metadata", "data": {"name": meta.name, "description": meta.description}})
245
+ return Command(goto="playbook", update={"playbook_mode": "generating", "playbook_name": meta.name, "playbook_description": meta.description})
246
+ if t == "i would like to modify the plan":
247
+ prompt_ai = AIMessage(content="What would you like to change about the plan? Let me know and I'll update the plan accordingly.", additional_kwargs={"stream": "true"})
248
+ return Command(update={"playbook_mode": "planning", "messages": [prompt_ai]})
249
+ if t == "let's do something else":
250
+ return Command(goto="call_model", update={"playbook_mode": "inactive"})
251
+
252
+ # Fallback safe default
253
+ return Command(goto="call_model", update={"playbook_mode": "inactive"})
213
254
 
214
255
  elif playbook_mode == "generating":
256
+ generate_id = str(uuid.uuid4())
257
+ writer({
258
+ "type": "custom",
259
+ id: generate_id,
260
+ "name": "generating",
261
+ "data": {"update": bool(self.playbook)}
262
+ })
215
263
  generating_instructions = self.instructions + PLAYBOOK_GENERATING_PROMPT
216
264
  messages = [{"role": "system", "content": generating_instructions}] + state["messages"]
217
- response = cast(AIMessage, self.model_instance.invoke(messages))
218
- raw_content = get_message_text(response)
219
- func_code = raw_content.strip()
220
- func_code = func_code.replace("```python", "").replace("```", "")
221
- func_code = func_code.strip()
265
+
266
+ model_with_structured_output = self.playbook_model_instance.with_structured_output(PlaybookCode)
267
+ response = model_with_structured_output.invoke(messages)
268
+ func_code = cast(PlaybookCode, response).code
222
269
 
223
270
  # Extract function name (handle both regular and async functions)
224
271
  match = re.search(r"^\s*(?:async\s+)?def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(", func_code, re.MULTILINE)
@@ -227,48 +274,44 @@ class CodeActPlaybookAgent(BaseAgent):
227
274
  else:
228
275
  function_name = "generated_playbook"
229
276
 
277
+ # Use generated metadata if available
278
+ final_name = state.get("playbook_name") or function_name
279
+ final_description = state.get("playbook_description") or f"Generated playbook: {function_name}"
280
+
230
281
  # Save or update an Agent using the helper registry
231
- saved_note = ""
232
282
  try:
233
- if not self.playbook_registry:
283
+ if not self.playbook_registry:
234
284
  raise ValueError("Playbook registry is not configured")
235
285
 
236
286
  # Build instructions payload embedding the plan and function code
237
287
  instructions_payload = {
238
288
  "playbookPlan": state["plan"],
239
- "playbookScript": {
240
- "name": function_name,
241
- "code": func_code,
242
- },
289
+ "playbookScript": func_code,
243
290
  }
244
291
 
245
292
  # Convert tool ids list to dict
246
293
  tool_dict = convert_tool_ids_to_dict(state["selected_tool_ids"])
247
294
 
248
- res = self.playbook_registry.create_agent(
249
- name=function_name,
250
- description=f"Generated playbook: {function_name}",
295
+ res = self.playbook_registry.upsert_agent(
296
+ name=final_name,
297
+ description=final_description,
251
298
  instructions=instructions_payload,
252
299
  tools=tool_dict,
253
300
  visibility="private",
254
301
  )
255
- saved_note = f"Successfully created your playbook! Check it out here: [View Playbook](https://wingmen.info/agents/{res.id})"
256
302
  except Exception as e:
257
- saved_note = f"Failed to save generated playbook as Agent '{function_name}': {e}"
303
+ raise e
258
304
 
259
- # Mock tool call for exit_playbook_mode (for testing/demonstration)
260
- mock_exit_tool_call = {"name": "exit_playbook_mode", "args": {}, "id": "mock_exit_playbook_123"}
261
- mock_assistant_message = AIMessage(content=saved_note, tool_calls=[mock_exit_tool_call])
262
-
263
- # Mock tool response for exit_playbook_mode
264
- mock_exit_tool_response = ToolMessage(
265
- content=json.dumps(f"Exited Playbook Mode.{saved_note}"),
266
- name="exit_playbook_mode",
267
- tool_call_id="mock_exit_playbook_123",
268
- )
305
+ writer({
306
+ "type": "custom",
307
+ id: generate_id,
308
+ "name": "generating",
309
+ "data": {"id": str(res.id), "update": bool(self.playbook)}
310
+ })
311
+ mock_assistant_message = AIMessage(content=json.dumps(response.dict()), additional_kwargs={"type": "generating", "id": str(res.id), "update": bool(self.playbook)})
269
312
 
270
313
  return Command(
271
- update={"messages": [mock_assistant_message, mock_exit_tool_response], "playbook_mode": "normal"}
314
+ update={"messages": [mock_assistant_message], "playbook_mode": "normal"}
272
315
  )
273
316
 
274
317
  async def route_entry(state: CodeActState) -> Literal["call_model", "playbook"]:
@@ -277,7 +320,7 @@ class CodeActPlaybookAgent(BaseAgent):
277
320
  self.tools_config.extend(state.get("selected_tool_ids", []))
278
321
  self.exported_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
279
322
  self.final_instructions, self.tools_context = create_default_prompt(
280
- self.exported_tools, self.additional_tools, self.instructions
323
+ self.exported_tools, self.additional_tools, self.instructions, await get_connected_apps_string(self.registry)
281
324
  )
282
325
  if state.get("playbook_mode") in ["planning", "confirming", "generating"]:
283
326
  return "playbook"
@@ -0,0 +1,25 @@
1
+ from typing import Any
2
+
3
+ from universal_mcp.agents.codeact0.utils import light_copy
4
+
5
+ MAX_RETRIES = 3
6
+
7
+
8
+ def get_context_str(source: Any | list[Any] | dict[str, Any]) -> str:
9
+ """Converts context to a string representation."""
10
+ if not isinstance(source, dict):
11
+ if isinstance(source, list):
12
+ source = {f"doc_{i + 1}": str(doc) for i, doc in enumerate(source)}
13
+ else:
14
+ source = {"content": str(source)}
15
+
16
+ return "\n".join(f"<{k}>\n{str(v)}\n</{k}>" for k, v in source.items())
17
+
18
+
19
def smart_print(data: Any) -> None:
    """Print a compact view of *data* (a dict, or a list of dicts).

    Delegates to ``light_copy``, which shortens long string values so the
    console output stays readable.
    """
    summary = light_copy(data)
    print(summary)  # noqa: T201