universal-mcp-agents 0.1.22__tar.gz → 0.1.23rc2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/PKG-INFO +3 -3
  2. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/pyproject.toml +3 -3
  3. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/__init__.py +2 -2
  4. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/cli.py +1 -1
  5. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/__main__.py +2 -5
  6. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/agent.py +173 -111
  7. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/prompts.py +48 -33
  8. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/state.py +2 -2
  9. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/tools.py +47 -11
  10. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/utils.py +26 -5
  11. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/uv.lock +3 -3
  12. universal_mcp_agents-0.1.22/bech.py +0 -38
  13. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/.github/workflows/evals.yml +0 -0
  14. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/.github/workflows/lint.yml +0 -0
  15. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/.github/workflows/release-please.yml +0 -0
  16. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/.github/workflows/tests.yml +0 -0
  17. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/.gitignore +0 -0
  18. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/.pre-commit-config.yaml +0 -0
  19. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/GEMINI.md +0 -0
  20. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/PROMPTS.md +0 -0
  21. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/README.md +0 -0
  22. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/bump_and_release.sh +0 -0
  23. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/evals/__init__.py +0 -0
  24. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/evals/dataset.py +0 -0
  25. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/evals/datasets/codeact.jsonl +0 -0
  26. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/evals/datasets/exact.jsonl +0 -0
  27. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/evals/datasets/tasks.jsonl +0 -0
  28. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/evals/evaluators.py +0 -0
  29. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/evals/prompts.py +0 -0
  30. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/evals/run.py +0 -0
  31. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/evals/utils.py +0 -0
  32. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/tests/test_agents.py +0 -0
  33. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/base.py +0 -0
  34. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/bigtool/__init__.py +0 -0
  35. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/bigtool/__main__.py +0 -0
  36. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/bigtool/agent.py +0 -0
  37. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/bigtool/context.py +0 -0
  38. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/bigtool/graph.py +0 -0
  39. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/bigtool/prompts.py +0 -0
  40. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/bigtool/state.py +0 -0
  41. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/bigtool/tools.py +0 -0
  42. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/builder/__main__.py +0 -0
  43. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/builder/builder.py +0 -0
  44. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/builder/helper.py +0 -0
  45. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/builder/prompts.py +0 -0
  46. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/builder/state.py +0 -0
  47. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/__init__.py +0 -0
  48. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/config.py +0 -0
  49. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/langgraph_agent.py +0 -0
  50. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/llm_tool.py +0 -0
  51. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/codeact0/sandbox.py +0 -0
  52. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/hil.py +0 -0
  53. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/llm.py +0 -0
  54. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/react.py +0 -0
  55. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/sandbox.py +0 -0
  56. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/shared/__main__.py +0 -0
  57. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/shared/prompts.py +0 -0
  58. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/shared/tool_node.py +0 -0
  59. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/simple.py +0 -0
  60. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/agents/utils.py +0 -0
  61. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/applications/filesystem/__init__.py +0 -0
  62. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/applications/filesystem/app.py +0 -0
  63. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/applications/llm/__init__.py +0 -0
  64. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/applications/llm/app.py +0 -0
  65. {universal_mcp_agents-0.1.22 → universal_mcp_agents-0.1.23rc2}/src/universal_mcp/applications/ui/app.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-agents
-Version: 0.1.22
+Version: 0.1.23rc2
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications
@@ -12,8 +12,8 @@ Requires-Dist: langchain-google-genai>=2.1.10
 Requires-Dist: langchain-openai>=0.3.32
 Requires-Dist: langgraph>=0.6.6
 Requires-Dist: typer>=0.17.4
-Requires-Dist: universal-mcp-applications>=0.1.24
-Requires-Dist: universal-mcp>=0.1.24rc25
+Requires-Dist: universal-mcp-applications>=0.1.25
+Requires-Dist: universal-mcp>=0.1.24rc26
 Provides-Extra: dev
 Requires-Dist: pre-commit; extra == 'dev'
 Requires-Dist: ruff; extra == 'dev'
pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "universal-mcp-agents"
-version = "0.1.22"
+version = "0.1.23-rc2"
 description = "Add your description here"
 readme = "README.md"
 authors = [
@@ -19,8 +19,8 @@ dependencies = [
     "langchain-openai>=0.3.32",
     "langgraph>=0.6.6",
     "typer>=0.17.4",
-    "universal-mcp>=0.1.24rc25",
-    "universal-mcp-applications>=0.1.24",
+    "universal-mcp>=0.1.24rc26",
+    "universal-mcp-applications>=0.1.25",
 ]
 
 [project.license]
src/universal_mcp/agents/__init__.py
@@ -9,7 +9,7 @@ from universal_mcp.agents.simple import SimpleAgent
 
 
 def get_agent(
-    agent_name: Literal["react", "simple", "builder", "bigtool", "codeact-script", "codeact-repl"],
+    agent_name: Literal["react", "simple", "builder", "bigtool", "codeact-repl"],
 ):
     if agent_name == "react":
         return ReactAgent
@@ -23,7 +23,7 @@ def get_agent(
         return CodeActPlaybookAgent
     else:
         raise ValueError(
-            f"Unknown agent: {agent_name}. Possible values: react, simple, builder, bigtool, codeact-script, codeact-repl"
+            f"Unknown agent: {agent_name}. Possible values: react, simple, builder, bigtool, codeact-repl"
        )
 
 
src/universal_mcp/agents/cli.py
@@ -25,7 +25,7 @@ def run(name: str = "react"):
     client = AgentrClient()
     params = {
         "instructions": "You are a helpful assistant",
-        "model": "anthropic/claude-sonnet-4-20250514",
+        "model": "azure/gpt-4.1",
         "registry": AgentrRegistry(client=client),
         "memory": MemorySaver(),
     }
src/universal_mcp/agents/codeact0/__main__.py
@@ -13,15 +13,12 @@ async def main():
     agent = CodeActPlaybookAgent(
         name="CodeAct Agent",
         instructions="Be very concise in your answers.",
-        model="anthropic:claude-4-sonnet-20250514",
-        tools={"google_mail": ["list_messages"]},
+        model="azure/gpt-4.1",
         registry=AgentrRegistry(),
         memory=memory,
     )
     print("Starting agent...")
-    result = await agent.invoke(
-        user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
-    )
+    result = await agent.invoke(user_input="load all the tools of reddit which can be used to search subreddit")
     print(messages_to_list(result["messages"]))
 
 
src/universal_mcp/agents/codeact0/agent.py
@@ -1,35 +1,36 @@
+import copy
 import json
 import re
-from typing import Literal, cast
 import uuid
+from typing import Literal, cast
 
+from langchain_anthropic import ChatAnthropic
 from langchain_core.messages import AIMessage, ToolMessage
 from langchain_core.tools import StructuredTool
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import START, StateGraph
 from langgraph.types import Command, RetryPolicy, StreamWriter
 from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolConfig, ToolFormat
+from universal_mcp.types import ToolFormat
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.codeact0.llm_tool import smart_print
 from universal_mcp.agents.codeact0.prompts import (
     PLAYBOOK_GENERATING_PROMPT,
-    PLAYBOOK_PLANNING_PROMPT,
     PLAYBOOK_META_PROMPT,
+    PLAYBOOK_PLANNING_PROMPT,
     create_default_prompt,
 )
 from universal_mcp.agents.codeact0.sandbox import eval_unsafe, execute_ipython_cell, handle_execute_ipython_cell
-from universal_mcp.agents.codeact0.state import CodeActState, PlaybookCode, PlaybookPlan, PlaybookMeta
+from universal_mcp.agents.codeact0.state import CodeActState, PlaybookCode, PlaybookMeta, PlaybookPlan
 from universal_mcp.agents.codeact0.tools import (
     create_meta_tools,
     enter_playbook_mode,
     get_valid_tools,
 )
-from universal_mcp.agents.codeact0.utils import add_tools
+from universal_mcp.agents.codeact0.utils import build_anthropic_cache_message, get_connected_apps_string
 from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
-from universal_mcp.agents.codeact0.utils import get_connected_apps_string
 
 
 class CodeActPlaybookAgent(BaseAgent):
@@ -39,7 +40,6 @@ class CodeActPlaybookAgent(BaseAgent):
         instructions: str,
         model: str,
         memory: BaseCheckpointSaver | None = None,
-        tools: ToolConfig | None = None,
         registry: ToolRegistry | None = None,
         playbook_registry: object | None = None,
         sandbox_timeout: int = 20,
@@ -54,17 +54,18 @@
         )
         self.model_instance = load_chat_model(model)
         self.playbook_model_instance = load_chat_model("azure/gpt-4.1")
-        self.tools_config = tools or {}
         self.registry = registry
         self.playbook_registry = playbook_registry
         self.playbook = playbook_registry.get_agent() if playbook_registry else None
+        self.tools_config = self.playbook.tools if self.playbook else {}
         self.eval_fn = eval_unsafe
         self.sandbox_timeout = sandbox_timeout
-        self.default_tools = {
+        self.default_tools_config = {
            "llm": ["generate_text", "classify_data", "extract_data", "call_llm"],
         }
-        add_tools(self.tools_config, self.default_tools)
-
+        self.final_instructions = ""
+        self.tools_context = {}
+        self.exported_tools = []
 
     async def _build_graph(self):
         meta_tools = create_meta_tools(self.registry)
@@ -72,28 +73,47 @@
         self.additional_tools = [
             t if isinstance(t, StructuredTool) else StructuredTool.from_function(t) for t in additional_tools
         ]
+
         if self.tools_config:
-            # Convert dict format to list format if needed
             if isinstance(self.tools_config, dict):
                 self.tools_config = [
                     f"{provider}__{tool}" for provider, tools in self.tools_config.items() for tool in tools
                 ]
-            if not self.registry:
-                raise ValueError("Tools are configured but no registry is provided")
+            if not self.registry:
+                raise ValueError("Tools are configured but no registry is provided")
+            await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
+
+        await self.registry.export_tools(self.default_tools_config, ToolFormat.LANGCHAIN)
 
         async def call_model(state: CodeActState) -> Command[Literal["execute_tools"]]:
-            messages = [{"role": "system", "content": self.final_instructions}] + state["messages"]
-
-            # Run the model and potentially loop for reflection
-            model_with_tools = self.model_instance.bind_tools(
-                tools=[
-                    execute_ipython_cell,
-                    enter_playbook_mode,
-                    meta_tools["search_functions"],
-                    meta_tools["load_functions"],
-                ],
-                tool_choice="auto",
-            )
+            """This node now only ever binds the four meta-tools to the LLM."""
+            messages = build_anthropic_cache_message(self.final_instructions) + state["messages"]
+
+            agent_facing_tools = [
+                execute_ipython_cell,
+                enter_playbook_mode,
+                meta_tools["search_functions"],
+                meta_tools["load_functions"],
+            ]
+
+            if isinstance(self.model_instance, ChatAnthropic):
+                model_with_tools = self.model_instance.bind_tools(
+                    tools=agent_facing_tools,
+                    tool_choice="auto",
+                    cache_control={"type": "ephemeral", "ttl": "1h"},
+                )
+                if isinstance(messages[-1].content, str):
+                    pass
+                else:
+                    last = copy.deepcopy(messages[-1])
+                    last.content[-1]["cache_control"] = {"type": "ephemeral", "ttl": "5m"}
+                    messages[-1] = last
+            else:
+                model_with_tools = self.model_instance.bind_tools(
+                    tools=agent_facing_tools,
+                    tool_choice="auto",
+                )
+
             response = cast(AIMessage, model_with_tools.invoke(messages))
             if response.tool_calls:
                 return Command(goto="execute_tools", update={"messages": [response]})
@@ -107,15 +127,16 @@
 
             tool_messages = []
             new_tool_ids = []
-            ask_user = False
-            ai_msg = ""
             tool_result = ""
             effective_previous_add_context = state.get("add_context", {})
             effective_existing_context = state.get("context", {})
+            # logging.info(f"Initial new_tool_ids_for_context: {new_tool_ids_for_context}")
 
             for tool_call in tool_calls:
+                tool_name = tool_call["name"]
+                tool_args = tool_call["args"]
                 try:
-                    if tool_call["name"] == "enter_playbook_mode":
+                    if tool_name == "enter_playbook_mode":
                         tool_message = ToolMessage(
                             content=json.dumps("Entered Playbook Mode."),
                             name=tool_call["name"],
@@ -125,11 +146,11 @@
                             goto="playbook",
                             update={"playbook_mode": "planning", "messages": [tool_message]}, # Entered Playbook mode
                         )
-                    elif tool_call["name"] == "execute_ipython_cell":
+                    elif tool_name == "execute_ipython_cell":
                         code = tool_call["args"]["snippet"]
                         output, new_context, new_add_context = await handle_execute_ipython_cell(
                             code,
-                            self.tools_context,
+                            self.tools_context, # Uses the dynamically updated context
                             self.eval_fn,
                             effective_previous_add_context,
                             effective_existing_context,
@@ -137,19 +158,22 @@
                         effective_existing_context = new_context
                         effective_previous_add_context = new_add_context
                         tool_result = output
-                    elif tool_call["name"] == "load_functions": # Handle load_functions separately
-                        valid_tools, unconnected_links = await get_valid_tools(
-                            tool_ids=tool_call["args"]["tool_ids"], registry=self.registry
-                        )
+                    elif tool_name == "load_functions":
+                        # The tool now does all the work of validation and formatting.
+                        tool_result = await meta_tools["load_functions"].ainvoke(tool_args)
+
+                        # We still need to update the sandbox context for `execute_ipython_cell`
+                        valid_tools, _ = await get_valid_tools(tool_ids=tool_args["tool_ids"], registry=self.registry)
                         new_tool_ids.extend(valid_tools)
-                        # Create tool message response
-                        tool_result = f"Successfully loaded {len(valid_tools)} tools: {valid_tools}"
-                        links = "\n".join(unconnected_links)
-                        if links:
-                            ask_user = True
-                            ai_msg = f"Please login to the following app(s) using the following links and let me know in order to proceed:\n {links} "
-                    elif tool_call["name"] == "search_functions":
-                        tool_result = await meta_tools["search_functions"].ainvoke(tool_call["args"])
+                        if new_tool_ids:
+                            newly_exported = await self.registry.export_tools(new_tool_ids, ToolFormat.LANGCHAIN)
+                            _, new_context_for_sandbox = create_default_prompt(
+                                newly_exported, [], "", "", None
+                            ) # is_initial_prompt is False by default
+                            self.tools_context.update(new_context_for_sandbox)
+
+                    elif tool_name == "search_functions":
+                        tool_result = await meta_tools["search_functions"].ainvoke(tool_args)
                     else:
                         raise Exception(
                             f"Unexpected tool call: {tool_call['name']}. "
@@ -165,23 +189,6 @@
                        )
                    tool_messages.append(tool_message)
 
-            if new_tool_ids:
-                self.tools_config.extend(new_tool_ids)
-                self.exported_tools = await self.registry.export_tools(new_tool_ids, ToolFormat.LANGCHAIN)
-                self.final_instructions, self.tools_context = create_default_prompt(
-                    self.exported_tools, self.additional_tools, self.instructions, await get_connected_apps_string(self.registry)
-                )
-            if ask_user:
-                tool_messages.append(AIMessage(content=ai_msg))
-                return Command(
-                    update={
-                        "messages": tool_messages,
-                        "selected_tool_ids": new_tool_ids,
-                        "context": effective_existing_context,
-                        "add_context": effective_previous_add_context,
-                    }
-                )
-
             return Command(
                 goto="call_model",
                 update={
@@ -196,21 +203,31 @@
             playbook_mode = state.get("playbook_mode")
             if playbook_mode == "planning":
                 plan_id = str(uuid.uuid4())
-                writer({
-                    "type": "custom",
-                    id: plan_id,
-                    "name": "planning",
-                    "data": {"update": bool(self.playbook)}
-                })
+                writer({"type": "custom", id: plan_id, "name": "planning", "data": {"update": bool(self.playbook)}})
                 planning_instructions = self.instructions + PLAYBOOK_PLANNING_PROMPT
                 messages = [{"role": "system", "content": planning_instructions}] + state["messages"]
 
                 model_with_structured_output = self.playbook_model_instance.with_structured_output(PlaybookPlan)
                 response = model_with_structured_output.invoke(messages)
                 plan = cast(PlaybookPlan, response)
-
+
                 writer({"type": "custom", id: plan_id, "name": "planning", "data": {"plan": plan.steps}})
-                return Command(update={"messages": [AIMessage(content=json.dumps(plan.dict()), additional_kwargs={"type": "planning", "plan": plan.steps, "update": bool(self.playbook)})], "playbook_mode": "confirming", "plan": plan.steps})
+                return Command(
+                    update={
+                        "messages": [
+                            AIMessage(
+                                content=json.dumps(plan.model_dump()),
+                                additional_kwargs={
+                                    "type": "planning",
+                                    "plan": plan.steps,
+                                    "update": bool(self.playbook),
+                                },
+                            )
+                        ],
+                        "playbook_mode": "confirming",
+                        "plan": plan.steps,
+                    }
+                )
 
             elif playbook_mode == "confirming":
                 # Deterministic routing based on three exact button inputs from UI
@@ -226,25 +243,58 @@
 
                 t = user_text.lower()
                 if t == "yes, this is great":
-                    # Generate playbook metadata (name and description) before moving to generation
-                    meta_id = str(uuid.uuid4())
-                    writer({
-                        "type": "custom",
-                        id: meta_id,
-                        "name": "metadata",
-                        "data": {"update": bool(self.playbook)}
-                    })
-                    meta_instructions = self.instructions + PLAYBOOK_META_PROMPT
-                    messages = [{"role": "system", "content": meta_instructions}] + state["messages"]
-
-                    model_with_structured_output = self.playbook_model_instance.with_structured_output(PlaybookMeta)
-                    meta_response = model_with_structured_output.invoke(messages)
-                    meta = cast(PlaybookMeta, meta_response)
-
-                    writer({"type": "custom", id: meta_id, "name": "metadata", "data": {"name": meta.name, "description": meta.description}})
-                    return Command(goto="playbook", update={"playbook_mode": "generating", "playbook_name": meta.name, "playbook_description": meta.description})
+                    self.meta_id = str(uuid.uuid4())
+                    name, description = None, None
+                    if self.playbook:
+                        # Update flow: use existing name/description and do not re-generate
+                        name = getattr(self.playbook, "name", None)
+                        description = getattr(self.playbook, "description", None)
+                        writer(
+                            {
+                                "type": "custom",
+                                id: self.meta_id,
+                                "name": "generating",
+                                "data": {
+                                    "update": True,
+                                    "name": name,
+                                    "description": description,
+                                },
+                            }
+                        )
+                    else:
+                        writer({"type": "custom", id: self.meta_id, "name": "generating", "data": {"update": False}})
+
+                        meta_instructions = self.instructions + PLAYBOOK_META_PROMPT
+                        messages = [{"role": "system", "content": meta_instructions}] + state["messages"]
+
+                        model_with_structured_output = self.playbook_model_instance.with_structured_output(PlaybookMeta)
+                        meta_response = model_with_structured_output.invoke(messages)
+                        meta = cast(PlaybookMeta, meta_response)
+                        name, description = meta.name, meta.description
+
+                        # Emit intermediary UI update with created name/description
+                        writer(
+                            {
+                                "type": "custom",
+                                id: self.meta_id,
+                                "name": "generating",
+                                "data": {"update": False, "name": name, "description": description},
+                            }
+                        )
+
+                    return Command(
+                        goto="playbook",
+                        update={
+                            "playbook_mode": "generating",
+                            "playbook_name": name,
+                            "playbook_description": description,
+                        },
+                    )
                 if t == "i would like to modify the plan":
-                    prompt_ai = AIMessage(content="What would you like to change about the plan? Let me know and I'll update the plan accordingly.", additional_kwargs={"stream": "true"})
+                    prompt_ai = AIMessage(
+                        content="What would you like to change about the plan? Let me know and I'll update the plan accordingly.",
+                        additional_kwargs={"stream": "true"},
+                    )
                     return Command(update={"playbook_mode": "planning", "messages": [prompt_ai]})
                 if t == "let's do something else":
                     return Command(goto="call_model", update={"playbook_mode": "inactive"})
@@ -253,16 +303,9 @@
                 return Command(goto="call_model", update={"playbook_mode": "inactive"})
 
             elif playbook_mode == "generating":
-                generate_id = str(uuid.uuid4())
-                writer({
-                    "type": "custom",
-                    id: generate_id,
-                    "name": "generating",
-                    "data": {"update": bool(self.playbook)}
-                })
                 generating_instructions = self.instructions + PLAYBOOK_GENERATING_PROMPT
                 messages = [{"role": "system", "content": generating_instructions}] + state["messages"]
-
+
                 model_with_structured_output = self.playbook_model_instance.with_structured_output(PlaybookCode)
                 response = model_with_structured_output.invoke(messages)
                 func_code = cast(PlaybookCode, response).code
@@ -275,12 +318,12 @@
                 function_name = "generated_playbook"
 
                 # Use generated metadata if available
-                final_name = state.get("playbook_name") or function_name
+                final_name = state.get("playbook_name") or function_name
                 final_description = state.get("playbook_description") or f"Generated playbook: {function_name}"
 
                 # Save or update an Agent using the helper registry
                 try:
-                    if not self.playbook_registry:
+                    if not self.playbook_registry:
                         raise ValueError("Playbook registry is not configured")
 
                     # Build instructions payload embedding the plan and function code
@@ -302,25 +345,45 @@
                 except Exception as e:
                     raise e
 
-                writer({
-                    "type": "custom",
-                    id: generate_id,
-                    "name": "generating",
-                    "data": {"id": str(res.id), "update": bool(self.playbook)}
-                })
-                mock_assistant_message = AIMessage(content=json.dumps(response.dict()), additional_kwargs={"type": "generating", "id": str(res.id), "update": bool(self.playbook)})
-
-                return Command(
-                    update={"messages": [mock_assistant_message], "playbook_mode": "normal"}
+                writer(
+                    {
+                        "type": "custom",
+                        id: self.meta_id,
+                        "name": "generating",
+                        "data": {
+                            "id": str(res.id),
+                            "update": bool(self.playbook),
+                            "name": final_name,
+                            "description": final_description,
+                        },
+                    }
+                )
+                mock_assistant_message = AIMessage(
+                    content=json.dumps(response.model_dump()),
+                    additional_kwargs={
+                        "type": "generating",
+                        "id": str(res.id),
+                        "update": bool(self.playbook),
+                        "name": final_name,
+                        "description": final_description,
+                    },
                 )
 
+                return Command(update={"messages": [mock_assistant_message], "playbook_mode": "normal"})
+
         async def route_entry(state: CodeActState) -> Literal["call_model", "playbook"]:
             """Route to either normal mode or playbook creation"""
-            self.exported_tools = []
-            self.tools_config.extend(state.get("selected_tool_ids", []))
-            self.exported_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
+            all_tools = await self.registry.export_tools(state["selected_tool_ids"], ToolFormat.LANGCHAIN)
+            # print(all_tools)
+
+            # Create the initial system prompt and tools_context in one go
             self.final_instructions, self.tools_context = create_default_prompt(
-                self.exported_tools, self.additional_tools, self.instructions, await get_connected_apps_string(self.registry)
+                all_tools,
+                self.additional_tools,
+                self.instructions,
+                await get_connected_apps_string(self.registry),
+                self.playbook,
+                is_initial_prompt=True,
             )
             if state.get("playbook_mode") in ["planning", "confirming", "generating"]:
                 return "playbook"
@@ -331,5 +394,4 @@
         agent.add_node(playbook)
         agent.add_node(execute_tools)
         agent.add_conditional_edges(START, route_entry)
-        # agent.add_edge(START, "call_model")
         return agent.compile(checkpointer=self.memory)
src/universal_mcp/agents/codeact0/prompts.py
@@ -3,6 +3,7 @@ import re
 from collections.abc import Sequence
 
 from langchain_core.tools import StructuredTool
+
 from universal_mcp.agents.codeact0.utils import schema_to_signature
 
 uneditable_prompt = """
@@ -137,13 +138,20 @@
     base_prompt: str | None = None,
     apps_string: str | None = None,
     playbook: object | None = None,
+    is_initial_prompt: bool = False,
 ):
-    system_prompt = uneditable_prompt.strip()
-    if apps_string:
-        system_prompt += f"\n\n**Connected external applications (These apps have been logged into by the user):**\n{apps_string}\n\n Use `search_functions` to search for functions you can perform using the above. You can also discover more applications using the `search_functions` tool to find additional tools and integrations, if required.\n"
-    system_prompt += "\n\nIn addition to the Python Standard Library, you can use the following external functions:\n"
-
+    if is_initial_prompt:
+        system_prompt = uneditable_prompt.strip()
+        if apps_string:
+            system_prompt += f"\n\n**Connected external applications (These apps have been logged into by the user):**\n{apps_string}\n\n Use `search_functions` to search for functions you can perform using the above. You can also discover more applications using the `search_functions` tool to find additional tools and integrations, if required.\n"
+        system_prompt += (
+            "\n\nIn addition to the Python Standard Library, you can use the following external functions:\n"
+        )
+    else:
+        system_prompt = ""
+
     tools_context = {}
+    tool_definitions = []
 
     for tool in tools:
         if hasattr(tool, "func") and tool.func is not None:
@@ -152,10 +160,11 @@
         elif hasattr(tool, "coroutine") and tool.coroutine is not None:
             tool_callable = tool.coroutine
             is_async = True
-        system_prompt += f'''{"async " if is_async else ""}{schema_to_signature(tool.args, tool.name)}:
+        tool_definitions.append(
+            f'''{"async " if is_async else ""}{schema_to_signature(tool.args, tool.name)}:
     """{tool.description}"""
-    ...
-    '''
+    ...'''
+        )
         safe_name = make_safe_function_name(tool.name)
         tools_context[safe_name] = tool_callable
 
@@ -166,34 +175,40 @@
         elif hasattr(tool, "coroutine") and tool.coroutine is not None:
             tool_callable = tool.coroutine
             is_async = True
-        system_prompt += f'''{"async " if is_async else ""}def {tool.name} {str(inspect.signature(tool_callable))}:
+        tool_definitions.append(
+            f'''{"async " if is_async else ""}def {tool.name} {str(inspect.signature(tool_callable))}:
     """{tool.description}"""
-    ...
-    '''
+    ...'''
+        )
         safe_name = make_safe_function_name(tool.name)
         tools_context[safe_name] = tool_callable
 
-    if base_prompt and base_prompt.strip():
-        system_prompt += f"Your goal is to perform the following task:\n\n{base_prompt}"
-
-    # Append existing playbook (plan + code) if provided
-    try:
-        if playbook and hasattr(playbook, "instructions"):
-            pb = playbook.instructions or {}
-            plan = pb.get("playbookPlan")
-            code = pb.get("playbookScript")
-            if plan or code:
-                system_prompt += "\n\nExisting Playbook Provided:\n"
-                if plan:
-                    if isinstance(plan, list):
-                        plan_block = "\n".join(f"- {str(s)}" for s in plan)
-                    else:
-                        plan_block = str(plan)
-                    system_prompt += f"Plan Steps:\n{plan_block}\n"
-                if code:
-                    system_prompt += f"\nScript:\n```python\n{str(code)}\n```\n"
-    except Exception:
-        # Silently ignore formatting issues
-        pass
+    system_prompt += "\n".join(tool_definitions)
+
+    if is_initial_prompt:
+        if base_prompt and base_prompt.strip():
+            system_prompt += (
+                f"\n\nUse the following information/instructions while completing your tasks:\n\n{base_prompt}"
+            )
+
+        # Append existing playbook (plan + code) if provided
+        try:
+            if playbook and hasattr(playbook, "instructions"):
+                pb = playbook.instructions or {}
+                plan = pb.get("playbookPlan")
+                code = pb.get("playbookScript")
+                if plan or code:
+                    system_prompt += "\n\nExisting Playbook Provided:\n"
+                    if plan:
+                        if isinstance(plan, list):
+                            plan_block = "\n".join(f"- {str(s)}" for s in plan)
+                        else:
+                            plan_block = str(plan)
+                        system_prompt += f"Plan Steps:\n{plan_block}\n"
+                    if code:
+                        system_prompt += f"\nScript:\n```python\n{str(code)}\n```\n"
+        except Exception:
+            # Silently ignore formatting issues
+            pass
 
     return system_prompt, tools_context
src/universal_mcp/agents/codeact0/state.py
@@ -1,11 +1,11 @@
-from typing import Annotated, Any, List
+from typing import Annotated, Any
 
 from langgraph.prebuilt.chat_agent_executor import AgentState
 from pydantic import BaseModel, Field
 
 
 class PlaybookPlan(BaseModel):
-    steps: List[str] = Field(description="The steps of the playbook.")
+    steps: list[str] = Field(description="The steps of the playbook.")
 
 
 class PlaybookCode(BaseModel):
src/universal_mcp/agents/codeact0/tools.py
@@ -4,22 +4,18 @@ from collections import defaultdict
 from typing import Annotated, Any
 
 from langchain_core.tools import tool
-from loguru import logger
 from pydantic import Field
 from universal_mcp.agentr.registry import AgentrRegistry
 from universal_mcp.types import ToolFormat
 
+from universal_mcp.agents.codeact0.prompts import create_default_prompt
+
 
 def enter_playbook_mode():
     """Call this function to enter playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
     return
 
 
-def exit_playbook_mode():
-    """Call this function to exit playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
-    return
-
-
 def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
     """Create the meta tools for searching and loading tools"""
 
@@ -105,7 +101,9 @@ def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
             prioritized_app_id_list = [canonical_app_id]
         else:
             # 1. Perform an initial broad search for tools.
-            initial_tool_search_tasks = [registry.search_tools(query=q, distance_threshold=THRESHOLD) for q in queries]
+            initial_tool_search_tasks = [
+                registry.search_tools(query=q, distance_threshold=THRESHOLD) for q in queries
+            ]
             initial_tool_results = await asyncio.gather(*initial_tool_search_tasks)
 
             # 2. Search for relevant apps.
@@ -198,15 +196,53 @@ def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
 
     @tool
     async def load_functions(tool_ids: list[str]) -> str:
-        """Load specific functions by their IDs for use in subsequent steps.
+        """
+        Loads specified functions and returns their Python signatures and docstrings.
+        This makes the functions available for use inside the 'execute_ipython_cell' tool.
+        The agent MUST use the returned information to understand how to call the functions correctly.
 
         Args:
-            tool_ids: Function ids in the form 'app__function'. Example: 'google_mail__send_email'
+            tool_ids: A list of function IDs in the format 'app__function'. Example: ['google_mail__send_email']
 
         Returns:
-            Confirmation message about loaded functions
+            A string containing the signatures and docstrings of the successfully loaded functions,
+            ready for the agent to use in its code.
         """
-        return f"Successfully loaded {len(tool_ids)} functions: {tool_ids}"
+        if not tool_ids:
+            return "No tool IDs provided to load."
+
+        # Step 1: Validate which tools are usable and get login links for others.
+        valid_tool_ids, unconnected_links = await get_valid_tools(tool_ids=tool_ids, registry=tool_registry)
+
+        if not valid_tool_ids:
+            return "Error: None of the provided tool IDs could be validated or loaded."
+
+        # Step 2: Export the schemas of the valid tools.
+        try:
+            # Create a temporary, clean registry to export only the requested tools
+            temp_registry = AgentrRegistry()
+            exported_tools = await temp_registry.export_tools(valid_tool_ids, ToolFormat.LANGCHAIN)
+        except Exception as e:
+            return f"Error exporting tools: {e}"
+
+        # Step 3: Build the informational string for the agent.
+        tool_definitions, _ = create_default_prompt(exported_tools, [], is_initial_prompt=False)
+
+        result_parts = [
+            f"Successfully loaded {len(exported_tools)} functions. They are now available for use inside `execute_ipython_cell`:",
+            tool_definitions,
+        ]
+
+        response_string = "\n\n".join(result_parts)
+
+        # Append login links if any apps were not connected
+        if unconnected_links:
+            links = "\n".join(unconnected_links)
+            response_string += (
+                f"\n\nPlease ask the user to log in to the following app(s) to use their full functionality:\n{links}"
+            )
+
+        return response_string
 
     @tool
     async def web_search(query: str) -> dict:
src/universal_mcp/agents/codeact0/utils.py
@@ -10,6 +10,26 @@ from universal_mcp.types import ToolConfig
 MAX_CHARS = 5000
 
 
+def build_anthropic_cache_message(text: str, role: str = "system", ttl: str = "1h") -> list[dict[str, Any]]:
+    """Build a complete Anthropic cache messages array from text.
+
+    Returns a list with a single cache message whose content is the
+    cached Anthropic content array with ephemeral cache control and TTL.
+    """
+    return [
+        {
+            "role": role,
+            "content": [
+                {
+                    "type": "text",
+                    "text": text,
+                    "cache_control": {"type": "ephemeral", "ttl": ttl},
+                }
+            ],
+        }
+    ]
+
+
 def add_tools(tool_config: ToolConfig, tools_to_add: ToolConfig):
     for app_id, new_tools in tools_to_add.items():
         all_tools = tool_config.get(app_id, []) + new_tools
@@ -375,6 +395,7 @@ def schema_to_signature(schema: dict, func_name: str = "my_function") -> str:
     param_str = ",\n ".join(params)
     return f"def {func_name}(\n {param_str},\n):"
 
+
 def smart_truncate(
     output: str, max_chars_full: int = 2000, max_lines_headtail: int = 20, summary_threshold: int = 10000
 ) -> str:
@@ -413,21 +434,21 @@ async def get_connected_apps_string(registry) -> str:
     """Get a formatted string of connected applications from the registry."""
     if not registry:
         return ""
-
+
     try:
         # Get connected apps from registry
         connections = await registry.list_connected_apps()
         if not connections:
             return "No applications are currently connected."
-
+
         # Extract app names from connections
         connected_app_ids = {connection["app_id"] for connection in connections}
-
+
         # Format the apps list
         apps_list = []
         for app_id in connected_app_ids:
             apps_list.append(f"- {app_id}")
-
+
         return "\n".join(apps_list)
     except Exception:
-        return "Unable to retrieve connected applications."
+        return "Unable to retrieve connected applications."
uv.lock
@@ -4976,7 +4976,7 @@ wheels = [
 
 [[package]]
 name = "universal-mcp-agents"
-version = "0.1.21"
+version = "0.1.23rc1"
 source = { editable = "." }
 dependencies = [
     { name = "langchain-anthropic" },
@@ -5016,8 +5016,8 @@ requires-dist = [
     { name = "pytest-cov", marker = "extra == 'test'" },
     { name = "ruff", marker = "extra == 'dev'" },
     { name = "typer", specifier = ">=0.17.4" },
-    { name = "universal-mcp", specifier = ">=0.1.24rc25" },
-    { name = "universal-mcp-applications", specifier = ">=0.1.24" },
+    { name = "universal-mcp", specifier = ">=0.1.24rc26" },
+    { name = "universal-mcp-applications", specifier = ">=0.1.25" },
 ]
 provides-extras = ["test", "dev"]
 
universal_mcp_agents-0.1.22/bech.py (deleted)
@@ -1,38 +0,0 @@
-from universal_mcp.agentr.registry import AgentrRegistry
-from universal_mcp.agents import get_agent
-from langgraph.checkpoint.memory import MemorySaver
-from universal_mcp.agents.utils import messages_to_list
-import time
-from loguru import logger
-
-
-async def main():
-    start_time = time.time()
-    memory = MemorySaver()
-    logger.info(f"Checkpointer: Time consumed: {time.time() - start_time}")
-    agent_cls = get_agent("codeact-repl")
-    logger.info(f"Get class: Time consumed: {time.time() - start_time}")
-    registry = AgentrRegistry()
-    logger.info(f"Init Registry: Time consumed: {time.time() - start_time}")
-    agent = agent_cls(
-        name="CodeAct Agent",
-        instructions="Be very concise in your answers.",
-        model="anthropic:claude-4-sonnet-20250514",
-        tools={},
-        registry=registry,
-        memory=memory,
-    )
-    logger.info(f"Create agent: Time consumed: {time.time() - start_time}")
-    print("Init agent...")
-    await agent.ainit()
-    logger.info(f"Init Agent: Time consumed: {time.time() - start_time}")
-    print("Starting agent...")
-    async for e in agent.stream(user_input="hi"):
-        logger.info(f"First token: Time consumed: {time.time() - start_time}")
-        print(e)
-
-
-if __name__ == "__main__":
-    import asyncio
-
-    asyncio.run(main())