universal-mcp-agents 0.1.23rc6__py3-none-any.whl → 0.1.23rc8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -1,12 +1,13 @@
-from typing import Any, cast
-from uuid import uuid4
 import asyncio
+from typing import cast
+from uuid import uuid4
 
 from langchain_core.messages import AIMessageChunk
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import StateGraph
 from langgraph.types import Command
 from universal_mcp.logger import logger
+
 from .utils import RichCLI
 
 
@@ -3,10 +3,9 @@ import json
 import re
 import uuid
 from typing import Literal, cast
-from types import SimpleNamespace
 
 from langchain_anthropic import ChatAnthropic
-from langchain_core.messages import AIMessage, ToolMessage
+from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import START, StateGraph
 from langgraph.types import Command, RetryPolicy, StreamWriter
@@ -19,6 +18,7 @@ from universal_mcp.agents.codeact0.prompts import (
     AGENT_BUILDER_GENERATING_PROMPT,
     AGENT_BUILDER_META_PROMPT,
     AGENT_BUILDER_PLANNING_PROMPT,
+    build_tool_definitions,
     create_default_prompt,
 )
 from universal_mcp.agents.codeact0.sandbox import eval_unsafe, execute_ipython_cell, handle_execute_ipython_cell
@@ -27,7 +27,7 @@ from universal_mcp.agents.codeact0.tools import (
     create_meta_tools,
     enter_agent_builder_mode,
 )
-from universal_mcp.agents.codeact0.utils import build_anthropic_cache_message, get_connected_apps_string, create_agent_call
+from universal_mcp.agents.codeact0.utils import build_anthropic_cache_message, get_connected_apps_string
 from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
 
@@ -52,12 +52,11 @@ class CodeActPlaybookAgent(BaseAgent):
             **kwargs,
         )
         self.model_instance = load_chat_model(model)
-        self.agent_builder_model_instance = load_chat_model("azure/gpt-4.1")
+        self.agent_builder_model_instance = load_chat_model("anthropic:claude-sonnet-4-5-20250929")
         self.registry = registry
         self.agent_builder_registry = agent_builder_registry
         self.agent = agent_builder_registry.get_agent() if agent_builder_registry else None
 
-
         self.tools_config = self.agent.tools if self.agent else {}
         self.eval_fn = eval_unsafe
         self.sandbox_timeout = sandbox_timeout
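The builder-model swap above pairs with an `eval_mode` flag that the next hunk reads out of `**kwargs`. A minimal, hypothetical sketch of that kwargs pattern (the class and attribute names are illustrative, not the package's actual API):

```python
# Minimal, hypothetical sketch of the **kwargs flag pattern used by the
# constructor; ExampleAgent and model_name are illustrative names only.
class ExampleAgent:
    def __init__(self, model: str, **kwargs):
        self.model_name = model
        # Optional flags ride along in kwargs and default to safe values.
        self.eval_mode = kwargs.get("eval_mode", False)


agent = ExampleAgent(model="anthropic:claude-sonnet-4-5-20250929", eval_mode=True)
print(agent.eval_mode)  # True
```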
@@ -66,21 +65,23 @@ class CodeActPlaybookAgent(BaseAgent):
         }
         self.final_instructions = ""
         self.tools_context = {}
+        self.eval_mode = kwargs.get("eval_mode", False)
 
     async def _build_graph(self):  # noqa: PLR0915
         """Build the graph for the CodeAct Playbook Agent."""
         meta_tools = create_meta_tools(self.registry)
-        self.additional_tools = [smart_print, meta_tools["web_search"]]
+        self.additional_tools = [
+            smart_print,
+            meta_tools["web_search"],
+            meta_tools["read_file"],
+            meta_tools["save_file"],
+            meta_tools["upload_file"],
+        ]
 
         if self.tools_config:
-            if isinstance(self.tools_config, dict):
-                self.tools_config = [
-                    f"{provider}__{tool}" for provider, tools in self.tools_config.items() for tool in tools
-                ]
-            if not self.registry:
-                raise ValueError("Tools are configured but no registry is provided")
-            await self.registry.load_tools(self.tools_config)  # Load the default tools
-            await self.registry.load_tools(self.default_tools_config)  # Load more tools
+            await self.registry.load_tools(self.tools_config)  # Load provided tools
+            if self.default_tools_config:
+                await self.registry.load_tools(self.default_tools_config)  # Load default tools
 
         async def call_model(state: CodeActState) -> Command[Literal["execute_tools"]]:
             """This node now only ever binds the four meta-tools to the LLM."""
@@ -215,7 +216,7 @@ class CodeActPlaybookAgent(BaseAgent):
            if agent_builder_mode == "planning":
                plan_id = str(uuid.uuid4())
                writer({"type": "custom", id: plan_id, "name": "planning", "data": {"update": bool(self.agent)}})
-               planning_instructions = self.instructions + AGENT_BUILDER_PLANNING_PROMPT
+               planning_instructions = self.instructions + AGENT_BUILDER_PLANNING_PROMPT + self.preloaded_defs
                messages = [{"role": "system", "content": planning_instructions}] + state["messages"]
 
                model_with_structured_output = self.agent_builder_model_instance.with_structured_output(
@@ -225,18 +226,29 @@ class CodeActPlaybookAgent(BaseAgent):
                plan = cast(AgentBuilderPlan, response)
 
                writer({"type": "custom", id: plan_id, "name": "planning", "data": {"plan": plan.steps}})
+               ai_msg = AIMessage(
+                   content=json.dumps(plan.model_dump()),
+                   additional_kwargs={
+                       "type": "planning",
+                       "plan": plan.steps,
+                       "update": bool(self.agent),
+                   },
+               )
+
+               if self.eval_mode:
+                   mock_user_message = HumanMessage(content="yes, this is great")
+                   return Command(
+                       goto="agent_builder",
+                       update={
+                           "messages": [ai_msg, mock_user_message],
+                           "agent_builder_mode": "generating",
+                           "plan": plan.steps,
+                       },
+                   )
+
                return Command(
                    update={
-                       "messages": [
-                           AIMessage(
-                               content=json.dumps(plan.model_dump()),
-                               additional_kwargs={
-                                   "type": "planning",
-                                   "plan": plan.steps,
-                                   "update": bool(self.agent),
-                               },
-                           )
-                       ],
+                       "messages": [ai_msg],
                        "agent_builder_mode": "confirming",
                        "plan": plan.steps,
                    }
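The eval-mode branch above auto-confirms the plan by appending a synthetic `HumanMessage`, so the graph can move straight to generation without a real user reply. A runnable sketch of that message pattern, using real `langchain_core` message classes but an invented plan payload:

```python
# Sketch of the eval-mode auto-confirm pattern; AIMessage/HumanMessage are
# real langchain_core classes, the plan steps are invented for the demo.
import json

from langchain_core.messages import AIMessage, HumanMessage

plan_steps = ["Generate an image using `image_prompt`", "Upload the result to OneDrive"]
ai_msg = AIMessage(
    content=json.dumps({"steps": plan_steps}),
    additional_kwargs={"type": "planning", "plan": plan_steps, "update": False},
)
mock_user_message = HumanMessage(content="yes, this is great")  # stands in for the user
print([m.type for m in (ai_msg, mock_user_message)])  # ['ai', 'human']
```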
@@ -318,7 +330,7 @@ class CodeActPlaybookAgent(BaseAgent):
                return Command(goto="call_model", update={"agent_builder_mode": "inactive"})
 
            elif agent_builder_mode == "generating":
-               generating_instructions = self.instructions + AGENT_BUILDER_GENERATING_PROMPT
+               generating_instructions = self.instructions + AGENT_BUILDER_GENERATING_PROMPT + self.preloaded_defs
                messages = [{"role": "system", "content": generating_instructions}] + state["messages"]
 
                model_with_structured_output = self.agent_builder_model_instance.with_structured_output(
@@ -358,8 +370,47 @@ class CodeActPlaybookAgent(BaseAgent):
                        instructions=instructions_payload,
                        tools=tool_dict,
                    )
-               except Exception as e:
-                   raise e
+               except Exception:
+                   # In case of error, add the code to the exit message content
+
+                   mock_exit_tool_call = {"name": "exit_agent_builder_mode", "args": {}, "id": "exit_builder_1"}
+
+                   # Create a minimal assistant message to maintain flow
+                   mock_assistant_message = AIMessage(
+                       content=json.dumps(response.model_dump()),
+                       tool_calls=[mock_exit_tool_call],
+                       additional_kwargs={
+                           "type": "generating",
+                           "id": "ignore",
+                           "update": bool(self.agent),
+                           "name": final_name.replace(" ", "_"),
+                           "description": final_description,
+                       },
+                   )
+                   mock_exit_tool_response = ToolMessage(
+                       content=json.dumps(
+                           f"An error occurred. Displaying the function code:\n\n{func_code}\nFinal Name: {final_name}\nDescription: {final_description}"
+                       ),
+                       name="exit_agent_builder_mode",
+                       tool_call_id="exit_builder_1",
+                   )
+                   if self.eval_mode:
+                       human_msg = HumanMessage(
+                           content="Run the generated agent code and check whether it works as expected"
+                       )
+                       return Command(
+                           goto="call_model",
+                           update={
+                               "messages": [mock_assistant_message, mock_exit_tool_response, human_msg],
+                               "agent_builder_mode": "normal",
+                           },
+                       )
+                   return Command(
+                       update={
+                           "messages": [mock_assistant_message, mock_exit_tool_response],
+                           "agent_builder_mode": "normal",
+                       }
+                   )
 
                writer(
                    {
@@ -374,11 +425,7 @@ class CodeActPlaybookAgent(BaseAgent):
                        },
                    }
                )
-               mock_exit_tool_call = {
-                   "name": "exit_agent_builder_mode",
-                   "args": {},
-                   "id": "exit_builder_1"
-               }
+               mock_exit_tool_call = {"name": "exit_agent_builder_mode", "args": {}, "id": "exit_builder_1"}
                mock_assistant_message = AIMessage(
                    content=json.dumps(response.model_dump()),
                    tool_calls=[mock_exit_tool_call],
@@ -390,33 +437,50 @@ class CodeActPlaybookAgent(BaseAgent):
                        "update": bool(self.agent),
                        "description": final_description,
                )
-
+
                mock_exit_tool_response = ToolMessage(
-                   content=json.dumps("Exited Agent Builder Mode. Enter this mode again if you need to modify the saved agent."),
+                   content=json.dumps(
+                       "Exited Agent Builder Mode. Enter this mode again if you need to modify the saved agent."
+                   ),
                    name="exit_agent_builder_mode",
-                   tool_call_id="exit_builder_1"
+                   tool_call_id="exit_builder_1",
                )
 
-               return Command(update={"messages": [mock_assistant_message, mock_exit_tool_response], "agent_builder_mode": "normal"})
+               return Command(
+                   update={
+                       "messages": [mock_assistant_message, mock_exit_tool_response],
+                       "agent_builder_mode": "normal",
+                   }
+               )
 
        async def route_entry(state: CodeActState) -> Command[Literal["call_model", "agent_builder", "execute_tools"]]:
            """Route to either normal mode or agent builder creation"""
-           await self.registry.load_tools(state["selected_tool_ids"])
-           all_tools = await self.registry.export_tools(format=ToolFormat.NATIVE)
+           pre_tools = await self.registry.export_tools(format=ToolFormat.NATIVE)
 
            # Create the initial system prompt and tools_context in one go
            self.final_instructions, self.tools_context = create_default_prompt(
-               all_tools,
+               pre_tools,
                self.additional_tools,
                self.instructions,
                await get_connected_apps_string(self.registry),
                self.agent,
                is_initial_prompt=True,
            )
-           if len(state['messages']) == 1 and self.agent:  # Inject the agent's script function into add_context for execution
-               script = self.agent.instructions.get('script')
-               add_context = {"functions":[script]}
-               return Command(goto="call_model", update = {"add_context": add_context})
+           self.preloaded_defs, _ = build_tool_definitions(pre_tools)
+           self.preloaded_defs = "\n".join(self.preloaded_defs)
+           await self.registry.load_tools(state["selected_tool_ids"])
+           exported_tools = await self.registry.export_tools(
+               state["selected_tool_ids"], ToolFormat.NATIVE
+           )  # Get definition for only the new tools
+           _, loaded_tools_context = build_tool_definitions(exported_tools)
+           self.tools_context.update(loaded_tools_context)
+
+           if (
+               len(state["messages"]) == 1 and self.agent
+           ):  # Inject the agent's script function into add_context for execution
+               script = self.agent.instructions.get("script")
+               add_context = {"functions": [script]}
+               return Command(goto="call_model", update={"add_context": add_context})
 
            if state.get("agent_builder_mode") in ["planning", "confirming", "generating"]:
                return Command(goto="agent_builder")
@@ -2,8 +2,6 @@ import inspect
 import re
 from collections.abc import Callable
 
-from loguru import logger
-
 uneditable_prompt = """
 You are **Ruzo**, an AI Assistant created by AgentR — a creative, straight-forward, and direct principal software engineer with access to tools.
 
@@ -25,7 +23,6 @@ Your job is to answer the user's question or perform the task they ask for.
 - If needed, feel free to ask for more information from the user (without using the `execute_ipython_cell` tool) to clarify the task.
 - Always describe in 2-3 lines about the current progress. In each step, mention what has been achieved and what you are planning to do next.
 - DO NOT use the code execution to communicate with the user. The user is not able to see the output of the code cells.
-- Always use `await` when calling an async function. Since this is a Jupyter/async environment, you must not use asyncio.run().
 
 **Coding Best Practices:**
 - Variables defined at the top level of previous code snippets can be referenced in your code.
@@ -35,49 +32,98 @@ Your job is to answer the user's question or perform the task they ask for.
 - You can only import libraries that come pre-installed with Python. However, do consider searching for external functions first, using the search and load tools to access them in the code.
 - For displaying final results to the user, you must present your output in markdown format, including image links, so that they are rendered and displayed to the user. The code output is NOT visible to the user.
 - Call all functions using keyword arguments only, never positional arguments.
-
+- NEVER use execute_ipython_cell for:
+  - Static analysis or commentary
+  - Text that could be written as markdown
+  - Final output summarization after analysis
+  - Anything that's just formatted print statements
 
 **Final Output Requirements:**
-- Once you have all the information about the task, return the text directly to user in markdown format. No need to call `execute_ipython_cell` again.
+- Once you have all the information about the task, return the text directly to user in markdown format. Do NOT call `execute_ipython_cell` again just for summarization.
 - Always respond in github flavoured markdown format.
 - For charts and diagrams, use mermaid chart in markdown directly.
 - Your final response should contain the complete answer to the user's request in a clear, well-formatted manner that directly addresses what they asked for.
+- For file types like images, audio, documents, etc., you must use the `upload_file` tool to upload the file to the server and render the link in the markdown response.
 """
 
-AGENT_BUILDER_PLANNING_PROMPT = """Now, you are tasked with creating a reusable agent from the user's previous workflow.
+AGENT_BUILDER_PLANNING_PROMPT = """TASK: Analyze the conversation history and code execution to create a step-by-step non-technical plan for a reusable function.
+Rules:
+- Do NOT include the searching and loading of functions. Assume that the functions have already been loaded.
+- The plan is a sequence of steps corresponding to the key logical steps taken to achieve the user's task in the conversation history, without focusing on technical specifics.
+- You must output a JSON object with a single key "steps", which is a list of strings. Each string is a step in the agent.
+- Identify user-provided information as variables that should become the main agent input parameters using `variable_name` syntax, enclosed by backticks `...`. Intermediate variables should be highlighted using italics, i.e. *...*, NEVER `...`
+- Keep the logic generic and reusable. Avoid hardcoding any names/constants. Instead, keep them as variables with defaults. They should be represented as `variable_name(default = default_value)`.
+- Have a human-friendly plan and inputs format. That is, it must not use internal IDs or keys used by APIs as either inputs or outputs to the overall plan; using them internally is okay.
+- Be as concise as possible, especially for internal processing steps.
+- For steps where the assistant's intelligence was used outside of the code to infer/decide/analyse something, replace it with the use of *llm__* functions in the plan if required.
+
+Example Conversation History:
+User Message: "Create an image using Gemini for Marvel Cinematic Universe in comic style"
+Code snippet: image_result = await google_gemini__generate_image(prompt=prompt)
+Assistant Message: "The image has been successfully generated [image_result]."
+User Message: "Save the image in my OneDrive"
+Code snippet: image_data = base64.b64decode(image_result['data'])
+temp_file_path = tempfile.mktemp(suffix='.png')
+with open(temp_file_path, 'wb') as f:
+    f.write(image_data)
+# Upload the image to OneDrive with a descriptive filename
+onedrive_filename = "Marvel_Cinematic_Universe_Comic_Style.png"
+
+print(f"Uploading to OneDrive as: {onedrive_filename}")
+
+# Upload to OneDrive root folder
+upload_result = onedrive__upload_file(
+    file_path=temp_file_path,
+    parent_id='root',
+    file_name=onedrive_filename
+)
+
+Generated Steps:
+"steps": [
+    "Generate an image using Gemini model with `image_prompt` and `style(default = 'comic')`",
+    "Upload the obtained image to OneDrive using `onedrive_filename(default = 'generated_image.png')` and `onedrive_parent_folder(default = 'root')`",
+    "Return confirmation of upload including file name and destination path, and link to the upload"
+]
+Note that internal variables like upload_result, image_result are not highlighted in the plan, and intermediate processing details are skipped.
+Now create a plan based on the conversation history. Do not include any other text or explanation in your response. Just the JSON object.
+Note that the following tools are pre-loaded for the agent's use, and can be included in your plan if needed as internal variables (especially the llm tools)-\n
+"""
 
-TASK: Analyze the conversation history and code execution to create a step-by-step plan for a reusable function.
-Do not include the searching and loading of tools. Assume that the tools have already been loaded.
-The plan is a sequence of steps.
-You must output a JSON object with a single key "steps", which is a list of strings. Each string is a step in the agent.
 
-Your plan should:
-1. Identify the key steps in the workflow
-2. Mark user-specific variables that should become the main agent function parameters using `variable_name` syntax. Intermediate variables MUST not be highlighted using ``
-3. Keep the logic generic and reusable
-4. Be clear and concise
+AGENT_BUILDER_GENERATING_PROMPT = """
+You are tasked with generating a reusable agent function based on the final confirmed agent plan and preceding conversation history including user messages, assistant messages, and code executions.
+Create an appropriately named python function that combines relevant previously executed code from the conversation history to achieve the plan objectives.
+Rules-
+- Do NOT include the searching and loading of functions. Assume that the functions have already been loaded. Imports should be included.
+- Your response must be **ONLY Python code** for the function.
+- Do not include any text, explanations, or Markdown.
+- The response must start with `def` or `async def` and define a single, complete, executable function.
+- The function parameters **must exactly match the external variables** in the agent plan. External variables are marked using backticks `` `variable_name` ``. Any variables in italics (i.e. enclosed in *...*) are to be used internally, but not as the main function parameters.
+- Any imports, variables, helper or child functions required must be defined **inside the main top-level function**.
+- Ensure that the outer function is self-contained and can run independently, based on previously validated code snippets.
 
 Example:
-{
-    "steps": [
-        "Connect to database using `db_connection_string`",
-        "Query user data for `user_id`",
-        "Process results and calculate `metric_name`",
-        "Send notification to `email_address`"
-    ]
-}
 
-Now create a plan based on the conversation history. Do not include any other text or explanation in your response. Just the JSON object.
-"""
+If the plan has:
+
+"steps": [
+    "Receive creative description as image_prompt",
+    "Generate image using Gemini with style(default = 'comic')",
+    "Save temporary image internally as *temp_file_path*",
+    "Upload *temp_file_path* to OneDrive folder onedrive_parent_folder(default = 'root')"
+]
+
+Then the function signature should be:
+
+```python
+def image_generator(image_prompt, style="comic", onedrive_parent_folder="root"):
+    #Code based on previously executed snippets
+
+And all internal variables (e.g., *temp_file_path*) should be defined inside the function.
 
 
-AGENT_BUILDER_GENERATING_PROMPT = """Now, you are tasked with generating the agent function.
-Your response must be ONLY the Python code for the function.
-Do not include any other text, markdown, or explanations in your response.
-Your response should start with `def` or `async def`.
-The function should be a single, complete piece of code that can be executed independently, based on previously executed code snippets that executed correctly.
-The parameters of the function MUST be exactly the same as the final confirmed agent plan. The variables will are indicated using `` in the plan.
-Any additional functions you require should be child functions inside the main top level function, and thus the first function to appear must be the main agent executable function.
+Use this convention consistently to generate the final agent function.
+Note that the following tools are pre-loaded for the agent's use, and can be included in your code-\n
 """
 
 
@@ -114,17 +160,40 @@ def make_safe_function_name(name: str) -> str:
     return safe_name
 
 
+# Compile regex once for better performance
+_RAISES_PATTERN = re.compile(r"\n\s*[Rr]aises\s*:.*$", re.DOTALL)
+
+
+def _clean_docstring(docstring: str | None) -> str:
+    """Remove the 'Raises:' section and everything after it from a docstring."""
+    if not docstring:
+        return ""
+
+    # Use pre-compiled regex for better performance
+    cleaned = _RAISES_PATTERN.sub("", docstring)
+    return cleaned.strip()
+
+
 def build_tool_definitions(tools: list[Callable]) -> tuple[list[str], dict[str, Callable]]:
     tool_definitions = []
     context = {}
-    for tool in tools:
+
+    # Pre-allocate lists for better performance
+    tool_definitions = [None] * len(tools)
+
+    for i, tool in enumerate(tools):
         tool_name = tool.__name__
-        tool_definitions.append(
-            f'''{"async " if inspect.iscoroutinefunction(tool) else ""}def {tool_name} {str(inspect.signature(tool))}:
-    """{tool.__doc__}"""
+        cleaned_docstring = _clean_docstring(tool.__doc__)
+
+        # Pre-compute string parts to avoid repeated string operations
+        async_prefix = "async " if inspect.iscoroutinefunction(tool) else ""
+        signature = str(inspect.signature(tool))
+
+        tool_definitions[i] = f'''{async_prefix}def {tool_name} {signature}:
+    """{cleaned_docstring}"""
     ...'''
-        )
         context[tool_name] = tool
+
     return tool_definitions, context
 
 
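To see what these helpers produce, here is a standalone rerun of the same logic on a toy tool; the regex and stub template are copied from the hunk above so the snippet runs without the package installed:

```python
# Standalone rerun of _clean_docstring plus the stub template from the hunk
# above, applied to a toy coroutine (web_search here is a local dummy).
import inspect
import re

_RAISES_PATTERN = re.compile(r"\n\s*[Rr]aises\s*:.*$", re.DOTALL)


def _clean_docstring(docstring: str | None) -> str:
    return _RAISES_PATTERN.sub("", docstring).strip() if docstring else ""


async def web_search(query: str) -> dict:
    """Search the web and return results.

    Raises:
        ValueError: If the query is empty.
    """
    return {"query": query}


async_prefix = "async " if inspect.iscoroutinefunction(web_search) else ""
signature = str(inspect.signature(web_search))
stub = f'''{async_prefix}def {web_search.__name__} {signature}:
    """{_clean_docstring(web_search.__doc__)}"""
    ...'''
print(stub)  # the Raises: section no longer appears in the rendered stub
```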
 
 
@@ -139,7 +208,7 @@ def create_default_prompt(
     if is_initial_prompt:
         system_prompt = uneditable_prompt.strip()
         if apps_string:
-            system_prompt += f"\n\n**Connected external applications (These apps have been logged into by the user):**\n{apps_string}\n\n Use `search_functions` to search for functions you can perform using the above. You can also discover more applications using the `search_functions` tool to find additional tools and integrations, if required.\n"
+            system_prompt += f"\n\n**Connected external applications (These apps have been logged into by the user):**\n{apps_string}\n\n Use `search_functions` to search for functions you can perform using the above. You can also discover more applications using the `search_functions` tool to find additional tools and integrations, if required. However, you MUST not assume the application when multiple apps are connected for a particular usecase.\n"
         system_prompt += (
             "\n\nIn addition to the Python Standard Library, you can use the following external functions:\n"
         )
@@ -162,7 +231,9 @@
         plan = pb.get("plan")
         code = pb.get("script")
         if plan or code:
-            system_prompt += "\n\nYou have been provided an existing agent plan and code for performing a task.:\n"
+            system_prompt += (
+                "\n\nYou have been provided an existing agent plan and code for performing a task.:\n"
+            )
             if plan:
                 if isinstance(plan, list):
                     plan_block = "\n".join(f"- {str(s)}" for s in plan)
@@ -1,14 +1,14 @@
+import ast
 import contextlib
 import inspect
 import io
+import pickle
 import queue
 import re
 import socket
 import threading
 import types
 from typing import Any
-import pickle
-import ast
 
 from langchain_core.tools import tool
 
@@ -40,12 +40,12 @@ async def eval_unsafe(
     )
 
     result_container = {"output": "<no output>"}
-
+
     try:
         compiled_code = compile(code, "<string>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
         with contextlib.redirect_stdout(io.StringIO()) as f:
             coroutine = eval(compiled_code, _locals, _locals)
-        # Await the coroutine to run the code if it's async
+            # Await the coroutine to run the code if it's async
             if coroutine:
                 await coroutine
         result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
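The `PyCF_ALLOW_TOP_LEVEL_AWAIT` flag is what lets the sandbox execute cells that use `await` directly: `eval()` on such a code object returns a coroutine the caller must await. A self-contained demonstration of that documented CPython behavior:

```python
# Demo of the compile flag used by eval_unsafe: with PyCF_ALLOW_TOP_LEVEL_AWAIT,
# eval() yields a coroutine when the cell source contains a top-level await.
import ast
import asyncio

cell = "import asyncio\nawait asyncio.sleep(0)\nprint('cell ran with top-level await')"
compiled = compile(cell, "<string>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)


async def main() -> None:
    scope: dict = {}
    coroutine = eval(compiled, scope, scope)  # returns a coroutine here
    if coroutine:
        await coroutine


asyncio.run(main())
```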
@@ -1,11 +1,15 @@
 import asyncio
+import base64
 import json
 from collections import defaultdict
+from pathlib import Path
 from typing import Annotated, Any
 
 from langchain_core.tools import tool
 from pydantic import Field
+from universal_mcp.agentr.client import AgentrClient
 from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.applications.markitdown.app import MarkitdownApp
 from universal_mcp.types import ToolFormat
 
 from universal_mcp.agents.codeact0.prompts import build_tool_definitions
@@ -192,9 +196,11 @@ def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
         )
 
         result_parts.append("Call load_functions to select the required functions only.")
-        if len(connected_apps_in_results)>len(apps_in_results):
-            result_parts.append("Unconnected app functions can also be loaded if required by the user, but prefer connected ones.")
-        return " ".join(result_parts)
+        if len(connected_apps_in_results) < len(apps_in_results) and len(connected_apps_in_results) > 0:
+            result_parts.append(
+                "Unconnected app functions can also be loaded if required by the user, but prefer connected ones. And do ask the user to choose if none of the relevant apps are connected"
+            )
+        return "\n".join(result_parts)
 
     @tool
     async def load_functions(tool_ids: list[str]) -> str:
@@ -263,7 +269,95 @@ def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
             "citations": response.get("citations", []),
         }
 
-    return {"search_functions": search_functions, "load_functions": load_functions, "web_search": web_search}
+    async def read_file(uri: str) -> str:
+        """
+        Asynchronously reads a local file or uri and returns the content as a markdown string.
+
+        This tool aims to extract the main text content from various sources.
+        It automatically prepends 'file://' to the input string if it appears
+        to be a local path without a specified scheme (like http, https, data, file).
+
+        Args:
+            uri (str): The URI pointing to the resource or a local file path.
+                Supported schemes:
+                - http:// or https:// (Web pages, feeds, APIs)
+                - file:// (Local or accessible network files)
+                - data: (Embedded data)
+
+        Returns:
+            A string containing the markdown representation of the content at the specified URI
+
+        Raises:
+            ValueError: If the URI is invalid, empty, or uses an unsupported scheme
+                after automatic prefixing.
+
+        Tags:
+            convert, markdown, async, uri, transform, document, important
+        """
+        markitdown = MarkitdownApp()
+        response = await markitdown.convert_to_markdown(uri)
+        return response
+
+    async def save_file(file_name: str, content: str) -> dict:
+        """
+        Saves a file to the local filesystem.
+
+        Args:
+            file_name (str): The name of the file to save.
+            content (str): The content to save to the file.
+
+        Returns:
+            dict: A dictionary containing the result of the save operation with the following fields:
+                - status (str): "success" if the save succeeded, "error" otherwise.
+                - message (str): A message returned by the server, typically indicating success or providing error details.
+        """
+        with Path(file_name).open("w") as f:
+            f.write(content)
+
+        return {
+            "status": "success",
+            "message": f"File {file_name} saved successfully",
+            "file_path": Path(file_name).absolute(),
+        }
+
+    async def upload_file(file_name: str, mime_type: str, base64_data: str) -> dict:
+        """
+        Uploads a file to the server via the AgentrClient.
+
+        Args:
+            file_name (str): The name of the file to upload.
+            mime_type (str): The MIME type of the file.
+            base64_data (str): The file content encoded as a base64 string.
+
+        Returns:
+            dict: A dictionary containing the result of the upload operation with the following fields:
+                - status (str): "success" if the upload succeeded, "error" otherwise.
+                - message (str): A message returned by the server, typically indicating success or providing error details.
+                - signed_url (str or None): The signed URL to access the uploaded file if successful, None otherwise.
+        """
+        client: AgentrClient = tool_registry.client
+        bytes_data = base64.b64decode(base64_data)
+        response = client._upload_file(file_name, mime_type, bytes_data)
+        if response.get("status") != "success":
+            return {
+                "status": "error",
+                "message": response.get("message"),
+                "signed_url": None,
+            }
+        return {
+            "status": "success",
+            "message": response.get("message"),
+            "signed_url": response.get("signed_url"),
+        }
+
+    return {
+        "search_functions": search_functions,
+        "load_functions": load_functions,
+        "web_search": web_search,
+        "read_file": read_file,
+        "upload_file": upload_file,
+        "save_file": save_file,
+    }
 
 
 async def get_valid_tools(tool_ids: list[str], registry: AgentrRegistry) -> tuple[list[str], list[str]]:
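Callers of the new `upload_file` tool are expected to supply base64-encoded bytes plus a MIME type. Only the argument preparation is sketched below, since the tool itself needs a live `AgentrClient`; the file here is a throwaway created by the example itself:

```python
# Preparing arguments for the upload_file meta-tool: read local bytes,
# base64-encode them, and guess a MIME type. The file is a stand-in created
# just so this snippet runs; the final commented line assumes a live client.
import base64
import mimetypes
from pathlib import Path

path = Path("chart.png")
path.write_bytes(b"\x89PNG\r\n\x1a\n")  # throwaway placeholder content
mime_type = mimetypes.guess_type(path.name)[0] or "application/octet-stream"
base64_data = base64.b64encode(path.read_bytes()).decode("utf-8")
print(path.name, mime_type, len(base64_data))
# await upload_file(file_name=path.name, mime_type=mime_type, base64_data=base64_data)
```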
@@ -4,7 +4,7 @@ import re
 from collections.abc import Sequence
 from typing import Any
 
-from langchain_core.messages import AIMessage, ToolMessage, BaseMessage
+from langchain_core.messages import BaseMessage
 from universal_mcp.types import ToolConfig
 
 MAX_CHARS = 5000
@@ -452,78 +452,3 @@ async def get_connected_apps_string(registry) -> str:
         return "\n".join(apps_list)
     except Exception:
         return "Unable to retrieve connected applications."
-
-
-def create_agent_call(agent: object, agent_args: dict[str, Any]) -> AIMessage:
-    """Create an assistant tool-call message to execute the agent script.
-
-    This inspects the agent's generated script (expected at agent.instructions["script"]) to
-    locate the topmost function or async function, then constructs a Python snippet that:
-    - embeds the script as-is,
-    - deserializes the provided arguments as keyword arguments,
-    - invokes the detected function (awaiting it if async), and
-    - prints the result via smart_print.
-
-    If no top-level function is detected or the script cannot be parsed, a safe fallback
-    snippet is produced which simply prints the provided arguments.
-
-    Args:
-        agent: Object that provides an `instructions` mapping with a `script` string.
-        agent_args: Mapping of argument names to values to be passed as keyword args to the function.
-
-    Returns:
-        AIMessage: A synthetic assistant message containing a single tool call for
-            `execute_ipython_cell` with the constructed snippet.
-    """
-    content = "Running the agent with your provided parameters"
-    script = agent.instructions.get("script") if hasattr(agent, "instructions") else None
-    args = agent_args or {}
-
-    func_name = None
-    is_async = False
-
-    if isinstance(script, str) and script.strip():
-        try:
-            tree = ast.parse(script)
-            for node in tree.body:
-                if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
-                    func_name = node.name
-                    is_async = isinstance(node, ast.AsyncFunctionDef)
-                    break
-        except SyntaxError:
-            func_name = None
-
-    # Fallback content/snippet if no callable function is found
-    if not func_name:
-        snippet = (
-            "import asyncio\n\n# Test fallback: no function detected in script; printing args\n"
-            f"smart_print({repr(args)})\n"
-        )
-    else:
-        import json as _json
-        args_json = _json.dumps(args)
-        if is_async:
-            snippet = (
-                f"{script}\n\n"
-                "import asyncio, json\n"
-                f"_kwargs = json.loads('{args_json}')\n"
-                f"async def __runner():\n    result = await {func_name}(**_kwargs)\n    smart_print(result)\n"
-                "asyncio.run(__runner())\n"
-            )
-        else:
-            snippet = (
-                f"{script}\n\n"
-                "import json\n"
-                f"_kwargs = json.loads('{args_json}')\n"
-                f"result = {func_name}(**_kwargs)\n"
-                "smart_print(result)\n"
-            )
-
-    mock_agent_call = {
-        "name": "execute_ipython_cell",
-        "args": {"snippet": snippet},
-        "id": "initial_agent_call",
-        "type": "tool_call",
-    }
-    mock_assistant_message = AIMessage(content=content, tool_calls=[mock_agent_call])
-    return mock_assistant_message
@@ -5,6 +5,7 @@ import io
 import traceback
 
 import cloudpickle as pickle
+from loguru import logger
 
 
 class Sandbox:
@@ -26,17 +27,37 @@ class Sandbox:
     def save_context(self) -> str:
         """
         Saves the context to a base64 string.
+        files, IO, threads, etc. are not pickable. So we only pickle the context that is pickable.
         """
-        pickled_data = pickle.dumps(self.context)
+        pickable_context = {}
+        for key, value in self.context.items():
+            try:
+                pickle.dumps(value)
+                pickable_context[key] = value
+            except Exception as e:
+                logger.error(f"Error pickling {key}: {e}")
+        pickled_data = pickle.dumps(pickable_context)
         base64_encoded = base64.b64encode(pickled_data).decode("utf-8")
         return base64_encoded
 
-    def load_context(self, context: str):
+    def load_context(self, context: str, add_context: list[str] = []):
         """
         Loads the context from a base64 string.
+        Also executes the add_context code strings to add to the context.
         """
-        pickled_data = base64.b64decode(context)
-        self.context = pickle.loads(pickled_data)
+        if context:
+            pickled_data = base64.b64decode(context)
+            new_context = pickle.loads(pickled_data)
+            self.context.update(new_context)
+        for code in add_context:
+            self.run(code)
+        return self.context
+
+    def _filter_context(self, context: dict[str, any]) -> dict[str, any]:
+        """
+        Filters the context to only include pickable variables.
+        """
+        return {k: v for k, v in context.items() if not k.startswith("__")}
 
     def run(self, code: str) -> dict[str, any]:
         """
@@ -64,7 +85,7 @@ class Sandbox:
 
             # Update the context with any new/modified variables
             # Filter out dunder methods/system keys that might be introduced by exec
-            new_context = {k: v for k, v in exec_scope.items() if not k.startswith("__")}
+            new_context = self._filter_context(exec_scope)
             self.context.update(new_context)
 
         except Exception:
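The `save_context`/`_filter_context` changes above boil down to a "keep only what pickles" filter. The same idea in isolation, using stdlib `pickle` rather than the package's `cloudpickle`:

```python
# "Keep only what pickles" filtering in isolation with stdlib pickle:
# unpicklable entries such as open StringIO handles are skipped rather
# than failing the whole context save.
import base64
import io
import pickle

context = {"counter": 42, "rows": [1, 2, 3], "handle": io.StringIO("not picklable")}
pickable_context = {}
for key, value in context.items():
    try:
        pickle.dumps(value)
        pickable_context[key] = value
    except Exception:
        pass  # drop entries that cannot be serialized

blob = base64.b64encode(pickle.dumps(pickable_context)).decode("utf-8")
restored = pickle.loads(base64.b64decode(blob))
print(sorted(restored))  # ['counter', 'rows'] -- 'handle' was dropped
```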
@@ -114,8 +135,9 @@ class Sandbox:
                 await coroutine
 
             # Update the context with any new/modified variables
-            new_context = {k: v for k, v in exec_scope.items() if not k.startswith("__")}
-            self.context.update(new_context)
+            new_context = self._filter_context(exec_scope)
+            if new_context:
+                self.context.update(new_context)
 
         except Exception:
             stderr_output = traceback.format_exc()
@@ -1,7 +1,6 @@
 import json
 from typing import Any, Literal, cast
 
-from langchain.chat_models import init_chat_model
 from pydantic import BaseModel, Field
 from universal_mcp.applications.application import BaseApplication
 
@@ -38,7 +37,7 @@ class LlmApp(BaseApplication):
         """Initialize the LLMApp."""
         super().__init__(name="llm")
 
-    def generate_text(
+    async def generate_text(
         self,
         task: str,
         context: str | list[str] | dict[str, str] = "",
@@ -92,10 +91,10 @@ class LlmApp(BaseApplication):
         full_prompt = f"{prompt}\n\nContext:\n{context_str}\n\n"
 
         model = load_chat_model("azure/gpt-5-mini")
-        response = model.with_retry(stop_after_attempt=MAX_RETRIES).invoke(full_prompt)
+        response = await model.with_retry(stop_after_attempt=MAX_RETRIES).ainvoke(full_prompt)
         return str(response.content)
 
-    def classify_data(
+    async def classify_data(
         self,
         classification_task_and_requirements: str,
         context: Any | list[Any] | dict[str, Any],
@@ -154,7 +153,7 @@ class LlmApp(BaseApplication):
             "Return ONLY a valid JSON object, no extra text."
         )
 
-        model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
+        model = load_chat_model("azure/gpt-5-mini", temperature=0)
 
         class ClassificationResult(BaseModel):
             probabilities: dict[str, float] = Field(..., description="The probabilities for each class.")
@@ -162,13 +161,13 @@ class LlmApp(BaseApplication):
             top_class: str = Field(..., description="The class with the highest probability.")
 
         response = (
-            model.with_structured_output(schema=ClassificationResult)
+            await model.with_structured_output(schema=ClassificationResult)
             .with_retry(stop_after_attempt=MAX_RETRIES)
-            .invoke(prompt)
+            .ainvoke(prompt)
         )
         return response.model_dump()
 
-    def extract_data(
+    async def extract_data(
         self,
         extraction_task: str,
         source: Any | list[Any] | dict[str, Any],
@@ -229,16 +228,16 @@ class LlmApp(BaseApplication):
             "Return ONLY a valid JSON object that conforms to the provided schema, with no extra text."
         )
 
-        model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
+        model = load_chat_model("azure/gpt-5-mini", temperature=0)
 
-        response = (
+        response = await (
             model.with_structured_output(schema=output_schema, method="json_mode")
             .with_retry(stop_after_attempt=MAX_RETRIES)
-            .invoke(prompt)
+            .ainvoke(prompt)
         )
         return cast(dict[str, Any], response)
 
-    def call_llm(
+    async def call_llm(
         self,
         task_instructions: str,
         context: Any | list[Any] | dict[str, Any],
@@ -282,14 +281,14 @@ class LlmApp(BaseApplication):
 
         prompt = f"{task_instructions}\n\nContext:\n{context_str}\n\nReturn ONLY a valid JSON object, no extra text."
 
-        model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
+        model = load_chat_model("azure/gpt-5-mini", temperature=0)
 
-        response = (
-            model.with_structured_output(schema=output_schema, method="json_mode")
+        response = await (
+            model.with_structured_output(schema=output_schema)
             .with_retry(stop_after_attempt=MAX_RETRIES)
-            .invoke(prompt)
+            .ainvoke(prompt)
         )
-        return cast(dict[str, Any], response)
+        return response.model_dump()
 
     def list_tools(self):
         return [
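Since the four `LlmApp` methods are now coroutines, existing callers must switch from direct calls to `await` (or `asyncio.run` at the top level). A toy stand-in illustrating only the calling-convention change, not the real implementation:

```python
# Calling-convention change for the now-async LlmApp methods, shown with a
# toy stand-in class; the real generate_text delegates to a chat model.
import asyncio


class FakeLlmApp:
    async def generate_text(self, task: str, context: str = "") -> str:
        await asyncio.sleep(0)  # placeholder for the model round trip
        return f"answer to: {task}"


async def main() -> None:
    app = FakeLlmApp()
    print(await app.generate_text(task="Summarize the release notes"))


asyncio.run(main())
```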
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-agents
-Version: 0.1.23rc6
+Version: 0.1.23rc8
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications
@@ -1,10 +1,10 @@
 universal_mcp/agents/__init__.py,sha256=Ythw8tyq7p-w1SPnuO2JtS4TvYEP75PkQpdyvZv-ww4,914
-universal_mcp/agents/base.py,sha256=pnPf5EgVVoycg_mrgdIwqEiENny1Dcx6GDZWmOVw2NU,7837
+universal_mcp/agents/base.py,sha256=IyU1HUmB8rjHuCxv-c29RV-dWXfdiQiPq5rGkcCiSbU,7833
 universal_mcp/agents/cli.py,sha256=9CG7majpWUz7C6t0d8xr-Sg2ZPKBuQdykTbYS6KIZ3A,922
 universal_mcp/agents/hil.py,sha256=_5PCK6q0goGm8qylJq44aSp2MadP-yCPvhOJYKqWLMo,3808
 universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
 universal_mcp/agents/react.py,sha256=ocYm94HOiJVI2zwTjO1K2PNfVY7EILLJ6cd__jnGHPs,3327
-universal_mcp/agents/sandbox.py,sha256=LL4OfavEzxbmTDcc_NxizRRpQnw5hc3G2bxvFY63scY,4241
+universal_mcp/agents/sandbox.py,sha256=YxTGp_zsajuN7FUn0Q4PFjuXczgLht7oKql_gyb2Gf4,5112
 universal_mcp/agents/simple.py,sha256=NSATg5TWzsRNS7V3LFiDG28WSOCIwCdcC1g7NRwg2nM,2095
 universal_mcp/agents/utils.py,sha256=P6W9k6XAOBp6tdjC2VTP4tE0B2M4-b1EDmr-ylJ47Pw,7765
 universal_mcp/agents/bigtool/__init__.py,sha256=mZG8dsaCVyKlm82otxtiTA225GIFLUCUUYPEIPF24uw,2299
@@ -22,23 +22,23 @@ universal_mcp/agents/builder/prompts.py,sha256=8Xs6uzTUHguDRngVMLak3lkXFkk2VV_uQ
 universal_mcp/agents/builder/state.py,sha256=7DeWllxfN-yD6cd9wJ3KIgjO8TctkJvVjAbZT8W_zqk,922
 universal_mcp/agents/codeact0/__init__.py,sha256=8-fvUo1Sm6dURGI-lW-X3Kd78LqySYbb5NMkNJ4NDwg,76
 universal_mcp/agents/codeact0/__main__.py,sha256=YyIoecUcKVUhTcCACzLlSmYrayMDsdwzDEqaV4VV4CE,766
-universal_mcp/agents/codeact0/agent.py,sha256=jaBntdEGydWI6OvRPpDsrLjnNncDdvQtjJbAgkeYp-U,20545
+universal_mcp/agents/codeact0/agent.py,sha256=CRgbKCBHSbMOpiNp_Sgdb-Wml7o9Uy72aA9_DaPNiJA,23449
 universal_mcp/agents/codeact0/config.py,sha256=H-1woj_nhSDwf15F63WYn723y4qlRefXzGxuH81uYF0,2215
 universal_mcp/agents/codeact0/langgraph_agent.py,sha256=8nz2wq-LexImx-l1y9_f81fK72IQetnCeljwgnduNGY,420
 universal_mcp/agents/codeact0/llm_tool.py,sha256=-pAz04OrbZ_dJ2ueysT1qZd02DrbLY4EbU0tiuF_UNU,798
-universal_mcp/agents/codeact0/prompts.py,sha256=zMZyaIb_T1ssPASm6tnEvPSG8SkHm80aJcLzqcTicIk,11144
-universal_mcp/agents/codeact0/sandbox.py,sha256=FcJgJ64upa8NMcFDLXkT7FT69AQvUvPBiXyqW937AUo,4701
+universal_mcp/agents/codeact0/prompts.py,sha256=RiC_43GSeE4LDoiFhmJIOsKkoijOK9_7skwAH6ZqSWk,15501
+universal_mcp/agents/codeact0/sandbox.py,sha256=Zcr7fvYtcGbwNWd7RPV7-Btl2HtycPIPofEGVmzxSmE,4696
 universal_mcp/agents/codeact0/state.py,sha256=cf-94hfVub-HSQJk6b7_SzqBS-oxMABjFa8jqyjdDK0,1925
-universal_mcp/agents/codeact0/tools.py,sha256=i2-WppqEfpJXPa7QouLfX3qXJgInBGVY9qxAGxFOUEg,14896
-universal_mcp/agents/codeact0/utils.py,sha256=F2aFnN0tNXbFfe8imO1iccHXTvWwSSulIbsrkwhhpno,21123
+universal_mcp/agents/codeact0/tools.py,sha256=kWFlEfJdbOPugPMbsP7I-vxFMGfZj3FUilMI-aT7-Xw,18753
+universal_mcp/agents/codeact0/utils.py,sha256=Gvft0W0Sg1qlFWm8ciX14yssCa8y3x037lql92yGsBQ,18164
 universal_mcp/agents/shared/__main__.py,sha256=XxH5qGDpgFWfq7fwQfgKULXGiUgeTp_YKfcxftuVZq8,1452
 universal_mcp/agents/shared/prompts.py,sha256=yjP3zbbuKi87qCj21qwTTicz8TqtkKgnyGSeEjMu3ho,3761
 universal_mcp/agents/shared/tool_node.py,sha256=DC9F-Ri28Pam0u3sXWNODVgmj9PtAEUb5qP1qOoGgfs,9169
 universal_mcp/applications/filesystem/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 universal_mcp/applications/filesystem/app.py,sha256=0TRjjm8YnslVRSmfkXI7qQOAlqWlD1eEn8Jm0xBeigs,5561
 universal_mcp/applications/llm/__init__.py,sha256=_XGRxN3O1--ZS5joAsPf8IlI9Qa6negsJrwJ5VJXno0,46
-universal_mcp/applications/llm/app.py,sha256=4aMDlbBFCJIe_yzSq3Jphtk5ctvjWhHkHfSfnh3_Mso,12714
+universal_mcp/applications/llm/app.py,sha256=zcMCcswJxvbk2jN_x4xpiv2IG2yoJ62jH1CQaNltmYs,12645
 universal_mcp/applications/ui/app.py,sha256=c7OkZsO2fRtndgAzAQbKu-1xXRuRp9Kjgml57YD2NR4,9459
-universal_mcp_agents-0.1.23rc6.dist-info/METADATA,sha256=_BtR77yJ3U-Rtfl0JD6fpYJY8M5Q1TEgp_6M3AFT9dw,931
-universal_mcp_agents-0.1.23rc6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-universal_mcp_agents-0.1.23rc6.dist-info/RECORD,,
+universal_mcp_agents-0.1.23rc8.dist-info/METADATA,sha256=q1hNCeGDqDDTJ5TWkQkh8JuHoXyNgNZEbDOXViM-xM0,931
+universal_mcp_agents-0.1.23rc8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp_agents-0.1.23rc8.dist-info/RECORD,,