universal-mcp-agents 0.1.23rc7__py3-none-any.whl → 0.1.23rc8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of universal-mcp-agents might be problematic.

@@ -1,12 +1,13 @@
- from typing import Any, cast
- from uuid import uuid4
  import asyncio
+ from typing import cast
+ from uuid import uuid4

  from langchain_core.messages import AIMessageChunk
  from langgraph.checkpoint.base import BaseCheckpointSaver
  from langgraph.graph import StateGraph
  from langgraph.types import Command
  from universal_mcp.logger import logger
+
  from .utils import RichCLI


@@ -3,10 +3,9 @@ import json
  import re
  import uuid
  from typing import Literal, cast
- from types import SimpleNamespace

  from langchain_anthropic import ChatAnthropic
- from langchain_core.messages import AIMessage, ToolMessage
+ from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
  from langgraph.checkpoint.base import BaseCheckpointSaver
  from langgraph.graph import START, StateGraph
  from langgraph.types import Command, RetryPolicy, StreamWriter
@@ -19,8 +18,8 @@ from universal_mcp.agents.codeact0.prompts import (
      AGENT_BUILDER_GENERATING_PROMPT,
      AGENT_BUILDER_META_PROMPT,
      AGENT_BUILDER_PLANNING_PROMPT,
+     build_tool_definitions,
      create_default_prompt,
-     build_tool_definitions
  )
  from universal_mcp.agents.codeact0.sandbox import eval_unsafe, execute_ipython_cell, handle_execute_ipython_cell
  from universal_mcp.agents.codeact0.state import AgentBuilderCode, AgentBuilderMeta, AgentBuilderPlan, CodeActState
@@ -53,12 +52,11 @@ class CodeActPlaybookAgent(BaseAgent):
              **kwargs,
          )
          self.model_instance = load_chat_model(model)
-         self.agent_builder_model_instance = load_chat_model("anthropic:claude-sonnet-4-5-20250929", thinking = False)
+         self.agent_builder_model_instance = load_chat_model("anthropic:claude-sonnet-4-5-20250929")
          self.registry = registry
          self.agent_builder_registry = agent_builder_registry
          self.agent = agent_builder_registry.get_agent() if agent_builder_registry else None

-
          self.tools_config = self.agent.tools if self.agent else {}
          self.eval_fn = eval_unsafe
          self.sandbox_timeout = sandbox_timeout
@@ -67,21 +65,23 @@ class CodeActPlaybookAgent(BaseAgent):
          }
          self.final_instructions = ""
          self.tools_context = {}
+         self.eval_mode = kwargs.get("eval_mode", False)

      async def _build_graph(self): # noqa: PLR0915
          """Build the graph for the CodeAct Playbook Agent."""
          meta_tools = create_meta_tools(self.registry)
-         self.additional_tools = [smart_print, meta_tools["web_search"]]
+         self.additional_tools = [
+             smart_print,
+             meta_tools["web_search"],
+             meta_tools["read_file"],
+             meta_tools["save_file"],
+             meta_tools["upload_file"],
+         ]

          if self.tools_config:
-             if isinstance(self.tools_config, dict):
-                 self.tools_config = [
-                     f"{provider}__{tool}" for provider, tools in self.tools_config.items() for tool in tools
-                 ]
-             if not self.registry:
-                 raise ValueError("Tools are configured but no registry is provided")
-             await self.registry.load_tools(self.tools_config) # Load the default tools
-             await self.registry.load_tools(self.default_tools_config) # Load more tools
+             await self.registry.load_tools(self.tools_config) # Load provided tools
+             if self.default_tools_config:
+                 await self.registry.load_tools(self.default_tools_config) # Load default tools

          async def call_model(state: CodeActState) -> Command[Literal["execute_tools"]]:
              """This node now only ever binds the four meta-tools to the LLM."""
@@ -226,18 +226,29 @@ class CodeActPlaybookAgent(BaseAgent):
                  plan = cast(AgentBuilderPlan, response)

                  writer({"type": "custom", id: plan_id, "name": "planning", "data": {"plan": plan.steps}})
+                 ai_msg = AIMessage(
+                     content=json.dumps(plan.model_dump()),
+                     additional_kwargs={
+                         "type": "planning",
+                         "plan": plan.steps,
+                         "update": bool(self.agent),
+                     },
+                 )
+
+                 if self.eval_mode:
+                     mock_user_message = HumanMessage(content="yes, this is great")
+                     return Command(
+                         goto="agent_builder",
+                         update={
+                             "messages": [ai_msg, mock_user_message],
+                             "agent_builder_mode": "generating",
+                             "plan": plan.steps,
+                         },
+                     )
+
                  return Command(
                      update={
-                         "messages": [
-                             AIMessage(
-                                 content=json.dumps(plan.model_dump()),
-                                 additional_kwargs={
-                                     "type": "planning",
-                                     "plan": plan.steps,
-                                     "update": bool(self.agent),
-                                 },
-                             )
-                         ],
+                         "messages": [ai_msg],
                          "agent_builder_mode": "confirming",
                          "plan": plan.steps,
                      }
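
The hunks above and below introduce an `eval_mode` switch: when it is set, the planner appends a mock user approval right after the plan message and routes straight to the generating state instead of waiting for a human confirmation. A minimal opt-in sketch in Python; every constructor argument except `eval_mode` is assumed from surrounding context rather than documented by this diff:

    # Hypothetical wiring; eval_mode is read via kwargs.get("eval_mode", False)
    agent = CodeActPlaybookAgent(
        instructions="...",
        model="anthropic:claude-sonnet-4-5-20250929",
        registry=registry,  # assumed: a configured AgentrRegistry
        eval_mode=True,     # plans are auto-approved with "yes, this is great"
    )
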
@@ -319,7 +330,7 @@ class CodeActPlaybookAgent(BaseAgent):
                  return Command(goto="call_model", update={"agent_builder_mode": "inactive"})

              elif agent_builder_mode == "generating":
-                 generating_instructions = self.instructions + AGENT_BUILDER_GENERATING_PROMPT + self.preloaded_defs
+                 generating_instructions = self.instructions + AGENT_BUILDER_GENERATING_PROMPT + self.preloaded_defs
                  messages = [{"role": "system", "content": generating_instructions}] + state["messages"]

                  model_with_structured_output = self.agent_builder_model_instance.with_structured_output(
@@ -359,14 +370,10 @@ class CodeActPlaybookAgent(BaseAgent):
                          instructions=instructions_payload,
                          tools=tool_dict,
                      )
-                 except Exception:
+                 except Exception:
                      # In case of error, add the code to the exit message content

-                     mock_exit_tool_call = {
-                         "name": "exit_agent_builder_mode",
-                         "args": {},
-                         "id": "exit_builder_1"
-                     }
+                     mock_exit_tool_call = {"name": "exit_agent_builder_mode", "args": {}, "id": "exit_builder_1"}

                      # Create a minimal assistant message to maintain flow
                      mock_assistant_message = AIMessage(
@@ -385,9 +392,25 @@ class CodeActPlaybookAgent(BaseAgent):
                              f"An error occurred. Displaying the function code:\n\n{func_code}\nFinal Name: {final_name}\nDescription: {final_description}"
                          ),
                          name="exit_agent_builder_mode",
-                         tool_call_id="exit_builder_1"
+                         tool_call_id="exit_builder_1",
+                     )
+                     if self.eval_mode:
+                         human_msg = HumanMessage(
+                             content="Run the generated agent code and check whether it works as expected"
+                         )
+                         return Command(
+                             goto="call_model",
+                             update={
+                                 "messages": [mock_assistant_message, mock_exit_tool_response, human_msg],
+                                 "agent_builder_mode": "normal",
+                             },
+                         )
+                     return Command(
+                         update={
+                             "messages": [mock_assistant_message, mock_exit_tool_response],
+                             "agent_builder_mode": "normal",
+                         }
                      )
-                     return Command(update={"messages": [mock_assistant_message, mock_exit_tool_response], "agent_builder_mode": "normal"})

                  writer(
                      {
@@ -402,11 +425,7 @@ class CodeActPlaybookAgent(BaseAgent):
                          },
                      }
                  )
-                 mock_exit_tool_call = {
-                     "name": "exit_agent_builder_mode",
-                     "args": {},
-                     "id": "exit_builder_1"
-                 }
+                 mock_exit_tool_call = {"name": "exit_agent_builder_mode", "args": {}, "id": "exit_builder_1"}
                  mock_assistant_message = AIMessage(
                      content=json.dumps(response.model_dump()),
                      tool_calls=[mock_exit_tool_call],
@@ -418,14 +437,21 @@ class CodeActPlaybookAgent(BaseAgent):
                          "description": final_description,
                      },
                  )
-
+
                  mock_exit_tool_response = ToolMessage(
-                     content=json.dumps("Exited Agent Builder Mode. Enter this mode again if you need to modify the saved agent."),
+                     content=json.dumps(
+                         "Exited Agent Builder Mode. Enter this mode again if you need to modify the saved agent."
+                     ),
                      name="exit_agent_builder_mode",
-                     tool_call_id="exit_builder_1"
+                     tool_call_id="exit_builder_1",
                  )

-                 return Command(update={"messages": [mock_assistant_message, mock_exit_tool_response], "agent_builder_mode": "normal"})
+                 return Command(
+                     update={
+                         "messages": [mock_assistant_message, mock_exit_tool_response],
+                         "agent_builder_mode": "normal",
+                     }
+                 )

          async def route_entry(state: CodeActState) -> Command[Literal["call_model", "agent_builder", "execute_tools"]]:
              """Route to either normal mode or agent builder creation"""
@@ -441,16 +467,20 @@ class CodeActPlaybookAgent(BaseAgent):
                  is_initial_prompt=True,
              )
              self.preloaded_defs, _ = build_tool_definitions(pre_tools)
-             self.preloaded_defs = '\n'.join(self.preloaded_defs)
+             self.preloaded_defs = "\n".join(self.preloaded_defs)
              await self.registry.load_tools(state["selected_tool_ids"])
-             exported_tools = await self.registry.export_tools(state["selected_tool_ids"],ToolFormat.NATIVE) # Get definition for only the new tools
+             exported_tools = await self.registry.export_tools(
+                 state["selected_tool_ids"], ToolFormat.NATIVE
+             ) # Get definition for only the new tools
              _, loaded_tools_context = build_tool_definitions(exported_tools)
              self.tools_context.update(loaded_tools_context)
-
-             if len(state['messages']) == 1 and self.agent: # Inject the agent's script function into add_context for execution
-                 script = self.agent.instructions.get('script')
-                 add_context = {"functions":[script]}
-                 return Command(goto="call_model", update = {"add_context": add_context})
+
+             if (
+                 len(state["messages"]) == 1 and self.agent
+             ): # Inject the agent's script function into add_context for execution
+                 script = self.agent.instructions.get("script")
+                 add_context = {"functions": [script]}
+                 return Command(goto="call_model", update={"add_context": add_context})

              if state.get("agent_builder_mode") in ["planning", "confirming", "generating"]:
                  return Command(goto="agent_builder")
@@ -2,8 +2,6 @@ import inspect
  import re
  from collections.abc import Callable

- from loguru import logger
-
  uneditable_prompt = """
  You are **Ruzo**, an AI Assistant created by AgentR — a creative, straight-forward, and direct principal software engineer with access to tools.

@@ -45,6 +43,7 @@ Your job is to answer the user's question or perform the task they ask for.
  - Always respond in github flavoured markdown format.
  - For charts and diagrams, use mermaid chart in markdown directly.
  - Your final response should contain the complete answer to the user's request in a clear, well-formatted manner that directly addresses what they asked for.
+ - For file types like images, audio, documents, etc., you must use the `upload_file` tool to upload the file to the server and render the link in the markdown response.
  """

  AGENT_BUILDER_PLANNING_PROMPT = """TASK: Analyze the conversation history and code execution to create a step-by-step non-technical plan for a reusable function.
@@ -100,7 +99,7 @@ Rules-
  - Do not include any text, explanations, or Markdown.
  - The response must start with `def` or `async def` and define a single, complete, executable function.
  - The function parameters **must exactly match the external variables** in the agent plan. External variables are marked using backticks `` `variable_name` ``. Any variables in italics (i.e. enclosed in *...*) are to be used internally, but not as the main function paramters.
- - Any imports, variables, helper or child functions required must be defined **inside the main top-level function**.
+ - Any imports, variables, helper or child functions required must be defined **inside the main top-level function**.
  - Ensure that the outer function is self-contained and can run independently, based on previously validated code snippets.

  Example:
@@ -162,38 +161,39 @@ def make_safe_function_name(name: str) -> str:


  # Compile regex once for better performance
- _RAISES_PATTERN = re.compile(r'\n\s*[Rr]aises\s*:.*$', re.DOTALL)
+ _RAISES_PATTERN = re.compile(r"\n\s*[Rr]aises\s*:.*$", re.DOTALL)
+

  def _clean_docstring(docstring: str | None) -> str:
      """Remove the 'Raises:' section and everything after it from a docstring."""
      if not docstring:
          return ""
-
+
      # Use pre-compiled regex for better performance
-     cleaned = _RAISES_PATTERN.sub('', docstring)
+     cleaned = _RAISES_PATTERN.sub("", docstring)
      return cleaned.strip()


  def build_tool_definitions(tools: list[Callable]) -> tuple[list[str], dict[str, Callable]]:
      tool_definitions = []
      context = {}
-
+
      # Pre-allocate lists for better performance
      tool_definitions = [None] * len(tools)
-
+
      for i, tool in enumerate(tools):
          tool_name = tool.__name__
          cleaned_docstring = _clean_docstring(tool.__doc__)
-
+
          # Pre-compute string parts to avoid repeated string operations
          async_prefix = "async " if inspect.iscoroutinefunction(tool) else ""
          signature = str(inspect.signature(tool))
-
+
          tool_definitions[i] = f'''{async_prefix}def {tool_name} {signature}:
      """{cleaned_docstring}"""
      ...'''
          context[tool_name] = tool
-
+
      return tool_definitions, context

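
For reference, a small usage sketch of `build_tool_definitions` as defined above; the sample tool is illustrative only:

    def add(a: int, b: int) -> int:
        """Add two numbers.

        Raises:
            ValueError: listed only to show the docstring cleanup.
        """
        return a + b

    defs, ctx = build_tool_definitions([add])
    # defs[0] is a stub string like:
    #   def add (a: int, b: int) -> int:
    #       """Add two numbers."""
    #       ...
    # (the Raises: section is stripped by _clean_docstring)
    # ctx maps tool names back to the callables: ctx["add"] is add
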
 
@@ -231,7 +231,9 @@ def create_default_prompt(
          plan = pb.get("plan")
          code = pb.get("script")
          if plan or code:
-             system_prompt += "\n\nYou have been provided an existing agent plan and code for performing a task.:\n"
+             system_prompt += (
+                 "\n\nYou have been provided an existing agent plan and code for performing a task.:\n"
+             )
              if plan:
                  if isinstance(plan, list):
                      plan_block = "\n".join(f"- {str(s)}" for s in plan)
@@ -1,14 +1,14 @@
+ import ast
  import contextlib
  import inspect
  import io
+ import pickle
  import queue
  import re
  import socket
  import threading
  import types
  from typing import Any
- import pickle
- import ast

  from langchain_core.tools import tool

@@ -40,12 +40,12 @@ async def eval_unsafe(
  )

      result_container = {"output": "<no output>"}
-
+
      try:
          compiled_code = compile(code, "<string>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
          with contextlib.redirect_stdout(io.StringIO()) as f:
              coroutine = eval(compiled_code, _locals, _locals)
-             # Await the coroutine to run the code if it's async
+             # Await the coroutine to run the code if it's async
              if coroutine:
                  await coroutine
          result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
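
The `ast.PyCF_ALLOW_TOP_LEVEL_AWAIT` flag shown above is what lets the sandbox execute snippets containing a bare `await`: when the flag is set and the block actually awaits at top level, `eval()` of the exec-compiled code object returns a coroutine to be awaited. A standalone sketch of the pattern:

    import ast
    import asyncio

    async def run_snippet(code: str, scope: dict) -> None:
        compiled = compile(code, "<string>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
        # eval() returns a coroutine when the block awaits at top level, else None
        coroutine = eval(compiled, scope, scope)
        if coroutine is not None:
            await coroutine

    asyncio.run(run_snippet("import asyncio\nawait asyncio.sleep(0)", {}))
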
@@ -1,11 +1,15 @@
  import asyncio
+ import base64
  import json
  from collections import defaultdict
+ from pathlib import Path
  from typing import Annotated, Any

  from langchain_core.tools import tool
  from pydantic import Field
+ from universal_mcp.agentr.client import AgentrClient
  from universal_mcp.agentr.registry import AgentrRegistry
+ from universal_mcp.applications.markitdown.app import MarkitdownApp
  from universal_mcp.types import ToolFormat

  from universal_mcp.agents.codeact0.prompts import build_tool_definitions
@@ -192,8 +196,10 @@ def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
          )

          result_parts.append("Call load_functions to select the required functions only.")
-         if len(connected_apps_in_results)<len(apps_in_results) and len(connected_apps_in_results)>0:
-             result_parts.append("Unconnected app functions can also be loaded if required by the user, but prefer connected ones. And do ask the user to choose if none of the relevant apps are connected")
+         if len(connected_apps_in_results) < len(apps_in_results) and len(connected_apps_in_results) > 0:
+             result_parts.append(
+                 "Unconnected app functions can also be loaded if required by the user, but prefer connected ones. And do ask the user to choose if none of the relevant apps are connected"
+             )
          return "\n".join(result_parts)

      @tool
@@ -263,7 +269,95 @@ def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
              "citations": response.get("citations", []),
          }

-     return {"search_functions": search_functions, "load_functions": load_functions, "web_search": web_search}
+     async def read_file(uri: str) -> str:
+         """
+         Asynchronously reads a local file or uri and returns the content as a markdown string.
+
+         This tool aims to extract the main text content from various sources.
+         It automatically prepends 'file://' to the input string if it appears
+         to be a local path without a specified scheme (like http, https, data, file).
+
+         Args:
+             uri (str): The URI pointing to the resource or a local file path.
+                 Supported schemes:
+                 - http:// or https:// (Web pages, feeds, APIs)
+                 - file:// (Local or accessible network files)
+                 - data: (Embedded data)
+
+         Returns:
+             A string containing the markdown representation of the content at the specified URI
+
+         Raises:
+             ValueError: If the URI is invalid, empty, or uses an unsupported scheme
+                 after automatic prefixing.
+
+         Tags:
+             convert, markdown, async, uri, transform, document, important
+         """
+         markitdown = MarkitdownApp()
+         response = await markitdown.convert_to_markdown(uri)
+         return response
+
+     async def save_file(file_name: str, content: str) -> dict:
+         """
+         Saves a file to the local filesystem.
+
+         Args:
+             file_name (str): The name of the file to save.
+             content (str): The content to save to the file.
+
+         Returns:
+             dict: A dictionary containing the result of the save operation with the following fields:
+                 - status (str): "success" if the save succeeded, "error" otherwise.
+                 - message (str): A message returned by the server, typically indicating success or providing error details.
+         """
+         with Path(file_name).open("w") as f:
+             f.write(content)
+
+         return {
+             "status": "success",
+             "message": f"File {file_name} saved successfully",
+             "file_path": Path(file_name).absolute(),
+         }
+
+     async def upload_file(file_name: str, mime_type: str, base64_data: str) -> dict:
+         """
+         Uploads a file to the server via the AgentrClient.
+
+         Args:
+             file_name (str): The name of the file to upload.
+             mime_type (str): The MIME type of the file.
+             base64_data (str): The file content encoded as a base64 string.
+
+         Returns:
+             dict: A dictionary containing the result of the upload operation with the following fields:
+                 - status (str): "success" if the upload succeeded, "error" otherwise.
+                 - message (str): A message returned by the server, typically indicating success or providing error details.
+                 - signed_url (str or None): The signed URL to access the uploaded file if successful, None otherwise.
+         """
+         client: AgentrClient = tool_registry.client
+         bytes_data = base64.b64decode(base64_data)
+         response = client._upload_file(file_name, mime_type, bytes_data)
+         if response.get("status") != "success":
+             return {
+                 "status": "error",
+                 "message": response.get("message"),
+                 "signed_url": None,
+             }
+         return {
+             "status": "success",
+             "message": response.get("message"),
+             "signed_url": response.get("signed_url"),
+         }
+
+     return {
+         "search_functions": search_functions,
+         "load_functions": load_functions,
+         "web_search": web_search,
+         "read_file": read_file,
+         "upload_file": upload_file,
+         "save_file": save_file,
+     }


  async def get_valid_tools(tool_ids: list[str], registry: AgentrRegistry) -> tuple[list[str], list[str]]:
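
A hedged usage sketch of the three new file helpers returned by `create_meta_tools`; the registry is assumed to be an already-configured `AgentrRegistry`, and the calls must run inside an async context. File names and MIME type are illustrative:

    import base64

    meta = create_meta_tools(registry)  # registry: assumed AgentrRegistry

    markdown = await meta["read_file"]("reports/q3.pdf")  # local path; 'file://' is prepended
    await meta["save_file"]("q3.md", markdown)            # writes to the local filesystem

    payload = base64.b64encode(markdown.encode()).decode()
    result = await meta["upload_file"]("q3.md", "text/markdown", payload)
    print(result["signed_url"])                           # None if the upload failed
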
@@ -4,7 +4,7 @@ import re
  from collections.abc import Sequence
  from typing import Any

- from langchain_core.messages import AIMessage, ToolMessage, BaseMessage
+ from langchain_core.messages import BaseMessage
  from universal_mcp.types import ToolConfig

  MAX_CHARS = 5000
@@ -5,6 +5,7 @@ import io
  import traceback

  import cloudpickle as pickle
+ from loguru import logger


  class Sandbox:
@@ -26,17 +27,37 @@ class Sandbox:
      def save_context(self) -> str:
          """
          Saves the context to a base64 string.
+         files, IO, threads, etc. are not pickable. So we only pickle the context that is pickable.
          """
-         pickled_data = pickle.dumps(self.context)
+         pickable_context = {}
+         for key, value in self.context.items():
+             try:
+                 pickle.dumps(value)
+                 pickable_context[key] = value
+             except Exception as e:
+                 logger.error(f"Error picking {key}: {e}")
+         pickled_data = pickle.dumps(pickable_context)
          base64_encoded = base64.b64encode(pickled_data).decode("utf-8")
          return base64_encoded

-     def load_context(self, context: str):
+     def load_context(self, context: str, add_context: list[str] = []):
          """
          Loads the context from a base64 string.
+         Also executes the add_context code strings to add to the context.
          """
-         pickled_data = base64.b64decode(context)
-         self.context = pickle.loads(pickled_data)
+         if context:
+             pickled_data = base64.b64decode(context)
+             new_context = pickle.loads(pickled_data)
+             self.context.update(new_context)
+         for code in add_context:
+             self.run(code)
+         return self.context
+
+     def _filter_context(self, context: dict[str, any]) -> dict[str, any]:
+         """
+         Filters the context to only include pickable variables.
+         """
+         return {k: v for k, v in context.items() if not k.startswith("__")}

      def run(self, code: str) -> dict[str, any]:
          """
@@ -64,7 +85,7 @@ class Sandbox:

          # Update the context with any new/modified variables
          # Filter out dunder methods/system keys that might be introduced by exec
-         new_context = {k: v for k, v in exec_scope.items() if not k.startswith("__")}
+         new_context = self._filter_context(exec_scope)
          self.context.update(new_context)

      except Exception:
@@ -114,8 +135,9 @@ class Sandbox:
                  await coroutine

          # Update the context with any new/modified variables
-         new_context = {k: v for k, v in exec_scope.items() if not k.startswith("__")}
-         self.context.update(new_context)
+         new_context = self._filter_context(exec_scope)
+         if new_context:
+             self.context.update(new_context)

      except Exception:
          stderr_output = traceback.format_exc()
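
A minimal round-trip sketch of the revised context persistence (a no-argument `Sandbox()` constructor is assumed). Unpicklable values are now logged and skipped rather than aborting the whole dump; note also that the mutable default `add_context: list[str] = []` is shared across calls, the usual Python caveat for callers:

    sb = Sandbox()
    sb.run("x = 41\nimport threading\nlock = threading.Lock()")
    blob = sb.save_context()  # 'lock' fails pickle.dumps, so it is logged and skipped

    sb2 = Sandbox()
    sb2.load_context(blob, add_context=["def inc(n):\n    return n + 1"])
    sb2.run("x = inc(x)")     # x becomes 42, assuming run() executes against the restored context
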
@@ -1,7 +1,6 @@
  import json
  from typing import Any, Literal, cast

- from langchain.chat_models import init_chat_model
  from pydantic import BaseModel, Field
  from universal_mcp.applications.application import BaseApplication

@@ -38,7 +37,7 @@ class LlmApp(BaseApplication):
          """Initialize the LLMApp."""
          super().__init__(name="llm")

-     def generate_text(
+     async def generate_text(
          self,
          task: str,
          context: str | list[str] | dict[str, str] = "",
@@ -92,10 +91,10 @@ class LlmApp(BaseApplication):
          full_prompt = f"{prompt}\n\nContext:\n{context_str}\n\n"

          model = load_chat_model("azure/gpt-5-mini")
-         response = model.with_retry(stop_after_attempt=MAX_RETRIES).invoke(full_prompt)
+         response = await model.with_retry(stop_after_attempt=MAX_RETRIES).ainvoke(full_prompt)
          return str(response.content)

-     def classify_data(
+     async def classify_data(
          self,
          classification_task_and_requirements: str,
          context: Any | list[Any] | dict[str, Any],
@@ -154,7 +153,7 @@ class LlmApp(BaseApplication):
              "Return ONLY a valid JSON object, no extra text."
          )

-         model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
+         model = load_chat_model("azure/gpt-5-mini", temperature=0)

          class ClassificationResult(BaseModel):
              probabilities: dict[str, float] = Field(..., description="The probabilities for each class.")
@@ -162,13 +161,13 @@ class LlmApp(BaseApplication):
              top_class: str = Field(..., description="The class with the highest probability.")

          response = (
-             model.with_structured_output(schema=ClassificationResult)
+             await model.with_structured_output(schema=ClassificationResult)
              .with_retry(stop_after_attempt=MAX_RETRIES)
-             .invoke(prompt)
+             .ainvoke(prompt)
          )
          return response.model_dump()

-     def extract_data(
+     async def extract_data(
          self,
          extraction_task: str,
          source: Any | list[Any] | dict[str, Any],
@@ -229,16 +228,16 @@ class LlmApp(BaseApplication):
              "Return ONLY a valid JSON object that conforms to the provided schema, with no extra text."
          )

-         model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
+         model = load_chat_model("azure/gpt-5-mini", temperature=0)

-         response = (
+         response = await (
              model.with_structured_output(schema=output_schema, method="json_mode")
              .with_retry(stop_after_attempt=MAX_RETRIES)
-             .invoke(prompt)
+             .ainvoke(prompt)
          )
          return cast(dict[str, Any], response)

-     def call_llm(
+     async def call_llm(
          self,
          task_instructions: str,
          context: Any | list[Any] | dict[str, Any],
@@ -282,14 +281,14 @@ class LlmApp(BaseApplication):

          prompt = f"{task_instructions}\n\nContext:\n{context_str}\n\nReturn ONLY a valid JSON object, no extra text."

-         model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
+         model = load_chat_model("azure/gpt-5-mini", temperature=0)

-         response = (
-             model.with_structured_output(schema=output_schema, method="json_mode")
+         response = await (
+             model.with_structured_output(schema=output_schema)
              .with_retry(stop_after_attempt=MAX_RETRIES)
-             .invoke(prompt)
+             .ainvoke(prompt)
          )
-         return cast(dict[str, Any], response)
+         return response.model_dump()

      def list_tools(self):
          return [
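
Because the four LlmApp methods are now coroutines, callers must await them. A hedged sketch; the schema and inputs are illustrative, and `output_schema` as a keyword of `call_llm` is inferred from the diff body above:

    from pydantic import BaseModel

    class Sentiment(BaseModel):
        label: str
        confidence: float

    async def demo() -> None:
        app = LlmApp()
        summary = await app.generate_text("Summarize the report", context="...")
        verdict = await app.call_llm(
            "Classify the sentiment of the context",
            context="Great product, fast shipping!",
            output_schema=Sentiment,  # returned as a dict via response.model_dump()
        )
        print(summary, verdict)
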
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: universal-mcp-agents
- Version: 0.1.23rc7
+ Version: 0.1.23rc8
  Summary: Add your description here
  Project-URL: Homepage, https://github.com/universal-mcp/applications
  Project-URL: Repository, https://github.com/universal-mcp/applications
@@ -1,10 +1,10 @@
  universal_mcp/agents/__init__.py,sha256=Ythw8tyq7p-w1SPnuO2JtS4TvYEP75PkQpdyvZv-ww4,914
- universal_mcp/agents/base.py,sha256=pnPf5EgVVoycg_mrgdIwqEiENny1Dcx6GDZWmOVw2NU,7837
+ universal_mcp/agents/base.py,sha256=IyU1HUmB8rjHuCxv-c29RV-dWXfdiQiPq5rGkcCiSbU,7833
  universal_mcp/agents/cli.py,sha256=9CG7majpWUz7C6t0d8xr-Sg2ZPKBuQdykTbYS6KIZ3A,922
  universal_mcp/agents/hil.py,sha256=_5PCK6q0goGm8qylJq44aSp2MadP-yCPvhOJYKqWLMo,3808
  universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
  universal_mcp/agents/react.py,sha256=ocYm94HOiJVI2zwTjO1K2PNfVY7EILLJ6cd__jnGHPs,3327
- universal_mcp/agents/sandbox.py,sha256=LL4OfavEzxbmTDcc_NxizRRpQnw5hc3G2bxvFY63scY,4241
+ universal_mcp/agents/sandbox.py,sha256=YxTGp_zsajuN7FUn0Q4PFjuXczgLht7oKql_gyb2Gf4,5112
  universal_mcp/agents/simple.py,sha256=NSATg5TWzsRNS7V3LFiDG28WSOCIwCdcC1g7NRwg2nM,2095
  universal_mcp/agents/utils.py,sha256=P6W9k6XAOBp6tdjC2VTP4tE0B2M4-b1EDmr-ylJ47Pw,7765
  universal_mcp/agents/bigtool/__init__.py,sha256=mZG8dsaCVyKlm82otxtiTA225GIFLUCUUYPEIPF24uw,2299
@@ -22,23 +22,23 @@ universal_mcp/agents/builder/prompts.py,sha256=8Xs6uzTUHguDRngVMLak3lkXFkk2VV_uQ
  universal_mcp/agents/builder/state.py,sha256=7DeWllxfN-yD6cd9wJ3KIgjO8TctkJvVjAbZT8W_zqk,922
  universal_mcp/agents/codeact0/__init__.py,sha256=8-fvUo1Sm6dURGI-lW-X3Kd78LqySYbb5NMkNJ4NDwg,76
  universal_mcp/agents/codeact0/__main__.py,sha256=YyIoecUcKVUhTcCACzLlSmYrayMDsdwzDEqaV4VV4CE,766
- universal_mcp/agents/codeact0/agent.py,sha256=lixuPcFLEaWl3IgJ7pY9JSLz9UxH5t9F9FJVEIELydA,22507
+ universal_mcp/agents/codeact0/agent.py,sha256=CRgbKCBHSbMOpiNp_Sgdb-Wml7o9Uy72aA9_DaPNiJA,23449
  universal_mcp/agents/codeact0/config.py,sha256=H-1woj_nhSDwf15F63WYn723y4qlRefXzGxuH81uYF0,2215
  universal_mcp/agents/codeact0/langgraph_agent.py,sha256=8nz2wq-LexImx-l1y9_f81fK72IQetnCeljwgnduNGY,420
  universal_mcp/agents/codeact0/llm_tool.py,sha256=-pAz04OrbZ_dJ2ueysT1qZd02DrbLY4EbU0tiuF_UNU,798
- universal_mcp/agents/codeact0/prompts.py,sha256=re1DHkfC6kyy1Y2pgmPLMZ_TceKZHZk-0-csCPHnQjw,15344
- universal_mcp/agents/codeact0/sandbox.py,sha256=FcJgJ64upa8NMcFDLXkT7FT69AQvUvPBiXyqW937AUo,4701
+ universal_mcp/agents/codeact0/prompts.py,sha256=RiC_43GSeE4LDoiFhmJIOsKkoijOK9_7skwAH6ZqSWk,15501
+ universal_mcp/agents/codeact0/sandbox.py,sha256=Zcr7fvYtcGbwNWd7RPV7-Btl2HtycPIPofEGVmzxSmE,4696
  universal_mcp/agents/codeact0/state.py,sha256=cf-94hfVub-HSQJk6b7_SzqBS-oxMABjFa8jqyjdDK0,1925
- universal_mcp/agents/codeact0/tools.py,sha256=e-ucTRkXuHEagEAWo2OPWh28UGeYlKzeNhi5cM7lqPc,15007
- universal_mcp/agents/codeact0/utils.py,sha256=a0ux1icTSB6ETIZ_X2azZxlP44LBx95bi7wchQWpnuY,18188
+ universal_mcp/agents/codeact0/tools.py,sha256=kWFlEfJdbOPugPMbsP7I-vxFMGfZj3FUilMI-aT7-Xw,18753
+ universal_mcp/agents/codeact0/utils.py,sha256=Gvft0W0Sg1qlFWm8ciX14yssCa8y3x037lql92yGsBQ,18164
  universal_mcp/agents/shared/__main__.py,sha256=XxH5qGDpgFWfq7fwQfgKULXGiUgeTp_YKfcxftuVZq8,1452
  universal_mcp/agents/shared/prompts.py,sha256=yjP3zbbuKi87qCj21qwTTicz8TqtkKgnyGSeEjMu3ho,3761
  universal_mcp/agents/shared/tool_node.py,sha256=DC9F-Ri28Pam0u3sXWNODVgmj9PtAEUb5qP1qOoGgfs,9169
  universal_mcp/applications/filesystem/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  universal_mcp/applications/filesystem/app.py,sha256=0TRjjm8YnslVRSmfkXI7qQOAlqWlD1eEn8Jm0xBeigs,5561
  universal_mcp/applications/llm/__init__.py,sha256=_XGRxN3O1--ZS5joAsPf8IlI9Qa6negsJrwJ5VJXno0,46
- universal_mcp/applications/llm/app.py,sha256=4aMDlbBFCJIe_yzSq3Jphtk5ctvjWhHkHfSfnh3_Mso,12714
+ universal_mcp/applications/llm/app.py,sha256=zcMCcswJxvbk2jN_x4xpiv2IG2yoJ62jH1CQaNltmYs,12645
  universal_mcp/applications/ui/app.py,sha256=c7OkZsO2fRtndgAzAQbKu-1xXRuRp9Kjgml57YD2NR4,9459
- universal_mcp_agents-0.1.23rc7.dist-info/METADATA,sha256=Up-hGsxUw2JH3vSa9FipSTF45ROWOMiySV0DItN4Cx8,931
- universal_mcp_agents-0.1.23rc7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- universal_mcp_agents-0.1.23rc7.dist-info/RECORD,,
+ universal_mcp_agents-0.1.23rc8.dist-info/METADATA,sha256=q1hNCeGDqDDTJ5TWkQkh8JuHoXyNgNZEbDOXViM-xM0,931
+ universal_mcp_agents-0.1.23rc8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ universal_mcp_agents-0.1.23rc8.dist-info/RECORD,,