universal-mcp-agents 0.1.23rc7__py3-none-any.whl → 0.1.23rc9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of universal-mcp-agents was flagged by the registry as a potentially problematic release.
- universal_mcp/agents/base.py +3 -2
- universal_mcp/agents/codeact0/agent.py +79 -49
- universal_mcp/agents/codeact0/prompts.py +14 -12
- universal_mcp/agents/codeact0/sandbox.py +4 -4
- universal_mcp/agents/codeact0/tools.py +345 -154
- universal_mcp/agents/codeact0/utils.py +1 -1
- universal_mcp/agents/llm.py +1 -2
- universal_mcp/agents/sandbox.py +29 -7
- universal_mcp/applications/llm/app.py +24 -23
- {universal_mcp_agents-0.1.23rc7.dist-info → universal_mcp_agents-0.1.23rc9.dist-info}/METADATA +1 -1
- {universal_mcp_agents-0.1.23rc7.dist-info → universal_mcp_agents-0.1.23rc9.dist-info}/RECORD +12 -12
- {universal_mcp_agents-0.1.23rc7.dist-info → universal_mcp_agents-0.1.23rc9.dist-info}/WHEEL +0 -0
universal_mcp/agents/base.py
CHANGED

@@ -1,12 +1,13 @@
-from typing import Any, cast
-from uuid import uuid4
 import asyncio
+from typing import cast
+from uuid import uuid4
 
 from langchain_core.messages import AIMessageChunk
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import StateGraph
 from langgraph.types import Command
 from universal_mcp.logger import logger
+
 from .utils import RichCLI
 
universal_mcp/agents/codeact0/agent.py
CHANGED

@@ -3,10 +3,9 @@ import json
 import re
 import uuid
 from typing import Literal, cast
-from types import SimpleNamespace
 
 from langchain_anthropic import ChatAnthropic
-from langchain_core.messages import AIMessage, ToolMessage
+from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import START, StateGraph
 from langgraph.types import Command, RetryPolicy, StreamWriter

@@ -19,8 +18,8 @@ from universal_mcp.agents.codeact0.prompts import (
     AGENT_BUILDER_GENERATING_PROMPT,
     AGENT_BUILDER_META_PROMPT,
     AGENT_BUILDER_PLANNING_PROMPT,
+    build_tool_definitions,
     create_default_prompt,
-    build_tool_definitions
 )
 from universal_mcp.agents.codeact0.sandbox import eval_unsafe, execute_ipython_cell, handle_execute_ipython_cell
 from universal_mcp.agents.codeact0.state import AgentBuilderCode, AgentBuilderMeta, AgentBuilderPlan, CodeActState

@@ -53,12 +52,11 @@ class CodeActPlaybookAgent(BaseAgent):
             **kwargs,
         )
         self.model_instance = load_chat_model(model)
-        self.agent_builder_model_instance = load_chat_model("anthropic:claude-sonnet-4-5-20250929"
+        self.agent_builder_model_instance = load_chat_model("anthropic:claude-sonnet-4-5-20250929")
         self.registry = registry
         self.agent_builder_registry = agent_builder_registry
         self.agent = agent_builder_registry.get_agent() if agent_builder_registry else None
 
-
         self.tools_config = self.agent.tools if self.agent else {}
         self.eval_fn = eval_unsafe
         self.sandbox_timeout = sandbox_timeout

@@ -67,21 +65,23 @@ class CodeActPlaybookAgent(BaseAgent):
         }
         self.final_instructions = ""
         self.tools_context = {}
+        self.eval_mode = kwargs.get("eval_mode", False)
 
     async def _build_graph(self):  # noqa: PLR0915
         """Build the graph for the CodeAct Playbook Agent."""
         meta_tools = create_meta_tools(self.registry)
-        self.additional_tools = [
+        self.additional_tools = [
+            smart_print,
+            meta_tools["web_search"],
+            meta_tools["read_file"],
+            meta_tools["save_file"],
+            meta_tools["upload_file"],
+        ]
 
         if self.tools_config:
-            ]
-            if not self.registry:
-                raise ValueError("Tools are configured but no registry is provided")
-            await self.registry.load_tools(self.tools_config)  # Load the default tools
-            await self.registry.load_tools(self.default_tools_config)  # Load more tools
+            await self.registry.load_tools(self.tools_config)  # Load provided tools
+            if self.default_tools_config:
+                await self.registry.load_tools(self.default_tools_config)  # Load default tools
 
         async def call_model(state: CodeActState) -> Command[Literal["execute_tools"]]:
             """This node now only ever binds the four meta-tools to the LLM."""

@@ -226,18 +226,29 @@ class CodeActPlaybookAgent(BaseAgent):
             plan = cast(AgentBuilderPlan, response)
 
             writer({"type": "custom", id: plan_id, "name": "planning", "data": {"plan": plan.steps}})
+            ai_msg = AIMessage(
+                content=json.dumps(plan.model_dump()),
+                additional_kwargs={
+                    "type": "planning",
+                    "plan": plan.steps,
+                    "update": bool(self.agent),
+                },
+            )
+
+            if self.eval_mode:
+                mock_user_message = HumanMessage(content="yes, this is great")
+                return Command(
+                    goto="agent_builder",
+                    update={
+                        "messages": [ai_msg, mock_user_message],
+                        "agent_builder_mode": "generating",
+                        "plan": plan.steps,
+                    },
+                )
+
             return Command(
                 update={
-                    "messages": [
-                        AIMessage(
-                            content=json.dumps(plan.model_dump()),
-                            additional_kwargs={
-                                "type": "planning",
-                                "plan": plan.steps,
-                                "update": bool(self.agent),
-                            },
-                        )
-                    ],
+                    "messages": [ai_msg],
                     "agent_builder_mode": "confirming",
                     "plan": plan.steps,
                 }

@@ -319,7 +330,7 @@ class CodeActPlaybookAgent(BaseAgent):
             return Command(goto="call_model", update={"agent_builder_mode": "inactive"})
 
         elif agent_builder_mode == "generating":
-            generating_instructions = self.instructions + AGENT_BUILDER_GENERATING_PROMPT +
+            generating_instructions = self.instructions + AGENT_BUILDER_GENERATING_PROMPT + self.preloaded_defs
             messages = [{"role": "system", "content": generating_instructions}] + state["messages"]
 
             model_with_structured_output = self.agent_builder_model_instance.with_structured_output(

@@ -359,14 +370,10 @@ class CodeActPlaybookAgent(BaseAgent):
                     instructions=instructions_payload,
                     tools=tool_dict,
                 )
-            except Exception
+            except Exception:
                 # In case of error, add the code to the exit message content
 
-                mock_exit_tool_call = {
-                    "name": "exit_agent_builder_mode",
-                    "args": {},
-                    "id": "exit_builder_1"
-                }
+                mock_exit_tool_call = {"name": "exit_agent_builder_mode", "args": {}, "id": "exit_builder_1"}
 
                 # Create a minimal assistant message to maintain flow
                 mock_assistant_message = AIMessage(

@@ -385,9 +392,25 @@ class CodeActPlaybookAgent(BaseAgent):
                         f"An error occurred. Displaying the function code:\n\n{func_code}\nFinal Name: {final_name}\nDescription: {final_description}"
                     ),
                     name="exit_agent_builder_mode",
-                    tool_call_id="exit_builder_1"
+                    tool_call_id="exit_builder_1",
+                )
+                if self.eval_mode:
+                    human_msg = HumanMessage(
+                        content="Run the generated agent code and check whether it works as expected"
+                    )
+                    return Command(
+                        goto="call_model",
+                        update={
+                            "messages": [mock_assistant_message, mock_exit_tool_response, human_msg],
+                            "agent_builder_mode": "normal",
+                        },
+                    )
+                return Command(
+                    update={
+                        "messages": [mock_assistant_message, mock_exit_tool_response],
+                        "agent_builder_mode": "normal",
+                    }
                 )
-                return Command(update={"messages": [mock_assistant_message, mock_exit_tool_response], "agent_builder_mode": "normal"})
 
             writer(
                 {

@@ -402,11 +425,7 @@ class CodeActPlaybookAgent(BaseAgent):
                     },
                 }
             )
-            mock_exit_tool_call = {
-                "name": "exit_agent_builder_mode",
-                "args": {},
-                "id": "exit_builder_1"
-            }
+            mock_exit_tool_call = {"name": "exit_agent_builder_mode", "args": {}, "id": "exit_builder_1"}
             mock_assistant_message = AIMessage(
                 content=json.dumps(response.model_dump()),
                 tool_calls=[mock_exit_tool_call],

@@ -418,14 +437,21 @@ class CodeActPlaybookAgent(BaseAgent):
                     "description": final_description,
                 },
             )
 
             mock_exit_tool_response = ToolMessage(
-                content=json.dumps(
+                content=json.dumps(
+                    "Exited Agent Builder Mode. Enter this mode again if you need to modify the saved agent."
+                ),
                 name="exit_agent_builder_mode",
-                tool_call_id="exit_builder_1"
+                tool_call_id="exit_builder_1",
             )
 
-            return Command(
+            return Command(
+                update={
+                    "messages": [mock_assistant_message, mock_exit_tool_response],
+                    "agent_builder_mode": "normal",
+                }
+            )

@@ -441,16 +467,20 @@ class CodeActPlaybookAgent(BaseAgent):
                 is_initial_prompt=True,
             )
             self.preloaded_defs, _ = build_tool_definitions(pre_tools)
-            self.preloaded_defs =
+            self.preloaded_defs = "\n".join(self.preloaded_defs)
             await self.registry.load_tools(state["selected_tool_ids"])
-            exported_tools = await self.registry.export_tools(
+            exported_tools = await self.registry.export_tools(
+                state["selected_tool_ids"], ToolFormat.NATIVE
+            )  # Get definition for only the new tools
             _, loaded_tools_context = build_tool_definitions(exported_tools)
             self.tools_context.update(loaded_tools_context)
-
-            if
+
+            if (
+                len(state["messages"]) == 1 and self.agent
+            ):  # Inject the agent's script function into add_context for execution
+                script = self.agent.instructions.get("script")
+                add_context = {"functions": [script]}
+                return Command(goto="call_model", update={"add_context": add_context})
 
         if state.get("agent_builder_mode") in ["planning", "confirming", "generating"]:
             return Command(goto="agent_builder")
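The main behavioural change above is the new `eval_mode` flag: when it is set, the planning node appends a synthetic user approval and moves straight to code generation instead of pausing in the `confirming` state. Below is a minimal, self-contained sketch of that control flow; it uses a plain dataclass rather than LangGraph's `Command`/state machinery, and `PlanState`/`confirm_plan` are illustrative names, not part of the package.

```python
from dataclasses import dataclass, field


@dataclass
class PlanState:
    """Tiny stand-in for the agent's planning state (illustrative only)."""
    messages: list[dict] = field(default_factory=list)
    agent_builder_mode: str = "planning"


def confirm_plan(state: PlanState, plan_steps: list[str], eval_mode: bool) -> PlanState:
    """Record the proposed plan, then either auto-confirm (eval) or wait for the user."""
    state.messages.append({"role": "assistant", "type": "planning", "plan": plan_steps})
    if eval_mode:
        # Evaluation runs never block on human input: inject a canned approval
        # and jump straight to code generation.
        state.messages.append({"role": "user", "content": "yes, this is great"})
        state.agent_builder_mode = "generating"
    else:
        # Interactive runs stop here and resume once the user replies.
        state.agent_builder_mode = "confirming"
    return state


if __name__ == "__main__":
    demo = confirm_plan(PlanState(), ["fetch unread emails", "summarize them"], eval_mode=True)
    print(demo.agent_builder_mode)  # -> "generating"
```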
universal_mcp/agents/codeact0/prompts.py
CHANGED

@@ -2,8 +2,6 @@ import inspect
 import re
 from collections.abc import Callable
 
-from loguru import logger
-
 uneditable_prompt = """
 You are **Ruzo**, an AI Assistant created by AgentR — a creative, straight-forward, and direct principal software engineer with access to tools.
 

@@ -45,6 +43,7 @@ Your job is to answer the user's question or perform the task they ask for.
 - Always respond in github flavoured markdown format.
 - For charts and diagrams, use mermaid chart in markdown directly.
 - Your final response should contain the complete answer to the user's request in a clear, well-formatted manner that directly addresses what they asked for.
+- For file types like images, audio, documents, etc., you must use the `upload_file` tool to upload the file to the server and render the link in the markdown response.
 """
 
 AGENT_BUILDER_PLANNING_PROMPT = """TASK: Analyze the conversation history and code execution to create a step-by-step non-technical plan for a reusable function.

@@ -100,7 +99,7 @@ Rules-
 - Do not include any text, explanations, or Markdown.
 - The response must start with `def` or `async def` and define a single, complete, executable function.
 - The function parameters **must exactly match the external variables** in the agent plan. External variables are marked using backticks `` `variable_name` ``. Any variables in italics (i.e. enclosed in *...*) are to be used internally, but not as the main function paramters.
-- Any imports, variables, helper or child functions required must be defined **inside the main top-level function**.
+- Any imports, variables, helper or child functions required must be defined **inside the main top-level function**.
 - Ensure that the outer function is self-contained and can run independently, based on previously validated code snippets.
 
 Example:

@@ -162,38 +161,39 @@ def make_safe_function_name(name: str) -> str:
 
 
 # Compile regex once for better performance
-_RAISES_PATTERN = re.compile(r
+_RAISES_PATTERN = re.compile(r"\n\s*[Rr]aises\s*:.*$", re.DOTALL)
+
 
 def _clean_docstring(docstring: str | None) -> str:
     """Remove the 'Raises:' section and everything after it from a docstring."""
     if not docstring:
         return ""
 
     # Use pre-compiled regex for better performance
-    cleaned = _RAISES_PATTERN.sub(
+    cleaned = _RAISES_PATTERN.sub("", docstring)
     return cleaned.strip()
 
 
 def build_tool_definitions(tools: list[Callable]) -> tuple[list[str], dict[str, Callable]]:
     tool_definitions = []
     context = {}
 
     # Pre-allocate lists for better performance
     tool_definitions = [None] * len(tools)
 
     for i, tool in enumerate(tools):
         tool_name = tool.__name__
         cleaned_docstring = _clean_docstring(tool.__doc__)
 
         # Pre-compute string parts to avoid repeated string operations
         async_prefix = "async " if inspect.iscoroutinefunction(tool) else ""
         signature = str(inspect.signature(tool))
 
         tool_definitions[i] = f'''{async_prefix}def {tool_name} {signature}:
     """{cleaned_docstring}"""
     ...'''
         context[tool_name] = tool
 
     return tool_definitions, context

@@ -231,7 +231,9 @@ def create_default_prompt(
             plan = pb.get("plan")
             code = pb.get("script")
             if plan or code:
-                system_prompt +=
+                system_prompt += (
+                    "\n\nYou have been provided an existing agent plan and code for performing a task.:\n"
+                )
                 if plan:
                     if isinstance(plan, list):
                         plan_block = "\n".join(f"- {str(s)}" for s in plan)
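The helpers above feed the system prompt: `_clean_docstring` drops everything from a `Raises:` section onward, and `build_tool_definitions` turns each callable into a signature-plus-docstring stub. The sketch below reproduces that transformation for a hypothetical `example_tool` so the stub format is visible; it reuses the same regex shown in the diff, but the tool itself is made up.

```python
import inspect
import re

# Same pattern the diff compiles once at module level.
_RAISES_PATTERN = re.compile(r"\n\s*[Rr]aises\s*:.*$", re.DOTALL)


def example_tool(path: str, limit: int = 10) -> list[str]:
    """List matching files under a path.

    Raises:
        ValueError: If the path does not exist.
    """
    return [path] * limit


# Strip the Raises section, then build the "signature + docstring + ..." stub.
cleaned = _RAISES_PATTERN.sub("", example_tool.__doc__ or "").strip()
async_prefix = "async " if inspect.iscoroutinefunction(example_tool) else ""
stub = f'{async_prefix}def {example_tool.__name__} {inspect.signature(example_tool)}:\n    """{cleaned}"""\n    ...'
print(stub)
```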
universal_mcp/agents/codeact0/sandbox.py
CHANGED

@@ -1,14 +1,14 @@
+import ast
 import contextlib
 import inspect
 import io
+import pickle
 import queue
 import re
 import socket
 import threading
 import types
 from typing import Any
-import pickle
-import ast
 
 from langchain_core.tools import tool

@@ -40,12 +40,12 @@ async def eval_unsafe(
     )
 
     result_container = {"output": "<no output>"}
 
     try:
         compiled_code = compile(code, "<string>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
         with contextlib.redirect_stdout(io.StringIO()) as f:
             coroutine = eval(compiled_code, _locals, _locals)
-
+            # Await the coroutine to run the code if it's async
             if coroutine:
                 await coroutine
         result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
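The sandbox's `ast.PyCF_ALLOW_TOP_LEVEL_AWAIT` compile flag is what lets agent-generated snippets use `await` at module level: evaluating such a code object yields a coroutine that the caller must await. A minimal standalone sketch of that pattern (independent of the sandbox's locals handling and timeout logic) follows.

```python
import ast
import asyncio
import contextlib
import io


async def run_snippet(code: str) -> str:
    """Execute a snippet that may contain top-level await and capture its stdout."""
    scope: dict = {}
    compiled = compile(code, "<string>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
    with contextlib.redirect_stdout(io.StringIO()) as buf:
        coroutine = eval(compiled, scope, scope)
        if coroutine:
            # With the flag set, code containing top-level await evaluates to a coroutine.
            await coroutine
    return buf.getvalue() or "<code ran, no output printed to stdout>"


print(asyncio.run(run_snippet("import asyncio\nawait asyncio.sleep(0)\nprint('ran')")))
```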
universal_mcp/agents/codeact0/tools.py
CHANGED

@@ -1,11 +1,14 @@
 import asyncio
-import
+import base64
 from collections import defaultdict
+from pathlib import Path
 from typing import Annotated, Any
 
 from langchain_core.tools import tool
 from pydantic import Field
+from universal_mcp.agentr.client import AgentrClient
 from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.applications.markitdown.app import MarkitdownApp
 from universal_mcp.types import ToolFormat
 
 from universal_mcp.agents.codeact0.prompts import build_tool_definitions

@@ -22,179 +25,279 @@ def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
     @tool
     async def search_functions(
         queries: Annotated[
-            list[str] |
-            Field(
+            list[list[str]] | None,
+            Field(
+                description="A list of query lists. Each inner list contains one or more search terms that will be used together to find relevant tools."
+            ),
         ] = None,
-            str | None,
-            Field(description="The ID or common
+        app_ids: Annotated[
+            list[str] | None,
+            Field(description="The ID or list of IDs (common names) of specific applications to search within."),
         ] = None,
     ) -> str:
         """
-        Searches for relevant functions
-        1. **Global Search (
+        Searches for relevant functions based on queries and/or applications. This function
+        operates in three powerful modes with support for multi-query searches:
+
+        1. **Global Search** (`queries` only as List[List[str]]):
+           - Searches all functions across all applications.
+           - Supports multiple independent searches in parallel.
+           - Each inner list represents a separate search query.
+
+           Examples:
+           - Single global search:
+             `search_functions(queries=[["create presentation"]])`
+
+           - Multiple independent global searches:
+             `search_functions(queries=[["send email"], ["schedule meeting"]])`
+
+           - Multi-term search for comprehensive results:
+             `search_functions(queries=[["send email", "draft email", "compose email"]])`
+
+        2. **App Discovery** (`app_ids` only as List[str]):
+           - Returns ALL available functions for one or more specific applications.
+           - Use this to explore the complete capability set of an application.
+
+           Examples:
+           - Single app discovery:
+             `search_functions(app_ids=["Gmail"])`
+
+           - Multiple app discovery:
+             `search_functions(app_ids=["Gmail", "Google Calendar", "Slack"])`
+
+        3. **Scoped Search** (`queries` as List[List[str]] and `app_ids` as List[str]):
+           - Performs targeted searches within specific applications in parallel.
+           - The number of app_ids must match the number of inner query lists.
+           - Each query list is searched within its corresponding app_id.
+           - Supports multiple search terms per app for comprehensive discovery.
+
+           Examples:
+           - Basic scoped search (one query per app):
+             `search_functions(queries=[["find email"], ["share file"]], app_ids=["Gmail", "Google_Drive"])`
+
+           - Multi-term scoped search (multiple queries per app):
+             `search_functions(
+                 queries=[
+                     ["send email", "draft email", "compose email", "reply to email"],
+                     ["create event", "schedule meeting", "find free time"],
+                     ["upload file", "share file", "create folder", "search files"]
+                 ],
+                 app_ids=["Gmail", "Google Calendar", "Google_Drive"]
+             )`
+
+           - Mixed complexity (some apps with single query, others with multiple):
+             `search_functions(
+                 queries=[
+                     ["list messages"],
+                     ["create event", "delete event", "update event"]
+                 ],
+                 app_ids=["Gmail", "Google Calendar"]
+             )`
+
+        **Pro Tips:**
+        - Use multiple search terms in a single query list to cast a wider net and discover related functionality
+        - Multi-term searches are more efficient than separate calls
+        - Scoped searches return more focused results than global searches
+        - The function returns connection status for each app (connected vs NOT connected)
+        - All searches within a single call execute in parallel for maximum efficiency
+
+        **Parameters:**
+        - `queries` (List[List[str]], optional): A list of query lists. Each inner list contains one or more
+          search terms that will be used together to find relevant tools.
+        - `app_ids` (List[str], optional): A list of application IDs to search within or discover.
+
+        **Returns:**
+        - A structured response containing:
+          - Matched tools with their descriptions
+          - Connection status for each app
+          - Recommendations for which tools to load next
         """
-        try:
-            queries = json.loads(queries)
-        except json.JSONDecodeError:
-            # If it's a single query as a string, convert to list
-            queries = [queries] if queries else None
+        registry = tool_registry
 
-        connections = await registry.list_connected_apps()
-        connected_app_ids = {connection["app_id"] for connection in connections}
+        TOOL_THRESHOLD = 0.75
+        APP_THRESHOLD = 0.7
 
-        relevant_apps = await registry.search_apps(query=app_id, distance_threshold=THRESHOLD)
-        if not relevant_apps:
-            return {
-                "found_tools": [],
-                "message": f"Search failed. Application '{app_id}' was not found.",
-            }
-        canonical_app_id = relevant_apps[0]["id"]
-
-        if canonical_app_id and not queries:
-            all_app_tools = await registry.search_tools(query="", app_id=canonical_app_id, limit=20)
-
-            tool_list = []
-            for tool in all_app_tools:
-                cleaned_description = tool.get("description", "").split("Context:")[0].strip()
-                tool_list.append({"id": tool["id"], "description": cleaned_description})
-
-            found_tools_result.append(
-                {
-                    "app_id": canonical_app_id,
-                    "connection_status": "connected" if canonical_app_id in connected_app_ids else "not_connected",
-                    "tools": tool_list,
-                }
-            )
+        # --- Helper Functions for Different Search Modes ---
+
+        async def _handle_global_search(queries: list[str]) -> list[list[dict[str, Any]]]:
+            """Performs a broad search across all apps to find relevant tools and apps."""
+            # 1. Perform initial broad searches for tools and apps concurrently.
+            initial_tool_tasks = [registry.search_tools(query=q, distance_threshold=TOOL_THRESHOLD) for q in queries]
+            app_search_tasks = [registry.search_apps(query=q, distance_threshold=APP_THRESHOLD) for q in queries]
 
+            initial_tool_results, app_search_results = await asyncio.gather(
+                asyncio.gather(*initial_tool_tasks), asyncio.gather(*app_search_tasks)
+            )
 
+            # 2. Create a prioritized list of app IDs for the final search.
+            app_ids_from_apps = {app["id"] for result_list in app_search_results for app in result_list}
+            prioritized_app_id_list = list(app_ids_from_apps)
+
+            app_ids_from_tools = {tool["app_id"] for result_list in initial_tool_results for tool in result_list}
+            for tool_app_id in app_ids_from_tools:
+                if tool_app_id not in app_ids_from_apps:
+                    prioritized_app_id_list.append(tool_app_id)
+
+            if not prioritized_app_id_list:
+                return []
+
+            # 3. Perform the final, comprehensive tool search across the prioritized apps.
+            final_tool_search_tasks = [
+                registry.search_tools(query=query, app_id=app_id_to_search, distance_threshold=TOOL_THRESHOLD)
+                for app_id_to_search in prioritized_app_id_list
+                for query in queries
+            ]
+            return await asyncio.gather(*final_tool_search_tasks)
+
+        async def _handle_scoped_search(app_ids: list[str], queries: list[list[str]]) -> list[list[dict[str, Any]]]:
+            """Performs targeted searches for specific queries within specific applications."""
+            if len(app_ids) != len(queries):
+                raise ValueError("The number of app_ids must match the number of query lists.")
+
+            tasks = []
+            for app_id, query_list in zip(app_ids, queries):
+                for query in query_list:
+                    # Create a search task for each query in the list for the corresponding app
+                    tasks.append(registry.search_tools(query=query, app_id=app_id, distance_threshold=TOOL_THRESHOLD))
+
+            return await asyncio.gather(*tasks)
+
+        async def _handle_app_discovery(app_ids: list[str]) -> list[list[dict[str, Any]]]:
+            """Fetches all tools for a list of applications."""
+            tasks = [registry.search_tools(query="", app_id=app_id, limit=20) for app_id in app_ids]
+            return await asyncio.gather(*tasks)
+
+        # --- Helper Functions for Structuring and Formatting Results ---
+
+        def _format_response(structured_results: list[dict[str, Any]]) -> str:
+            """Builds the final, user-facing formatted string response from structured data."""
+            if not structured_results:
+                return "No relevant functions were found."
+
+            result_parts = []
+            apps_in_results = {app["app_id"] for app in structured_results}
+            connected_apps_in_results = {
+                app["app_id"] for app in structured_results if app["connection_status"] == "connected"
+            }
+
+            for app in structured_results:
+                app_id = app["app_id"]
+                app_status = "connected" if app["connection_status"] == "connected" else "NOT connected"
+                result_parts.append(f"Tools from {app_id} (status: {app_status} by user):")
+
+                for tool in app["tools"]:
+                    result_parts.append(f"  - {tool['id']}: {tool['description']}")
+                result_parts.append("")  # Empty line for readability
+
+            # Add summary connection status messages
+            if not connected_apps_in_results and len(apps_in_results) > 1:
+                result_parts.append(
+                    "Connection Status: None of the apps in the results are connected. "
+                    "You must ask the user to choose the application."
+                )
+            elif len(connected_apps_in_results) > 1:
+                connected_list = ", ".join(sorted(list(connected_apps_in_results)))
+                result_parts.append(
+                    f"Connection Status: Multiple apps are connected ({connected_list}). "
+                    "You must ask the user to select which application they want to use."
+                )
+
+            result_parts.append("Call load_functions to select the required functions only.")
+            if 0 < len(connected_apps_in_results) < len(apps_in_results):
+                result_parts.append(
+                    "Unconnected app functions can also be loaded if required by the user, "
+                    "but prefer connected ones. Ask the user to choose if none of the "
+                    "relevant apps are connected."
+                )
+
+            return "\n".join(result_parts)
+
+        def _structure_tool_results(
+            raw_tool_lists: list[list[dict[str, Any]]], connected_app_ids: set[str]
+        ) -> list[dict[str, Any]]:
+            """
+            Converts raw search results into a structured format, handling duplicates,
+            cleaning descriptions, and adding connection status.
+            """
             aggregated_tools = defaultdict(dict)
-
+            # Use a list to maintain the order of apps as they are found.
+            ordered_app_ids = []
+
+            for tool_list in raw_tool_lists:
                 for tool in tool_list:
-
+                    app_id = tool.get("app_id", "unknown")
                     tool_id = tool.get("id")
-
+
+                    if not tool_id:
                         continue
-
+
+                    if app_id not in aggregated_tools:
+                        ordered_app_ids.append(app_id)
+
+                    if tool_id not in aggregated_tools[app_id]:
+                        aggregated_tools[app_id][tool_id] = {
+                            "id": tool_id,
+                            "description": _clean_tool_description(tool.get("description", "")),
+                        }
+
+            # Build the final results list respecting the discovery order.
+            found_tools_result = []
+            for app_id in ordered_app_ids:
+                if app_id in aggregated_tools and aggregated_tools[app_id]:
                     found_tools_result.append(
                         {
-                            "app_id":
-                            "connection_status": "connected"
-                            else "not_connected",
-                            "tools": list(aggregated_tools[app_id_from_list].values()),
+                            "app_id": app_id,
+                            "connection_status": "connected" if app_id in connected_app_ids else "not_connected",
+                            "tools": list(aggregated_tools[app_id].values()),
                         }
                     )
+            return found_tools_result
 
+        def _clean_tool_description(description: str) -> str:
+            """Consistently formats tool descriptions by removing implementation details."""
+            return description.split("Context:")[0].strip()
+
+        # Main Function Logic
+
+        if not queries and not app_ids:
+            raise ValueError("You must provide 'queries', 'app_ids', or both.")
+
+        # --- Initialization and Input Normalization ---
+        connections = await registry.list_connected_apps()
+        connected_app_ids = {connection["app_id"] for connection in connections}
+
+        canonical_app_ids = []
+        if app_ids:
+            # Concurrently search for all provided app names
+            app_search_tasks = [
+                registry.search_apps(query=app_name, distance_threshold=APP_THRESHOLD) for app_name in app_ids
+            ]
+            app_search_results = await asyncio.gather(*app_search_tasks)
+
+            # Process results and build the list of canonical IDs, handling not found errors
+            for app_name, result_list in zip(app_ids, app_search_results):
+                if not result_list:
+                    raise ValueError(f"Application '{app_name}' could not be found.")
+                # Assume the first result is the correct one
+                canonical_app_ids.append(result_list[0]["id"])
+
+        # --- Mode Dispatching ---
+        raw_results = []
+
+        if canonical_app_ids and queries:
+            raw_results = await _handle_scoped_search(canonical_app_ids, queries)
+        elif canonical_app_ids:
+            raw_results = await _handle_app_discovery(canonical_app_ids)
+        elif queries:
+            # Flatten list of lists to list of strings for global search
+            flat_queries = (
+                [q for sublist in queries for q in sublist] if queries and not isinstance(queries[0], str) else queries
             )
+            raw_results = await _handle_global_search(flat_queries)
 
-        return "\n".join(result_parts)
+        # --- Structuring and Formatting ---
+        structured_data = _structure_tool_results(raw_results, connected_app_ids)
+        return _format_response(structured_data)
 
     @tool
     async def load_functions(tool_ids: list[str]) -> str:

@@ -263,7 +366,95 @@ def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
             "citations": response.get("citations", []),
         }
 
+    async def read_file(uri: str) -> str:
+        """
+        Asynchronously reads a local file or uri and returns the content as a markdown string.
+
+        This tool aims to extract the main text content from various sources.
+        It automatically prepends 'file://' to the input string if it appears
+        to be a local path without a specified scheme (like http, https, data, file).
+
+        Args:
+            uri (str): The URI pointing to the resource or a local file path.
+                Supported schemes:
+                - http:// or https:// (Web pages, feeds, APIs)
+                - file:// (Local or accessible network files)
+                - data: (Embedded data)
+
+        Returns:
+            A string containing the markdown representation of the content at the specified URI
+
+        Raises:
+            ValueError: If the URI is invalid, empty, or uses an unsupported scheme
+                after automatic prefixing.
+
+        Tags:
+            convert, markdown, async, uri, transform, document, important
+        """
+        markitdown = MarkitdownApp()
+        response = await markitdown.convert_to_markdown(uri)
+        return response
+
+    async def save_file(file_name: str, content: str) -> dict:
+        """
+        Saves a file to the local filesystem.
+
+        Args:
+            file_name (str): The name of the file to save.
+            content (str): The content to save to the file.
+
+        Returns:
+            dict: A dictionary containing the result of the save operation with the following fields:
+                - status (str): "success" if the save succeeded, "error" otherwise.
+                - message (str): A message returned by the server, typically indicating success or providing error details.
+        """
+        with Path(file_name).open("w") as f:
+            f.write(content)
+
+        return {
+            "status": "success",
+            "message": f"File {file_name} saved successfully",
+            "file_path": Path(file_name).absolute(),
+        }
+
+    async def upload_file(file_name: str, mime_type: str, base64_data: str) -> dict:
+        """
+        Uploads a file to the server via the AgentrClient.
+
+        Args:
+            file_name (str): The name of the file to upload.
+            mime_type (str): The MIME type of the file.
+            base64_data (str): The file content encoded as a base64 string.
+
+        Returns:
+            dict: A dictionary containing the result of the upload operation with the following fields:
+                - status (str): "success" if the upload succeeded, "error" otherwise.
+                - message (str): A message returned by the server, typically indicating success or providing error details.
+                - signed_url (str or None): The signed URL to access the uploaded file if successful, None otherwise.
+        """
+        client: AgentrClient = tool_registry.client
+        bytes_data = base64.b64decode(base64_data)
+        response = client._upload_file(file_name, mime_type, bytes_data)
+        if response.get("status") != "success":
+            return {
+                "status": "error",
+                "message": response.get("message"),
+                "signed_url": None,
+            }
+        return {
+            "status": "success",
+            "message": response.get("message"),
+            "signed_url": response.get("signed_url"),
+        }
+
+    return {
+        "search_functions": search_functions,
+        "load_functions": load_functions,
+        "web_search": web_search,
+        "read_file": read_file,
+        "upload_file": upload_file,
+        "save_file": save_file,
+    }
 
 
 async def get_valid_tools(tool_ids: list[str], registry: AgentrRegistry) -> tuple[list[str], list[str]]:
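The reworked `search_functions` accepts nested query lists and multiple app IDs, so a single call can fan several searches out in parallel. Below is a hedged usage sketch of the three call shapes; it assumes you already have a configured `AgentrRegistry` (construction details depend on your deployment) and uses the standard LangChain `.ainvoke(...)` entry point for `@tool`-decorated functions.

```python
import asyncio

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.codeact0.tools import create_meta_tools


async def main() -> None:
    registry = AgentrRegistry()  # assumption: credentials/config come from your environment
    search_functions = create_meta_tools(registry)["search_functions"]

    # 1. Global search: each inner list is an independent query.
    print(await search_functions.ainvoke({"queries": [["send email", "draft email"]]}))

    # 2. App discovery: list every tool exposed by the named apps.
    print(await search_functions.ainvoke({"app_ids": ["Gmail", "Google Calendar"]}))

    # 3. Scoped search: query list i is searched inside app_ids[i].
    print(
        await search_functions.ainvoke(
            {"queries": [["find email"], ["share file"]], "app_ids": ["Gmail", "Google_Drive"]}
        )
    )


if __name__ == "__main__":
    asyncio.run(main())
```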
universal_mcp/agents/llm.py
CHANGED

@@ -4,6 +4,7 @@ from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_openai import AzureChatOpenAI
+from loguru import logger
 
 
 @lru_cache(maxsize=8)

@@ -41,8 +42,6 @@ def load_chat_model(
 
 
 if __name__ == "__main__":
-    from loguru import logger
-
     models_to_test = [
         "azure/gpt-5-chat",
         "anthropic/claude-4-sonnet-20250514",
universal_mcp/agents/sandbox.py
CHANGED

@@ -5,6 +5,7 @@ import io
 import traceback
 
 import cloudpickle as pickle
+from loguru import logger
 
 
 class Sandbox:

@@ -26,17 +27,37 @@ class Sandbox:
     def save_context(self) -> str:
         """
         Saves the context to a base64 string.
+        files, IO, threads, etc. are not pickable. So we only pickle the context that is pickable.
         """
-
+        pickable_context = {}
+        for key, value in self.context.items():
+            try:
+                pickle.dumps(value)
+                pickable_context[key] = value
+            except Exception as e:
+                logger.error(f"Error picking {key}: {e}")
+        pickled_data = pickle.dumps(pickable_context)
         base64_encoded = base64.b64encode(pickled_data).decode("utf-8")
         return base64_encoded
 
-    def load_context(self, context: str):
+    def load_context(self, context: str, add_context: list[str] = []):
         """
         Loads the context from a base64 string.
+        Also executes the add_context code strings to add to the context.
         """
-
-
+        if context:
+            pickled_data = base64.b64decode(context)
+            new_context = pickle.loads(pickled_data)
+            self.context.update(new_context)
+        for code in add_context:
+            self.run(code)
+        return self.context
+
+    def _filter_context(self, context: dict[str, any]) -> dict[str, any]:
+        """
+        Filters the context to only include pickable variables.
+        """
+        return {k: v for k, v in context.items() if not k.startswith("__")}
 
     def run(self, code: str) -> dict[str, any]:
         """

@@ -64,7 +85,7 @@ class Sandbox:
 
             # Update the context with any new/modified variables
             # Filter out dunder methods/system keys that might be introduced by exec
-            new_context =
+            new_context = self._filter_context(exec_scope)
             self.context.update(new_context)
 
         except Exception:

@@ -114,8 +135,9 @@ class Sandbox:
                 await coroutine
 
             # Update the context with any new/modified variables
-            new_context =
-
+            new_context = self._filter_context(exec_scope)
+            if new_context:
+                self.context.update(new_context)
 
         except Exception:
             stderr_output = traceback.format_exc()
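`save_context` now skips anything cloudpickle cannot serialize (open files, locks, threads) instead of failing the whole snapshot, and `load_context` can replay extra code strings on top of the restored variables. The standalone sketch below shows just the filter-and-round-trip part; `save_pickable`/`load_pickable` are illustrative helper names, not the package API.

```python
import base64
import threading

import cloudpickle as pickle


def save_pickable(context: dict) -> str:
    """Base64-encode only the values that cloudpickle can serialize."""
    pickable = {}
    for key, value in context.items():
        try:
            pickle.dumps(value)
            pickable[key] = value
        except Exception:
            pass  # unpicklable values (locks, open files, threads) are dropped, not fatal
    return base64.b64encode(pickle.dumps(pickable)).decode("utf-8")


def load_pickable(blob: str) -> dict:
    """Restore whatever survived the filter in save_pickable."""
    return pickle.loads(base64.b64decode(blob))


if __name__ == "__main__":
    context = {"answer": 42, "lock": threading.Lock()}
    # The lock fails to pickle and is filtered out; the plain value round-trips.
    print(load_pickable(save_pickable(context)))
```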
universal_mcp/applications/llm/app.py
CHANGED

@@ -1,7 +1,7 @@
 import json
 from typing import Any, Literal, cast
 
-from langchain.
+from langchain.agents import create_agent
 from pydantic import BaseModel, Field
 from universal_mcp.applications.application import BaseApplication

@@ -38,7 +38,7 @@ class LlmApp(BaseApplication):
         """Initialize the LLMApp."""
         super().__init__(name="llm")
 
-    def generate_text(
+    async def generate_text(
         self,
         task: str,
         context: str | list[str] | dict[str, str] = "",

@@ -92,10 +92,10 @@ class LlmApp(BaseApplication):
         full_prompt = f"{prompt}\n\nContext:\n{context_str}\n\n"
 
         model = load_chat_model("azure/gpt-5-mini")
-        response = model.with_retry(stop_after_attempt=MAX_RETRIES).
+        response = await model.with_retry(stop_after_attempt=MAX_RETRIES).ainvoke(full_prompt)
         return str(response.content)
 
-    def classify_data(
+    async def classify_data(
         self,
         classification_task_and_requirements: str,
         context: Any | list[Any] | dict[str, Any],

@@ -151,24 +151,24 @@ class LlmApp(BaseApplication):
             f"This is a classification task.\nPossible classes and descriptions:\n"
             f"{json.dumps(class_descriptions, indent=2)}\n\n"
             f"Context:\n{context_str}\n\n"
-            "Return ONLY a valid JSON object, no extra text."
         )
 
-        model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
-
         class ClassificationResult(BaseModel):
             probabilities: dict[str, float] = Field(..., description="The probabilities for each class.")
             reason: str = Field(..., description="The reasoning behind the classification.")
             top_class: str = Field(..., description="The class with the highest probability.")
 
+        model = load_chat_model("azure/gpt-5-mini", temperature=0)
+        agent = create_agent(
+            model=model,
+            tools=[],
+            response_format=ClassificationResult,  # Auto-selects ProviderStrategy
         )
-        return response.model_dump()
 
+        result = await agent.ainvoke({"messages": [{"role": "user", "content": prompt}]})
+        return result["structured_response"].model_dump()
+
+    async def extract_data(
         self,
         extraction_task: str,
         source: Any | list[Any] | dict[str, Any],

@@ -229,16 +229,16 @@ class LlmApp(BaseApplication):
             "Return ONLY a valid JSON object that conforms to the provided schema, with no extra text."
         )
 
-        model =
+        model = load_chat_model("azure/gpt-5-mini", temperature=0)
 
-        response = (
+        response = await (
             model.with_structured_output(schema=output_schema, method="json_mode")
             .with_retry(stop_after_attempt=MAX_RETRIES)
-            .
+            .ainvoke(prompt)
         )
         return cast(dict[str, Any], response)
 
-    def call_llm(
+    async def call_llm(
         self,
         task_instructions: str,
         context: Any | list[Any] | dict[str, Any],

@@ -282,14 +282,15 @@ class LlmApp(BaseApplication):
 
         prompt = f"{task_instructions}\n\nContext:\n{context_str}\n\nReturn ONLY a valid JSON object, no extra text."
 
-        model =
+        model = load_chat_model("azure/gpt-5-mini", temperature=0)
 
-        model
+        agent = create_agent(
+            model=model,
+            tools=[],
+            response_format=output_schema,
         )
-
+        result = await agent.ainvoke({"messages": [{"role": "user", "content": prompt}]})
+        return result["structured_response"]
 
     def list_tools(self):
         return [
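The LLM app methods are now coroutines, and `classify_data`/`call_llm` obtain structured output by running a tool-less `create_agent` with a `response_format` schema and reading `structured_response` from the result. The sketch below mirrors that pattern; it assumes the same `load_chat_model` helper (imported here from `universal_mcp.agents.llm`) and the same LangChain version the package targets, and `Sentiment` is a made-up schema for illustration.

```python
import asyncio

from langchain.agents import create_agent
from pydantic import BaseModel, Field

from universal_mcp.agents.llm import load_chat_model


class Sentiment(BaseModel):
    label: str = Field(..., description="positive, negative, or neutral")
    confidence: float = Field(..., description="Probability assigned to the chosen label.")


async def main() -> None:
    # Tool-less agent whose response_format coerces the reply into the schema.
    model = load_chat_model("azure/gpt-5-mini", temperature=0)
    agent = create_agent(model=model, tools=[], response_format=Sentiment)
    result = await agent.ainvoke(
        {"messages": [{"role": "user", "content": "Classify: 'The new release notes look great!'"}]}
    )
    print(result["structured_response"].model_dump())


if __name__ == "__main__":
    asyncio.run(main())
```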
{universal_mcp_agents-0.1.23rc7.dist-info → universal_mcp_agents-0.1.23rc9.dist-info}/METADATA
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-agents
-Version: 0.1.23rc7
+Version: 0.1.23rc9
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications
{universal_mcp_agents-0.1.23rc7.dist-info → universal_mcp_agents-0.1.23rc9.dist-info}/RECORD
RENAMED

@@ -1,10 +1,10 @@
 universal_mcp/agents/__init__.py,sha256=Ythw8tyq7p-w1SPnuO2JtS4TvYEP75PkQpdyvZv-ww4,914
-universal_mcp/agents/base.py,sha256=
+universal_mcp/agents/base.py,sha256=IyU1HUmB8rjHuCxv-c29RV-dWXfdiQiPq5rGkcCiSbU,7833
 universal_mcp/agents/cli.py,sha256=9CG7majpWUz7C6t0d8xr-Sg2ZPKBuQdykTbYS6KIZ3A,922
 universal_mcp/agents/hil.py,sha256=_5PCK6q0goGm8qylJq44aSp2MadP-yCPvhOJYKqWLMo,3808
-universal_mcp/agents/llm.py,sha256=
+universal_mcp/agents/llm.py,sha256=S6dI3xaeVS8rKa2ttXToOYf_mI-6rm0E9XwE5nm3uko,1782
 universal_mcp/agents/react.py,sha256=ocYm94HOiJVI2zwTjO1K2PNfVY7EILLJ6cd__jnGHPs,3327
-universal_mcp/agents/sandbox.py,sha256=
+universal_mcp/agents/sandbox.py,sha256=YxTGp_zsajuN7FUn0Q4PFjuXczgLht7oKql_gyb2Gf4,5112
 universal_mcp/agents/simple.py,sha256=NSATg5TWzsRNS7V3LFiDG28WSOCIwCdcC1g7NRwg2nM,2095
 universal_mcp/agents/utils.py,sha256=P6W9k6XAOBp6tdjC2VTP4tE0B2M4-b1EDmr-ylJ47Pw,7765
 universal_mcp/agents/bigtool/__init__.py,sha256=mZG8dsaCVyKlm82otxtiTA225GIFLUCUUYPEIPF24uw,2299

@@ -22,23 +22,23 @@ universal_mcp/agents/builder/prompts.py,sha256=8Xs6uzTUHguDRngVMLak3lkXFkk2VV_uQ
 universal_mcp/agents/builder/state.py,sha256=7DeWllxfN-yD6cd9wJ3KIgjO8TctkJvVjAbZT8W_zqk,922
 universal_mcp/agents/codeact0/__init__.py,sha256=8-fvUo1Sm6dURGI-lW-X3Kd78LqySYbb5NMkNJ4NDwg,76
 universal_mcp/agents/codeact0/__main__.py,sha256=YyIoecUcKVUhTcCACzLlSmYrayMDsdwzDEqaV4VV4CE,766
-universal_mcp/agents/codeact0/agent.py,sha256=
+universal_mcp/agents/codeact0/agent.py,sha256=CRgbKCBHSbMOpiNp_Sgdb-Wml7o9Uy72aA9_DaPNiJA,23449
 universal_mcp/agents/codeact0/config.py,sha256=H-1woj_nhSDwf15F63WYn723y4qlRefXzGxuH81uYF0,2215
 universal_mcp/agents/codeact0/langgraph_agent.py,sha256=8nz2wq-LexImx-l1y9_f81fK72IQetnCeljwgnduNGY,420
 universal_mcp/agents/codeact0/llm_tool.py,sha256=-pAz04OrbZ_dJ2ueysT1qZd02DrbLY4EbU0tiuF_UNU,798
-universal_mcp/agents/codeact0/prompts.py,sha256=
-universal_mcp/agents/codeact0/sandbox.py,sha256=
+universal_mcp/agents/codeact0/prompts.py,sha256=RiC_43GSeE4LDoiFhmJIOsKkoijOK9_7skwAH6ZqSWk,15501
+universal_mcp/agents/codeact0/sandbox.py,sha256=Zcr7fvYtcGbwNWd7RPV7-Btl2HtycPIPofEGVmzxSmE,4696
 universal_mcp/agents/codeact0/state.py,sha256=cf-94hfVub-HSQJk6b7_SzqBS-oxMABjFa8jqyjdDK0,1925
-universal_mcp/agents/codeact0/tools.py,sha256=
-universal_mcp/agents/codeact0/utils.py,sha256=
+universal_mcp/agents/codeact0/tools.py,sha256=oaGBzto6yaysPPEwV0bpAHH8QASjEaTIey_zJHxmNyY,23182
+universal_mcp/agents/codeact0/utils.py,sha256=Gvft0W0Sg1qlFWm8ciX14yssCa8y3x037lql92yGsBQ,18164
 universal_mcp/agents/shared/__main__.py,sha256=XxH5qGDpgFWfq7fwQfgKULXGiUgeTp_YKfcxftuVZq8,1452
 universal_mcp/agents/shared/prompts.py,sha256=yjP3zbbuKi87qCj21qwTTicz8TqtkKgnyGSeEjMu3ho,3761
 universal_mcp/agents/shared/tool_node.py,sha256=DC9F-Ri28Pam0u3sXWNODVgmj9PtAEUb5qP1qOoGgfs,9169
 universal_mcp/applications/filesystem/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 universal_mcp/applications/filesystem/app.py,sha256=0TRjjm8YnslVRSmfkXI7qQOAlqWlD1eEn8Jm0xBeigs,5561
 universal_mcp/applications/llm/__init__.py,sha256=_XGRxN3O1--ZS5joAsPf8IlI9Qa6negsJrwJ5VJXno0,46
-universal_mcp/applications/llm/app.py,sha256=
+universal_mcp/applications/llm/app.py,sha256=D3j5f5BUD2ZL2CXfc9z2KD5_PCbeqo7-GcWA0rEhAw0,12757
 universal_mcp/applications/ui/app.py,sha256=c7OkZsO2fRtndgAzAQbKu-1xXRuRp9Kjgml57YD2NR4,9459
-universal_mcp_agents-0.1.
-universal_mcp_agents-0.1.
-universal_mcp_agents-0.1.
+universal_mcp_agents-0.1.23rc9.dist-info/METADATA,sha256=WmURY6Ks9IBtLdL4gKfyife2jQ1IVQjjCI4yPW1xUw8,931
+universal_mcp_agents-0.1.23rc9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp_agents-0.1.23rc9.dist-info/RECORD,,
{universal_mcp_agents-0.1.23rc7.dist-info → universal_mcp_agents-0.1.23rc9.dist-info}/WHEEL
RENAMED
File without changes