universal-mcp 0.1.24rc2__py3-none-any.whl → 0.1.24rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/agentr/__init__.py +6 -0
- universal_mcp/agentr/agentr.py +30 -0
- universal_mcp/{utils/agentr.py → agentr/client.py} +19 -3
- universal_mcp/agentr/integration.py +104 -0
- universal_mcp/agentr/registry.py +91 -0
- universal_mcp/agentr/server.py +51 -0
- universal_mcp/agents/__init__.py +6 -0
- universal_mcp/agents/auto.py +576 -0
- universal_mcp/agents/base.py +88 -0
- universal_mcp/agents/cli.py +27 -0
- universal_mcp/agents/codeact/__init__.py +243 -0
- universal_mcp/agents/codeact/sandbox.py +27 -0
- universal_mcp/agents/codeact/test.py +15 -0
- universal_mcp/agents/codeact/utils.py +61 -0
- universal_mcp/agents/hil.py +104 -0
- universal_mcp/agents/llm.py +10 -0
- universal_mcp/agents/react.py +58 -0
- universal_mcp/agents/simple.py +40 -0
- universal_mcp/agents/utils.py +111 -0
- universal_mcp/analytics.py +5 -7
- universal_mcp/applications/__init__.py +42 -75
- universal_mcp/applications/application.py +1 -1
- universal_mcp/applications/sample/app.py +245 -0
- universal_mcp/cli.py +10 -3
- universal_mcp/config.py +33 -7
- universal_mcp/exceptions.py +4 -0
- universal_mcp/integrations/__init__.py +0 -15
- universal_mcp/integrations/integration.py +9 -91
- universal_mcp/servers/__init__.py +2 -14
- universal_mcp/servers/server.py +10 -51
- universal_mcp/tools/__init__.py +3 -0
- universal_mcp/tools/adapters.py +20 -11
- universal_mcp/tools/manager.py +29 -56
- universal_mcp/tools/registry.py +41 -0
- universal_mcp/tools/tools.py +22 -1
- universal_mcp/types.py +10 -0
- universal_mcp/utils/common.py +245 -0
- universal_mcp/utils/openapi/api_generator.py +46 -18
- universal_mcp/utils/openapi/cli.py +445 -19
- universal_mcp/utils/openapi/openapi.py +284 -21
- universal_mcp/utils/openapi/postprocessor.py +275 -0
- universal_mcp/utils/openapi/preprocessor.py +1 -1
- universal_mcp/utils/openapi/test_generator.py +287 -0
- universal_mcp/utils/prompts.py +188 -341
- universal_mcp/utils/testing.py +190 -2
- {universal_mcp-0.1.24rc2.dist-info → universal_mcp-0.1.24rc3.dist-info}/METADATA +16 -2
- universal_mcp-0.1.24rc3.dist-info/RECORD +70 -0
- universal_mcp/applications/sample_tool_app.py +0 -80
- universal_mcp/client/agents/__init__.py +0 -4
- universal_mcp/client/agents/base.py +0 -38
- universal_mcp/client/agents/llm.py +0 -115
- universal_mcp/client/agents/react.py +0 -67
- universal_mcp/client/cli.py +0 -181
- universal_mcp-0.1.24rc2.dist-info/RECORD +0 -53
- {universal_mcp-0.1.24rc2.dist-info → universal_mcp-0.1.24rc3.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.24rc2.dist-info → universal_mcp-0.1.24rc3.dist-info}/entry_points.txt +0 -0
- {universal_mcp-0.1.24rc2.dist-info → universal_mcp-0.1.24rc3.dist-info}/licenses/LICENSE +0 -0
universal_mcp/agents/codeact/__init__.py (new file)
@@ -0,0 +1,243 @@

import inspect
import re
from collections.abc import Awaitable, Callable, Sequence
from typing import Any, TypeVar

from langchain_core.language_models import BaseChatModel
from langchain_core.tools import StructuredTool
from langchain_core.tools import tool as create_tool
from langgraph.graph import END, START, MessagesState, StateGraph
from langgraph.types import Command

from .utils import extract_and_combine_codeblocks

EvalFunction = Callable[[str, dict[str, Any]], tuple[str, dict[str, Any]]]
EvalCoroutine = Callable[[str, dict[str, Any]], Awaitable[tuple[str, dict[str, Any]]]]


class CodeActState(MessagesState):
    """State for CodeAct agent."""

    script: str | None
    """The Python code script to be executed."""
    context: dict[str, Any]
    """Dictionary containing the execution context with available tools and variables."""


StateSchema = TypeVar("StateSchema", bound=CodeActState)
StateSchemaType = type[StateSchema]


def make_safe_function_name(name: str) -> str:
    """Convert a tool name to a valid Python function name."""
    # Replace non-alphanumeric characters with underscores
    safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
    # Ensure the name doesn't start with a digit
    if safe_name and safe_name[0].isdigit():
        safe_name = f"tool_{safe_name}"
    # Handle empty name edge case
    if not safe_name:
        safe_name = "unnamed_tool"
    return safe_name


def create_default_prompt(tools: list[StructuredTool], base_prompt: str | None = None):
    """Create default prompt for the CodeAct agent."""
    tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in tools]
    prompt = f"{base_prompt}\n\n" if base_prompt else ""
    prompt += """You will be given a task to perform. You should output either
- a Python code snippet that provides the solution to the task, or a step towards the solution. Any output you want to extract from the code should be printed to the console. Code should be output in a fenced code block.
- text to be shown directly to the user, if you want to ask for more information or provide the final answer.

In addition to the Python Standard Library, you can use the following functions:
"""

    for tool in tools:
        # Use coroutine if it exists, otherwise use func
        tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
        # Create a safe function name
        safe_name = make_safe_function_name(tool.name)
        # Determine if it's an async function
        is_async = inspect.iscoroutinefunction(tool_callable)
        # Add appropriate function definition
        prompt += f'''
{"async " if is_async else ""}def {safe_name}{str(inspect.signature(tool_callable))}:
    """{tool.description}"""
    ...
'''

    prompt += """

Variables defined at the top level of previous code snippets can be referenced in your code.

Reminder: use Python code snippets to call tools"""
    return prompt


def create_codeact(
    model: BaseChatModel,
    tools: Sequence[StructuredTool | Callable],
    eval_fn: EvalFunction | EvalCoroutine,
    *,
    prompt: str | None = None,
    reflection_prompt: str | None = None,
    reflection_model: BaseChatModel | None = None,
    max_reflections: int = 3,
    state_schema: StateSchemaType = CodeActState,
) -> StateGraph:
    """Create a CodeAct agent.

    Args:
        model: The language model to use for generating code
        tools: List of tools available to the agent. Can be passed as python functions or StructuredTool instances.
        eval_fn: Function or coroutine that executes code in a sandbox. Takes code string and locals dict,
            returns a tuple of (stdout output, new variables dict)
        prompt: Optional custom system prompt. If None, uses default prompt.
            To customize default prompt you can use `create_default_prompt` helper:
            `create_default_prompt(tools, "You are a helpful assistant.")`
        reflection_prompt: Optional prompt for reflection. If provided, will be used to evaluate responses.
            If the reflection output contains "NONE", the response is considered valid, otherwise the
            reflection output is passed back to the model for regeneration.
        reflection_model: Optional model to use for reflection. If None, uses the same model as for generation.
        max_reflections: Maximum number of reflection iterations (default: 3).
        state_schema: The state schema to use for the agent.

    Returns:
        A StateGraph implementing the CodeAct architecture
    """
    tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in tools]

    if prompt is None:
        prompt = create_default_prompt(tools)

    # If no reflection model is provided, use the main model
    if reflection_model is None:
        reflection_model = model

    # Make tools available to the code sandbox - use safe names for keys
    tools_context = {}
    for tool in tools:
        safe_name = make_safe_function_name(tool.name)
        # Use coroutine if it exists, otherwise use func (same as in create_default_prompt)
        tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
        # Only use the safe name for consistency with the prompt
        tools_context[safe_name] = tool_callable

    def call_model(state: StateSchema) -> Command:
        messages = [{"role": "system", "content": prompt}] + state["messages"]

        # Run the model and potentially loop for reflection
        response = model.invoke(messages)

        # Extract and combine all code blocks
        code = extract_and_combine_codeblocks(response.content)

        # Loop for reflection if needed and if code is present
        if reflection_prompt and code:
            reflection_count = 0
            while reflection_count < max_reflections:
                # Format conversation history with XML-style tags
                conversation_history = "\n".join(
                    [
                        f'<message role="{("user" if m.type == "human" else "assistant")}">\n{m.content}\n</message>'
                        for m in state["messages"]
                    ]
                )

                # Add the current response
                conversation_history += f'\n<message role="assistant">\n{response.content}\n</message>'

                # Create the reflection prompt with the tagged conversation history
                formatted_prompt = f"""
Review the assistant's latest code as per the quality rules:

<conversation_history>
{conversation_history}
</conversation_history>

If you find ANY of these issues, describe the problem briefly and clearly.
If NO issues are found, respond with EXACTLY: "NONE"
"""

                # Create messages for reflection with correct ordering
                reflection_messages = [
                    {"role": "system", "content": reflection_prompt},
                    # Include the formatted reflection prompt as the final user message
                    {"role": "user", "content": formatted_prompt},
                ]
                reflection_result = reflection_model.invoke(reflection_messages)

                # Check if reflection passed
                if "NONE" in reflection_result.content:
                    # Reflection passed, exit loop
                    break

                # Reflection didn't pass, regenerate response
                reflection_messages = [
                    {"role": "system", "content": prompt},
                    *state["messages"],
                    {"role": "assistant", "content": response.content},
                    {
                        "role": "user",
                        "content": f"""
I need you to completely regenerate your previous response based on this feedback:

'''
{reflection_result.content}
'''

DO NOT reference the feedback directly. Instead, provide a completely new response that addresses the issues.
""",
                    },
                ]
                response = model.invoke(reflection_messages)

                # Extract code from the new response
                code = extract_and_combine_codeblocks(response.content)

                # If no code in the new response, exit the reflection loop
                if not code:
                    break

                # Increment reflection count
                reflection_count += 1

        # Return appropriate command with only the latest response
        if code:
            return Command(goto="sandbox", update={"messages": [response], "script": code})
        else:
            # no code block, end the loop and respond to the user
            return Command(update={"messages": [response], "script": None})

    # If eval_fn is a coroutine, define an async node function.
    if inspect.iscoroutinefunction(eval_fn):

        async def sandbox(state: StateSchema):
            existing_context = state.get("context", {})
            context = {**existing_context, **tools_context}
            # Execute the script in the sandbox
            output, new_vars = await eval_fn(state["script"], context)
            new_context = {**existing_context, **new_vars}
            return {
                "messages": [{"role": "user", "content": output}],
                "context": new_context,
            }
    else:

        def sandbox(state: StateSchema):
            existing_context = state.get("context", {})
            context = {**existing_context, **tools_context}
            # Execute the script in the sandbox
            output, new_vars = eval_fn(state["script"], context)
            new_context = {**existing_context, **new_vars}
            return {
                "messages": [{"role": "user", "content": output}],
                "context": new_context,
            }

    agent = StateGraph(state_schema)
    agent.add_node(call_model, destinations=(END, "sandbox"))
    agent.add_node(sandbox)
    agent.add_edge(START, "call_model")
    agent.add_edge("sandbox", "call_model")
    return agent
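The reflection loop in `create_codeact` is easier to follow with a concrete wiring. Below is a minimal, hedged sketch (not part of the package) that assumes Azure OpenAI credentials are configured for `get_llm` and reuses the `eval_unsafe` sandbox added later in this diff; the `add` tool and the quality-rules text are illustrative placeholders.

```python
from universal_mcp.agents.codeact import create_codeact, create_default_prompt
from universal_mcp.agents.codeact.sandbox import eval_unsafe
from universal_mcp.agents.llm import get_llm


def add(a: float, b: float) -> float:
    """Add two numbers."""
    return a + b


model = get_llm("gpt-4.1")  # assumes Azure OpenAI environment variables are set
workflow = create_codeact(
    model,
    tools=[add],
    eval_fn=eval_unsafe,
    prompt=create_default_prompt([add], "You are a careful coding assistant."),
    # Hypothetical quality rules; anything the reflection model should check for.
    reflection_prompt="Quality rules: only call the provided functions and print the final result.",
    max_reflections=2,
)
agent = workflow.compile()
```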
universal_mcp/agents/codeact/sandbox.py (new file)
@@ -0,0 +1,27 @@

import builtins
import contextlib
import io
from typing import Any


def eval_unsafe(code: str, _locals: dict[str, Any]) -> tuple[str, dict[str, Any]]:
    # Store original keys before execution
    original_keys = set(_locals.keys())
    result = f"Executing code...\n{code}\n\nOutput:\n"
    result += "=" * 50 + "\n"
    try:
        with contextlib.redirect_stdout(io.StringIO()) as f:
            # Execute the code in the provided locals context
            # Using exec to allow dynamic code execution
            # This is a simplified version; in production, consider security implications
            exec(code, builtins.__dict__, _locals)
        result += f.getvalue()
        if not result:
            result = "<code ran, no output printed to stdout>"
    except Exception as e:
        result += f"Error during execution: {repr(e)}"

    # Determine new variables created during execution
    new_keys = set(_locals.keys()) - original_keys
    new_vars = {key: _locals[key] for key in new_keys}
    return result, new_vars
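A quick, hedged illustration of the `eval_unsafe` contract (captured stdout plus the newly created variables); the script string below is just a placeholder.

```python
from universal_mcp.agents.codeact.sandbox import eval_unsafe

output, new_vars = eval_unsafe("x = 2 + 2\nprint(x)", {})
print(output)    # echoes the script, then the captured stdout ("4")
print(new_vars)  # {'x': 4}
```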
universal_mcp/agents/codeact/test.py (new file)
@@ -0,0 +1,15 @@

from universal_mcp.agentr import Agentr
from universal_mcp.agents.codeact import create_codeact
from universal_mcp.agents.codeact.sandbox import eval_unsafe
from universal_mcp.agents.llm import get_llm
from universal_mcp.tools.adapters import ToolFormat

model = get_llm("gpt-4.1")

agentr = Agentr()
agentr.load_tools(["google-mail_send_email"])

tools = agentr.list_tools(format=ToolFormat.NATIVE)

code_act = create_codeact(model, tools, eval_unsafe)
agent = code_act.compile()
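As a hedged continuation of the test script above (not part of the diff), the compiled graph can presumably be driven with the standard LangGraph `invoke` call; the task text is a placeholder and running it requires AgentR and Azure OpenAI credentials.

```python
result = agent.invoke(
    {"messages": [{"role": "user", "content": "Draft a short status update email."}]}
)
print(result["messages"][-1].content)
```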
universal_mcp/agents/codeact/utils.py (new file)
@@ -0,0 +1,61 @@

import re

BACKTICK_PATTERN = r"(?:^|\n)```(.*?)(?:```(?:\n|$))"


def extract_and_combine_codeblocks(text: str) -> str:
    """
    Extracts all codeblocks from a text string and combines them into a single code string.

    Args:
        text: A string containing zero or more codeblocks, where each codeblock is
            surrounded by triple backticks (```).

    Returns:
        A string containing the combined code from all codeblocks, with each codeblock
        separated by a newline.

    Example:
        text = '''Here's some code:

        ```python
        print('hello')
        ```
        And more:

        ```
        print('world')
        ```'''

        result = extract_and_combine_codeblocks(text)

        Result:

        print('hello')

        print('world')
    """
    # Find all code blocks in the text using regex
    # Pattern matches anything between triple backticks, with or without a language identifier
    code_blocks = re.findall(BACKTICK_PATTERN, text, re.DOTALL)

    if not code_blocks:
        return ""

    # Process each codeblock
    processed_blocks = []
    for block in code_blocks:
        # Strip leading and trailing whitespace
        block = block.strip()

        # If the first line looks like a language identifier, remove it
        lines = block.split("\n")
        if lines and (not lines[0].strip() or " " not in lines[0].strip()):
            # First line is empty or likely a language identifier (no spaces)
            block = "\n".join(lines[1:])

        processed_blocks.append(block)

    # Combine all codeblocks with newlines between them
    combined_code = "\n\n".join(processed_blocks)
    return combined_code
universal_mcp/agents/hil.py (new file)
@@ -0,0 +1,104 @@

from typing import Annotated, TypedDict

from langchain_core.messages import HumanMessage
from langgraph.constants import END, START
from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
from langgraph.types import Interrupt, interrupt

from .base import BaseAgent
from .llm import get_llm


class State(TypedDict):
    messages: Annotated[list, add_messages]
    name: str | None = None
    favourite_color: str | None = None
    human: bool | None = None


def ask_name_node(state: State) -> State:
    if state.get("name") is not None:
        return state
    name = interrupt({"question": "What is your name?", "type": "text"})
    state.update(name=name, messages=[HumanMessage(content=f"My name is {name}")])
    return state


def ask_bool_node(state: State) -> State:
    if state.get("human") is not None:
        return state
    bool = interrupt({"question": "Are you a human?", "type": "bool"})

    if bool:
        state.update(human=True, messages=[HumanMessage(content="I am human")])
    else:
        state.update(human=False, messages=[HumanMessage(content="I am AI agent")])
    return state


def ask_favourite_color_node(state: State) -> State:
    if state.get("favourite_color") is not None:
        return state
    favourite_color = interrupt(
        {"question": "What is your favourite color?", "type": "choice", "choices": ["red", "green", "blue"]}
    )
    state.update(
        favourite_color=favourite_color, messages=[HumanMessage(content=f"My favourite color is {favourite_color}")]
    )
    return state


def handle_interrupt(interrupt: Interrupt) -> str | bool:
    interrupt_type = interrupt.value["type"]
    if interrupt_type == "text":
        value = input(interrupt.value["question"])
        return value
    elif interrupt_type == "bool":
        value = input("Do you accept this? (y/n): " + interrupt.value["question"])
        return value.lower() in ["y", "yes"]
    elif interrupt_type == "choice":
        value = input("Enter your choice: " + interrupt.value["question"] + " " + ", ".join(interrupt.value["choices"]))
        if value in interrupt.value["choices"]:
            return value
        else:
            return interrupt.value["choices"][0]
    else:
        raise ValueError(f"Invalid interrupt type: {interrupt.value['type']}")


class HilAgent(BaseAgent):
    def __init__(self, name: str, instructions: str, model: str):
        super().__init__(name, instructions, model)
        self.llm = get_llm(model)
        self._graph = self._build_graph()

    def chatbot(self, state: State):
        return {"messages": [self.llm.invoke(state["messages"])]}

    def _build_graph(self):
        graph_builder = StateGraph(State)
        graph_builder.add_node("ask_name_node", ask_name_node)
        graph_builder.add_node("ask_bool_node", ask_bool_node)
        graph_builder.add_node("ask_favourite_color_node", ask_favourite_color_node)
        graph_builder.add_node("chatbot", self.chatbot)
        graph_builder.add_edge(START, "ask_name_node")
        graph_builder.add_edge("ask_name_node", "ask_bool_node")
        graph_builder.add_edge("ask_bool_node", "ask_favourite_color_node")
        graph_builder.add_edge("ask_favourite_color_node", "chatbot")
        graph_builder.add_edge("chatbot", END)
        return graph_builder.compile(checkpointer=self.memory)

    @property
    def graph(self):
        return self._graph


if __name__ == "__main__":
    import asyncio

    agent = HilAgent(
        "Hil Agent", "You are a friendly agent that asks for the user's name and greets them.", "openrouter/auto"
    )

    asyncio.run(agent.run_interactive())
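For context, a hedged sketch (not part of the diff) of how the interrupt round-trip might be driven manually on recent LangGraph versions, using the module's own `handle_interrupt`; `BaseAgent.run_interactive()` presumably does something equivalent, and the `__interrupt__` key and `Command(resume=...)` pattern shown here are LangGraph details that may vary by version.

```python
from langgraph.types import Command

# Import path assumes the file above is universal_mcp/agents/hil.py.
from universal_mcp.agents.hil import HilAgent, handle_interrupt

agent = HilAgent("Hil Agent", "You are a friendly agent.", "openrouter/auto")
config = {"configurable": {"thread_id": "demo"}}

state = agent.graph.invoke({"messages": []}, config)
while "__interrupt__" in state:
    # Answer the pending interrupt on the console, then resume the graph.
    answer = handle_interrupt(state["__interrupt__"][0])
    state = agent.graph.invoke(Command(resume=answer), config)
```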
universal_mcp/agents/llm.py (new file)
@@ -0,0 +1,10 @@

from langchain_openai import AzureChatOpenAI


def get_llm(model: str, tags: list[str] | None = None):
    return AzureChatOpenAI(model=model, api_version="2024-12-01-preview", azure_deployment=model, tags=tags)


if __name__ == "__main__":
    llm = get_llm("gpt-4.1")
    print(llm.invoke("Hello, world!"))
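`get_llm` builds an `AzureChatOpenAI` client, which typically resolves its endpoint and key from environment variables; a hedged setup sketch, with placeholder values:

```python
import os

# Placeholders; actual values depend on your Azure OpenAI deployment.
os.environ.setdefault("AZURE_OPENAI_ENDPOINT", "https://<your-resource>.openai.azure.com/")
os.environ.setdefault("AZURE_OPENAI_API_KEY", "<your-key>")
```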
universal_mcp/agents/react.py (new file)
@@ -0,0 +1,58 @@

from langgraph.prebuilt import create_react_agent
from loguru import logger

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.tools.adapters import ToolFormat
from universal_mcp.tools.manager import ToolManager

from .base import BaseAgent
from .llm import get_llm


class ReactAgent(BaseAgent):
    def __init__(
        self, name: str, instructions: str, model: str, tools: list[str] | None = None, max_iterations: int = 10
    ):
        super().__init__(name, instructions, model)
        self.llm = get_llm(model)
        self.max_iterations = max_iterations
        self.tool_manager = ToolManager()
        registry = AgentrRegistry()
        if tools:
            registry.load_tools(tools, self.tool_manager)
        logger.debug(f"Initialized ReactAgent: name={name}, model={model}")
        self._graph = self._build_graph()

    @property
    def graph(self):
        return self._graph

    def _build_graph(self):
        tools = self.tool_manager.list_tools(format=ToolFormat.LANGCHAIN) if self.tool_manager else []
        return create_react_agent(
            self.llm,
            tools,
            prompt=self._build_system_message(),
            checkpointer=self.memory,
        )

    def _build_system_message(self) -> str:
        system_message = f"""You are {self.name}. {self.instructions}

You have access to various tools that can help you answer questions and complete tasks. When you need to use a tool:

1. Think about what information you need
2. Call the appropriate tool with the right parameters
3. Use the tool results to provide a comprehensive answer

Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so."""
        return system_message


if __name__ == "__main__":
    import asyncio

    agent = ReactAgent(
        "Universal React Agent", "You are a helpful assistant", "gpt-4.1", tools=["google-mail_send_email"]
    )
    asyncio.run(agent.run_interactive())
|
@@ -0,0 +1,40 @@
|
|
1
|
+
import asyncio
|
2
|
+
from typing import Annotated
|
3
|
+
|
4
|
+
from langgraph.graph import END, START, StateGraph
|
5
|
+
from langgraph.graph.message import add_messages
|
6
|
+
from typing_extensions import TypedDict
|
7
|
+
|
8
|
+
from .base import BaseAgent
|
9
|
+
from .llm import get_llm
|
10
|
+
|
11
|
+
|
12
|
+
class State(TypedDict):
|
13
|
+
messages: Annotated[list, add_messages]
|
14
|
+
|
15
|
+
|
16
|
+
class SimpleAgent(BaseAgent):
|
17
|
+
def __init__(self, name: str, instructions: str, model: str):
|
18
|
+
super().__init__(name, instructions, model)
|
19
|
+
self.llm = get_llm(model)
|
20
|
+
self._graph = self._build_graph()
|
21
|
+
|
22
|
+
def _build_graph(self):
|
23
|
+
graph_builder = StateGraph(State)
|
24
|
+
|
25
|
+
def chatbot(state: State):
|
26
|
+
return {"messages": [self.llm.invoke(state["messages"])]}
|
27
|
+
|
28
|
+
graph_builder.add_node("chatbot", chatbot)
|
29
|
+
graph_builder.add_edge(START, "chatbot")
|
30
|
+
graph_builder.add_edge("chatbot", END)
|
31
|
+
return graph_builder.compile(checkpointer=self.memory)
|
32
|
+
|
33
|
+
@property
|
34
|
+
def graph(self):
|
35
|
+
return self._graph
|
36
|
+
|
37
|
+
|
38
|
+
if __name__ == "__main__":
|
39
|
+
agent = SimpleAgent("Simple Agent", "You are a helpful assistant", "openrouter/auto")
|
40
|
+
asyncio.run(agent.run_interactive())
|
@@ -0,0 +1,111 @@
|
|
1
|
+
import json
|
2
|
+
from contextlib import contextmanager
|
3
|
+
|
4
|
+
from rich.console import Console
|
5
|
+
from rich.live import Live
|
6
|
+
from rich.markdown import Markdown
|
7
|
+
from rich.panel import Panel
|
8
|
+
from rich.prompt import Prompt
|
9
|
+
from rich.table import Table
|
10
|
+
|
11
|
+
|
12
|
+
class RichCLI:
|
13
|
+
def __init__(self):
|
14
|
+
self.console = Console()
|
15
|
+
|
16
|
+
def display_welcome(self, agent_name: str):
|
17
|
+
"""Display welcome message"""
|
18
|
+
welcome_text = f"""
|
19
|
+
# Welcome to {agent_name}!
|
20
|
+
|
21
|
+
Available commands:
|
22
|
+
- Type your questions naturally
|
23
|
+
- `/help` - Show help
|
24
|
+
- `/tools` - List available tools
|
25
|
+
- `/exit` - Exit the application
|
26
|
+
"""
|
27
|
+
self.console.print(Panel(Markdown(welcome_text), title="🤖 AI Agent CLI", border_style="blue"))
|
28
|
+
|
29
|
+
def display_agent_response(self, response: str, agent_name: str):
|
30
|
+
"""Display agent response with formatting"""
|
31
|
+
self.console.print(Panel(Markdown(response), title=f"🤖 {agent_name}", border_style="green", padding=(1, 2)))
|
32
|
+
|
33
|
+
@contextmanager
|
34
|
+
def display_agent_response_streaming(self, agent_name: str):
|
35
|
+
"""Context manager for streaming agent response updates."""
|
36
|
+
|
37
|
+
with Live(refresh_per_second=10, console=self.console) as live:
|
38
|
+
|
39
|
+
class StreamUpdater:
|
40
|
+
content = []
|
41
|
+
|
42
|
+
def update(self, chunk: str):
|
43
|
+
self.content.append(chunk)
|
44
|
+
panel = Panel(
|
45
|
+
Markdown("".join(self.content)),
|
46
|
+
title=f"🤖 {agent_name}",
|
47
|
+
border_style="green",
|
48
|
+
padding=(1, 2),
|
49
|
+
)
|
50
|
+
live.update(panel)
|
51
|
+
|
52
|
+
yield StreamUpdater()
|
53
|
+
|
54
|
+
def display_thinking(self, thought: str):
|
55
|
+
"""Display agent's thinking process"""
|
56
|
+
if thought:
|
57
|
+
self.console.print(Panel(thought, title="💭 Thinking", border_style="yellow", padding=(1, 2)))
|
58
|
+
|
59
|
+
def display_tools(self, tools: list):
|
60
|
+
"""Display available tools in a table"""
|
61
|
+
table = Table(title="🛠️ Available Tools")
|
62
|
+
table.add_column("Tool Name", style="cyan")
|
63
|
+
table.add_column("Description", style="white")
|
64
|
+
|
65
|
+
for tool in tools:
|
66
|
+
func_info = tool["function"]
|
67
|
+
table.add_row(func_info["name"], func_info["description"])
|
68
|
+
|
69
|
+
self.console.print(table)
|
70
|
+
|
71
|
+
def display_tool_call(self, tool_call: dict):
|
72
|
+
"""Display tool call"""
|
73
|
+
tool_call_str = json.dumps(tool_call, indent=2)
|
74
|
+
self.console.print(Panel(tool_call_str, title="🛠️ Tool Call", border_style="green", padding=(1, 2)))
|
75
|
+
|
76
|
+
def display_tool_result(self, tool_result: dict):
|
77
|
+
"""Display tool result"""
|
78
|
+
tool_result_str = json.dumps(tool_result, indent=2)
|
79
|
+
self.console.print(Panel(tool_result_str, title="🛠️ Tool Result", border_style="green", padding=(1, 2)))
|
80
|
+
|
81
|
+
def display_error(self, error: str):
|
82
|
+
"""Display error message"""
|
83
|
+
self.console.print(Panel(error, title="❌ Error", border_style="red"))
|
84
|
+
|
85
|
+
def get_user_input(self) -> str:
|
86
|
+
"""Get user input with rich prompt"""
|
87
|
+
return Prompt.ask("[bold blue]You[/bold blue]", console=self.console)
|
88
|
+
|
89
|
+
def display_info(self, message: str):
|
90
|
+
"""Display info message"""
|
91
|
+
self.console.print(f"[bold cyan]ℹ️ {message}[/bold cyan]")
|
92
|
+
|
93
|
+
def clear_screen(self):
|
94
|
+
"""Clear the screen"""
|
95
|
+
self.console.clear()
|
96
|
+
|
97
|
+
def handle_interrupt(self, interrupt) -> str | bool:
|
98
|
+
interrupt_type = interrupt.value["type"]
|
99
|
+
if interrupt_type == "text":
|
100
|
+
value = Prompt.ask(interrupt.value["question"])
|
101
|
+
return value
|
102
|
+
elif interrupt_type == "bool":
|
103
|
+
value = Prompt.ask(interrupt.value["question"], choices=["y", "n"], default="y")
|
104
|
+
return value
|
105
|
+
elif interrupt_type == "choice":
|
106
|
+
value = Prompt.ask(
|
107
|
+
interrupt.value["question"], choices=interrupt.value["choices"], default=interrupt.value["choices"][0]
|
108
|
+
)
|
109
|
+
return value
|
110
|
+
else:
|
111
|
+
raise ValueError(f"Invalid interrupt type: {interrupt.value['type']}")
|
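A small, hedged usage sketch for `RichCLI` (not part of the diff), showing the streaming context manager with placeholder chunks; the import path assumes the file above is universal_mcp/agents/utils.py.

```python
from universal_mcp.agents.utils import RichCLI

cli = RichCLI()
cli.display_welcome("Demo Agent")
with cli.display_agent_response_streaming("Demo Agent") as updater:
    for chunk in ["Streaming ", "a panel ", "update..."]:
        updater.update(chunk)
```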