universal-mcp 0.1.23rc1__py3-none-any.whl → 0.1.24rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/analytics.py +43 -11
- universal_mcp/applications/application.py +186 -239
- universal_mcp/applications/sample_tool_app.py +80 -0
- universal_mcp/cli.py +5 -228
- universal_mcp/client/agents/__init__.py +4 -0
- universal_mcp/client/agents/base.py +38 -0
- universal_mcp/client/agents/llm.py +115 -0
- universal_mcp/client/agents/react.py +67 -0
- universal_mcp/client/cli.py +181 -0
- universal_mcp/client/oauth.py +218 -0
- universal_mcp/client/token_store.py +91 -0
- universal_mcp/client/transport.py +277 -0
- universal_mcp/config.py +201 -28
- universal_mcp/exceptions.py +50 -6
- universal_mcp/integrations/__init__.py +1 -4
- universal_mcp/integrations/integration.py +220 -121
- universal_mcp/servers/__init__.py +1 -1
- universal_mcp/servers/server.py +114 -247
- universal_mcp/stores/store.py +126 -93
- universal_mcp/tools/adapters.py +16 -0
- universal_mcp/tools/func_metadata.py +1 -1
- universal_mcp/tools/manager.py +15 -3
- universal_mcp/tools/tools.py +2 -2
- universal_mcp/utils/agentr.py +3 -4
- universal_mcp/utils/installation.py +3 -4
- universal_mcp/utils/openapi/api_generator.py +28 -2
- universal_mcp/utils/openapi/api_splitter.py +8 -19
- universal_mcp/utils/openapi/cli.py +243 -0
- universal_mcp/utils/openapi/filters.py +114 -0
- universal_mcp/utils/openapi/openapi.py +45 -12
- universal_mcp/utils/openapi/preprocessor.py +62 -7
- universal_mcp/utils/prompts.py +787 -0
- universal_mcp/utils/singleton.py +4 -1
- universal_mcp/utils/testing.py +6 -6
- universal_mcp-0.1.24rc2.dist-info/METADATA +54 -0
- universal_mcp-0.1.24rc2.dist-info/RECORD +53 -0
- universal_mcp/applications/README.md +0 -122
- universal_mcp/integrations/README.md +0 -25
- universal_mcp/servers/README.md +0 -79
- universal_mcp/stores/README.md +0 -74
- universal_mcp/tools/README.md +0 -86
- universal_mcp-0.1.23rc1.dist-info/METADATA +0 -283
- universal_mcp-0.1.23rc1.dist-info/RECORD +0 -46
- /universal_mcp/{utils → tools}/docstring_parser.py +0 -0
- {universal_mcp-0.1.23rc1.dist-info → universal_mcp-0.1.24rc2.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.23rc1.dist-info → universal_mcp-0.1.24rc2.dist-info}/entry_points.txt +0 -0
- {universal_mcp-0.1.23rc1.dist-info → universal_mcp-0.1.24rc2.dist-info}/licenses/LICENSE +0 -0
universal_mcp/applications/sample_tool_app.py ADDED
@@ -0,0 +1,80 @@
+import datetime
+
+from universal_mcp.applications.application import BaseApplication
+
+
+class SampleToolApp(BaseApplication):
+    """A sample application providing basic utility tools."""
+
+    def __init__(self):
+        """Initializes the SampleToolApp with the name 'sample_tool_app'."""
+        super().__init__("sample_tool_app")
+
+    def get_current_time(self):
+        """Get the current system time as a formatted string.
+
+        Returns:
+            str: The current time in the format 'YYYY-MM-DD HH:MM:SS'.
+        """
+        return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    def get_current_date(self):
+        """Get the current system date as a formatted string.
+
+        Returns:
+            str: The current date in the format 'YYYY-MM-DD'.
+        """
+        return datetime.datetime.now().strftime("%Y-%m-%d")
+
+    def calculate(self, expression: str):
+        """Safely evaluate a mathematical expression.
+
+        Args:
+            expression (str): The mathematical expression to evaluate.
+
+        Returns:
+            str: The result of the calculation, or an error message if evaluation fails.
+        """
+        try:
+            # Safe evaluation of mathematical expressions
+            result = eval(expression, {"__builtins__": {}}, {})
+            return f"Result: {result}"
+        except Exception as e:
+            return f"Error in calculation: {str(e)}"
+
+    def file_operations(self, operation: str, filename: str, content: str = ""):
+        """Perform file read or write operations.
+
+        Args:
+            operation (str): The operation to perform, either 'read' or 'write'.
+            filename (str): The name of the file to operate on.
+            content (str, optional): The content to write to the file (used only for 'write'). Defaults to "".
+
+        Returns:
+            str: The result of the file operation, or an error message if the operation fails.
+        """
+        try:
+            if operation == "read":
+                with open(filename) as f:
+                    return f"File content:\n{f.read()}"
+            elif operation == "write":
+                with open(filename, "w") as f:
+                    f.write(content)
+                return f"Successfully wrote to {filename}"
+            else:
+                return "Invalid operation. Use 'read' or 'write'"
+        except Exception as e:
+            return f"File operation error: {str(e)}"
+
+    def list_tools(self):
+        """List all available tool methods in this application.
+
+        Returns:
+            list: A list of callable tool methods.
+        """
+        return [
+            self.get_current_time,
+            self.get_current_date,
+            self.calculate,
+            self.file_operations,
+        ]
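For orientation, a minimal sketch of exercising the new sample app directly (assumes the 0.1.24rc2 wheel is installed; the snippet is illustrative and not part of the diff):

# Sketch: driving SampleToolApp outside a server.
from universal_mcp.applications.sample_tool_app import SampleToolApp

app = SampleToolApp()
print(app.get_current_date())        # e.g. "2025-01-01"
print(app.calculate("2 * (3 + 4)"))  # "Result: 14"
print(len(app.list_tools()))         # 4 tool callables

Note that eval() with an empty __builtins__ mapping narrows the expression namespace but is not a real sandbox, so calculate() should still only see trusted input; likewise file_operations() reads and writes any path the process can reach.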
universal_mcp/cli.py CHANGED
@@ -1,113 +1,22 @@
-import re
 from pathlib import Path
 
 import typer
 from rich.console import Console
 from rich.panel import Panel
 
+from universal_mcp.client.cli import app as client_app
 from universal_mcp.utils.installation import (
     get_supported_apps,
     install_app,
 )
+from universal_mcp.utils.openapi.cli import app as codegen_app
 
 # Setup rich console and logging
 console = Console()
 
-app = typer.Typer()
-
-
-@app.command()
-def generate(
-    schema_path: Path = typer.Option(..., "--schema", "-s"),
-    output_path: Path = typer.Option(
-        None,
-        "--output",
-        "-o",
-        help="Output file path - should match the API name (e.g., 'twitter.py' for Twitter API)",
-    ),
-    class_name: str = typer.Option(
-        None,
-        "--class-name",
-        "-c",
-        help="Class name to use for the API client",
-    ),
-):
-    """Generate API client from OpenAPI schema with optional docstring generation.
-
-    The output filename should match the name of the API in the schema (e.g., 'twitter.py' for Twitter API).
-    This name will be used for the folder in applications/.
-    """
-    # Import here to avoid circular imports
-    from universal_mcp.utils.openapi.api_generator import generate_api_from_schema
-
-    if not schema_path.exists():
-        console.print(f"[red]Error: Schema file {schema_path} does not exist[/red]")
-        raise typer.Exit(1)
-
-    try:
-        app_file_data = generate_api_from_schema(
-            schema_path=schema_path,
-            output_path=output_path,
-            class_name=class_name,
-        )
-        if isinstance(app_file_data, dict) and "code" in app_file_data:
-            console.print("[yellow]No output path specified, printing generated code to console:[/yellow]")
-            console.print(app_file_data["code"])
-        elif isinstance(app_file_data, Path):
-            console.print("[green]API client successfully generated and installed.[/green]")
-            console.print(f"[blue]Application file: {app_file_data}[/blue]")
-        else:
-            # Handle the error case from api_generator if validation fails
-            if isinstance(app_file_data, dict) and "error" in app_file_data:
-                console.print(f"[red]{app_file_data['error']}[/red]")
-                raise typer.Exit(1)
-            else:
-                console.print("[red]Unexpected return value from API generator.[/red]")
-                raise typer.Exit(1)
-
-    except Exception as e:
-        console.print(f"[red]Error generating API client: {e}[/red]")
-        raise typer.Exit(1) from e
-
-
-@app.command()
-def readme(
-    file_path: Path = typer.Argument(..., help="Path to the Python file to process"),
-):
-    """Generate a README.md file for the API client."""
-    from universal_mcp.utils.openapi.readme import generate_readme
-
-    readme_file = generate_readme(file_path)
-    console.print(f"[green]README.md file generated at: {readme_file}[/green]")
-
-
-@app.command()
-def docgen(
-    file_path: Path = typer.Argument(..., help="Path to the Python file to process"),
-    model: str = typer.Option(
-        "perplexity/sonar",
-        "--model",
-        "-m",
-        help="Model to use for generating docstrings",
-    ),
-):
-    """Generate docstrings for Python files using LLMs.
-
-    This command uses litellm with structured output to generate high-quality
-    Google-style docstrings for all functions in the specified Python file.
-    """
-    from universal_mcp.utils.openapi.docgen import process_file
-
-    if not file_path.exists():
-        console.print(f"[red]Error: File not found: {file_path}[/red]")
-        raise typer.Exit(1)
-
-    try:
-        processed = process_file(str(file_path), model)
-        console.print(f"[green]Successfully processed {processed} functions[/green]")
-    except Exception as e:
-        console.print(f"[red]Error: {e}[/red]")
-        raise typer.Exit(1) from e
+app = typer.Typer(name="mcp")
+app.add_typer(codegen_app, name="codegen", help="Code generation and manipulation commands")
+app.add_typer(client_app, name="client", help="Client commands")
 
 
 @app.command()
@@ -162,137 +71,5 @@ def install(app_name: str = typer.Argument(..., help="Name of app to install")):
         raise typer.Exit(1) from e
 
 
-@app.command()
-def init(
-    output_dir: Path | None = typer.Option(
-        None,
-        "--output-dir",
-        "-o",
-        help="Output directory for the project (must exist)",
-    ),
-    app_name: str | None = typer.Option(
-        None,
-        "--app-name",
-        "-a",
-        help="App name (letters, numbers, hyphens, underscores only)",
-    ),
-    integration_type: str | None = typer.Option(
-        None,
-        "--integration-type",
-        "-i",
-        help="Integration type (api_key, oauth, agentr, none)",
-        case_sensitive=False,
-        show_choices=True,
-    ),
-):
-    """Initialize a new MCP project using the cookiecutter template."""
-    from cookiecutter.main import cookiecutter
-
-    NAME_PATTERN = r"^[a-zA-Z0-9_-]+$"
-
-    def validate_pattern(value: str, field_name: str) -> None:
-        if not re.match(NAME_PATTERN, value):
-            console.print(
-                f"[red]❌ Invalid {field_name}; only letters, numbers, hyphens, and underscores allowed.[/red]"
-            )
-            raise typer.Exit(code=1)
-
-    # App name
-    if not app_name:
-        app_name = typer.prompt(
-            "Enter the app name",
-            default="app_name",
-            prompt_suffix=" (e.g., reddit, youtube): ",
-        ).strip()
-    validate_pattern(app_name, "app name")
-    app_name = app_name.lower()
-    if not output_dir:
-        path_str = typer.prompt(
-            "Enter the output directory for the project",
-            default=str(Path.cwd()),
-            prompt_suffix=": ",
-        ).strip()
-        output_dir = Path(path_str)
-
-    if not output_dir.exists():
-        try:
-            output_dir.mkdir(parents=True, exist_ok=True)
-            console.print(f"[green]✅ Created output directory at '{output_dir}'[/green]")
-        except Exception as e:
-            console.print(f"[red]❌ Failed to create output directory '{output_dir}': {e}[/red]")
-            raise typer.Exit(code=1) from e
-    elif not output_dir.is_dir():
-        console.print(f"[red]❌ Output path '{output_dir}' exists but is not a directory.[/red]")
-        raise typer.Exit(code=1)
-
-    # Integration type
-    if not integration_type:
-        integration_type = typer.prompt(
-            "Choose the integration type",
-            default="agentr",
-            prompt_suffix=" (api_key, oauth, agentr, none): ",
-        ).lower()
-    if integration_type not in ("api_key", "oauth", "agentr", "none"):
-        console.print("[red]❌ Integration type must be one of: api_key, oauth, agentr, none[/red]")
-        raise typer.Exit(code=1)
-
-    console.print("[blue]🚀 Generating project using cookiecutter...[/blue]")
-    try:
-        cookiecutter(
-            "https://github.com/AgentrDev/universal-mcp-app-template.git",
-            output_dir=str(output_dir),
-            no_input=True,
-            extra_context={
-                "app_name": app_name,
-                "integration_type": integration_type,
-            },
-        )
-    except Exception as exc:
-        console.print(f"❌ Project generation failed: {exc}")
-        raise typer.Exit(code=1) from exc
-
-    project_dir = output_dir / f"{app_name}"
-    console.print(f"✅ Project created at {project_dir}")
-
-
-@app.command()
-def preprocess(
-    schema_path: Path = typer.Option(None, "--schema", "-s", help="Path to the OpenAPI schema file."),
-    output_path: Path = typer.Option(None, "--output", "-o", help="Path to save the processed schema."),
-):
-    from universal_mcp.utils.openapi.preprocessor import run_preprocessing
-
-    """Preprocess an OpenAPI schema using LLM to fill or enhance descriptions."""
-    run_preprocessing(schema_path, output_path)
-
-
-@app.command()
-def split_api(
-    input_app_file: Path = typer.Argument(..., help="Path to the generated app.py file to split"),
-    output_dir: Path = typer.Option(..., "--output-dir", "-o", help="Directory to save the split files"),
-):
-    """Splits a single generated API client file into multiple files based on path groups."""
-    from universal_mcp.utils.openapi.api_splitter import split_generated_app_file
-
-    if not input_app_file.exists() or not input_app_file.is_file():
-        console.print(f"[red]Error: Input file {input_app_file} does not exist or is not a file.[/red]")
-        raise typer.Exit(1)
-
-    if not output_dir.exists():
-        output_dir.mkdir(parents=True, exist_ok=True)
-        console.print(f"[green]Created output directory: {output_dir}[/green]")
-    elif not output_dir.is_dir():
-        console.print(f"[red]Error: Output path {output_dir} is not a directory.[/red]")
-        raise typer.Exit(1)
-
-    try:
-        split_generated_app_file(input_app_file, output_dir)
-        console.print(f"[green]Successfully split {input_app_file} into {output_dir}[/green]")
-    except Exception as e:
-        console.print(f"[red]Error splitting API client: {e}[/red]")
-
-        raise typer.Exit(1) from e
-
-
 if __name__ == "__main__":
     app()
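In short, the generate, readme, docgen, init, preprocess, and split_api commands leave the top-level Typer app: code generation is presumably regrouped under the codegen sub-app (backed by the new universal_mcp/utils/openapi/cli.py), and the new client commands live under client. A minimal sketch of verifying the regrouping without shelling out, using Typer's test runner (illustrative; assumes the wheel is installed):

# Sketch: inspecting the restructured CLI via typer.testing.CliRunner.
from typer.testing import CliRunner

from universal_mcp.cli import app

runner = CliRunner()
# Each sub-app now owns its own command tree.
print(runner.invoke(app, ["codegen", "--help"]).output)
print(runner.invoke(app, ["client", "--help"]).output)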
universal_mcp/client/agents/base.py ADDED
@@ -0,0 +1,38 @@
+# agents/base.py
+from abc import ABC, abstractmethod
+from enum import Enum
+from typing import Any
+
+from pydantic import BaseModel
+
+from universal_mcp.tools.manager import ToolManager
+
+
+class AgentType(Enum):
+    REACT = "react"
+    CODEACT = "codeact"
+    SIMPLE = "simple"
+
+
+class AgentResponse(BaseModel):
+    thought: str | None = None
+    action: str | None = None
+    action_input: dict[str, Any] | None = None
+    observation: str | None = None
+    answer: str | None = None
+    finished: bool = False
+
+
+class BaseAgent(ABC):
+    def __init__(self, name: str, instructions: str, model: str, debug: bool = False):
+        self.name = name
+        self.instructions = instructions
+        self.model = model
+        self.conversation_history: list[dict[str, Any]] = []
+
+    @abstractmethod
+    def process_step(self, user_input: str, tool_manager: ToolManager):
+        pass
+
+    def reset_conversation(self):
+        self.conversation_history = []
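Since process_step is the only abstract member, a concrete agent can be very small. A minimal sketch of a subclass (EchoAgent is a hypothetical name for illustration, not part of the package):

# Sketch: the smallest possible BaseAgent subclass (EchoAgent is hypothetical).
from universal_mcp.client.agents.base import BaseAgent

class EchoAgent(BaseAgent):
    def process_step(self, user_input: str, tool_manager):
        # No LLM and no tools: record the turn and echo the input back.
        self.conversation_history.append({"human": user_input, "assistant": user_input})
        return user_input

agent = EchoAgent(name="echo", instructions="Echo the input.", model="none")
print(agent.process_step("hello", tool_manager=None))  # no tools needed for this sketch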
universal_mcp/client/agents/llm.py ADDED
@@ -0,0 +1,115 @@
+import json
+from typing import Any
+
+from loguru import logger
+from openai import OpenAI
+from openai.types.chat import (
+    ChatCompletion,
+    ChatCompletionMessage,
+    ChatCompletionToolParam,
+)
+
+from universal_mcp.tools.adapters import ToolFormat
+from universal_mcp.tools.manager import ToolManager
+
+
+class LLMClient:
+    def __init__(self, model: str):
+        self.model = model
+        self.client = OpenAI()
+        logger.info(f"LLMClient initialized with model: {self.model}")
+
+    async def generate_response(
+        self,
+        messages: list[dict[str, str]],
+        tools: list[ChatCompletionToolParam] | None = None,
+        temperature: float = 0.7,
+        max_tokens: int = 1000,
+    ) -> ChatCompletionMessage:
+        """Generate response using OpenAI with native tool calling"""
+        try:
+            logger.debug(f"Generating response with messages: {messages}")
+            kwargs = {"model": self.model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+            if tools:
+                kwargs["tools"] = tools
+                kwargs["tool_choice"] = "auto"
+                logger.debug(f"Using tools: {tools}")
+            response: ChatCompletion = self.client.chat.completions.create(**kwargs)
+            logger.debug(f"OpenAI response: {response}")
+            choice = response.choices[0]
+            message: ChatCompletionMessage = choice.message
+            logger.info(f"Generated message: {message}")
+            return message
+        except Exception as e:
+            logger.error(f"Error in generate_response: {e}")
+            raise e
+
+    async def handle_tool_calls(
+        self, tool_calls: list[ChatCompletionToolParam], tool_manager: ToolManager
+    ) -> list[dict[str, Any]]:
+        """Handle tool calls"""
+        messages: list[dict[str, Any]] = []
+        for tool_call in tool_calls:
+            tool_name = tool_call.function.name
+            tool_args = tool_call.function.arguments
+            logger.info(f"Handling tool call: {tool_name} with args: {tool_args}")
+            tool_result = await tool_manager.call_tool(tool_name, tool_args)
+            logger.debug(f"Tool result for {tool_name}: {tool_result}")
+            messages.append({"role": "tool", "tool_call_id": tool_call.id, "content": str(tool_result)})
+        return messages
+
+    async def generate_response_with_tool_results(
+        self, messages: list[dict[str, str]], tool_manager: ToolManager, max_iterations: int = 5
+    ) -> ChatCompletionMessage:
+        """Handle complete tool calling conversation loop using OpenAI"""
+        conversation = messages.copy()
+        iteration = 0
+        tools = tool_manager.list_tools(format=ToolFormat.OPENAI)
+        logger.info(f"Starting tool calling loop with max_iterations={max_iterations}")
+
+        while iteration < max_iterations:
+            iteration += 1
+            logger.info(f"Iteration {iteration}: Generating response with conversation: {conversation}")
+
+            # Generate response with tools
+            response = await self.generate_response(conversation, tools)
+
+            # If no tool calls, return the response
+            tool_calls = response.tool_calls
+            if not tool_calls:
+                logger.info("No tool calls detected, returning response.")
+                return response
+
+            logger.info(f"Tool calls detected: {tool_calls}")
+
+            # Add assistant message with tool calls
+            assistant_msg = {"role": "assistant", "content": response.content}
+            if tool_calls:
+                assistant_msg["tool_calls"] = tool_calls
+            conversation.append(assistant_msg)
+
+            # Execute tool calls and add results
+            for tool_call in tool_calls:
+                function = tool_call.function
+                tool_name = function.name
+                arguments = function.arguments
+                logger.info(f"Executing tool: {tool_name} with arguments: {arguments}")
+                try:
+                    tool_args = json.loads(arguments)
+                except Exception as e:
+                    logger.warning(f"Failed to parse tool arguments as JSON: {arguments}. Error: {e}")
+                    tool_args = {"query": arguments}
+                # Execute the tool
+                try:
+                    tool_result = await tool_manager.call_tool(tool_name, tool_args)
+                    logger.debug(f"Tool result for {tool_name}: {tool_result}")
+                except Exception as e:
+                    logger.error(f"Error executing tool {tool_name}: {e}")
+                    tool_result = f"Error executing tool {tool_name}: {e}"
+                # Add tool result to conversation
+                conversation.append({"role": "tool", "tool_call_id": tool_call.id, "content": str(tool_result)})
+            # Continue the conversation loop
+
+        logger.info("Max iterations reached or tool loop complete. Generating final response.")
+        # Final response after tool execution
+        return await self.generate_response(conversation)
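One detail worth flagging: the loop in generate_response_with_tool_results parses function.arguments with json.loads before dispatching, while handle_tool_calls forwards the raw argument string to call_tool; the ReAct agent below uses only the loop path. A minimal sketch of a direct, tool-free call (the model name is an assumption, and OPENAI_API_KEY must be set for the OpenAI() client):

# Sketch: a direct call to LLMClient without tools. The model name is assumed;
# the OpenAI() client reads OPENAI_API_KEY from the environment.
import asyncio

from universal_mcp.client.agents.llm import LLMClient

async def main():
    client = LLMClient(model="gpt-4o-mini")  # assumed model name
    message = await client.generate_response(
        messages=[{"role": "user", "content": "Reply with a single word."}]
    )
    print(message.content)

asyncio.run(main())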
universal_mcp/client/agents/react.py ADDED
@@ -0,0 +1,67 @@
+from loguru import logger
+
+from universal_mcp.tools.manager import ToolManager
+
+from .base import BaseAgent
+from .llm import LLMClient
+
+
+class ReActAgent(BaseAgent):
+    def __init__(self, name: str, instructions: str, model: str):
+        super().__init__(name, instructions, model)
+        self.llm_client = LLMClient(model)
+        self.max_iterations = 10
+        logger.debug(f"Initialized ReActAgent: name={name}, model={model}")
+
+    def _build_system_message(self) -> str:
+        system_message = f"""You are {self.name}. {self.instructions}
+
+You have access to various tools that can help you answer questions and complete tasks. When you need to use a tool:
+
+1. Think about what information you need
+2. Call the appropriate tool with the right parameters
+3. Use the tool results to provide a comprehensive answer
+
+Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so."""
+        logger.debug(f"System message built: {system_message}")
+        return system_message
+
+    def _build_messages(self, user_input: str) -> list[dict[str, str]]:
+        """Build message history for the conversation"""
+        messages = [{"role": "system", "content": self._build_system_message()}]
+
+        # Add conversation history
+        for entry in self.conversation_history:
+            messages.append({"role": "user", "content": entry["human"]})
+            messages.append({"role": "assistant", "content": entry["assistant"]})
+
+        # Add current user input
+        messages.append({"role": "user", "content": user_input})
+
+        logger.debug(f"Built messages for user_input='{user_input}': {messages}")
+        return messages
+
+    async def process_step(self, user_input: str, tool_manager: ToolManager) -> str:
+        """Process user input using native tool calling"""
+
+        logger.info(f"Processing user input: {user_input}")
+
+        # Build conversation messages
+        messages = self._build_messages(user_input)
+
+        # Use native tool calling with conversation loop
+        try:
+            response = await self.llm_client.generate_response_with_tool_results(
+                messages, tool_manager, self.max_iterations
+            )
+            final_answer = response.content
+            logger.info(f"LLM response received: {final_answer}")
+        except Exception as e:
+            logger.error(f"Error during LLM response generation: {e}")
+            raise
+
+        # Store in conversation history
+        self.conversation_history.append({"human": user_input, "assistant": final_answer})
+        logger.debug(f"Updated conversation history: {self.conversation_history[-1]}")
+
+        return final_answer
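Putting the new pieces together, a minimal end-to-end sketch (ToolManager() default construction and the model name are assumptions; tool registration is not shown in this diff, so the manager starts empty and the agent simply answers without tool calls):

# Sketch: driving ReActAgent end to end. ToolManager() default construction and
# the model name are assumptions; requires OPENAI_API_KEY in the environment.
import asyncio

from universal_mcp.client.agents.react import ReActAgent
from universal_mcp.tools.manager import ToolManager

async def main():
    agent = ReActAgent(name="demo", instructions="Answer concisely.", model="gpt-4o-mini")
    answer = await agent.process_step("What is 2 + 2?", ToolManager())
    print(answer)

asyncio.run(main())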