mcp-use 1.3.8__py3-none-any.whl → 1.3.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-use might be problematic.
- mcp_use/__init__.py +6 -3
- mcp_use/adapters/base.py +3 -3
- mcp_use/adapters/langchain_adapter.py +5 -4
- mcp_use/agents/mcpagent.py +101 -17
- mcp_use/agents/remote.py +102 -14
- mcp_use/cli.py +581 -0
- mcp_use/client.py +15 -1
- mcp_use/config.py +10 -8
- mcp_use/connectors/base.py +100 -15
- mcp_use/connectors/http.py +13 -2
- mcp_use/connectors/sandbox.py +12 -6
- mcp_use/connectors/stdio.py +11 -2
- mcp_use/errors/__init__.py +1 -0
- mcp_use/errors/error_formatting.py +29 -0
- mcp_use/logging.py +27 -12
- mcp_use/managers/base.py +36 -0
- mcp_use/managers/server_manager.py +3 -8
- mcp_use/managers/tools/connect_server.py +2 -1
- mcp_use/managers/tools/disconnect_server.py +2 -1
- mcp_use/managers/tools/list_servers_tool.py +2 -0
- mcp_use/observability/__init__.py +2 -1
- mcp_use/observability/callbacks_manager.py +162 -0
- mcp_use/observability/laminar.py +24 -3
- mcp_use/observability/langfuse.py +27 -3
- mcp_use/session.py +70 -0
- mcp_use/telemetry/telemetry.py +1 -4
- {mcp_use-1.3.8.dist-info → mcp_use-1.3.10.dist-info}/METADATA +73 -48
- mcp_use-1.3.10.dist-info/RECORD +55 -0
- mcp_use-1.3.10.dist-info/entry_points.txt +2 -0
- mcp_use-1.3.8.dist-info/RECORD +0 -49
- {mcp_use-1.3.8.dist-info → mcp_use-1.3.10.dist-info}/WHEEL +0 -0
- {mcp_use-1.3.8.dist-info → mcp_use-1.3.10.dist-info}/licenses/LICENSE +0 -0
mcp_use/__init__.py
CHANGED
@@ -7,12 +7,16 @@ to MCP tools through existing LangChain adapters.
 
 from importlib.metadata import version
 
-
+# Import logging FIRST to ensure it's configured before other modules
+# This MUST happen before importing observability to ensure loggers are configured
+from .logging import MCP_USE_DEBUG, Logger, logger  # isort: skip
+
+# Now import other modules - observability must come after logging
+from . import observability  # noqa: E402
 from .agents.mcpagent import MCPAgent
 from .client import MCPClient
 from .config import load_config_file
 from .connectors import BaseConnector, HttpConnector, StdioConnector, WebSocketConnector
-from .logging import MCP_USE_DEBUG, Logger, logger
 from .session import MCPSession
 
 __version__ = version("mcp-use")

@@ -25,7 +29,6 @@ __all__ = [
     "StdioConnector",
     "WebSocketConnector",
     "HttpConnector",
-    "create_session_from_config",
     "load_config_file",
     "logger",
     "MCP_USE_DEBUG",
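Note: the practical effect of this reordering is that importing anything from mcp_use configures logging before the observability module loads. A minimal sketch of the re-exported surface (names taken from the imports and __all__ above; the log call is illustrative):

    # Sketch: top-level names re-exported by mcp_use/__init__.py (per the diff above).
    from mcp_use import MCP_USE_DEBUG, MCPAgent, MCPClient, logger

    logger.info("mcp-use imported; MCP_USE_DEBUG=%s", MCP_USE_DEBUG)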
mcp_use/adapters/base.py
CHANGED
@@ -91,14 +91,14 @@ class BaseAdapter(ABC):
 
         connector_tools = []
         # Now create tools for each MCP tool
-        for tool in connector.
+        for tool in await connector.list_tools():
             # Convert the tool and add it to the list if conversion was successful
             converted_tool = self._convert_tool(tool, connector)
             if converted_tool:
                 connector_tools.append(converted_tool)
 
         # Convert resources to tools so that agents can access resource content directly
-        resources_list = connector.
+        resources_list = await connector.list_resources() or []
         if resources_list:
             for resource in resources_list:
                 converted_resource = self._convert_resource(resource, connector)

@@ -106,7 +106,7 @@ class BaseAdapter(ABC):
                 connector_tools.append(converted_resource)
 
         # Convert prompts to tools so that agents can retrieve prompt content
-        prompts_list = connector.
+        prompts_list = await connector.list_prompts() or []
         if prompts_list:
             for prompt in prompts_list:
                 converted_prompt = self._convert_prompt(prompt, connector)
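Note: the adapter now awaits the connectors' async listing methods instead of reading cached attributes, so tools, resources, and prompts are fetched at call time. A hedged sketch of that call pattern from a caller's perspective (the StdioConnector arguments and the connect/disconnect lifecycle calls are assumptions; the three list_* calls come from the diff above):

    # Sketch: enumerating a connector's capabilities with the awaited listing API.
    import asyncio

    from mcp_use.connectors import StdioConnector

    async def main() -> None:
        # Connector construction shown for illustration only.
        connector = StdioConnector(command="npx", args=["@playwright/mcp@latest"])
        await connector.connect()  # assumed lifecycle method
        try:
            tools = await connector.list_tools()
            resources = await connector.list_resources() or []
            prompts = await connector.list_prompts() or []
            print(f"{len(tools)} tools, {len(resources)} resources, {len(prompts)} prompts")
        finally:
            await connector.disconnect()  # assumed lifecycle method

    asyncio.run(main())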
mcp_use/adapters/langchain_adapter.py
CHANGED

@@ -21,6 +21,7 @@ from mcp.types import (
 from pydantic import BaseModel, Field, create_model
 
 from ..connectors.base import BaseConnector
+from ..errors.error_formatting import format_error
 from ..logging import logger
 from .base import BaseAdapter
 

@@ -159,11 +160,11 @@ class LangChainAdapter(BaseAdapter):
                 except Exception as e:
                     # Log the exception for debugging
                     logger.error(f"Error parsing tool result: {e}")
-                    return
+                    return format_error(e, tool=self.name, tool_content=tool_result.content)
 
             except Exception as e:
                 if self.handle_tool_error:
-                    return
+                    return format_error(e, tool=self.name)  # Format the error to make LLM understand it
                 raise
 
         return McpToLangChainAdapter()

@@ -204,7 +205,7 @@ class LangChainAdapter(BaseAdapter):
                 return content_decoded
             except Exception as e:
                 if self.handle_tool_error:
-                    return
+                    return format_error(e, tool=self.name)  # Format the error to make LLM understand it
                 raise
 
         return ResourceTool()

@@ -261,7 +262,7 @@ class LangChainAdapter(BaseAdapter):
                 return result.messages
             except Exception as e:
                 if self.handle_tool_error:
-                    return
+                    return format_error(e, tool=self.name)  # Format the error to make LLM understand it
                 raise
 
         return PromptTool()
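Note: every bare `return` in the adapter's error paths now returns a formatted error string, so the LLM receives actionable feedback instead of None. A rough sketch of the call pattern, based only on the call sites above (the exact message format lives in the new mcp_use/errors/error_formatting.py, which is not shown in this diff):

    # Sketch: mirroring how the adapter reports tool failures back to the LLM.
    from mcp_use.errors.error_formatting import format_error

    def call_tool_safely(tool_fn, tool_name: str, handle_tool_error: bool = True):
        try:
            return tool_fn()  # stand-in for the real MCP tool invocation
        except Exception as e:
            if handle_tool_error:
                # Same keyword argument used at the call sites above.
                return format_error(e, tool=tool_name)
            raise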
mcp_use/agents/mcpagent.py
CHANGED
@@ -30,7 +30,11 @@ from mcp_use.telemetry.utils import extract_model_info
 
 from ..adapters.langchain_adapter import LangChainAdapter
 from ..logging import logger
+from ..managers.base import BaseServerManager
 from ..managers.server_manager import ServerManager
+
+# Import observability manager
+from ..observability import ObservabilityManager
 from .prompts.system_prompt_builder import create_system_message
 from .prompts.templates import DEFAULT_SYSTEM_PROMPT_TEMPLATE, SERVER_MANAGER_SYSTEM_PROMPT_TEMPLATE
 from .remote import RemoteAgent

@@ -62,10 +66,15 @@ class MCPAgent:
         disallowed_tools: list[str] | None = None,
         tools_used_names: list[str] | None = None,
         use_server_manager: bool = False,
+        server_manager: BaseServerManager | None = None,
         verbose: bool = False,
         agent_id: str | None = None,
         api_key: str | None = None,
         base_url: str = "https://cloud.mcp-use.com",
+        callbacks: list | None = None,
+        chat_id: str | None = None,
+        retry_on_error: bool = True,
+        max_retries_per_step: int = 2,
     ):
         """Initialize a new MCPAgent instance.
 

@@ -84,10 +93,13 @@ class MCPAgent:
             agent_id: Remote agent ID for remote execution. If provided, creates a remote agent.
             api_key: API key for remote execution. If None, checks MCP_USE_API_KEY env var.
             base_url: Base URL for remote API calls.
+            callbacks: List of LangChain callbacks to use. If None and Langfuse is configured, uses langfuse_handler.
+            retry_on_error: Whether to retry tool calls that fail due to validation errors.
+            max_retries_per_step: Maximum number of retries for validation errors per step.
         """
         # Handle remote execution
         if agent_id is not None:
-            self._remote_agent = RemoteAgent(agent_id=agent_id, api_key=api_key, base_url=base_url)
+            self._remote_agent = RemoteAgent(agent_id=agent_id, api_key=api_key, base_url=base_url, chat_id=chat_id)
             self._is_remote = True
             return
 

@@ -109,13 +121,20 @@
         self.disallowed_tools = disallowed_tools or []
         self.tools_used_names = tools_used_names or []
         self.use_server_manager = use_server_manager
+        self.server_manager = server_manager
         self.verbose = verbose
+        self.retry_on_error = retry_on_error
+        self.max_retries_per_step = max_retries_per_step
         # System prompt configuration
         self.system_prompt = system_prompt  # User-provided full prompt override
         # User can provide a template override, otherwise use the imported default
         self.system_prompt_template_override = system_prompt_template
         self.additional_instructions = additional_instructions
 
+        # Set up observability callbacks using the ObservabilityManager
+        self.observability_manager = ObservabilityManager(custom_callbacks=callbacks)
+        self.callbacks = self.observability_manager.get_callbacks()
+
         # Either client or connector must be provided
         if not client and len(self.connectors) == 0:
             raise ValueError("Either client or connector must be provided")

@@ -126,9 +145,7 @@
         # Initialize telemetry
         self.telemetry = Telemetry()
 
-
-        self.server_manager = None
-        if self.use_server_manager:
+        if self.use_server_manager and self.server_manager is None:
             if not self.client:
                 raise ValueError("Client must be provided when using server manager")
             self.server_manager = ServerManager(self.client, self.adapter)
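Note: taken together, the constructor now supports an injectable server manager, observability callbacks, a chat ID for remote sessions, and retry controls for validation errors. A hedged construction sketch (parameter names come from the signature above; the LLM class, config path, and from_config_file helper are illustrative assumptions):

    # Sketch: constructing an MCPAgent with the options added in 1.3.10.
    from langchain_openai import ChatOpenAI  # any LangChain chat model should work here
    from mcp_use import MCPAgent, MCPClient

    client = MCPClient.from_config_file("mcp_config.json")  # assumed helper; path is illustrative
    agent = MCPAgent(
        llm=ChatOpenAI(model="gpt-4o"),
        client=client,
        use_server_manager=True,
        server_manager=None,       # or inject your own BaseServerManager implementation
        callbacks=None,            # falls back to the configured observability handlers
        retry_on_error=True,       # retry steps that fail with validation errors
        max_retries_per_step=2,
    )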
@@ -246,9 +263,15 @@
         # Use the standard create_tool_calling_agent
         agent = create_tool_calling_agent(llm=self.llm, tools=self._tools, prompt=prompt)
 
-        # Use the standard AgentExecutor
-        executor = AgentExecutor(
-
+        # Use the standard AgentExecutor with callbacks
+        executor = AgentExecutor(
+            agent=agent,
+            tools=self._tools,
+            max_iterations=self.max_steps,
+            verbose=self.verbose,
+            callbacks=self.callbacks,
+        )
+        logger.debug(f"Created agent executor with max_iterations={self.max_steps} and {len(self.callbacks)} callbacks")
         return executor
 
     def get_conversation_history(self) -> list[BaseMessage]:
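Note: because the executor is now built with callbacks=self.callbacks, any LangChain callback handler passed to the MCPAgent constructor is invoked during execution. A minimal sketch of a custom handler (BaseCallbackHandler and its on_tool_start hook are standard LangChain; wiring it in via callbacks= follows the constructor change above):

    # Sketch: a custom LangChain callback handler forwarded to the AgentExecutor.
    from langchain_core.callbacks import BaseCallbackHandler

    class ToolLoggingHandler(BaseCallbackHandler):
        def on_tool_start(self, serialized, input_str, **kwargs):
            # Called whenever the agent invokes a tool.
            print(f"tool start: {serialized.get('name')} input={input_str!r}")

    # agent = MCPAgent(llm=llm, client=client, callbacks=[ToolLoggingHandler()])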
@@ -469,6 +492,22 @@
 
         logger.info(f"🏁 Starting agent execution with max_steps={steps}")
 
+        # Create a run manager with our callbacks if we have any - ONCE for the entire execution
+        run_manager = None
+        if self.callbacks:
+            # Create an async callback manager with our callbacks
+            from langchain_core.callbacks.manager import AsyncCallbackManager
+
+            callback_manager = AsyncCallbackManager.configure(
+                inheritable_callbacks=self.callbacks,
+                local_callbacks=self.callbacks,
+            )
+            # Create a run manager for this chain execution
+            run_manager = await callback_manager.on_chain_start(
+                {"name": "MCPAgent (mcp-use)"},
+                inputs,
+            )
+
         for step_num in range(steps):
             steps_taken = step_num + 1
             # --- Check for tool updates if using server manager ---

@@ -498,20 +537,51 @@
 
             # --- Plan and execute the next step ---
             try:
-
-
-
-
-
-
-
-
-
+                retry_count = 0
+                next_step_output = None
+
+                while retry_count <= self.max_retries_per_step:
+                    try:
+                        # Use the internal _atake_next_step which handles planning and execution
+                        # This requires providing the necessary context like maps and intermediate steps
+                        next_step_output = await self._agent_executor._atake_next_step(
+                            name_to_tool_map=name_to_tool_map,
+                            color_mapping=color_mapping,
+                            inputs=inputs,
+                            intermediate_steps=intermediate_steps,
+                            run_manager=run_manager,
+                        )
+
+                        # If we get here, the step succeeded, break out of retry loop
+                        break
+
+                    except Exception as e:
+                        if not self.retry_on_error or retry_count >= self.max_retries_per_step:
+                            logger.error(f"❌ Validation error during step {step_num + 1}: {e}")
+                            result = f"Agent stopped due to a validation error: {str(e)}"
+                            success = False
+                            yield result
+                            return
+
+                        retry_count += 1
+                        logger.warning(
+                            f"⚠️ Validation error, retrying ({retry_count}/{self.max_retries_per_step}): {e}"
+                        )
+
+                        # Create concise feedback for the LLM about the validation error
+                        error_message = f"Error: {str(e)}"
+                        inputs["input"] = error_message
+
+                        # Continue to next iteration of retry loop
+                        continue
 
                 # Process the output
                 if isinstance(next_step_output, AgentFinish):
                     logger.info(f"✅ Agent finished at step {step_num + 1}")
                     result = next_step_output.return_values.get("output", "No output generated")
+                    # End the chain if we have a run manager
+                    if run_manager:
+                        await run_manager.on_chain_end({"output": result})
 
                     # If structured output is requested, attempt to create it
                     if output_schema and structured_llm:

@@ -563,6 +633,12 @@
                     for agent_step in next_step_output:
                         yield agent_step
                         action, observation = agent_step
+                        reasoning = getattr(action, "log", "")
+                        if reasoning:
+                            reasoning_str = reasoning.replace("\n", " ")
+                            if len(reasoning_str) > 300:
+                                reasoning_str = reasoning_str[:297] + "..."
+                            logger.info(f"💭 Reasoning: {reasoning_str}")
                         tool_name = action.tool
                         self.tools_used_names.append(tool_name)
                         tool_input_str = str(action.tool_input)

@@ -589,12 +665,17 @@
             except OutputParserException as e:
                 logger.error(f"❌ Output parsing error during step {step_num + 1}: {e}")
                 result = f"Agent stopped due to a parsing error: {str(e)}"
+                if run_manager:
+                    await run_manager.on_chain_error(e)
                 break
             except Exception as e:
                 logger.error(f"❌ Error during agent execution step {step_num + 1}: {e}")
                 import traceback
 
                 traceback.print_exc()
+                # End the chain with error if we have a run manager
+                if run_manager:
+                    await run_manager.on_chain_error(e)
                 result = f"Agent stopped due to an error: {str(e)}"
                 break
 

@@ -602,6 +683,8 @@
         if not result:
             logger.warning(f"⚠️ Agent stopped after reaching max iterations ({steps})")
             result = f"Agent stopped after reaching the maximum number of steps ({steps})."
+            if run_manager:
+                await run_manager.on_chain_end({"output": result})
 
         # If structured output was requested but not achieved, attempt one final time
         if output_schema and structured_llm and not success:

@@ -738,7 +821,8 @@
         """
         # Delegate to remote agent if in remote mode
        if self._is_remote and self._remote_agent:
-
+            result = await self._remote_agent.run(query, max_steps, external_history, output_schema)
+            return result
 
         success = True
         start_time = time.time()
mcp_use/agents/remote.py
CHANGED
@@ -5,6 +5,7 @@ Remote agent implementation for executing agents via API.
 import json
 import os
 from typing import Any, TypeVar
+from uuid import UUID
 
 import httpx
 from langchain.schema import BaseMessage

@@ -14,19 +15,52 @@ from ..logging import logger
 
 T = TypeVar("T", bound=BaseModel)
 
+# API endpoint constants
+API_CHATS_ENDPOINT = "/api/v1/chats/get-or-create"
+API_CHAT_EXECUTE_ENDPOINT = "/api/v1/chats/{chat_id}/execute"
+API_CHAT_DELETE_ENDPOINT = "/api/v1/chats/{chat_id}"
+
+UUID_ERROR_MESSAGE = """A UUID is a 36 character string of the format xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx \n
+Example: 123e4567-e89b-12d3-a456-426614174000
+To generate a UUID, you can use the following command:
+import uuid
+
+# Generate a random UUID
+my_uuid = uuid.uuid4()
+print(my_uuid)
+"""
+
 
 class RemoteAgent:
     """Agent that executes remotely via API."""
 
-    def __init__(
+    def __init__(
+        self,
+        agent_id: str,
+        chat_id: str | None = None,
+        api_key: str | None = None,
+        base_url: str = "https://cloud.mcp-use.com",
+    ):
         """Initialize remote agent.
 
         Args:
             agent_id: The ID of the remote agent to execute
+            chat_id: The ID of the chat session to use. If None, a new chat session will be created.
             api_key: API key for authentication. If None, will check MCP_USE_API_KEY env var
             base_url: Base URL for the remote API
         """
+
+        if chat_id is not None:
+            try:
+                chat_id = str(UUID(chat_id))
+            except ValueError as e:
+                raise ValueError(
+                    f"Invalid chat ID: {chat_id}, make sure to provide a valid UUID.\n{UUID_ERROR_MESSAGE}"
+                ) from e
+
         self.agent_id = agent_id
+        self.chat_id = chat_id
+        self._session_established = False
         self.base_url = base_url
 
         # Handle API key validation

@@ -103,11 +137,55 @@
                 return output_schema.model_validate({"content": str(result_data)})
             raise
 
+    async def _upsert_chat_session(self) -> str:
+        """Create or resume a persistent chat session for the agent via upsert.
+
+        Returns:
+            The chat session ID
+        """
+        chat_payload = {
+            "id": self.chat_id,  # Include chat_id for resuming or None for creating
+            "title": f"Remote Agent Session - {self.agent_id}",
+            "agent_id": self.agent_id,
+            "type": "agent_execution",
+        }
+
+        headers = {"Content-Type": "application/json", "x-api-key": self.api_key}
+        chat_url = f"{self.base_url}{API_CHATS_ENDPOINT}"
+
+        logger.info(f"📝 Upserting chat session for agent {self.agent_id}")
+
+        try:
+            chat_response = await self._client.post(chat_url, json=chat_payload, headers=headers)
+            chat_response.raise_for_status()
+
+            chat_data = chat_response.json()
+            chat_id = chat_data["id"]
+            if chat_response.status_code == 201:
+                logger.info(f"✅ New chat session created: {chat_id}")
+            else:
+                logger.info(f"✅ Resumed chat session: {chat_id}")
+
+            return chat_id
+
+        except httpx.HTTPStatusError as e:
+            status_code = e.response.status_code
+            response_text = e.response.text
+
+            if status_code == 404:
+                raise RuntimeError(
+                    f"Agent not found: Agent '{self.agent_id}' does not exist or you don't have access to it. "
+                    "Please verify the agent ID and ensure it exists in your account."
+                ) from e
+            else:
+                raise RuntimeError(f"Failed to create chat session: {status_code} - {response_text}") from e
+        except Exception as e:
+            raise RuntimeError(f"Failed to create chat session: {str(e)}") from e
+
     async def run(
         self,
         query: str,
         max_steps: int | None = None,
-        manage_connector: bool = True,
         external_history: list[BaseMessage] | None = None,
         output_schema: type[T] | None = None,
     ) -> str | T:

@@ -116,8 +194,7 @@
         Args:
             query: The query to execute
             max_steps: Maximum number of steps (default: 10)
-
-            external_history: Ignored for remote execution (not supported yet)
+            external_history: External history (not supported yet for remote execution)
             output_schema: Optional Pydantic model for structured output
 
         Returns:

@@ -126,20 +203,31 @@
         if external_history is not None:
             logger.warning("External history is not yet supported for remote execution")
 
-
+        try:
+            logger.info(f"🌐 Executing query on remote agent {self.agent_id}")
 
-
-
-
-
+            # Step 1: Ensure chat session exists on the backend by upserting.
+            # This happens once per agent instance.
+            if not self._session_established:
+                logger.info(f"🔧 Establishing chat session for agent {self.agent_id}")
+                self.chat_id = await self._upsert_chat_session()
+                self._session_established = True
 
-
+            chat_id = self.chat_id
 
-
+            # Step 2: Execute the agent within the chat context
+            execution_payload = {"query": query, "max_steps": max_steps or 10}
 
-
-
-
+            # Add structured output schema if provided
+            if output_schema is not None:
+                execution_payload["output_schema"] = self._pydantic_to_json_schema(output_schema)
+                logger.info(f"🔧 Using structured output with schema: {output_schema.__name__}")
+
+            headers = {"Content-Type": "application/json", "x-api-key": self.api_key}
+            execution_url = f"{self.base_url}{API_CHAT_EXECUTE_ENDPOINT.format(chat_id=chat_id)}"
+            logger.info(f"🚀 Executing agent in chat {chat_id}")
+
+            response = await self._client.post(execution_url, json=execution_payload, headers=headers)
             response.raise_for_status()
 
             result = response.json()