npcsh 1.1.12__py3-none-any.whl → 1.1.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +700 -377
- npcsh/alicanto.py +54 -1153
- npcsh/completion.py +206 -0
- npcsh/config.py +163 -0
- npcsh/corca.py +35 -1462
- npcsh/execution.py +185 -0
- npcsh/guac.py +31 -1986
- npcsh/npc_team/jinxs/code/sh.jinx +11 -15
- npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
- npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
- npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
- npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
- npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
- npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
- npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
- npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
- npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
- npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
- npcsh/npc_team/jinxs/utils/search.jinx +3 -3
- npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
- npcsh/npcsh.py +76 -20
- npcsh/parsing.py +118 -0
- npcsh/plonk.py +41 -329
- npcsh/pti.py +41 -201
- npcsh/spool.py +34 -239
- npcsh/ui.py +199 -0
- npcsh/wander.py +54 -542
- npcsh/yap.py +38 -570
- npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
- npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
- npcsh-1.1.14.dist-info/RECORD +135 -0
- npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
- npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
- npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
- npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
- npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
- npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
- npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
- npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
- npcsh-1.1.12.dist-info/RECORD +0 -126
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
npcsh/corca.py
CHANGED
|
@@ -1,1477 +1,50 @@
|
|
|
1
|
+
"""
|
|
2
|
+
corca - MCP-powered agentic shell CLI entry point
|
|
3
|
+
|
|
4
|
+
This is a thin wrapper that executes the corca.jinx through the jinx mechanism.
|
|
5
|
+
"""
|
|
6
|
+
import argparse
|
|
1
7
|
import os
|
|
2
8
|
import sys
|
|
3
|
-
import asyncio
|
|
4
|
-
import shlex
|
|
5
|
-
import argparse
|
|
6
|
-
from contextlib import AsyncExitStack
|
|
7
|
-
from typing import Optional, Callable, Dict, Any, Tuple, List
|
|
8
|
-
import shutil
|
|
9
|
-
import traceback
|
|
10
|
-
from litellm.exceptions import Timeout, ContextWindowExceededError, RateLimitError, BadRequestError
|
|
11
|
-
|
|
12
|
-
try:
|
|
13
|
-
from mcp import ClientSession, StdioServerParameters
|
|
14
|
-
from mcp.client.stdio import stdio_client
|
|
15
|
-
except ImportError:
|
|
16
|
-
print("FATAL: 'mcp-client' package not found. Please run 'pip install mcp-client'.", file=sys.stderr)
|
|
17
|
-
sys.exit(1)
|
|
18
|
-
|
|
19
|
-
from termcolor import colored, cprint
|
|
20
|
-
import json
|
|
21
|
-
import readline
|
|
22
|
-
from npcpy.llm_funcs import get_llm_response, breathe
|
|
23
|
-
from npcpy.npc_compiler import NPC
|
|
24
|
-
from npcpy.npc_sysenv import render_markdown, print_and_process_stream_with_markdown
|
|
25
|
-
from npcpy.memory.command_history import load_kg_from_db, save_conversation_message, save_kg_to_db
|
|
26
|
-
from npcpy.memory.knowledge_graph import kg_evolve_incremental, kg_dream_process, kg_initial, kg_sleep_process
|
|
27
|
-
from npcsh._state import (
|
|
28
|
-
ShellState,
|
|
29
|
-
CommandHistory,
|
|
30
|
-
execute_command as core_execute_command,
|
|
31
|
-
process_result,
|
|
32
|
-
get_multiline_input,
|
|
33
|
-
readline_safe_prompt,
|
|
34
|
-
setup_shell,
|
|
35
|
-
should_skip_kg_processing,
|
|
36
|
-
NPCSH_CHAT_PROVIDER,
|
|
37
|
-
NPCSH_CHAT_MODEL,
|
|
38
|
-
get_team_ctx_path,
|
|
39
|
-
make_completer,
|
|
40
|
-
execute_slash_command,
|
|
41
|
-
)
|
|
42
|
-
from npcsh.routes import router
|
|
43
|
-
import yaml
|
|
44
|
-
from pathlib import Path
|
|
45
|
-
|
|
46
|
-
class MCPClientNPC:
|
|
47
|
-
def __init__(self, debug: bool = True):
|
|
48
|
-
self.debug = debug
|
|
49
|
-
self.session: Optional[ClientSession] = None
|
|
50
|
-
try:
|
|
51
|
-
self._loop = asyncio.get_event_loop()
|
|
52
|
-
if self._loop.is_closed():
|
|
53
|
-
self._loop = asyncio.new_event_loop()
|
|
54
|
-
asyncio.set_event_loop(self._loop)
|
|
55
|
-
except RuntimeError:
|
|
56
|
-
self._loop = asyncio.new_event_loop()
|
|
57
|
-
asyncio.set_event_loop(self._loop)
|
|
58
|
-
|
|
59
|
-
self._exit_stack = self._loop.run_until_complete(self._init_stack())
|
|
60
|
-
self.available_tools_llm: List[Dict[str, Any]] = []
|
|
61
|
-
self.tool_map: Dict[str, Callable] = {}
|
|
62
|
-
self.server_script_path: Optional[str] = None
|
|
63
|
-
|
|
64
|
-
async def _init_stack(self):
|
|
65
|
-
return AsyncExitStack()
|
|
66
|
-
|
|
67
|
-
def _log(self, message: str, color: str = "cyan") -> None:
|
|
68
|
-
if self.debug:
|
|
69
|
-
cprint(f"[MCP Client] {message}", color, file=sys.stderr)
|
|
70
|
-
|
|
71
|
-
async def _connect_async(self, server_script_path: str) -> None:
|
|
72
|
-
self._log(f"Attempting to connect to MCP server: {server_script_path}")
|
|
73
|
-
self.server_script_path = server_script_path
|
|
74
|
-
abs_path = os.path.abspath(server_script_path)
|
|
75
|
-
if not os.path.exists(abs_path):
|
|
76
|
-
raise FileNotFoundError(f"MCP server script not found: {abs_path}")
|
|
77
|
-
|
|
78
|
-
if abs_path.endswith('.py'):
|
|
79
|
-
cmd_parts = [sys.executable, abs_path]
|
|
80
|
-
elif os.access(abs_path, os.X_OK):
|
|
81
|
-
cmd_parts = [abs_path]
|
|
82
|
-
else:
|
|
83
|
-
raise ValueError(f"Unsupported MCP server script type or not executable: {abs_path}")
|
|
84
|
-
|
|
85
|
-
server_params = StdioServerParameters(
|
|
86
|
-
command=cmd_parts[0],
|
|
87
|
-
args=[abs_path],
|
|
88
|
-
env=os.environ.copy(),
|
|
89
|
-
cwd=Path(abs_path).parent
|
|
90
|
-
)
|
|
91
|
-
if self.session:
|
|
92
|
-
await self._exit_stack.aclose()
|
|
93
|
-
|
|
94
|
-
self._exit_stack = AsyncExitStack()
|
|
95
|
-
|
|
96
|
-
stdio_transport = await self._exit_stack.enter_async_context(stdio_client(server_params))
|
|
97
|
-
self.session = await self._exit_stack.enter_async_context(ClientSession(*stdio_transport))
|
|
98
|
-
await self.session.initialize()
|
|
99
|
-
|
|
100
|
-
response = await self.session.list_tools()
|
|
101
|
-
self.available_tools_llm = []
|
|
102
|
-
self.tool_map = {}
|
|
103
|
-
|
|
104
|
-
if response.tools:
|
|
105
|
-
for mcp_tool in response.tools:
|
|
106
|
-
tool_def = {
|
|
107
|
-
"type": "function",
|
|
108
|
-
"function": {
|
|
109
|
-
"name": mcp_tool.name,
|
|
110
|
-
"description": mcp_tool.description or f"MCP tool: {mcp_tool.name}",
|
|
111
|
-
"parameters": getattr(mcp_tool, "inputSchema", {"type": "object", "properties": {}})
|
|
112
|
-
}
|
|
113
|
-
}
|
|
114
|
-
self.available_tools_llm.append(tool_def)
|
|
115
|
-
|
|
116
|
-
def make_tool_func(tool_name_closure):
|
|
117
|
-
async def tool_func(**kwargs):
|
|
118
|
-
if not self.session:
|
|
119
|
-
return {"error": "No MCP session"}
|
|
120
|
-
|
|
121
|
-
self._log(f"About to call MCP tool {tool_name_closure}")
|
|
122
|
-
try:
|
|
123
|
-
cleaned_kwargs = {}
|
|
124
|
-
for k, v in kwargs.items():
|
|
125
|
-
if v == 'None':
|
|
126
|
-
cleaned_kwargs[k] = None
|
|
127
|
-
else:
|
|
128
|
-
cleaned_kwargs[k] = v
|
|
129
|
-
result = await asyncio.wait_for(
|
|
130
|
-
self.session.call_tool(tool_name_closure, cleaned_kwargs),
|
|
131
|
-
timeout=30.0
|
|
132
|
-
)
|
|
133
|
-
self._log(f"MCP tool {tool_name_closure} returned: {type(result)}")
|
|
134
|
-
return result
|
|
135
|
-
except asyncio.TimeoutError:
|
|
136
|
-
self._log(f"Tool {tool_name_closure} timed out after 30 seconds", "red")
|
|
137
|
-
return {"error": f"Tool {tool_name_closure} timed out"}
|
|
138
|
-
except Exception as e:
|
|
139
|
-
self._log(f"Tool {tool_name_closure} error: {e}", "red")
|
|
140
|
-
return {"error": str(e)}
|
|
141
|
-
|
|
142
|
-
def sync_wrapper(**kwargs):
|
|
143
|
-
self._log(f"Sync wrapper called for {tool_name_closure}")
|
|
144
|
-
return self._loop.run_until_complete(tool_func(**kwargs))
|
|
145
|
-
|
|
146
|
-
return sync_wrapper
|
|
147
|
-
self.tool_map[mcp_tool.name] = make_tool_func(mcp_tool.name)
|
|
148
|
-
tool_names = list(self.tool_map.keys())
|
|
149
|
-
self._log(f"Connection successful. Tools: {', '.join(tool_names) if tool_names else 'None'}")
|
|
150
|
-
|
|
151
|
-
def connect_sync(self, server_script_path: str) -> bool:
|
|
152
|
-
loop = self._loop
|
|
153
|
-
if loop.is_closed():
|
|
154
|
-
self._loop = asyncio.new_event_loop()
|
|
155
|
-
asyncio.set_event_loop(self._loop)
|
|
156
|
-
loop = self._loop
|
|
157
|
-
|
|
158
|
-
try:
|
|
159
|
-
loop.run_until_complete(self._connect_async(server_script_path))
|
|
160
|
-
return True
|
|
161
|
-
except Exception as e:
|
|
162
|
-
cprint(f"MCP connection failed: {e}", "red", file=sys.stderr)
|
|
163
|
-
return False
|
|
164
|
-
|
|
165
|
-
def disconnect_sync(self):
|
|
166
|
-
if self.session:
|
|
167
|
-
self._log("Disconnecting MCP session.")
|
|
168
|
-
loop = self._loop
|
|
169
|
-
if not loop.is_closed():
|
|
170
|
-
try:
|
|
171
|
-
async def close_session():
|
|
172
|
-
await self.session.close()
|
|
173
|
-
await self._exit_stack.aclose()
|
|
174
|
-
loop.run_until_complete(close_session())
|
|
175
|
-
except RuntimeError:
|
|
176
|
-
pass
|
|
177
|
-
except Exception as e:
|
|
178
|
-
print(f"Error during MCP client disconnect: {e}", file=sys.stderr)
|
|
179
|
-
self.session = None
|
|
180
|
-
self._exit_stack = None
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
def process_mcp_stream(stream_response, active_npc):
|
|
184
|
-
collected_content = ""
|
|
185
|
-
tool_calls = []
|
|
186
|
-
|
|
187
|
-
interrupted = False
|
|
188
|
-
sys.stdout.write('\033[s')
|
|
189
|
-
sys.stdout.flush()
|
|
190
|
-
|
|
191
|
-
try:
|
|
192
|
-
for chunk in stream_response:
|
|
193
|
-
if hasattr(active_npc, 'provider') and active_npc.provider == "ollama" and 'gpt-oss' not in active_npc.model:
|
|
194
|
-
if hasattr(chunk, 'message') and hasattr(chunk.message, 'tool_calls') and chunk.message.tool_calls:
|
|
195
|
-
for tool_call in chunk.message.tool_calls:
|
|
196
|
-
tool_call_data = {'id': getattr(tool_call, 'id', ''),
|
|
197
|
-
'type': 'function',
|
|
198
|
-
'function': {
|
|
199
|
-
'name': getattr(tool_call.function, 'name', '') if hasattr(tool_call, 'function') else '',
|
|
200
|
-
'arguments': getattr(tool_call.function, 'arguments', {}) if hasattr(tool_call, 'function') else {}
|
|
201
|
-
}
|
|
202
|
-
}
|
|
203
|
-
if isinstance(tool_call_data['function']['arguments'], str):
|
|
204
|
-
try:
|
|
205
|
-
tool_call_data['function']['arguments'] = json.loads(tool_call_data['function']['arguments'])
|
|
206
|
-
except json.JSONDecodeError:
|
|
207
|
-
tool_call_data['function']['arguments'] = {'raw': tool_call_data['function']['arguments']}
|
|
208
|
-
|
|
209
|
-
tool_calls.append(tool_call_data)
|
|
210
|
-
if hasattr(chunk, 'message') and hasattr(chunk.message, 'content') and chunk.message.content:
|
|
211
|
-
collected_content += chunk.message.content
|
|
212
|
-
print(chunk.message.content, end='', flush=True)
|
|
213
|
-
|
|
214
|
-
else:
|
|
215
|
-
if hasattr(chunk, 'choices') and chunk.choices:
|
|
216
|
-
delta = chunk.choices[0].delta
|
|
217
|
-
|
|
218
|
-
if hasattr(delta, 'content') and delta.content:
|
|
219
|
-
collected_content += delta.content
|
|
220
|
-
print(delta.content, end='', flush=True)
|
|
221
|
-
|
|
222
|
-
if hasattr(delta, 'tool_calls') and delta.tool_calls:
|
|
223
|
-
for tool_call_delta in delta.tool_calls:
|
|
224
|
-
if hasattr(tool_call_delta, 'index'):
|
|
225
|
-
idx = tool_call_delta.index
|
|
226
|
-
|
|
227
|
-
while len(tool_calls) <= idx:
|
|
228
|
-
tool_calls.append({
|
|
229
|
-
'id': '',
|
|
230
|
-
'type': 'function',
|
|
231
|
-
'function': {'name': '', 'arguments': ''}
|
|
232
|
-
})
|
|
233
|
-
|
|
234
|
-
if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
|
|
235
|
-
tool_calls[idx]['id'] = tool_call_delta.id
|
|
236
|
-
if hasattr(tool_call_delta, 'function'):
|
|
237
|
-
if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
|
|
238
|
-
tool_calls[idx]['function']['name'] = tool_call_delta.function.name
|
|
239
|
-
|
|
240
|
-
if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
|
|
241
|
-
tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
|
|
242
|
-
except KeyboardInterrupt:
|
|
243
|
-
interrupted = True
|
|
244
|
-
print('\n⚠️ Stream interrupted by user')
|
|
245
|
-
|
|
246
|
-
sys.stdout.write('\033[u')
|
|
247
|
-
sys.stdout.write('\033[0J')
|
|
248
|
-
sys.stdout.flush()
|
|
249
|
-
|
|
250
|
-
if collected_content:
|
|
251
|
-
render_markdown(collected_content)
|
|
252
|
-
|
|
253
|
-
return collected_content, tool_calls
|
|
254
|
-
|
|
255
|
-
def clean_orphaned_tool_calls(messages):
|
|
256
|
-
cleaned_messages = []
|
|
257
|
-
i = 0
|
|
258
|
-
while i < len(messages):
|
|
259
|
-
msg = messages[i]
|
|
260
|
-
|
|
261
|
-
if msg.get("role") == "tool":
|
|
262
|
-
# Check if there's a preceding assistant message with tool_calls
|
|
263
|
-
found_preceding_assistant = False
|
|
264
|
-
for j in range(i-1, -1, -1):
|
|
265
|
-
prev_msg = messages[j]
|
|
266
|
-
if prev_msg.get("role") == "assistant" and prev_msg.get("tool_calls"):
|
|
267
|
-
# Check if this tool response matches any tool call
|
|
268
|
-
tool_call_ids = {tc["id"] for tc in prev_msg["tool_calls"]}
|
|
269
|
-
if msg.get("tool_call_id") in tool_call_ids:
|
|
270
|
-
found_preceding_assistant = True
|
|
271
|
-
break
|
|
272
|
-
elif prev_msg.get("role") in ["user", "assistant"]:
|
|
273
|
-
break
|
|
274
|
-
|
|
275
|
-
if found_preceding_assistant:
|
|
276
|
-
cleaned_messages.append(msg)
|
|
277
|
-
# Skip orphaned tool responses
|
|
278
|
-
|
|
279
|
-
elif (msg.get("role") == "assistant" and msg.get("tool_calls")):
|
|
280
|
-
tool_call_ids = {tc["id"] for tc in msg["tool_calls"]}
|
|
281
|
-
j = i + 1
|
|
282
|
-
found_responses = set()
|
|
283
|
-
|
|
284
|
-
while j < len(messages):
|
|
285
|
-
next_msg = messages[j]
|
|
286
|
-
if next_msg.get("role") == "tool":
|
|
287
|
-
if next_msg.get("tool_call_id") in tool_call_ids:
|
|
288
|
-
found_responses.add(next_msg.get("tool_call_id"))
|
|
289
|
-
elif next_msg.get("role") in ["user", "assistant"]:
|
|
290
|
-
break
|
|
291
|
-
j += 1
|
|
292
|
-
|
|
293
|
-
missing_responses = tool_call_ids - found_responses
|
|
294
|
-
if missing_responses:
|
|
295
|
-
assistant_msg = msg.copy()
|
|
296
|
-
assistant_msg["tool_calls"] = [
|
|
297
|
-
tc for tc in msg["tool_calls"]
|
|
298
|
-
if tc["id"] not in missing_responses
|
|
299
|
-
]
|
|
300
|
-
if not assistant_msg["tool_calls"]:
|
|
301
|
-
del assistant_msg["tool_calls"]
|
|
302
|
-
cleaned_messages.append(assistant_msg)
|
|
303
|
-
else:
|
|
304
|
-
cleaned_messages.append(msg)
|
|
305
|
-
else:
|
|
306
|
-
cleaned_messages.append(msg)
|
|
307
|
-
i += 1
|
|
308
|
-
|
|
309
|
-
return cleaned_messages
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, context=None):
|
|
313
|
-
"""Unified LLM response with exception handling."""
|
|
314
|
-
messages = clean_orphaned_tool_calls(messages)
|
|
315
|
-
|
|
316
|
-
try:
|
|
317
|
-
return get_llm_response(
|
|
318
|
-
prompt=prompt,
|
|
319
|
-
npc=npc,
|
|
320
|
-
messages=messages,
|
|
321
|
-
tools=tools,
|
|
322
|
-
auto_process_tool_calls=False,
|
|
323
|
-
stream=stream,
|
|
324
|
-
team=team,
|
|
325
|
-
context=context
|
|
326
|
-
)
|
|
327
|
-
except Timeout:
|
|
328
|
-
return get_llm_response(
|
|
329
|
-
prompt=prompt,
|
|
330
|
-
npc=npc,
|
|
331
|
-
messages=messages,
|
|
332
|
-
tools=tools,
|
|
333
|
-
auto_process_tool_calls=False,
|
|
334
|
-
stream=stream,
|
|
335
|
-
team=team
|
|
336
|
-
)
|
|
337
|
-
except ContextWindowExceededError:
|
|
338
|
-
print('compressing..... ')
|
|
339
|
-
compressed_state = npc.compress_planning_state(messages)
|
|
340
|
-
compressed_messages = [{"role": "system", "content": compressed_state}]
|
|
341
|
-
return get_llm_response(
|
|
342
|
-
prompt=prompt,
|
|
343
|
-
npc=npc,
|
|
344
|
-
messages=compressed_messages,
|
|
345
|
-
tools=tools,
|
|
346
|
-
auto_process_tool_calls=False,
|
|
347
|
-
stream=stream,
|
|
348
|
-
team=team
|
|
349
|
-
)
|
|
350
|
-
except RateLimitError:
|
|
351
|
-
import time
|
|
352
|
-
print('rate limit hit... waiting 60 seconds')
|
|
353
|
-
time.sleep(60)
|
|
354
|
-
print('compressing..... ')
|
|
355
|
-
compressed_state = npc.compress_planning_state(messages)
|
|
356
|
-
compressed_messages = [{"role": "system", "content": compressed_state}]
|
|
357
|
-
return get_llm_response(
|
|
358
|
-
prompt=prompt,
|
|
359
|
-
npc=npc,
|
|
360
|
-
messages=compressed_messages,
|
|
361
|
-
tools=tools,
|
|
362
|
-
auto_process_tool_calls=False,
|
|
363
|
-
stream=stream,
|
|
364
|
-
team=team
|
|
365
|
-
)
|
|
366
|
-
except BadRequestError as e:
|
|
367
|
-
if "tool_call_id" in str(e).lower():
|
|
368
|
-
cleaned_messages = clean_orphaned_tool_calls(messages)
|
|
369
|
-
return get_llm_response(
|
|
370
|
-
prompt=prompt,
|
|
371
|
-
npc=npc,
|
|
372
|
-
messages=cleaned_messages,
|
|
373
|
-
tools=tools,
|
|
374
|
-
auto_process_tool_calls=False,
|
|
375
|
-
stream=stream,
|
|
376
|
-
team=team,
|
|
377
|
-
context=context
|
|
378
|
-
)
|
|
379
|
-
else:
|
|
380
|
-
raise e
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
def process_mcp_stream(stream_response, active_npc):
|
|
385
|
-
collected_content = ""
|
|
386
|
-
tool_calls = []
|
|
387
|
-
|
|
388
|
-
interrupted = False
|
|
389
|
-
sys.stdout.write('\033[s')
|
|
390
|
-
sys.stdout.flush()
|
|
391
|
-
|
|
392
|
-
try:
|
|
393
|
-
for chunk in stream_response:
|
|
394
|
-
if hasattr(active_npc, 'provider') and active_npc.provider == "ollama" and 'gpt-oss' not in active_npc.model:
|
|
395
|
-
if hasattr(chunk, 'message') and hasattr(chunk.message, 'tool_calls') and chunk.message.tool_calls:
|
|
396
|
-
for tool_call in chunk.message.tool_calls:
|
|
397
|
-
tool_call_data = {'id': getattr(tool_call, 'id', ''),
|
|
398
|
-
'type': 'function',
|
|
399
|
-
'function': {
|
|
400
|
-
'name': getattr(tool_call.function, 'name', '') if hasattr(tool_call, 'function') else '',
|
|
401
|
-
'arguments': getattr(tool_call.function, 'arguments', {}) if hasattr(tool_call, 'function') else {}
|
|
402
|
-
}
|
|
403
|
-
}
|
|
404
|
-
if isinstance(tool_call_data['function']['arguments'], str):
|
|
405
|
-
try:
|
|
406
|
-
tool_call_data['function']['arguments'] = json.loads(tool_call_data['function']['arguments'])
|
|
407
|
-
except json.JSONDecodeError:
|
|
408
|
-
tool_call_data['function']['arguments'] = {'raw': tool_call_data['function']['arguments']}
|
|
409
|
-
|
|
410
|
-
tool_calls.append(tool_call_data)
|
|
411
|
-
if hasattr(chunk, 'message') and hasattr(chunk.message, 'content') and chunk.message.content:
|
|
412
|
-
collected_content += chunk.message.content
|
|
413
|
-
print(chunk.message.content, end='', flush=True)
|
|
414
|
-
|
|
415
|
-
else:
|
|
416
|
-
if hasattr(chunk, 'choices') and chunk.choices:
|
|
417
|
-
delta = chunk.choices[0].delta
|
|
418
|
-
|
|
419
|
-
if hasattr(delta, 'content') and delta.content:
|
|
420
|
-
collected_content += delta.content
|
|
421
|
-
print(delta.content, end='', flush=True)
|
|
422
|
-
|
|
423
|
-
if hasattr(delta, 'tool_calls') and delta.tool_calls:
|
|
424
|
-
for tool_call_delta in delta.tool_calls:
|
|
425
|
-
if hasattr(tool_call_delta, 'index'):
|
|
426
|
-
idx = tool_call_delta.index
|
|
427
|
-
|
|
428
|
-
while len(tool_calls) <= idx:
|
|
429
|
-
tool_calls.append({
|
|
430
|
-
'id': '',
|
|
431
|
-
'type': 'function',
|
|
432
|
-
'function': {'name': '', 'arguments': ''}
|
|
433
|
-
})
|
|
434
|
-
|
|
435
|
-
if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
|
|
436
|
-
tool_calls[idx]['id'] = tool_call_delta.id
|
|
437
|
-
if hasattr(tool_call_delta, 'function'):
|
|
438
|
-
if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
|
|
439
|
-
tool_calls[idx]['function']['name'] = tool_call_delta.function.name
|
|
440
|
-
|
|
441
|
-
if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
|
|
442
|
-
tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
|
|
443
|
-
except KeyboardInterrupt:
|
|
444
|
-
interrupted = True
|
|
445
|
-
print('\n⚠️ Stream interrupted by user')
|
|
446
|
-
|
|
447
|
-
sys.stdout.write('\033[u')
|
|
448
|
-
sys.stdout.write('\033[0J')
|
|
449
|
-
sys.stdout.flush()
|
|
450
|
-
|
|
451
|
-
if collected_content:
|
|
452
|
-
render_markdown(collected_content)
|
|
453
|
-
|
|
454
|
-
return collected_content, tool_calls, interrupted
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output):
|
|
458
|
-
if not tool_calls or not mcp_client:
|
|
459
|
-
return None, messages, False
|
|
460
|
-
|
|
461
|
-
messages = clean_orphaned_tool_calls(messages)
|
|
462
|
-
|
|
463
|
-
print(colored("\n🔧 Executing MCP tools...", "cyan"))
|
|
464
|
-
user_interrupted = False
|
|
465
|
-
|
|
466
|
-
while tool_calls:
|
|
467
|
-
tool_responses = []
|
|
468
|
-
|
|
469
|
-
if len(messages) > 20:
|
|
470
|
-
compressed_state = npc.compress_planning_state(messages)
|
|
471
|
-
messages = [{"role": "system", "content": npc.get_system_prompt() + f' Your current task: {compressed_state}'}]
|
|
472
|
-
print("Compressed messages during tool execution.")
|
|
473
|
-
|
|
474
|
-
for tool_call in tool_calls:
|
|
475
|
-
tool_name = tool_call['function']['name']
|
|
476
|
-
tool_args = tool_call['function']['arguments']
|
|
477
|
-
tool_call_id = tool_call['id']
|
|
478
|
-
|
|
479
|
-
if isinstance(tool_args, str):
|
|
480
|
-
try:
|
|
481
|
-
tool_args = json.loads(tool_args) if tool_args.strip() else {}
|
|
482
|
-
except json.JSONDecodeError:
|
|
483
|
-
tool_args = {}
|
|
484
|
-
|
|
485
|
-
try:
|
|
486
|
-
print(f" Calling MCP tool: {tool_name} with args: {tool_args}")
|
|
487
|
-
|
|
488
|
-
loop = asyncio.get_event_loop()
|
|
489
|
-
if loop.is_closed():
|
|
490
|
-
loop = asyncio.new_event_loop()
|
|
491
|
-
asyncio.set_event_loop(loop)
|
|
492
|
-
|
|
493
|
-
mcp_result = loop.run_until_complete(
|
|
494
|
-
mcp_client.session.call_tool(tool_name, tool_args)
|
|
495
|
-
)
|
|
496
|
-
|
|
497
|
-
tool_content = ""
|
|
498
|
-
if hasattr(mcp_result, 'content') and mcp_result.content:
|
|
499
|
-
for content_item in mcp_result.content:
|
|
500
|
-
if hasattr(content_item, 'text'):
|
|
501
|
-
tool_content += content_item.text
|
|
502
|
-
elif hasattr(content_item, 'data'):
|
|
503
|
-
tool_content += str(content_item.data)
|
|
504
|
-
else:
|
|
505
|
-
tool_content += str(content_item)
|
|
506
|
-
else:
|
|
507
|
-
tool_content = str(mcp_result)
|
|
508
|
-
|
|
509
|
-
tool_responses.append({
|
|
510
|
-
"role": "tool",
|
|
511
|
-
"tool_call_id": tool_call_id,
|
|
512
|
-
"name": tool_name,
|
|
513
|
-
"content": tool_content
|
|
514
|
-
})
|
|
515
|
-
|
|
516
|
-
print(colored(f" ✓ {tool_name} completed", "green"))
|
|
517
|
-
|
|
518
|
-
except KeyboardInterrupt:
|
|
519
|
-
print(colored(f"\n ⚠️ Tool execution interrupted by user", "yellow"))
|
|
520
|
-
user_interrupted = True
|
|
521
|
-
break
|
|
522
|
-
except Exception as e:
|
|
523
|
-
print(colored(f" ✗ {tool_name} failed: {e}", "red"))
|
|
524
|
-
tool_responses.append({
|
|
525
|
-
"role": "tool",
|
|
526
|
-
"tool_call_id": tool_call_id,
|
|
527
|
-
"name": tool_name,
|
|
528
|
-
"content": f"Error: {str(e)}"
|
|
529
|
-
})
|
|
530
|
-
|
|
531
|
-
if user_interrupted:
|
|
532
|
-
return None, messages, True
|
|
533
|
-
|
|
534
|
-
current_messages = messages + tool_responses
|
|
535
|
-
|
|
536
|
-
try:
|
|
537
|
-
follow_up_response = get_llm_response_with_handling(
|
|
538
|
-
prompt="",
|
|
539
|
-
npc=npc,
|
|
540
|
-
messages=current_messages,
|
|
541
|
-
tools=mcp_client.available_tools_llm,
|
|
542
|
-
stream=stream_output,
|
|
543
|
-
team=None
|
|
544
|
-
)
|
|
545
|
-
except KeyboardInterrupt:
|
|
546
|
-
print(colored(f"\n ⚠️ Follow-up response interrupted by user", "yellow"))
|
|
547
|
-
return None, messages, True
|
|
548
|
-
|
|
549
|
-
follow_up_messages = follow_up_response.get('messages', current_messages)
|
|
550
|
-
follow_up_content = follow_up_response.get('response', '')
|
|
551
|
-
follow_up_tool_calls = []
|
|
552
|
-
follow_up_interrupted = False
|
|
553
|
-
|
|
554
|
-
if stream_output:
|
|
555
|
-
if hasattr(follow_up_content, '__iter__'):
|
|
556
|
-
collected_content, follow_up_tool_calls, follow_up_interrupted = process_mcp_stream(follow_up_content, npc)
|
|
557
|
-
else:
|
|
558
|
-
collected_content = str(follow_up_content)
|
|
559
|
-
follow_up_content = collected_content
|
|
560
|
-
else:
|
|
561
|
-
if follow_up_messages:
|
|
562
|
-
last_message = follow_up_messages[-1]
|
|
563
|
-
if last_message.get("role") == "assistant" and "tool_calls" in last_message:
|
|
564
|
-
follow_up_tool_calls = last_message["tool_calls"]
|
|
565
|
-
|
|
566
|
-
if follow_up_interrupted:
|
|
567
|
-
return follow_up_content, follow_up_messages, True
|
|
568
|
-
|
|
569
|
-
messages = follow_up_messages
|
|
570
|
-
|
|
571
|
-
if not follow_up_tool_calls:
|
|
572
|
-
if not stream_output:
|
|
573
|
-
print('\n')
|
|
574
|
-
render_markdown(follow_up_content)
|
|
575
|
-
return follow_up_content, messages, False
|
|
576
|
-
else:
|
|
577
|
-
if follow_up_content or follow_up_tool_calls:
|
|
578
|
-
assistant_message = {"role": "assistant", "content": follow_up_content}
|
|
579
|
-
if follow_up_tool_calls:
|
|
580
|
-
assistant_message["tool_calls"] = follow_up_tool_calls
|
|
581
|
-
messages.append(assistant_message)
|
|
582
|
-
|
|
583
|
-
tool_calls = follow_up_tool_calls
|
|
584
|
-
print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
|
|
585
|
-
|
|
586
|
-
return None, messages, False
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
|
|
590
|
-
mcp_tools_for_llm = []
|
|
591
|
-
|
|
592
|
-
if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
|
|
593
|
-
all_available_mcp_tools = state.mcp_client.available_tools_llm
|
|
594
|
-
|
|
595
|
-
if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
|
|
596
|
-
mcp_tools_for_llm = [
|
|
597
|
-
tool_def for tool_def in all_available_mcp_tools
|
|
598
|
-
if tool_def['function']['name'] in selected_mcp_tools_names
|
|
599
|
-
]
|
|
600
|
-
if not mcp_tools_for_llm:
|
|
601
|
-
cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
|
|
602
|
-
else:
|
|
603
|
-
mcp_tools_for_llm = all_available_mcp_tools
|
|
604
|
-
else:
|
|
605
|
-
cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
|
|
606
|
-
|
|
607
|
-
if len(state.messages) > 20:
|
|
608
|
-
compressed_state = state.npc.compress_planning_state(state.messages)
|
|
609
|
-
state.messages = [{"role": "system", "content": state.npc.get_system_prompt() + f' Your current task: {compressed_state}'}]
|
|
610
|
-
print("Compressed messages during tool execution.")
|
|
611
|
-
|
|
612
|
-
response_dict = get_llm_response_with_handling(
|
|
613
|
-
prompt=command,
|
|
614
|
-
npc=state.npc,
|
|
615
|
-
messages=state.messages,
|
|
616
|
-
tools=mcp_tools_for_llm,
|
|
617
|
-
stream=state.stream_output,
|
|
618
|
-
team=state.team,
|
|
619
|
-
context=f' The users working directory is {state.current_path}'
|
|
620
|
-
)
|
|
621
|
-
|
|
622
|
-
stream_response = response_dict.get('response')
|
|
623
|
-
messages = response_dict.get('messages', state.messages)
|
|
624
|
-
tool_calls = response_dict.get('tool_calls', [])
|
|
625
|
-
|
|
626
|
-
collected_content, stream_tool_calls, stream_interrupted = process_mcp_stream(stream_response, state.npc)
|
|
627
|
-
|
|
628
|
-
if stream_interrupted:
|
|
629
|
-
state.messages = messages
|
|
630
|
-
return state, {
|
|
631
|
-
"output": collected_content + "\n[Interrupted by user]",
|
|
632
|
-
"tool_calls": [],
|
|
633
|
-
"messages": state.messages,
|
|
634
|
-
"interrupted": True
|
|
635
|
-
}
|
|
636
|
-
|
|
637
|
-
if stream_tool_calls:
|
|
638
|
-
tool_calls = stream_tool_calls
|
|
639
|
-
|
|
640
|
-
state.messages = messages
|
|
641
|
-
|
|
642
|
-
if tool_calls and hasattr(state, 'mcp_client') and state.mcp_client:
|
|
643
|
-
final_content, state.messages, tools_interrupted = execute_mcp_tool_calls(
|
|
644
|
-
tool_calls,
|
|
645
|
-
state.mcp_client,
|
|
646
|
-
state.messages,
|
|
647
|
-
state.npc,
|
|
648
|
-
state.stream_output
|
|
649
|
-
)
|
|
650
|
-
if tools_interrupted:
|
|
651
|
-
return state, {
|
|
652
|
-
"output": (final_content or collected_content) + "\n[Interrupted by user]",
|
|
653
|
-
"tool_calls": tool_calls,
|
|
654
|
-
"messages": state.messages,
|
|
655
|
-
"interrupted": True
|
|
656
|
-
}
|
|
657
|
-
if final_content:
|
|
658
|
-
collected_content = final_content
|
|
659
|
-
|
|
660
|
-
return state, {
|
|
661
|
-
"output": collected_content,
|
|
662
|
-
"tool_calls": tool_calls,
|
|
663
|
-
"messages": state.messages,
|
|
664
|
-
"interrupted": False
|
|
665
|
-
}
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
def _resolve_and_copy_mcp_server_path(
|
|
669
|
-
explicit_path: Optional[str],
|
|
670
|
-
current_path: Optional[str],
|
|
671
|
-
team_ctx_mcp_servers: Optional[List[Dict[str, str]]],
|
|
672
|
-
interactive: bool = False,
|
|
673
|
-
auto_copy_bypass: bool = False
|
|
674
|
-
) -> Optional[str]:
|
|
675
|
-
default_mcp_server_name = "mcp_server.py"
|
|
676
|
-
npcsh_default_template_path = Path(__file__).parent / default_mcp_server_name
|
|
677
|
-
|
|
678
|
-
def _copy_template_if_missing(destination_dir: Path, description: str) -> Optional[Path]:
|
|
679
|
-
destination_file = destination_dir / default_mcp_server_name
|
|
680
|
-
if not npcsh_default_template_path.exists():
|
|
681
|
-
cprint(f"Error: Default {default_mcp_server_name} template not found at {npcsh_default_template_path}", "red")
|
|
682
|
-
return None
|
|
683
|
-
|
|
684
|
-
if not destination_file.exists():
|
|
685
|
-
if auto_copy_bypass or not interactive:
|
|
686
|
-
destination_dir.mkdir(parents=True, exist_ok=True)
|
|
687
|
-
shutil.copy(npcsh_default_template_path, destination_file)
|
|
688
|
-
print(colored(f"Automatically copied default {default_mcp_server_name} to {destination_file}", "green"))
|
|
689
|
-
return destination_file
|
|
690
|
-
else:
|
|
691
|
-
choice = input(colored(f"No {default_mcp_server_name} found in {description}. Copy default template to {destination_file}? (y/N): ", "yellow")).strip().lower()
|
|
692
|
-
if choice == 'y':
|
|
693
|
-
destination_dir.mkdir(parents=True, exist_ok=True)
|
|
694
|
-
shutil.copy(npcsh_default_template_path, destination_file)
|
|
695
|
-
print(colored(f"Copied default {default_mcp_server_name} to {destination_file}", "green"))
|
|
696
|
-
return destination_file
|
|
697
|
-
else:
|
|
698
|
-
print(colored("Skipping copy.", "yellow"))
|
|
699
|
-
return None
|
|
700
|
-
return destination_file
|
|
701
|
-
|
|
702
|
-
if explicit_path:
|
|
703
|
-
abs_explicit_path = Path(explicit_path).expanduser().resolve()
|
|
704
|
-
if abs_explicit_path.exists():
|
|
705
|
-
print(f"Using explicit MCP server path: {abs_explicit_path}")
|
|
706
|
-
return str(abs_explicit_path)
|
|
707
|
-
else:
|
|
708
|
-
cprint(f"Warning: Explicit MCP server path not found: {abs_explicit_path}", "yellow")
|
|
709
|
-
|
|
710
|
-
if team_ctx_mcp_servers:
|
|
711
|
-
for server_entry in team_ctx_mcp_servers:
|
|
712
|
-
server_path_from_ctx = server_entry.get("value")
|
|
713
|
-
if server_path_from_ctx:
|
|
714
|
-
abs_ctx_path = Path(server_path_from_ctx).expanduser().resolve()
|
|
715
|
-
if abs_ctx_path.exists():
|
|
716
|
-
print(f"Using MCP server path from team context: {abs_ctx_path}")
|
|
717
|
-
return str(abs_ctx_path)
|
|
718
|
-
else:
|
|
719
|
-
cprint(f"Warning: MCP server path from team context not found: {abs_ctx_path}", "yellow")
|
|
720
|
-
|
|
721
|
-
if current_path:
|
|
722
|
-
project_npc_team_dir = Path(current_path).resolve() / "npc_team"
|
|
723
|
-
project_mcp_server_file = project_npc_team_dir / default_mcp_server_name
|
|
724
|
-
|
|
725
|
-
if project_mcp_server_file.exists():
|
|
726
|
-
print(f"Using project-specific MCP server path: {project_mcp_server_file}")
|
|
727
|
-
return str(project_mcp_server_file)
|
|
728
|
-
else:
|
|
729
|
-
copied_path = _copy_template_if_missing(project_npc_team_dir, "project's npc_team directory")
|
|
730
|
-
if copied_path:
|
|
731
|
-
return str(copied_path)
|
|
732
|
-
|
|
733
|
-
global_npc_team_dir = Path.home() / ".npcsh" / "npc_team"
|
|
734
|
-
global_mcp_server_file = global_npc_team_dir / default_mcp_server_name
|
|
735
|
-
|
|
736
|
-
if global_mcp_server_file.exists():
|
|
737
|
-
print(f"Using global MCP server path: {global_mcp_server_file}")
|
|
738
|
-
return str(global_mcp_server_file)
|
|
739
|
-
else:
|
|
740
|
-
copied_path = _copy_template_if_missing(global_npc_team_dir, "global npc_team directory")
|
|
741
|
-
if copied_path:
|
|
742
|
-
return str(copied_path)
|
|
743
|
-
|
|
744
|
-
cprint("No MCP server script found in any expected location.", "yellow")
|
|
745
|
-
return None
|
|
746
|
-
def print_corca_welcome_message():
|
|
747
|
-
turq = "\033[38;2;64;224;208m"
|
|
748
|
-
chrome = "\033[38;2;211;211;211m"
|
|
749
|
-
orange = "\033[38;2;255;165;0m"
|
|
750
|
-
reset = "\033[0m"
|
|
751
|
-
|
|
752
|
-
print(
|
|
753
|
-
f"""
|
|
754
|
-
{turq} ██████ ██████ ██████ ██████ ██████{reset}
|
|
755
|
-
{turq}██ ██ ██ ██ ██ ██ ██ ██ ██🦌🦌██{reset}
|
|
756
|
-
{turq}██ ██ ██ ██ ██ ██ ██🦌🦌██{reset}
|
|
757
|
-
{chrome}██ ██ ██ ████████ ██ ████████{reset}
|
|
758
|
-
{chrome}██ ██ ██ ██ ███ ██ ██ ██{reset}
|
|
759
|
-
{chrome}██ ██ ██ ██ ██ ███ ██ ██ ██ ██{reset}
|
|
760
|
-
{orange} ██████ ██████ ██ ███ ███████ ██ ██{reset}
|
|
761
|
-
|
|
762
|
-
{chrome} 🦌 C O R C A 🦌{reset}
|
|
763
|
-
|
|
764
|
-
{turq}MCP-powered shell for agentic workflows{reset}
|
|
765
|
-
"""
|
|
766
|
-
)
|
|
767
|
-
|
|
768
|
-
def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None, team=None,
|
|
769
|
-
current_path=None, mcp_server_path_from_request: Optional[str] = None):
|
|
770
|
-
from npcsh._state import ShellState
|
|
771
|
-
|
|
772
|
-
state = ShellState(
|
|
773
|
-
conversation_id=conversation_id,
|
|
774
|
-
stream_output=True,
|
|
775
|
-
current_mode="corca",
|
|
776
|
-
chat_model=os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b"),
|
|
777
|
-
chat_provider=os.environ.get("NPCSH_CHAT_PROVIDER", "ollama"),
|
|
778
|
-
current_path=current_path or os.getcwd(),
|
|
779
|
-
npc=npc,
|
|
780
|
-
team=team
|
|
781
|
-
)
|
|
782
|
-
state.command_history = command_history
|
|
783
|
-
|
|
784
|
-
team_ctx_mcp_servers = None
|
|
785
|
-
if team and hasattr(team, 'team_path'):
|
|
786
|
-
team_ctx = _load_team_context(team.team_path)
|
|
787
|
-
team_ctx_mcp_servers = team_ctx.get('mcp_servers', [])
|
|
788
|
-
|
|
789
|
-
if npc and isinstance(npc, NPC):
|
|
790
|
-
if not npc.model and team_ctx.get('model'):
|
|
791
|
-
npc.model = team_ctx['model']
|
|
792
|
-
if not npc.provider and team_ctx.get('provider'):
|
|
793
|
-
npc.provider = team_ctx['provider']
|
|
794
|
-
|
|
795
|
-
if not state.chat_model and team_ctx.get('model'):
|
|
796
|
-
state.chat_model = team_ctx['model']
|
|
797
|
-
if not state.chat_provider and team_ctx.get('provider'):
|
|
798
|
-
state.chat_provider = team_ctx['provider']
|
|
799
|
-
|
|
800
|
-
auto_copy_bypass = os.getenv("NPCSH_CORCA_AUTO_COPY_MCP_SERVER", "false").lower() == "true"
|
|
801
|
-
|
|
802
|
-
resolved_server_path = _resolve_and_copy_mcp_server_path(
|
|
803
|
-
explicit_path=mcp_server_path_from_request,
|
|
804
|
-
current_path=current_path,
|
|
805
|
-
team_ctx_mcp_servers=team_ctx_mcp_servers,
|
|
806
|
-
interactive=False,
|
|
807
|
-
auto_copy_bypass=auto_copy_bypass,
|
|
808
|
-
force_global=False
|
|
809
|
-
)
|
|
810
9
|
|
|
811
|
-
|
|
812
|
-
if resolved_server_path:
|
|
813
|
-
try:
|
|
814
|
-
client_instance = MCPClientNPC()
|
|
815
|
-
if client_instance.connect_sync(resolved_server_path):
|
|
816
|
-
state.mcp_client = client_instance
|
|
817
|
-
print(f"Successfully connected MCP client for {conversation_id} to {resolved_server_path}")
|
|
818
|
-
else:
|
|
819
|
-
print(f"Failed to connect MCP client for {conversation_id} to {resolved_server_path}. Tools will be unavailable.")
|
|
820
|
-
except ImportError:
|
|
821
|
-
print("WARNING: npcsh.corca or MCPClientNPC not found. Cannot initialize MCP client.", file=sys.stderr)
|
|
822
|
-
except FileNotFoundError as e:
|
|
823
|
-
print(f"MCP Client Error: {e}")
|
|
824
|
-
except ValueError as e:
|
|
825
|
-
print(f"MCP Client Error: {e}")
|
|
826
|
-
except Exception as e:
|
|
827
|
-
print(f"An unexpected error occurred during MCP client initialization: {e}")
|
|
828
|
-
traceback.print_exc()
|
|
829
|
-
|
|
830
|
-
return state
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
def process_corca_result(
|
|
834
|
-
user_input: str,
|
|
835
|
-
result_state: ShellState,
|
|
836
|
-
output: Any,
|
|
837
|
-
command_history: CommandHistory,
|
|
838
|
-
):
|
|
839
|
-
from npcpy.llm_funcs import get_facts
|
|
840
|
-
from npcpy.memory.memory_processor import memory_approval_ui
|
|
841
|
-
from npcsh._state import format_memory_context
|
|
842
|
-
|
|
843
|
-
team_name = result_state.team.name if result_state.team else "__none__"
|
|
844
|
-
npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
|
|
845
|
-
|
|
846
|
-
active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
|
|
847
|
-
name="default",
|
|
848
|
-
model=result_state.chat_model,
|
|
849
|
-
provider=result_state.chat_provider,
|
|
850
|
-
db_conn=command_history.engine
|
|
851
|
-
)
|
|
852
|
-
|
|
853
|
-
save_conversation_message(
|
|
854
|
-
command_history,
|
|
855
|
-
result_state.conversation_id,
|
|
856
|
-
"user",
|
|
857
|
-
user_input,
|
|
858
|
-
wd=result_state.current_path,
|
|
859
|
-
model=active_npc.model,
|
|
860
|
-
provider=active_npc.provider,
|
|
861
|
-
npc=npc_name,
|
|
862
|
-
team=team_name,
|
|
863
|
-
attachments=result_state.attachments,
|
|
864
|
-
)
|
|
865
|
-
result_state.attachments = None
|
|
866
|
-
|
|
867
|
-
output_content = output.get('output') if isinstance(output, dict) else output
|
|
868
|
-
tool_calls = output.get('tool_calls', []) if isinstance(output, dict) else []
|
|
869
|
-
final_output_str = None
|
|
870
|
-
|
|
871
|
-
if tool_calls and hasattr(result_state, 'mcp_client') and result_state.mcp_client:
|
|
872
|
-
final_output_str, result_state.messages, tools_interrupted = execute_mcp_tool_calls(
|
|
873
|
-
tool_calls,
|
|
874
|
-
result_state.mcp_client,
|
|
875
|
-
result_state.messages,
|
|
876
|
-
result_state.npc,
|
|
877
|
-
result_state.stream_output
|
|
878
|
-
)
|
|
879
|
-
if tools_interrupted:
|
|
880
|
-
print(colored("\n⚠️ Tool execution interrupted", "yellow"))
|
|
881
|
-
else:
|
|
882
|
-
print('\n')
|
|
883
|
-
if result_state.stream_output:
|
|
884
|
-
final_output_str = print_and_process_stream_with_markdown(
|
|
885
|
-
output_content,
|
|
886
|
-
result_state.npc.model,
|
|
887
|
-
result_state.npc.provider,
|
|
888
|
-
show=True
|
|
889
|
-
)
|
|
890
|
-
else:
|
|
891
|
-
final_output_str = str(output_content)
|
|
892
|
-
render_markdown(final_output_str)
|
|
893
|
-
|
|
894
|
-
if final_output_str:
|
|
895
|
-
if not result_state.messages or result_state.messages[-1].get("role") != "assistant" or result_state.messages[-1].get("content") != final_output_str:
|
|
896
|
-
result_state.messages.append({"role": "assistant", "content": final_output_str})
|
|
897
|
-
|
|
898
|
-
save_conversation_message(
|
|
899
|
-
command_history,
|
|
900
|
-
result_state.conversation_id,
|
|
901
|
-
"assistant",
|
|
902
|
-
final_output_str,
|
|
903
|
-
wd=result_state.current_path,
|
|
904
|
-
model=active_npc.model,
|
|
905
|
-
provider=active_npc.provider,
|
|
906
|
-
npc=npc_name,
|
|
907
|
-
team=team_name,
|
|
908
|
-
)
|
|
909
|
-
|
|
910
|
-
result_state.turn_count += 1
|
|
911
|
-
|
|
912
|
-
if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
|
|
913
|
-
conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
|
|
914
|
-
engine = command_history.engine
|
|
915
|
-
|
|
916
|
-
memory_examples = command_history.get_memory_examples_for_context(
|
|
917
|
-
npc=npc_name,
|
|
918
|
-
team=team_name,
|
|
919
|
-
directory_path=result_state.current_path
|
|
920
|
-
)
|
|
921
|
-
|
|
922
|
-
memory_context = format_memory_context(memory_examples)
|
|
923
|
-
|
|
924
|
-
approved_facts = []
|
|
925
|
-
try:
|
|
926
|
-
facts = get_facts(
|
|
927
|
-
conversation_turn_text,
|
|
928
|
-
model=active_npc.model,
|
|
929
|
-
provider=active_npc.provider,
|
|
930
|
-
npc=active_npc,
|
|
931
|
-
context=memory_context
|
|
932
|
-
)
|
|
933
|
-
|
|
934
|
-
if facts:
|
|
935
|
-
memories_for_approval = []
|
|
936
|
-
for i, fact in enumerate(facts):
|
|
937
|
-
memories_for_approval.append({
|
|
938
|
-
"memory_id": f"temp_{i}",
|
|
939
|
-
"content": fact['statement'],
|
|
940
|
-
"context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
|
|
941
|
-
"npc": npc_name,
|
|
942
|
-
"fact_data": fact
|
|
943
|
-
})
|
|
944
|
-
|
|
945
|
-
approvals = memory_approval_ui(memories_for_approval)
|
|
946
|
-
|
|
947
|
-
for approval in approvals:
|
|
948
|
-
fact_data = next(m['fact_data'] for m in memories_for_approval
|
|
949
|
-
if m['memory_id'] == approval['memory_id'])
|
|
950
|
-
|
|
951
|
-
command_history.add_memory_to_database(
|
|
952
|
-
message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
|
|
953
|
-
conversation_id=result_state.conversation_id,
|
|
954
|
-
npc=npc_name,
|
|
955
|
-
team=team_name,
|
|
956
|
-
directory_path=result_state.current_path,
|
|
957
|
-
initial_memory=fact_data['statement'],
|
|
958
|
-
status=approval['decision'],
|
|
959
|
-
model=active_npc.model,
|
|
960
|
-
provider=active_npc.provider,
|
|
961
|
-
final_memory=approval.get('final_memory')
|
|
962
|
-
)
|
|
963
|
-
|
|
964
|
-
if approval['decision'] in ['human-approved', 'human-edited']:
|
|
965
|
-
approved_fact = {
|
|
966
|
-
'statement': approval.get('final_memory') or fact_data['statement'],
|
|
967
|
-
'source_text': fact_data.get('source_text', ''),
|
|
968
|
-
'type': fact_data.get('type', 'explicit'),
|
|
969
|
-
'generation': 0
|
|
970
|
-
}
|
|
971
|
-
approved_facts.append(approved_fact)
|
|
972
|
-
|
|
973
|
-
except Exception as e:
|
|
974
|
-
print(colored(f"Memory generation error: {e}", "yellow"))
|
|
975
|
-
|
|
976
|
-
if result_state.build_kg and approved_facts:
|
|
977
|
-
try:
|
|
978
|
-
if not should_skip_kg_processing(user_input, final_output_str):
|
|
979
|
-
npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
|
|
980
|
-
evolved_npc_kg, _ = kg_evolve_incremental(
|
|
981
|
-
existing_kg=npc_kg,
|
|
982
|
-
new_facts=approved_facts,
|
|
983
|
-
model=active_npc.model,
|
|
984
|
-
provider=active_npc.provider,
|
|
985
|
-
npc=active_npc,
|
|
986
|
-
get_concepts=True,
|
|
987
|
-
link_concepts_facts=False,
|
|
988
|
-
link_concepts_concepts=False,
|
|
989
|
-
link_facts_facts=False,
|
|
990
|
-
)
|
|
991
|
-
save_kg_to_db(
|
|
992
|
-
engine,
|
|
993
|
-
evolved_npc_kg,
|
|
994
|
-
team_name,
|
|
995
|
-
npc_name,
|
|
996
|
-
result_state.current_path
|
|
997
|
-
)
|
|
998
|
-
except Exception as e:
|
|
999
|
-
print(colored(f"Error during real-time KG evolution: {e}", "red"))
|
|
1000
|
-
|
|
1001
|
-
print(colored("\nChecking for potential team improvements...", "cyan"))
|
|
1002
|
-
try:
|
|
1003
|
-
summary = breathe(messages=result_state.messages[-20:],
|
|
1004
|
-
npc=active_npc)
|
|
1005
|
-
characterization = summary.get('output')
|
|
1006
|
-
|
|
1007
|
-
if characterization and result_state.team:
|
|
1008
|
-
team_ctx_path = get_team_ctx_path(result_state.team.team_path)
|
|
1009
|
-
if not team_ctx_path:
|
|
1010
|
-
team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
|
|
1011
|
-
|
|
1012
|
-
ctx_data = _load_team_context(result_state.team.team_path)
|
|
1013
|
-
current_context = ctx_data.get('context', '')
|
|
1014
|
-
|
|
1015
|
-
prompt = f"""Based on this characterization: {characterization},
|
|
1016
|
-
|
|
1017
|
-
suggest changes (additions, deletions, edits) to the team's context.
|
|
1018
|
-
Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.
|
|
1019
|
-
|
|
1020
|
-
Current Context: "{current_context}".
|
|
1021
|
-
|
|
1022
|
-
Respond with JSON: """ + """
|
|
1023
|
-
{
|
|
1024
|
-
"suggestion": "Your sentence."
|
|
1025
|
-
}
|
|
1026
|
-
"""
|
|
1027
|
-
response = get_llm_response(prompt,
|
|
1028
|
-
npc=active_npc,
|
|
1029
|
-
format="json",
|
|
1030
|
-
team=result_state.team)
|
|
1031
|
-
suggestion = response.get("response", {}).get("suggestion")
|
|
1032
|
-
|
|
1033
|
-
if suggestion:
|
|
1034
|
-
new_context = (current_context + " " + suggestion).strip()
|
|
1035
|
-
print(colored(f"{result_state.npc.name} suggests updating team context:", "yellow"))
|
|
1036
|
-
print(f" - OLD: {current_context}\n + NEW: {new_context}")
|
|
1037
|
-
|
|
1038
|
-
choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
|
|
1039
|
-
|
|
1040
|
-
if choice == 'y':
|
|
1041
|
-
ctx_data['context'] = new_context
|
|
1042
|
-
with open(team_ctx_path, 'w') as f:
|
|
1043
|
-
yaml.dump(ctx_data, f)
|
|
1044
|
-
print(colored("Team context updated.", "green"))
|
|
1045
|
-
elif choice == 'e':
|
|
1046
|
-
edited_context = input(f"Edit context [{new_context}]: ").strip()
|
|
1047
|
-
if edited_context:
|
|
1048
|
-
ctx_data['context'] = edited_context
|
|
1049
|
-
else:
|
|
1050
|
-
ctx_data['context'] = new_context
|
|
1051
|
-
with open(team_ctx_path, 'w') as f:
|
|
1052
|
-
yaml.dump(ctx_data, f)
|
|
1053
|
-
print(colored("Team context updated with edits.", "green"))
|
|
1054
|
-
else:
|
|
1055
|
-
print("Suggestion declined.")
|
|
1056
|
-
except Exception as e:
|
|
1057
|
-
import traceback
|
|
1058
|
-
print(colored(f"Could not generate team suggestions: {e}", "yellow"))
|
|
1059
|
-
traceback.print_exc()
|
|
1060
|
-
|
|
1061
|
-
def _read_npcsh_global_env() -> Dict[str, str]:
|
|
1062
|
-
global_env_file = Path(".npcsh_global")
|
|
1063
|
-
env_vars = {}
|
|
1064
|
-
if global_env_file.exists():
|
|
1065
|
-
try:
|
|
1066
|
-
with open(global_env_file, 'r') as f:
|
|
1067
|
-
for line in f:
|
|
1068
|
-
line = line.strip()
|
|
1069
|
-
if line and '=' in line and not line.startswith('#'):
|
|
1070
|
-
key, value = line.split('=', 1)
|
|
1071
|
-
env_vars[key.strip()] = value.strip()
|
|
1072
|
-
except Exception as e:
|
|
1073
|
-
print(f"Warning: Could not read .npcsh_global: {e}")
|
|
1074
|
-
return env_vars
|
|
1075
|
-
|
|
1076
|
-
def _load_team_context(team_path: str) -> Dict[str, Any]:
|
|
1077
|
-
"""Load team context from any .ctx file in the team directory"""
|
|
1078
|
-
ctx_path = get_team_ctx_path(team_path)
|
|
1079
|
-
if not ctx_path or not os.path.exists(ctx_path):
|
|
1080
|
-
return {}
|
|
1081
|
-
|
|
1082
|
-
try:
|
|
1083
|
-
with open(ctx_path, 'r') as f:
|
|
1084
|
-
return yaml.safe_load(f) or {}
|
|
1085
|
-
except Exception as e:
|
|
1086
|
-
print(f"Warning: Could not load team context from {ctx_path}: {e}")
|
|
1087
|
-
return {}
|
|
1088
|
-
|
|
1089
|
-
|
|
1090
|
-
def _write_to_npcsh_global(key: str, value: str) -> None:
|
|
1091
|
-
global_env_file = Path(".npcsh_global")
|
|
1092
|
-
env_vars = _read_npcsh_global_env()
|
|
1093
|
-
env_vars[key] = value
|
|
1094
|
-
|
|
1095
|
-
try:
|
|
1096
|
-
with open(global_env_file, 'w') as f:
|
|
1097
|
-
for k, v in env_vars.items():
|
|
1098
|
-
f.write(f"{k}={v}\n")
|
|
1099
|
-
except Exception as e:
|
|
1100
|
-
print(f"Warning: Could not write to .npcsh_global: {e}")
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
def _resolve_and_copy_mcp_server_path(
|
|
1104
|
-
explicit_path: Optional[str],
|
|
1105
|
-
current_path: Optional[str],
|
|
1106
|
-
team_ctx_mcp_servers: Optional[List[Dict[str, str]]],
|
|
1107
|
-
interactive: bool = False,
|
|
1108
|
-
auto_copy_bypass: bool = False,
|
|
1109
|
-
force_global: bool = False
|
|
1110
|
-
) -> Optional[str]:
|
|
1111
|
-
default_mcp_server_name = "mcp_server.py"
|
|
1112
|
-
-    npcsh_default_template_path = Path(__file__).parent / default_mcp_server_name
-
-    global_env = _read_npcsh_global_env()
-    prefer_global = global_env.get("NPCSH_PREFER_GLOBAL_MCP_SERVER", "false").lower() == "true"
-
-    def _copy_template_if_missing(destination_dir: Path, description: str) -> Optional[Path]:
-        destination_file = destination_dir / default_mcp_server_name
-        if not npcsh_default_template_path.exists():
-            cprint(f"Error: Default {default_mcp_server_name} template not found at {npcsh_default_template_path}", "red")
-            return None
-
-        if not destination_file.exists():
-            if auto_copy_bypass or not interactive:
-                destination_dir.mkdir(parents=True, exist_ok=True)
-                shutil.copy(npcsh_default_template_path, destination_file)
-                print(colored(f"Automatically copied default {default_mcp_server_name} to {destination_file}", "green"))
-                return destination_file
-            else:
-                choice = input(colored(f"No {default_mcp_server_name} found in {description}. Copy default template to {destination_file}? (y/N/g for global): ", "yellow")).strip().lower()
-                if choice == 'y':
-                    destination_dir.mkdir(parents=True, exist_ok=True)
-                    shutil.copy(npcsh_default_template_path, destination_file)
-                    print(colored(f"Copied default {default_mcp_server_name} to {destination_file}", "green"))
-                    return destination_file
-                elif choice == 'g':
-                    _write_to_npcsh_global("NPCSH_PREFER_GLOBAL_MCP_SERVER", "true")
-                    print(colored("Set preference to use global MCP server.", "green"))
-                    return None
-                else:
-                    print(colored("Skipping copy.", "yellow"))
-                    return None
-        return destination_file
-
-    if explicit_path:
-        abs_explicit_path = Path(explicit_path).expanduser().resolve()
-        if abs_explicit_path.exists():
-            print(f"Using explicit MCP server path: {abs_explicit_path}")
-            return str(abs_explicit_path)
-        else:
-            cprint(f"Warning: Explicit MCP server path not found: {abs_explicit_path}", "yellow")
-
-    if team_ctx_mcp_servers:
-        for server_entry in team_ctx_mcp_servers:
-            server_path_from_ctx = server_entry.get("value")
-            if server_path_from_ctx:
-                abs_ctx_path = Path(server_path_from_ctx).expanduser().resolve()
-                if abs_ctx_path.exists():
-                    print(f"Using MCP server path from team context: {abs_ctx_path}")
-                    return str(abs_ctx_path)
-                else:
-                    cprint(f"Warning: MCP server path from team context not found: {abs_ctx_path}", "yellow")
-
-    if not (force_global or prefer_global):
-        if current_path:
-            project_npc_team_dir = Path(current_path).resolve() / "npc_team"
-            project_mcp_server_file = project_npc_team_dir / default_mcp_server_name
-
-            if project_mcp_server_file.exists():
-                print(f"Using project-specific MCP server path: {project_mcp_server_file}")
-                return str(project_mcp_server_file)
-            else:
-                copied_path = _copy_template_if_missing(project_npc_team_dir, "project's npc_team directory")
-                if copied_path:
-                    return str(copied_path)
-
-    global_npc_team_dir = Path.home() / ".npcsh" / "npc_team"
-    global_mcp_server_file = global_npc_team_dir / default_mcp_server_name
-
-    if global_mcp_server_file.exists():
-        print(f"Using global MCP server path: {global_mcp_server_file}")
-        return str(global_mcp_server_file)
-    else:
-        copied_path = _copy_template_if_missing(global_npc_team_dir, "global npc_team directory")
-        if copied_path:
-            return str(copied_path)
-
-    cprint("No MCP server script found in any expected location.", "yellow")
-    return None
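
The removed resolver above walks a fixed precedence: an explicit path, then any `mcp_servers` entries from the team context, then the project's `npc_team` directory, and finally `~/.npcsh/npc_team`, offering to copy the bundled template when nothing is found. A minimal sketch of that precedence; `resolve_mcp_server` is a hypothetical stand-in, and `"mcp_server.py"` stands in for `default_mcp_server_name`, whose real value is defined elsewhere in corca.py:

    from pathlib import Path

    # Illustrative sketch only, not part of npcsh's API.
    def resolve_mcp_server(explicit=None, team_servers=None, project_dir=None):
        candidates = []
        if explicit:
            candidates.append(Path(explicit).expanduser().resolve())
        for entry in (team_servers or []):
            if entry.get("value"):  # team context entries look like {"value": "<path>"}
                candidates.append(Path(entry["value"]).expanduser().resolve())
        if project_dir:
            candidates.append(Path(project_dir).resolve() / "npc_team" / "mcp_server.py")
        candidates.append(Path.home() / ".npcsh" / "npc_team" / "mcp_server.py")
        # First existing candidate wins; the real code also offers to copy a template.
        return next((str(p) for p in candidates if p.exists()), None)
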
-def create_corca_state_and_mcp_client(conversation_id,
-                                      command_history,
-                                      npc=None,
-                                      team=None,
-                                      current_path=None,
-                                      mcp_server_path: Optional[str] = None):
-    from npcsh._state import ShellState
-
-    state = ShellState(
-        conversation_id=conversation_id,
-        stream_output=True,
-        current_mode="corca",
-        chat_model=os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b"),
-        chat_provider=os.environ.get("NPCSH_CHAT_PROVIDER", "ollama"),
-        current_path=current_path or os.getcwd(),
-        npc=npc,
-        team=team
-    )
-    state.command_history = command_history
-
-    auto_copy_bypass = os.getenv("NPCSH_CORCA_AUTO_COPY_MCP_SERVER", "false").lower() == "true"
-
-    resolved_server_path = _resolve_and_copy_mcp_server_path(
-        explicit_path=mcp_server_path,
-        current_path=current_path,
-        team_ctx_mcp_servers=team.team_ctx.get('mcp_servers', []) if team and hasattr(team, 'team_ctx') else None,
-        interactive=False,
-        auto_copy_bypass=auto_copy_bypass,
-        force_global=False
-    )
-
-    state.mcp_client = None
-    if resolved_server_path:
-        try:
-            client_instance = MCPClientNPC()
-            if client_instance.connect_sync(resolved_server_path):
-                state.mcp_client = client_instance
-                print(f"Successfully connected MCP client for {conversation_id} to {resolved_server_path}")
-            else:
-                print(f"Failed to connect MCP client for {conversation_id} to {resolved_server_path}. Tools will be unavailable.")
-        except ImportError:
-            print("WARNING: npcsh.corca or MCPClientNPC not found. Cannot initialize MCP client.", file=sys.stderr)
-        except FileNotFoundError as e:
-            print(f"MCP Client Error: {e}")
-        except ValueError as e:
-            print(f"MCP Client Error: {e}")
-        except Exception as e:
-            print(f"An unexpected error occurred during MCP client initialization: {e}")
-            traceback.print_exc()
-
-    return state
-
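
As the removed helper shows, per-conversation state and the MCP client were built together: a `ShellState` seeded from the `NPCSH_CHAT_MODEL`/`NPCSH_CHAT_PROVIDER` environment, then a non-interactive path resolution and a synchronous connect. A hedged call sketch, with illustrative argument values:

    # Illustrative values; command_history, my_npc, and my_team come from the caller.
    state = create_corca_state_and_mcp_client(
        conversation_id="conv-123",
        command_history=command_history,
        npc=my_npc,
        team=my_team,
        current_path="/path/to/project",
        mcp_server_path=None,  # None falls through to the resolution order sketched above
    )
    if state.mcp_client is None:
        print("Running without MCP tools")
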
-def corca_session(
-    command_history: CommandHistory,
-    state: Optional[ShellState] = None,
-    mcp_server_path: Optional[str] = None,
-    force_global: bool = False,
-    initial_command: Optional[str] = None
-) -> Dict[str, Any]:
-    """
-    Clean programmatic entry to Corca mode.
-
-    Args:
-        command_history: CommandHistory instance
-        state: Optional existing ShellState, will create if None
-        mcp_server_path: Optional explicit path to MCP server
-        force_global: Force use of global MCP server
-        initial_command: Optional command to execute before entering loop
-
-    Returns:
-        Dict with 'output' and 'messages' keys
-    """
-    # Setup state if not provided
-    if state is None:
-        _, team, default_npc = setup_shell()
-
-        # Load corca.npc if available
-        project_corca_path = os.path.join('./npc_team/', "corca.npc")
-        global_corca_path = os.path.expanduser('~/.npcsh/npc_team/corca.npc')
-
-        if os.path.exists(project_corca_path):
-            default_npc = NPC(file=project_corca_path, db_conn=command_history.engine)
-        elif os.path.exists(global_corca_path):
-            default_npc = NPC(file=global_corca_path, db_conn=command_history.engine)
-
-        # Set defaults
-        if default_npc.model is None:
-            default_npc.model = team.model or NPCSH_CHAT_MODEL
-        if default_npc.provider is None:
-            default_npc.provider = team.provider or NPCSH_CHAT_PROVIDER
-
-        from npcsh._state import initial_state
-        state = initial_state
-        state.team = team
-        state.npc = default_npc
-        state.command_history = command_history
-
-    print_corca_welcome_message()
-    try:
-        readline.set_completer(make_completer(state, router))
-    except Exception:
-        pass
-
-    display_model = state.npc.model if state.npc and state.npc.model else state.chat_model
-    display_provider = state.npc.provider if state.npc and state.npc.provider else state.chat_provider
-    print(f"Current LLM model: {display_model} ({display_provider})")
-
-    # Resolve MCP server path
-    auto_copy_bypass = os.getenv("NPCSH_CORCA_AUTO_COPY_MCP_SERVER", "false").lower() == "true"
-
-    resolved_server_path = _resolve_and_copy_mcp_server_path(
-        explicit_path=mcp_server_path,
-        current_path=state.current_path,
-        team_ctx_mcp_servers=state.team.team_ctx.get('mcp_servers', []) if state.team and hasattr(state.team, 'team_ctx') else None,
-        interactive=True,
-        auto_copy_bypass=auto_copy_bypass,
-        force_global=force_global
-    )
-
-    # Connect to MCP server
-    if resolved_server_path:
-        try:
-            mcp_client = MCPClientNPC()
-            if mcp_client.connect_sync(resolved_server_path):
-                state.mcp_client = mcp_client
-            else:
-                cprint(f"Failed to connect to MCP server. Limited functionality.", "yellow")
-                state.mcp_client = None
-        except Exception as e:
-            cprint(f"Error connecting to MCP server: {e}", "red")
-            traceback.print_exc()
-            state.mcp_client = None
-    else:
-        cprint("No MCP server path found. Limited functionality.", "yellow")
-        state.mcp_client = None
-
-    # Execute initial command if provided
-    if initial_command:
-        try:
-            state, output = execute_command_corca(initial_command, state, command_history)
-            if not (isinstance(output, dict) and output.get('interrupted')):
-                process_corca_result(initial_command, state, output, command_history)
-        except Exception as e:
-            print(colored(f'Error executing initial command: {e}', "red"))
-
-    # Main loop
-    while True:
-        try:
-            prompt_npc_name = state.npc.name if state.npc else "npc"
-            current_model = state.npc.model if state.npc and state.npc.model else state.chat_model
-            current_provider = state.npc.provider if state.npc and state.npc.provider else state.chat_provider
-            model_segment = f"{current_model}@{current_provider}"
-            prompt_str = f"{colored(os.path.basename(state.current_path), 'blue')}:{prompt_npc_name}:{model_segment}🦌> "
-            prompt = readline_safe_prompt(prompt_str)
-            user_input = get_multiline_input(prompt).strip()
-
-            if user_input.startswith('/'):
-                state, slash_result = execute_slash_command(
-                    user_input,
-                    None,
-                    state,
-                    state.stream_output,
-                    router
-                )
-                process_result(user_input, state, slash_result, command_history)
-                continue
-
-            if user_input.lower() in ["exit", "quit", "done"]:
-                break
-
-            if not user_input:
-                continue
-
-            try:
-                state, output = execute_command_corca(user_input, state, command_history)
-
-                if isinstance(output, dict) and output.get('interrupted'):
-                    print(colored("\n⚠️ Command interrupted. MCP session maintained.", "yellow"))
-                    continue
-
-                process_corca_result(user_input, state, output, command_history)
-            except KeyboardInterrupt:
-                print(colored("\n⚠️ Interrupted. Type 'exit' to quit Corca mode.", "yellow"))
-                continue
-            except Exception as e:
-                print(colored(f'An Exception has occurred: {e}', "red"))
-                traceback.print_exc()
-
-        except KeyboardInterrupt:
-            print(colored("\n⚠️ Interrupted. Type 'exit' to quit Corca mode.", "yellow"))
-            continue
-        except EOFError:
-            print("\nExiting Corca Mode.")
-            break
-
-    # Cleanup
-    if state.mcp_client:
-        state.mcp_client.disconnect_sync()
-        state.mcp_client = None
-
-    render_markdown("\n# Exiting Corca Mode")
-    return {"output": "", "messages": state.messages}
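
The docstring above fixes the programmatic contract: pass a `CommandHistory`, optionally a prebuilt `ShellState`, and get back a dict with `output` and `messages` keys. A hedged invocation sketch; the command string is illustrative:

    result = corca_session(
        command_history=command_history,  # a CommandHistory instance
        state=None,                       # None triggers setup_shell() and corca.npc loading
        mcp_server_path=None,             # or an explicit server script path
        force_global=False,
        initial_command="summarize recent commits",  # runs once before the REPL loop
    )
    print(result["messages"])
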
-def enter_corca_mode(command: str, **kwargs):
-    """Legacy wrapper for command-line entry"""
-    state: ShellState = kwargs.get('shell_state')
-    command_history: CommandHistory = kwargs.get('command_history')
-
-    if not state or not command_history:
-        return {"output": "Error: Corca mode requires shell state and history.", "messages": kwargs.get('messages', [])}
-
-    # Parse command arguments
-    all_command_parts = shlex.split(command)
-    parser = argparse.ArgumentParser(prog="/corca", description="Enter Corca MCP-powered mode.")
-    parser.add_argument("--mcp-server-path", type=str, help="Path to an MCP server script.")
-    parser.add_argument("-g", "--global", dest="force_global", action="store_true", help="Force use of global MCP server.")
-
-    try:
-        known_args, remaining_args = parser.parse_known_args(all_command_parts[1:])
-    except SystemExit:
-        return {"output": "Invalid arguments for /corca. See /help corca.", "messages": state.messages}
-
-    # Get initial command from remaining args
-    initial_command = " ".join(remaining_args) if remaining_args else None
-
-    # Call the clean entry point
-    return corca_session(
-        command_history=command_history,
-        state=state,
-        mcp_server_path=known_args.mcp_server_path,
-        force_global=known_args.force_global,
-        initial_command=initial_command
-    )
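
The legacy wrapper expects the raw slash command plus `shell_state` and `command_history` kwargs; anything left after the recognized flags becomes the initial command. A sketch with illustrative values (the server path shown is hypothetical):

    enter_corca_mode(
        command='/corca --mcp-server-path ./npc_team/mcp_server.py run the tests',
        shell_state=state,
        command_history=command_history,
    )
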
+from npcsh._state import setup_shell


 def main():
-    parser = argparse.ArgumentParser(description="
-    parser.add_argument("
-    parser.add_argument("
+    parser = argparse.ArgumentParser(description="corca - MCP-powered agentic shell")
+    parser.add_argument("command", nargs="*", help="Optional one-shot command to execute")
+    parser.add_argument("--model", "-m", type=str, help="LLM model to use")
+    parser.add_argument("--provider", "-p", type=str, help="LLM provider to use")
+    parser.add_argument("--mcp-server", type=str, help="Path to MCP server script")
     args = parser.parse_args()

+    # Setup shell to get team and default NPC
     command_history, team, default_npc = setup_shell()
-
-    project_team_path = os.path.abspath('./npc_team/')
-    global_team_path = os.path.expanduser('~/.npcsh/npc_team/')
-
-    project_corca_path = os.path.join(project_team_path, "corca.npc")
-    global_corca_path = os.path.join(global_team_path, "corca.npc")
-
-    if os.path.exists(project_corca_path):
-        default_npc = NPC(file=project_corca_path,
-                          db_conn=command_history.engine)
-    elif os.path.exists(global_corca_path):
-        default_npc = NPC(file=global_corca_path,
-                          db_conn=command_history.engine)

-    if
-
-
-
-
+    if not team or "corca" not in team.jinxs_dict:
+        print("Error: corca jinx not found. Ensure npc_team/jinxs/modes/corca.jinx exists.")
+        sys.exit(1)
+
+    # Build context for jinx execution
+    initial_command = " ".join(args.command) if args.command else None
+
+    context = {
+        "npc": default_npc,
+        "team": team,
+        "messages": [],
+        "model": args.model,
+        "provider": args.provider,
+        "mcp_server_path": args.mcp_server,
+        "initial_command": initial_command,
+    }

-
-
-
-
-
+    # Execute the jinx
+    corca_jinx = team.jinxs_dict["corca"]
+    result = corca_jinx.execute(context=context, npc=default_npc)
+
+    if isinstance(result, dict) and result.get("output"):
+        print(result["output"])

-    from npcsh._state import initial_state
-    initial_shell_state = initial_state
-    initial_shell_state.team = team
-    initial_shell_state.npc = default_npc
-
-    fake_command_str = "/corca"
-    if args.mcp_server_path:
-        fake_command_str = f'/corca --mcp-server-path "{args.mcp_server_path}"'
-    elif args.force_global:
-        fake_command_str = "/corca --global"
-
-    kwargs = {
-        'command': fake_command_str,
-        'shell_state': initial_shell_state,
-        'command_history': command_history
-    }
-
-    enter_corca_mode(**kwargs)

 if __name__ == "__main__":
     main()
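
In 1.1.14 the mode logic lives in the `corca` jinx and `main()` becomes a thin driver: parse flags, call `setup_shell()`, and hand a context dict to the jinx. The same flow can be reproduced programmatically; a sketch mirroring the new `main()`, where the one-shot command is illustrative:

    from npcsh._state import setup_shell

    command_history, team, default_npc = setup_shell()
    corca_jinx = team.jinxs_dict["corca"]  # KeyError if the corca jinx is not installed
    result = corca_jinx.execute(
        context={
            "npc": default_npc,
            "team": team,
            "messages": [],
            "model": None,               # None defers to NPC/team defaults
            "provider": None,
            "mcp_server_path": None,     # None triggers the usual resolution order
            "initial_command": "hello",  # illustrative one-shot command
        },
        npc=default_npc,
    )
    if isinstance(result, dict) and result.get("output"):
        print(result["output"])
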