npcsh 1.0.16__py3-none-any.whl → 1.0.18__py3-none-any.whl

npcsh/corca.py ADDED
@@ -0,0 +1,709 @@
+ import os
+ import sys
+ import asyncio
+ import shlex
+ import argparse
+ from contextlib import AsyncExitStack
+ from typing import Optional, Callable, Dict, Any, Tuple, List
+
+ try:
+     from mcp import ClientSession, StdioServerParameters
+     from mcp.client.stdio import stdio_client
+ except ImportError:
+     print("FATAL: 'mcp' package not found. Please run 'pip install mcp'.", file=sys.stderr)
+     sys.exit(1)
+
+ from termcolor import colored, cprint
+ import json
+ from npcpy.llm_funcs import get_llm_response, breathe
+ from npcpy.npc_compiler import NPC
+ from npcpy.npc_sysenv import render_markdown, print_and_process_stream_with_markdown
+ from npcpy.memory.command_history import load_kg_from_db, save_conversation_message, save_kg_to_db
+ from npcpy.memory.knowledge_graph import kg_evolve_incremental, kg_dream_process, kg_initial, kg_sleep_process
+ from npcsh._state import (
+     ShellState,
+     CommandHistory,
+     execute_command as core_execute_command,
+     process_result,
+     get_multiline_input,
+     readline_safe_prompt,
+     setup_shell,
+     should_skip_kg_processing,
+ )
+ import yaml
+
+
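+ # MCPClientNPC wraps a stdio connection to an MCP server and exposes each
+ # server tool both as an OpenAI-style tool definition (available_tools_llm)
+ # and as a synchronous callable (tool_map) for use from the shell loop.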
+ class MCPClientNPC:
+     def __init__(self, debug: bool = True):
+         self.debug = debug
+         self.session: Optional[ClientSession] = None
+         # Build the exit stack directly; spinning up a throwaway event loop
+         # just to construct it is unnecessary.
+         self._exit_stack = AsyncExitStack()
+         self.available_tools_llm: List[Dict[str, Any]] = []
+         self.tool_map: Dict[str, Callable] = {}
+         self.server_script_path: Optional[str] = None
+
+     def _log(self, message: str, color: str = "cyan") -> None:
+         if self.debug:
+             cprint(f"[MCP Client] {message}", color, file=sys.stderr)
+
+     async def _connect_async(self, server_script_path: str) -> None:
+         self._log(f"Attempting to connect to MCP server: {server_script_path}")
+         self.server_script_path = server_script_path
+         abs_path = os.path.abspath(server_script_path)
+         if not os.path.exists(abs_path):
+             raise FileNotFoundError(f"MCP server script not found: {abs_path}")
+
+         if abs_path.endswith('.py'):
+             command = sys.executable
+             # Pop the script's own directory off sys.path before exec'ing it so
+             # its imports resolve from the project root rather than the script dir.
+             args = [
+                 '-c',
+                 f'import sys; sys.path.pop(0) if sys.path[0] == "{os.path.dirname(abs_path)}" else None; '
+                 f'exec(open("{abs_path}").read())'
+             ]
+         elif os.access(abs_path, os.X_OK):
+             # Arbitrary executables are launched directly; the python -c shim
+             # above only makes sense for .py scripts.
+             command, args = abs_path, []
+         else:
+             raise ValueError(f"Unsupported MCP server script type or not executable: {abs_path}")
+
+         server_params = StdioServerParameters(
+             command=command,
+             args=args,
+             env=os.environ.copy(),
+             cwd=os.path.dirname(os.path.dirname(abs_path))  # run from the project root
+         )
+         if self.session:
+             await self._exit_stack.aclose()
+
+         self._exit_stack = AsyncExitStack()
+
+         stdio_transport = await self._exit_stack.enter_async_context(stdio_client(server_params))
+         self.session = await self._exit_stack.enter_async_context(ClientSession(*stdio_transport))
+         await self.session.initialize()
+
+         response = await self.session.list_tools()
+         self.available_tools_llm = []
+         self.tool_map = {}
+
+         if response.tools:
+             for mcp_tool in response.tools:
+                 tool_def = {
+                     "type": "function",
+                     "function": {
+                         "name": mcp_tool.name,
+                         "description": mcp_tool.description or f"MCP tool: {mcp_tool.name}",
+                         "parameters": getattr(mcp_tool, "inputSchema", {"type": "object", "properties": {}})
+                     }
+                 }
+                 self.available_tools_llm.append(tool_def)
+
+                 async def execute_tool(tool_name: str, args: dict):
+                     if not self.session:
+                         return {"error": "No MCP session"}
+
+                     print(f"DEBUG: About to call MCP tool {tool_name}")
+                     try:
+                         # Guard the call with a timeout so a hung server cannot stall the shell.
+                         result = await asyncio.wait_for(
+                             self.session.call_tool(tool_name, args),
+                             timeout=30.0
+                         )
+                         print(f"DEBUG: MCP tool {tool_name} returned: {type(result)}")
+                         return result
+                     except asyncio.TimeoutError:
+                         print(f"DEBUG: Tool {tool_name} timed out after 30 seconds")
+                         return {"error": f"Tool {tool_name} timed out"}
+                     except Exception as e:
+                         print(f"DEBUG: Tool {tool_name} error: {e}")
+                         return {"error": str(e)}
+
+                 def make_tool_func(tool_name):
+                     async def tool_func(**kwargs):
+                         print(f"DEBUG: Tool wrapper called for {tool_name} with {kwargs}")
+                         # Normalize the literal string 'None' (as some models emit) to None.
+                         cleaned_kwargs = {}
+                         for k, v in kwargs.items():
+                             if v == 'None':
+                                 cleaned_kwargs[k] = None
+                             else:
+                                 cleaned_kwargs[k] = v
+                         result = await execute_tool(tool_name, cleaned_kwargs)
+                         print(f"DEBUG: Tool wrapper got result: {type(result)}")
+                         return result
+
+                     def sync_wrapper(**kwargs):
+                         print(f"DEBUG: Sync wrapper called for {tool_name}")
+                         return asyncio.run(tool_func(**kwargs))
+
+                     return sync_wrapper
+
+                 self.tool_map[mcp_tool.name] = make_tool_func(mcp_tool.name)
+
+         tool_names = list(self.tool_map.keys())
+         self._log(f"Connection successful. Tools: {', '.join(tool_names) if tool_names else 'None'}")
+
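+     # connect_sync/disconnect_sync adapt the async MCP plumbing for the
+     # synchronous shell loop. A minimal usage sketch (hypothetical server path):
+     #     client = MCPClientNPC()
+     #     if client.connect_sync("servers/fs_server.py"):
+     #         ...  # client.tool_map / client.available_tools_llm are now populated
+     #         client.disconnect_sync()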
+     def connect_sync(self, server_script_path: str) -> bool:
+         loop = asyncio.get_event_loop_policy().get_event_loop()
+         if loop.is_closed():
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+         try:
+             loop.run_until_complete(self._connect_async(server_script_path))
+             return True
+         except Exception as e:
+             cprint(f"MCP connection failed: {e}", "red", file=sys.stderr)
+             return False
+
+     def disconnect_sync(self):
+         if self.session:
+             self._log("Disconnecting MCP session.")
+             loop = asyncio.get_event_loop_policy().get_event_loop()
+             if not loop.is_closed():
+                 try:
+                     # The session is closed via the exit stack that opened it;
+                     # ClientSession does not expose a close() coroutine of its own.
+                     loop.run_until_complete(self._exit_stack.aclose())
+                 except RuntimeError:
+                     pass
+             self.session = None
+
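+
+ # Entries in available_tools_llm follow the OpenAI-style function-calling
+ # schema, e.g. (illustrative tool name only):
+ #     {"type": "function",
+ #      "function": {"name": "list_files",
+ #                   "description": "MCP tool: list_files",
+ #                   "parameters": {"type": "object", "properties": {}}}}
+ # execute_command_corca hands this list to get_llm_response as `tools` and
+ # parses any streamed tool calls itself rather than auto-processing them.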
+ def execute_command_corca(command: str, state: ShellState, command_history) -> Tuple[ShellState, Any]:
+     mcp_tools = []
+
+     if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
+         mcp_tools = state.mcp_client.available_tools_llm
+     else:
+         cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
+
+     active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
+
+     # Get the initial response with tools available, but don't auto-process them.
+     response_dict = get_llm_response(
+         prompt=command,
+         model=active_npc.model or state.chat_model,
+         provider=active_npc.provider or state.chat_provider,
+         npc=state.npc,
+         messages=state.messages,
+         tools=mcp_tools,
+         auto_process_tool_calls=False,
+         stream=state.stream_output
+     )
+
+     # Process the streaming response to extract content and tool calls.
+     stream_response = response_dict.get('response')
+     messages = response_dict.get('messages', state.messages)
+
+     collected_content = ""
+     tool_calls = []
+
+     print("DEBUG: Processing stream response...")
+
+     # Strings are iterable too, so only treat true stream objects as streams.
+     if hasattr(stream_response, '__iter__') and not isinstance(stream_response, str):
+         for chunk in stream_response:
+             print(f"DEBUG: Chunk type: {type(chunk)}")
+
+             if hasattr(chunk, 'choices') and chunk.choices:
+                 delta = chunk.choices[0].delta
+
+                 if hasattr(delta, 'content') and delta.content:
+                     collected_content += delta.content
+                     print(delta.content, end='', flush=True)
+
+                 if hasattr(delta, 'tool_calls') and delta.tool_calls:
+                     for tool_call_delta in delta.tool_calls:
+                         print(f"DEBUG: Tool call delta: {tool_call_delta}")
+
+                         if hasattr(tool_call_delta, 'index'):
+                             idx = tool_call_delta.index
+
+                             # Grow the list until the delta's index exists.
+                             while len(tool_calls) <= idx:
+                                 tool_calls.append({
+                                     'id': '',
+                                     'type': 'function',
+                                     'function': {
+                                         'name': '',
+                                         'arguments': ''
+                                     }
+                                 })
+
+                             # Merge this delta into the accumulated tool call.
+                             if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
+                                 tool_calls[idx]['id'] = tool_call_delta.id
+
+                             if hasattr(tool_call_delta, 'function'):
+                                 if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
+                                     tool_calls[idx]['function']['name'] = tool_call_delta.function.name
+
+                                 if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
+                                     tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
+     else:
+         # Non-streaming responses come back as a plain string.
+         collected_content = str(stream_response or '')
+
+     print(f"\nDEBUG: Final collected_content: {collected_content}")
+     print(f"DEBUG: Final tool_calls: {tool_calls}")
+
+     # Record the assistant turn (content plus any tool calls) in the history.
+     state.messages = messages
+     if collected_content or tool_calls:
+         assistant_message = {"role": "assistant", "content": collected_content}
+         if tool_calls:
+             assistant_message["tool_calls"] = tool_calls
+         state.messages.append(assistant_message)
+
+     return state, {
+         "output": collected_content,
+         "tool_calls": tool_calls,
+         "messages": state.messages
+     }
+
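+ # Accumulated tool calls mirror the OpenAI streaming format: `arguments`
+ # arrives as concatenated JSON fragments, so a finished call looks like
+ #     {"id": "call_0", "type": "function",
+ #      "function": {"name": "list_files", "arguments": "{\"path\": \".\"}"}}
+ # (illustrative values) and is only json.loads'ed at execution time.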
+ def print_corca_welcome_message():
+     turq = "\033[38;2;64;224;208m"
+     chrome = "\033[38;2;211;211;211m"
+     reset = "\033[0m"
+
+     print(
+         f"""
+ Welcome to {turq}C{chrome}o{turq}r{chrome}c{turq}a{reset}!
+ {turq} {turq} {turq} {chrome} {chrome}
+ {turq} ____ {turq} ___ {turq} ____ {chrome} ____ {chrome} __ _
+ {turq} / __|{turq} / _ \\ {turq}| __\\{chrome} / __| {chrome}/ _` |
+ {turq} | |__ {turq}| (_) |{turq}| | {chrome}| |__{chrome} | (_| |
+ {turq} \\____| {turq}\\___/ {turq}| | {chrome}\\____| {chrome}\\__,_|
+ {turq} {turq} {turq} {chrome} {chrome}
+ {reset}
+ An MCP-powered shell for advanced agentic workflows.
+ """
+     )
+
+
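+ # process_corca_result persists the user turn, executes any MCP tool calls the
+ # model emitted, and then loops: each round of tool results is appended as
+ # {"role": "tool", "tool_call_id": ..., "name": ..., "content": ...} messages
+ # and the model is re-queried until a response arrives with no tool calls.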
+ def process_corca_result(
+     user_input: str,
+     result_state: ShellState,
+     output: Any,
+     command_history: CommandHistory,
+ ):
+     team_name = result_state.team.name if result_state.team else "__none__"
+     npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
+
+     active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
+         name="default",
+         model=result_state.chat_model,
+         provider=result_state.chat_provider,
+         db_conn=command_history.engine)
+
+     save_conversation_message(
+         command_history,
+         result_state.conversation_id,
+         "user",
+         user_input,
+         wd=result_state.current_path,
+         model=active_npc.model,
+         provider=active_npc.provider,
+         npc=npc_name,
+         team=team_name,
+         attachments=result_state.attachments,
+     )
+     result_state.attachments = None
+
+     output_content = output.get('output') if isinstance(output, dict) else output
+     tool_calls = output.get('tool_calls', []) if isinstance(output, dict) else []
+     final_output_str = None
+
+     if tool_calls and hasattr(result_state, 'mcp_client') and result_state.mcp_client:
+         print(colored("\n🔧 Executing MCP tools...", "cyan"))
+
+         tool_responses = []
+         for tool_call in tool_calls:
+             tool_name = tool_call['function']['name']
+             tool_args_str = tool_call['function']['arguments']
+             tool_call_id = tool_call['id']
+
+             try:
+                 tool_args = json.loads(tool_args_str) if tool_args_str.strip() else {}
+             except json.JSONDecodeError:
+                 tool_args = {}
+
+             try:
+                 print(f"  Calling MCP tool: {tool_name} with args: {tool_args}")
+
+                 loop = asyncio.get_event_loop()
+                 if loop.is_closed():
+                     loop = asyncio.new_event_loop()
+                     asyncio.set_event_loop(loop)
+
+                 mcp_result = loop.run_until_complete(
+                     result_state.mcp_client.session.call_tool(tool_name, tool_args)
+                 )
+
+                 print(f"DEBUG: MCP result type: {type(mcp_result)}")
+                 print(f"DEBUG: MCP result: {mcp_result}")
+                 print(f"DEBUG: MCP result attributes: {dir(mcp_result)}")
+
+                 tool_content = ""
+                 if hasattr(mcp_result, 'content') and mcp_result.content:
+                     print(f"DEBUG: content type: {type(mcp_result.content)}")
+                     for i, content_item in enumerate(mcp_result.content):
+                         print(f"DEBUG: content_item[{i}]: {content_item} (type: {type(content_item)})")
+                         if hasattr(content_item, 'text'):
+                             tool_content += content_item.text
+                         else:
+                             tool_content += str(content_item)
+                 else:
+                     tool_content = str(mcp_result)
+
+                 print(f"DEBUG: Extracted content length: {len(tool_content)}")
+                 print(f"DEBUG: Extracted content preview: {tool_content[:200]}")
+
+                 tool_responses.append({
+                     "role": "tool",
+                     "tool_call_id": tool_call_id,
+                     "name": tool_name,
+                     "content": tool_content
+                 })
+
+                 print(colored(f"  ✓ {tool_name} completed", "green"))
+
+             except Exception as e:
+                 print(colored(f"  ✗ {tool_name} failed: {e}", "red"))
+                 tool_responses.append({
+                     "role": "tool",
+                     "tool_call_id": tool_call_id,
+                     "name": tool_name,
+                     "content": f"Error: {str(e)}"
+                 })
+
+         result_state.messages.extend(tool_responses)
+
+         while True:
+             follow_up_response = get_llm_response(
+                 prompt="",
+                 model=active_npc.model,
+                 provider=active_npc.provider,
+                 npc=active_npc,
+                 messages=result_state.messages,
+                 tools=result_state.mcp_client.available_tools_llm,
+                 auto_process_tool_calls=False,
+                 stream=result_state.stream_output
+             )
+
+             follow_up_messages = follow_up_response.get('messages', [])
+             follow_up_content = follow_up_response.get('response', '')
+             follow_up_tool_calls = []
+
+             if result_state.stream_output:
+                 collected_content = ""
+                 follow_up_tool_calls = []
+
+                 # Strings are iterable too, so only treat true stream objects as streams.
+                 if hasattr(follow_up_content, '__iter__') and not isinstance(follow_up_content, str):
+                     for chunk in follow_up_content:
+                         if hasattr(chunk, 'choices') and chunk.choices:
+                             delta = chunk.choices[0].delta
+
+                             if hasattr(delta, 'content') and delta.content:
+                                 collected_content += delta.content
+                                 print(delta.content, end='', flush=True)
+
+                             if hasattr(delta, 'tool_calls') and delta.tool_calls:
+                                 for tool_call_delta in delta.tool_calls:
+                                     if hasattr(tool_call_delta, 'index'):
+                                         idx = tool_call_delta.index
+
+                                         while len(follow_up_tool_calls) <= idx:
+                                             follow_up_tool_calls.append({
+                                                 'id': '',
+                                                 'type': 'function',
+                                                 'function': {
+                                                     'name': '',
+                                                     'arguments': ''
+                                                 }
+                                             })
+
+                                         if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
+                                             follow_up_tool_calls[idx]['id'] = tool_call_delta.id
+
+                                         if hasattr(tool_call_delta, 'function'):
+                                             if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
+                                                 follow_up_tool_calls[idx]['function']['name'] = tool_call_delta.function.name
+
+                                             if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
+                                                 follow_up_tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
+                 else:
+                     collected_content = str(follow_up_content)
+
+                 follow_up_content = collected_content
+             else:
+                 if follow_up_messages:
+                     last_message = follow_up_messages[-1]
+                     if last_message.get("role") == "assistant" and "tool_calls" in last_message:
+                         follow_up_tool_calls = last_message["tool_calls"]
+
+             result_state.messages = follow_up_messages
+             if follow_up_content or follow_up_tool_calls:
+                 assistant_message = {"role": "assistant", "content": follow_up_content}
+                 if follow_up_tool_calls:
+                     assistant_message["tool_calls"] = follow_up_tool_calls
+                 result_state.messages.append(assistant_message)
+
+             if not follow_up_tool_calls:
+                 final_output_str = follow_up_content
+                 if not result_state.stream_output:
+                     print('\n')
+                     render_markdown(final_output_str)
+                 break
+
+             print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
+             for tool_call in follow_up_tool_calls:
+                 tool_name = tool_call['function']['name']
+                 tool_args_str = tool_call['function']['arguments']
+                 tool_call_id = tool_call['id']
+
+                 try:
+                     tool_args = json.loads(tool_args_str) if tool_args_str.strip() else {}
+                 except json.JSONDecodeError:
+                     tool_args = {}
+
+                 try:
+                     print(f"  Calling MCP tool: {tool_name} with args: {tool_args}")
+
+                     loop = asyncio.get_event_loop()
+                     if loop.is_closed():
+                         loop = asyncio.new_event_loop()
+                         asyncio.set_event_loop(loop)
+
+                     mcp_result = loop.run_until_complete(
+                         result_state.mcp_client.session.call_tool(tool_name, tool_args)
+                     )
+
+                     print(f"DEBUG: MCP result type: {type(mcp_result)}")
+                     print(f"DEBUG: MCP result: {mcp_result}")
+                     print(f"DEBUG: MCP result.isError: {mcp_result.isError}")
+                     print(f"DEBUG: MCP result.meta: {mcp_result.meta}")
+                     print(f"DEBUG: MCP result.content length: {len(mcp_result.content)}")
+
+                     tool_content = ""
+                     if hasattr(mcp_result, 'content') and mcp_result.content:
+                         for i, content_item in enumerate(mcp_result.content):
+                             # Use getattr so non-text content items don't raise AttributeError here.
+                             text_attr = getattr(content_item, 'text', None)
+                             print(f"DEBUG: content_item[{i}] full object: {repr(content_item)}")
+                             print(f"DEBUG: content_item[{i}] text attribute: '{text_attr}'")
+                             print(f"DEBUG: content_item[{i}] text length: {len(text_attr) if text_attr else 0}")
+
+                             if text_attr:
+                                 tool_content += text_attr
+                             elif hasattr(content_item, 'data'):
+                                 print(f"DEBUG: content_item[{i}] has data: {content_item.data}")
+                                 tool_content += str(content_item.data)
+                             else:
+                                 print(f"DEBUG: content_item[{i}] converting to string: {str(content_item)}")
+                                 tool_content += str(content_item)
+                     result_state.messages.append({
+                         "role": "tool",
+                         "tool_call_id": tool_call_id,
+                         "name": tool_name,
+                         "content": tool_content
+                     })
+
+                     print(colored(f"  ✓ {tool_name} completed", "green"))
+
+                 except Exception as e:
+                     print(colored(f"  ✗ {tool_name} failed: {e}", "red"))
+                     result_state.messages.append({
+                         "role": "tool",
+                         "tool_call_id": tool_call_id,
+                         "name": tool_name,
+                         "content": f"Error: {str(e)}"
+                     })
+     else:
+         print('\n')
+         if result_state.stream_output:
+             final_output_str = print_and_process_stream_with_markdown(
+                 output_content,
+                 active_npc.model,
+                 active_npc.provider,
+                 show=True
+             )
+         else:
+             final_output_str = str(output_content)
+             render_markdown(final_output_str)
+
+     if final_output_str:
+         if not result_state.messages or result_state.messages[-1].get("role") != "assistant" or result_state.messages[-1].get("content") != final_output_str:
+             result_state.messages.append({"role": "assistant", "content": final_output_str})
+
+         save_conversation_message(
+             command_history,
+             result_state.conversation_id,
+             "assistant",
+             final_output_str,
+             wd=result_state.current_path,
+             model=active_npc.model,
+             provider=active_npc.provider,
+             npc=npc_name,
+             team=team_name,
+         )
+
+         conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
+         engine = command_history.engine
+
+         if result_state.build_kg:
+             try:
+                 if not should_skip_kg_processing(user_input, final_output_str):
+                     npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
+                     evolved_npc_kg, _ = kg_evolve_incremental(
+                         existing_kg=npc_kg,
+                         new_content_text=conversation_turn_text,
+                         model=active_npc.model,
+                         provider=active_npc.provider,
+                         get_concepts=True,
+                         link_concepts_facts=False,
+                         link_concepts_concepts=False,
+                         link_facts_facts=False,
+                     )
+                     save_kg_to_db(engine,
+                                   evolved_npc_kg,
+                                   team_name,
+                                   npc_name,
+                                   result_state.current_path)
+             except Exception as e:
+                 print(colored(f"Error during real-time KG evolution: {e}", "red"))
+
+     result_state.turn_count += 1
+
+     if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
+         print(colored("\nChecking for potential team improvements...", "cyan"))
+         try:
+             summary = breathe(messages=result_state.messages[-20:],
+                               npc=active_npc)
+             characterization = summary.get('output')
+
+             if characterization and result_state.team:
+                 team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+                 ctx_data = {}
+                 if os.path.exists(team_ctx_path):
+                     with open(team_ctx_path, 'r') as f:
+                         ctx_data = yaml.safe_load(f) or {}
+                 current_context = ctx_data.get('context', '')
+
+                 prompt = f"""Based on this characterization: {characterization},
+ suggest changes (additions, deletions, edits) to the team's context.
+ Additions need not be fully formed sentences; they can simply be equations, relationships, or other plain, clear items.
+
+ Current Context: "{current_context}"
+
+ Respond with JSON: {{"suggestion": "Your sentence."}}"""
+                 response = get_llm_response(prompt, npc=active_npc, format="json")
+                 suggestion = response.get("response", {}).get("suggestion")
+
+                 if suggestion:
+                     new_context = (current_context + " " + suggestion).strip()
+                     print(colored(f"{result_state.npc.name} suggests updating team context:", "yellow"))
+                     print(f"  - OLD: {current_context}\n  + NEW: {new_context}")
+                     if input("Apply? [y/N]: ").strip().lower() == 'y':
+                         ctx_data['context'] = new_context
+                         with open(team_ctx_path, 'w') as f:
+                             yaml.dump(ctx_data, f)
+                         print(colored("Team context updated.", "green"))
+                     else:
+                         print("Suggestion declined.")
+         except Exception as e:
+             import traceback
+             print(colored(f"Could not generate team suggestions: {e}", "yellow"))
+             traceback.print_exc()
+
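+ # Illustrative team.ctx shape read and updated above (keys inferred from this
+ # code, not a documented schema):
+ #     context: "Short running description of the team's conventions and goals."
+ #     mcp_server: "servers/fs_server.py"   # consulted by enter_corca_mode below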
+ def enter_corca_mode(command: str,
+                      **kwargs):
+     state: ShellState = kwargs.get('shell_state')
+     command_history: CommandHistory = kwargs.get('command_history')
+
+     if not state or not command_history:
+         return {"output": "Error: Corca mode requires shell state and history.", "messages": kwargs.get('messages', [])}
+
+     all_command_parts = shlex.split(command)
+     parsed_args = all_command_parts[1:]
+
+     parser = argparse.ArgumentParser(prog="/corca", description="Enter Corca MCP-powered mode.")
+     parser.add_argument("--mcp-server-path", type=str, help="Path to an MCP server script.")
+
+     try:
+         args = parser.parse_args(parsed_args)
+     except SystemExit:
+         return {"output": "Invalid arguments for /corca. See /help corca.", "messages": state.messages}
+
+     print_corca_welcome_message()
+
+     mcp_client = MCPClientNPC()
+     server_path = args.mcp_server_path
+     if not server_path and state.team and hasattr(state.team, 'team_ctx'):
+         server_path = state.team.team_ctx.get('mcp_server')
+
+     if server_path:
+         if mcp_client.connect_sync(server_path):
+             state.mcp_client = mcp_client
+         else:
+             # Connection failed: set the attribute anyway so later checks don't blow up.
+             state.mcp_client = None
+     else:
+         cprint("No MCP server path provided. Corca mode will have limited agent functionality.", "yellow")
+         state.mcp_client = None
+
+     while True:
+         try:
+             prompt_npc_name = "npc"
+             if state.npc:
+                 prompt_npc_name = state.npc.name
+
+             prompt_str = f"{colored(os.path.basename(state.current_path), 'blue')}:corca:{prompt_npc_name}🦌> "
+             prompt = readline_safe_prompt(prompt_str)
+
+             user_input = get_multiline_input(prompt).strip()
+
+             if user_input.lower() in ["exit", "quit", "done"]:
+                 break
+
+             if not user_input:
+                 continue
+
+             state, output = execute_command_corca(user_input, state, command_history)
+
+             process_corca_result(user_input,
+                                  state,
+                                  output,
+                                  command_history,
+                                  )
+
+         except KeyboardInterrupt:
+             print()
+             continue
+         except EOFError:
+             print("\nExiting Corca Mode.")
+             break
+
+     if state.mcp_client:
+         state.mcp_client.disconnect_sync()
+         state.mcp_client = None
+
+     render_markdown("\n# Exiting Corca Mode")
+     return {"output": "", "messages": state.messages}
+
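+ # Typical invocations (assuming this module is run directly; a console-script
+ # entry point, if one exists, is not shown in this diff):
+ #     python -m npcsh.corca
+ #     python -m npcsh.corca --mcp-server-path servers/fs_server.py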
+ def main():
+     parser = argparse.ArgumentParser(description="Corca - An MCP-powered npcsh shell.")
+     parser.add_argument("--mcp-server-path", type=str, help="Path to an MCP server script to connect to.")
+     args = parser.parse_args()
+
+     command_history, team, default_npc = setup_shell()
+
+     from npcsh._state import initial_state
+     initial_shell_state = initial_state
+     initial_shell_state.team = team
+     initial_shell_state.npc = default_npc
+
+     fake_command_str = "/corca"
+     if args.mcp_server_path:
+         fake_command_str = f'/corca --mcp-server-path "{args.mcp_server_path}"'
+
+     kwargs = {
+         'command': fake_command_str,
+         'shell_state': initial_shell_state,
+         'command_history': command_history
+     }
+
+     enter_corca_mode(**kwargs)
+
+ if __name__ == "__main__":
+     main()