npcsh 1.1.12__py3-none-any.whl → 1.1.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. npcsh/_state.py +700 -377
  2. npcsh/alicanto.py +54 -1153
  3. npcsh/completion.py +206 -0
  4. npcsh/config.py +163 -0
  5. npcsh/corca.py +35 -1462
  6. npcsh/execution.py +185 -0
  7. npcsh/guac.py +31 -1986
  8. npcsh/npc_team/jinxs/code/sh.jinx +11 -15
  9. npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
  10. npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
  11. npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
  12. npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
  13. npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
  14. npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
  15. npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
  16. npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
  17. npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
  18. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  19. npcsh/npc_team/jinxs/utils/search.jinx +3 -3
  20. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  21. npcsh/npcsh.py +76 -20
  22. npcsh/parsing.py +118 -0
  23. npcsh/plonk.py +41 -329
  24. npcsh/pti.py +41 -201
  25. npcsh/spool.py +34 -239
  26. npcsh/ui.py +199 -0
  27. npcsh/wander.py +54 -542
  28. npcsh/yap.py +38 -570
  29. npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  30. npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
  31. npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
  32. npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
  33. npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
  34. npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
  35. npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
  36. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
  37. npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
  38. npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
  39. npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
  40. npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
  41. npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
  42. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
  43. npcsh-1.1.14.dist-info/RECORD +135 -0
  44. npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
  45. npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
  46. npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
  47. npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
  48. npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
  49. npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
  50. npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
  51. npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
  52. npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
  53. npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
  54. npcsh-1.1.12.dist-info/RECORD +0 -126
  55. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
  56. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  57. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
  58. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
  59. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
  60. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
  61. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
  62. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
  63. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
  64. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
  65. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
  66. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
  67. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
  68. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
  69. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
  70. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
  71. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
  72. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  73. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  74. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
  75. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
  76. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  77. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  78. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
  79. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
  80. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
  81. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  82. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  83. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
  84. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
  85. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
  86. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
  87. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
  88. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  89. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
  90. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
  91. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
  92. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
  93. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
  94. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
  95. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
  96. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
  97. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
  98. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
  99. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx (new file)
@@ -0,0 +1,194 @@
+ jinx_name: alicanto
+ description: Deep research mode - multi-perspective exploration with gold insights and cliff warnings
+ npc: forenpc
+ inputs:
+   - query: null
+   - num_npcs: 5
+   - depth: 3
+   - model: null
+   - provider: null
+   - max_steps: 20
+   - skip_research: true
+   - exploration: 0.3
+   - creativity: 0.5
+   - format: report
+
+ steps:
+   - name: alicanto_research
+     engine: python
+     code: |
+       import os
+       from termcolor import colored
+
+       from npcpy.llm_funcs import get_llm_response
+       from npcpy.data.web import search_web
+       from npcpy.npc_compiler import NPC
+
+       npc = context.get('npc')
+       team = context.get('team')
+       messages = context.get('messages', [])
+
+       query = context.get('query')
+       num_npcs = int(context.get('num_npcs', 5))
+       depth = int(context.get('depth', 3))
+       max_steps = int(context.get('max_steps', 20))
+       skip_research = context.get('skip_research', True)
+       exploration = float(context.get('exploration', 0.3))
+       creativity = float(context.get('creativity', 0.5))
+       output_format = context.get('format', 'report')
+
+       model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
+       provider = context.get('provider') or (npc.provider if npc else 'gemini')
+
+       if not query:
+           context['output'] = """Usage: /alicanto <research query>
+
+       Options:
+       --num-npcs N      Number of research perspectives (default: 5)
+       --depth N         Research depth (default: 3)
+       --max-steps N     Maximum research steps (default: 20)
+       --exploration F   Exploration factor 0-1 (default: 0.3)
+       --creativity F    Creativity factor 0-1 (default: 0.5)
+       --format FORMAT   Output: report|summary|full (default: report)
+
+       Example: /alicanto What are the latest advances in quantum computing?"""
+           context['messages'] = messages
+           exit()
+
+       print(f"""
+        █████╗ ██╗ ██╗ ██████╗ █████╗ ███╗ ██╗████████╗ ██████╗
+       ██╔══██╗██║ ██║██╔════╝██╔══██╗████╗ ██║╚══██╔══╝██╔═══██╗
+       ███████║██║ ██║██║ ███████║██╔██╗ ██║ ██║ ██║ ██║
+       ██╔══██║██║ ██║██║ ██╔══██║██║╚██╗██║ ██║ ██║ ██║
+       ██║ ██║███████╗██║╚██████╗██║ ██║██║ ╚████║ ██║ ╚██████╔╝
+       ╚═╝ ╚═╝╚══════╝╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═════╝
+
+       Deep Research Mode
+       Query: {query}
+       Perspectives: {num_npcs} | Depth: {depth} | Max Steps: {max_steps}
+       """)
+
+       # Generate research perspectives
+       perspectives_prompt = f"""Generate {num_npcs} distinct research perspectives for investigating: "{query}"
+
+       For each perspective, provide:
+       1. Name (a descriptive title)
+       2. Approach (how this perspective would investigate)
+       3. Key questions to explore
+
+       Return as a numbered list."""
+
+       print(colored("Generating research perspectives...", "cyan"))
+       resp = get_llm_response(
+           perspectives_prompt,
+           model=model,
+           provider=provider,
+           npc=npc
+       )
+       perspectives = str(resp.get('response', ''))
+       print(perspectives)
+
+       # Conduct web research if not skipped
+       research_findings = ""
+       if not skip_research:
+           print(colored("\nConducting web research...", "cyan"))
+           try:
+               search_results = search_web(query, n_results=5)
+               if search_results:
+                   research_findings = "\n\nWeb Research Findings:\n"
+                   for i, result in enumerate(search_results[:5], 1):
+                       title = result.get('title', 'No title')
+                       snippet = result.get('snippet', result.get('body', ''))[:200]
+                       research_findings += f"\n{i}. {title}\n {snippet}...\n"
+                   print(colored(f"Found {len(search_results)} sources", "green"))
+           except Exception as e:
+               print(colored(f"Web search error: {e}", "yellow"))
+
+       # Multi-step exploration from each perspective
+       all_insights = []
+       gold_insights = []  # Key valuable findings
+       cliff_warnings = []  # Potential pitfalls or caveats
+
+       for step in range(min(depth, max_steps)):
+           print(colored(f"\n--- Research Depth {step + 1}/{depth} ---", "cyan"))
+
+           explore_prompt = f"""Research query: "{query}"
+
+       Perspectives generated:
+       {perspectives}
+
+       {research_findings}
+
+       Previous insights: {all_insights[-3:] if all_insights else 'None yet'}
+
+       For depth level {step + 1}:
+       1. Explore deeper implications from each perspective
+       2. Identify GOLD insights (valuable, non-obvious findings) - mark with [GOLD]
+       3. Identify CLIFF warnings (pitfalls, caveats, risks) - mark with [CLIFF]
+       4. Connect insights across perspectives
+
+       Exploration factor: {exploration} (higher = more diverse exploration)
+       Creativity factor: {creativity} (higher = more novel connections)"""
+
+           resp = get_llm_response(
+               explore_prompt,
+               model=model,
+               provider=provider,
+               temperature=creativity,
+               npc=npc
+           )
+
+           step_insights = str(resp.get('response', ''))
+           print(step_insights)
+
+           # Extract gold and cliff markers
+           if '[GOLD]' in step_insights:
+               gold_insights.extend([line.strip() for line in step_insights.split('\n') if '[GOLD]' in line])
+           if '[CLIFF]' in step_insights:
+               cliff_warnings.extend([line.strip() for line in step_insights.split('\n') if '[CLIFF]' in line])
+
+           all_insights.append(step_insights)
+
+       # Generate final synthesis
+       print(colored("\n--- Synthesizing Research ---", "cyan"))
+
+       synthesis_prompt = f"""Synthesize research on: "{query}"
+
+       All insights gathered:
+       {chr(10).join(all_insights)}
+
+       Gold insights identified:
+       {chr(10).join(gold_insights) if gold_insights else 'None explicitly marked'}
+
+       Cliff warnings identified:
+       {chr(10).join(cliff_warnings) if cliff_warnings else 'None explicitly marked'}
+
+       Generate a {output_format} that:
+       1. Summarizes key findings
+       2. Highlights the most valuable insights (gold)
+       3. Notes important caveats and risks (cliffs)
+       4. Provides actionable conclusions"""
+
+       resp = get_llm_response(
+           synthesis_prompt,
+           model=model,
+           provider=provider,
+           npc=npc
+       )
+
+       final_report = str(resp.get('response', ''))
+       print("\n" + "="*60)
+       print(colored("ALICANTO RESEARCH REPORT", "green", attrs=['bold']))
+       print("="*60)
+       print(final_report)
+
+       context['output'] = final_report
+       context['messages'] = messages
+       context['alicanto_result'] = {
+           'query': query,
+           'perspectives': perspectives,
+           'insights': all_insights,
+           'gold': gold_insights,
+           'cliffs': cliff_warnings,
+           'report': final_report
+       }
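
The usage string embedded in the jinx documents the mode's full flag surface. A representative invocation at the npcsh prompt, using the flag spellings from that help text (the query itself is illustrative):

    /alicanto How do solid-state batteries degrade over repeated cycling? --num-npcs 3 --depth 2 --format summary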
npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx (new file)
@@ -0,0 +1,44 @@
+ jinx_name: chat
+ description: Simple chat mode - LLM conversation without tool execution
+ inputs:
+   - query: null
+   - model: null
+   - provider: null
+   - stream: true
+
+ steps:
+   - name: chat_response
+     engine: python
+     code: |
+       from npcpy.llm_funcs import get_llm_response
+
+       npc = context.get('npc')
+       messages = context.get('messages', [])
+       query = context.get('query', '')
+       stream = context.get('stream', True)
+
+       model = context.get('model') or (npc.model if npc else None)
+       provider = context.get('provider') or (npc.provider if npc else None)
+
+       if not query:
+           context['output'] = ''
+           context['messages'] = messages
+       else:
+           response = get_llm_response(
+               query,
+               model=model,
+               provider=provider,
+               npc=npc,
+               stream=stream,
+               messages=messages
+           )
+
+           context['output'] = response.get('response', '')
+           context['messages'] = response.get('messages', messages)
+
+           # Track usage
+           if 'usage' in response and npc and hasattr(npc, 'shared_context'):
+               usage = response['usage']
+               npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+               npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+               npc.shared_context['turn_count'] += 1
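
The chat step is a thin wrapper over npcpy's get_llm_response, reading its inputs from the jinx context and writing output/messages back. A minimal standalone sketch of the same call, assuming npcpy is installed and that the placeholder model/provider pair resolves on your machine:

    from npcpy.llm_funcs import get_llm_response

    messages = []  # prior history as role/content dicts, as in the jinx context
    response = get_llm_response(
        "hello",
        model="llama3.2",    # placeholder; any model npcpy can reach
        provider="ollama",   # placeholder provider
        stream=False,
        messages=messages,
    )
    print(response.get("response", ""))            # reply text -> context['output']
    messages = response.get("messages", messages)  # extended history -> context['messages']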
npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx (new file)
@@ -0,0 +1,44 @@
+ jinx_name: cmd
+ description: Command mode - LLM generates and executes shell commands
+ inputs:
+   - query: null
+   - model: null
+   - provider: null
+   - stream: true
+
+ steps:
+   - name: cmd_execute
+     engine: python
+     code: |
+       from npcpy.llm_funcs import execute_llm_command
+
+       npc = context.get('npc')
+       messages = context.get('messages', [])
+       query = context.get('query', '')
+       stream = context.get('stream', True)
+
+       model = context.get('model') or (npc.model if npc else None)
+       provider = context.get('provider') or (npc.provider if npc else None)
+
+       if not query:
+           context['output'] = ''
+           context['messages'] = messages
+       else:
+           response = execute_llm_command(
+               query,
+               model=model,
+               provider=provider,
+               npc=npc,
+               stream=stream,
+               messages=messages
+           )
+
+           context['output'] = response.get('response', '')
+           context['messages'] = response.get('messages', messages)
+
+           # Track usage
+           if 'usage' in response and npc and hasattr(npc, 'shared_context'):
+               usage = response['usage']
+               npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+               npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+               npc.shared_context['turn_count'] += 1
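
Structurally this is the chat jinx with execute_llm_command swapped in, so the model proposes a shell command and npcpy runs it rather than merely replying. Note that both usage-tracking blocks do += on shared_context counters, which assumes the shell seeds those keys before any mode runs; a sketch of that assumed initialization:

    # Hypothetical seeding that the += lines above rely on; npcsh is assumed
    # to initialize these counters when it constructs the NPC's shared_context.
    npc.shared_context.setdefault('session_input_tokens', 0)
    npc.shared_context.setdefault('session_output_tokens', 0)
    npc.shared_context.setdefault('turn_count', 0)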
npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx (new file)
@@ -0,0 +1,249 @@
+ jinx_name: corca
+ description: MCP-powered agentic shell - LLM with tool use via MCP servers
+ inputs:
+   - mcp_server_path: null
+   - initial_command: null
+   - model: null
+   - provider: null
+
+ steps:
+   - name: corca_repl
+     engine: python
+     code: |
+       import os
+       import sys
+       import asyncio
+       import json
+       from contextlib import AsyncExitStack
+       from termcolor import colored
+
+       from npcpy.llm_funcs import get_llm_response
+       from npcpy.npc_sysenv import render_markdown, get_system_message
+
+       # MCP imports
+       try:
+           from mcp import ClientSession, StdioServerParameters
+           from mcp.client.stdio import stdio_client
+           MCP_AVAILABLE = True
+       except ImportError:
+           MCP_AVAILABLE = False
+           print(colored("MCP not available. Install with: pip install mcp", "yellow"))
+
+       npc = context.get('npc')
+       team = context.get('team')
+       messages = context.get('messages', [])
+       mcp_server_path = context.get('mcp_server_path')
+       initial_command = context.get('initial_command')
+
+       model = context.get('model') or (npc.model if npc else None)
+       provider = context.get('provider') or (npc.provider if npc else None)
+
+       # Use shared_context for MCP state
+       shared_ctx = npc.shared_context if npc and hasattr(npc, 'shared_context') else {}
+
+       print("""
+        ██████╗ ██████╗ ██████╗ ██████╗ █████╗
+       ██╔════╝██╔═══██╗██╔══██╗██╔════╝██╔══██╗
+       ██║ ██║ ██║██████╔╝██║ ███████║
+       ██║ ██║ ██║██╔══██╗██║ ██╔══██╗
+       ╚██████╗╚██████╔╝██║ ██║╚██████╗██║ ██║
+       ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝
+       """)
+
+       npc_name = npc.name if npc else "corca"
+       print(f"Entering corca mode (NPC: {npc_name}). Type '/cq' to exit.")
+
+       # ========== MCP Connection Setup ==========
+       async def connect_mcp(server_path):
+           """Connect to MCP server and return tools"""
+           if not MCP_AVAILABLE:
+               return [], {}
+
+           abs_path = os.path.abspath(os.path.expanduser(server_path))
+           if not os.path.exists(abs_path):
+               print(colored(f"MCP server not found: {abs_path}", "red"))
+               return [], {}
+
+           try:
+               loop = asyncio.get_event_loop()
+           except RuntimeError:
+               loop = asyncio.new_event_loop()
+               asyncio.set_event_loop(loop)
+
+           exit_stack = AsyncExitStack()
+
+           if abs_path.endswith('.py'):
+               cmd_parts = [sys.executable, abs_path]
+           else:
+               cmd_parts = [abs_path]
+
+           server_params = StdioServerParameters(
+               command=cmd_parts[0],
+               args=cmd_parts[1:],
+               env=os.environ.copy()
+           )
+
+           stdio_transport = await exit_stack.enter_async_context(stdio_client(server_params))
+           session = await exit_stack.enter_async_context(ClientSession(*stdio_transport))
+           await session.initialize()
+
+           response = await session.list_tools()
+           tools_llm = []
+           tool_map = {}
+
+           if response.tools:
+               for mcp_tool in response.tools:
+                   tool_def = {
+                       "type": "function",
+                       "function": {
+                           "name": mcp_tool.name,
+                           "description": mcp_tool.description or f"MCP tool: {mcp_tool.name}",
+                           "parameters": getattr(mcp_tool, "inputSchema", {"type": "object", "properties": {}})
+                       }
+                   }
+                   tools_llm.append(tool_def)
+
+                   # Create sync wrapper for async tool call
+                   def make_tool_func(tool_name, sess, lp):
+                       async def call_tool(**kwargs):
+                           cleaned = {k: (None if v == 'None' else v) for k, v in kwargs.items()}
+                           result = await asyncio.wait_for(sess.call_tool(tool_name, cleaned), timeout=30.0)
+                           return result
+                       def sync_call(**kwargs):
+                           return lp.run_until_complete(call_tool(**kwargs))
+                       return sync_call
+
+                   tool_map[mcp_tool.name] = make_tool_func(mcp_tool.name, session, loop)
+
+           # Store in shared context
+           shared_ctx['mcp_client'] = session
+           shared_ctx['mcp_tools'] = tools_llm
+           shared_ctx['mcp_tool_map'] = tool_map
+           shared_ctx['_mcp_exit_stack'] = exit_stack
+           shared_ctx['_mcp_loop'] = loop
+
+           print(colored(f"Connected to MCP server. Tools: {', '.join(tool_map.keys())}", "green"))
+           return tools_llm, tool_map
+
+       # Try to connect if server path provided
+       tools_llm = shared_ctx.get('mcp_tools', [])
+       tool_map = shared_ctx.get('mcp_tool_map', {})
+
+       if mcp_server_path and not tools_llm:
+           try:
+               loop = asyncio.get_event_loop()
+           except RuntimeError:
+               loop = asyncio.new_event_loop()
+               asyncio.set_event_loop(loop)
+           tools_llm, tool_map = loop.run_until_complete(connect_mcp(mcp_server_path))
+
+       # Find default MCP server if none provided
+       if not tools_llm:
+           default_paths = [
+               os.path.expanduser("~/.npcsh/npc_team/mcp_server.py"),
+               os.path.join(team.team_path, "mcp_server.py") if team and hasattr(team, 'team_path') else None,
+           ]
+           for path in default_paths:
+               if path and os.path.exists(path):
+                   try:
+                       loop = asyncio.get_event_loop()
+                   except RuntimeError:
+                       loop = asyncio.new_event_loop()
+                       asyncio.set_event_loop(loop)
+                   tools_llm, tool_map = loop.run_until_complete(connect_mcp(path))
+                   if tools_llm:
+                       break
+
+       # Ensure system message
+       if not messages or messages[0].get("role") != "system":
+           sys_msg = get_system_message(npc) if npc else "You are an AI assistant with access to tools."
+           if tools_llm:
+               sys_msg += f"\n\nYou have access to these tools: {', '.join(t['function']['name'] for t in tools_llm)}"
+           messages.insert(0, {"role": "system", "content": sys_msg})
+
+       # Handle initial command if provided (one-shot mode)
+       if initial_command:
+           resp = get_llm_response(
+               initial_command,
+               model=model,
+               provider=provider,
+               messages=messages,
+               tools=tools_llm if tools_llm else None,
+               tool_map=tool_map if tool_map else None,
+               auto_process_tool_calls=True,
+               npc=npc
+           )
+           messages = resp.get('messages', messages)
+           render_markdown(str(resp.get('response', '')))
+           context['output'] = resp.get('response', 'Done.')
+           context['messages'] = messages
+           # Don't enter REPL for one-shot
+           exit()
+
+       # REPL loop
+       while True:
+           try:
+               prompt_str = f"{npc_name}:corca> "
+               user_input = input(prompt_str).strip()
+
+               if not user_input:
+                   continue
+
+               if user_input.lower() == "/cq":
+                   print("Exiting corca mode.")
+                   break
+
+               # Handle /tools to list available tools
+               if user_input.lower() == "/tools":
+                   if tools_llm:
+                       print(colored("Available MCP tools:", "cyan"))
+                       for t in tools_llm:
+                           print(f" - {t['function']['name']}: {t['function'].get('description', '')[:60]}")
+                   else:
+                       print(colored("No MCP tools connected.", "yellow"))
+                   continue
+
+               # Handle /connect to connect to a new MCP server
+               if user_input.startswith("/connect "):
+                   new_path = user_input[9:].strip()
+                   try:
+                       loop = asyncio.get_event_loop()
+                   except RuntimeError:
+                       loop = asyncio.new_event_loop()
+                       asyncio.set_event_loop(loop)
+                   tools_llm, tool_map = loop.run_until_complete(connect_mcp(new_path))
+                   continue
+
+               # Get LLM response with tools
+               resp = get_llm_response(
+                   user_input,
+                   model=model,
+                   provider=provider,
+                   messages=messages,
+                   tools=tools_llm if tools_llm else None,
+                   tool_map=tool_map if tool_map else None,
+                   auto_process_tool_calls=True,
+                   stream=False,  # Tool calls don't work well with streaming
+                   npc=npc
+               )
+
+               messages = resp.get('messages', messages)
+               response_text = resp.get('response', '')
+               render_markdown(str(response_text))
+
+               # Track usage
+               if 'usage' in resp and npc and hasattr(npc, 'shared_context'):
+                   usage = resp['usage']
+                   npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+                   npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+                   npc.shared_context['turn_count'] += 1
+
+           except KeyboardInterrupt:
+               print("\nUse '/cq' to exit or continue.")
+               continue
+           except EOFError:
+               print("\nExiting corca mode.")
+               break
+
+       context['output'] = "Exited corca mode."
+       context['messages'] = messages
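
connect_mcp speaks the standard MCP stdio transport, so any stdio server should work as mcp_server_path or as a /connect target. A minimal sketch of such a server built on the official mcp package's FastMCP helper (the server name and the add tool are illustrative, not part of npcsh):

    # Save as ~/.npcsh/npc_team/mcp_server.py, the first default path corca probes.
    from mcp.server.fastmcp import FastMCP

    mcp = FastMCP("demo")

    @mcp.tool()
    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    if __name__ == "__main__":
        mcp.run()  # stdio transport by default, which corca's stdio_client expects

Once connected, the tool appears under /tools and is exposed to the model through the tools/tool_map arguments shown above.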