npcsh 1.1.12-py3-none-any.whl → 1.1.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. npcsh/_state.py +700 -377
  2. npcsh/alicanto.py +54 -1153
  3. npcsh/completion.py +206 -0
  4. npcsh/config.py +163 -0
  5. npcsh/corca.py +35 -1462
  6. npcsh/execution.py +185 -0
  7. npcsh/guac.py +31 -1986
  8. npcsh/npc_team/jinxs/code/sh.jinx +11 -15
  9. npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
  10. npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
  11. npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
  12. npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
  13. npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
  14. npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
  15. npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
  16. npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
  17. npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
  18. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  19. npcsh/npc_team/jinxs/utils/search.jinx +3 -3
  20. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  21. npcsh/npcsh.py +76 -20
  22. npcsh/parsing.py +118 -0
  23. npcsh/plonk.py +41 -329
  24. npcsh/pti.py +41 -201
  25. npcsh/spool.py +34 -239
  26. npcsh/ui.py +199 -0
  27. npcsh/wander.py +54 -542
  28. npcsh/yap.py +38 -570
  29. npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  30. npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
  31. npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
  32. npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
  33. npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
  34. npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
  35. npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
  36. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
  37. npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
  38. npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
  39. npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
  40. npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
  41. npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
  42. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
  43. npcsh-1.1.14.dist-info/RECORD +135 -0
  44. npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
  45. npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
  46. npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
  47. npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
  48. npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
  49. npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
  50. npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
  51. npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
  52. npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
  53. npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
  54. npcsh-1.1.12.dist-info/RECORD +0 -126
  55. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
  56. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  57. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
  58. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
  59. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
  60. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
  61. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
  62. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
  63. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
  64. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
  65. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
  66. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
  67. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
  68. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
  69. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
  70. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
  71. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
  72. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  73. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  74. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
  75. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
  76. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  77. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  78. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
  79. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
  80. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
  81. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  82. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  83. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
  84. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
  85. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
  86. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
  87. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
  88. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  89. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
  90. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
  91. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
  92. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
  93. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
  94. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
  95. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
  96. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
  97. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
  98. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
  99. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
npcsh/npc_team/jinxs/code/sh.jinx
@@ -7,28 +7,24 @@ steps:
     engine: python
     code: |
       import subprocess
-      import sys # Import sys to explicitly write to stderr for visibility
-
-      # Force a simple print to see if anything comes out
-      print("--- Jinx 'sh' code started ---", file=sys.stderr)
+      import os

       cmd = '{{ bash_command }}'
-
-      # Initialize output to an empty string to ensure it always exists
-      output = ""
-
+      output = ""

       process = subprocess.Popen(
-          cmd,
-          shell=True,
-          stdout=subprocess.PIPE,
+          cmd,
+          shell=True,
+          stdout=subprocess.PIPE,
           stderr=subprocess.PIPE
       )
       stdout, stderr = process.communicate()
-
-      # Print raw stdout/stderr to sys.stderr for better visibility in some environments
-      print(f"Jinx 'sh' raw stdout: {stdout.decode('utf-8', errors='ignore')}", file=sys.stderr)
-      print(f"Jinx 'sh' raw stderr: {stderr.decode('utf-8', errors='ignore')}", file=sys.stderr)
+
+      # Only show debug output if NPCSH_DEBUG is set
+      if os.environ.get("NPCSH_DEBUG") == "1":
+          import sys
+          print(f"[sh] cmd: {cmd}", file=sys.stderr)
+          print(f"[sh] stdout: {stdout.decode('utf-8', errors='ignore')[:200]}", file=sys.stderr)

       if stderr:
           output = f"Error: {stderr.decode('utf-8')}"
npcsh/npc_team/jinxs/modes/alicanto.jinx
@@ -1,88 +1,194 @@
-jinx_name: "alicanto"
-description: "Conduct deep research with multiple perspectives, identifying gold insights and cliff warnings"
+jinx_name: alicanto
+description: Deep research mode - multi-perspective exploration with gold insights and cliff warnings
+npc: forenpc
 inputs:
-  - query: "" # Required research query.
-  - num_npcs: 5 # Number of NPCs to involve in research.
-  - depth: 3 # Depth of research.
-  - model: "" # LLM model to use. Defaults to NPCSH_CHAT_MODEL or NPC's model.
-  - provider: "" # LLM provider to use. Defaults to NPCSH_CHAT_PROVIDER or NPC's provider.
-  - max_steps: 20 # Maximum number of steps in Alicanto research.
-  - skip_research: True # Whether to skip the research phase.
-  - exploration: "" # Exploration factor (float).
-  - creativity: "" # Creativity factor (float).
-  - format: "" # Output format (report, summary, full).
+  - query: null
+  - num_npcs: 5
+  - depth: 3
+  - model: null
+  - provider: null
+  - max_steps: 20
+  - skip_research: true
+  - exploration: 0.3
+  - creativity: 0.5
+  - format: report
+
 steps:
-  - name: "conduct_alicanto_research"
-    engine: "python"
+  - name: alicanto_research
+    engine: python
     code: |
-      import traceback
-      import logging
-      from npcsh.alicanto import alicanto
-      # Assuming NPCSH_CHAT_MODEL and NPCSH_CHAT_PROVIDER are accessible
+      import os
+      from termcolor import colored
+
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.data.web import search_web
+      from npcpy.npc_compiler import NPC
+
+      npc = context.get('npc')
+      team = context.get('team')
+      messages = context.get('messages', [])

       query = context.get('query')
-      num_npcs = int(context.get('num_npcs', 5)) # Ensure int type
-      depth = int(context.get('depth', 3)) # Ensure int type
-      llm_model = context.get('model')
-      llm_provider = context.get('provider')
-      max_steps = int(context.get('max_steps', 20)) # Ensure int type
+      num_npcs = int(context.get('num_npcs', 5))
+      depth = int(context.get('depth', 3))
+      max_steps = int(context.get('max_steps', 20))
       skip_research = context.get('skip_research', True)
-      exploration_factor = context.get('exploration')
-      creativity_factor = context.get('creativity')
-      output_format = context.get('format')
-      output_messages = context.get('messages', [])
-      current_npc = context.get('npc')
-
-      if not query or not query.strip():
-          context['output'] = "Usage: /alicanto <research query> [--num-npcs N] [--depth N] [--exploration 0.3] [--creativity 0.5] [--format report|summary|full]"
-          context['messages'] = output_messages
+      exploration = float(context.get('exploration', 0.3))
+      creativity = float(context.get('creativity', 0.5))
+      output_format = context.get('format', 'report')
+
+      model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
+      provider = context.get('provider') or (npc.provider if npc else 'gemini')
+
+      if not query:
+          context['output'] = """Usage: /alicanto <research query>
+
+          Options:
+            --num-npcs N      Number of research perspectives (default: 5)
+            --depth N         Research depth (default: 3)
+            --max-steps N     Maximum research steps (default: 20)
+            --exploration F   Exploration factor 0-1 (default: 0.3)
+            --creativity F    Creativity factor 0-1 (default: 0.5)
+            --format FORMAT   Output: report|summary|full (default: report)
+
+          Example: /alicanto What are the latest advances in quantum computing?"""
+          context['messages'] = messages
           exit()

-      # Fallback for model/provider if not explicitly set in Jinx inputs
-      if not llm_model and current_npc and current_npc.model:
-          llm_model = current_npc.model
-      if not llm_provider and current_npc and current_npc.provider:
-          llm_provider = current_npc.provider
-
-      # Final fallbacks (these would ideally come from npcsh._state config)
-      # Assuming NPCSH_CHAT_MODEL and NPCSH_CHAT_PROVIDER exist and are imported implicitly or set by environment
-      # Hardcoding defaults for demonstration if not available through NPC or _state
-      if not llm_model: llm_model = "gemini-1.5-pro"
-      if not llm_provider: llm_provider = "gemini"
-
-      try:
-          logging.info(f"Starting Alicanto research on: {query}")
-
-          alicanto_kwargs = {
-              'query': query,
-              'num_npcs': num_npcs,
-              'depth': depth,
-              'model': llm_model,
-              'provider': llm_provider,
-              'max_steps': max_steps,
-              'skip_research': skip_research,
-          }
-
-          if exploration_factor: alicanto_kwargs['exploration_factor'] = float(exploration_factor)
-          if creativity_factor: alicanto_kwargs['creativity_factor'] = float(creativity_factor)
-          if output_format: alicanto_kwargs['output_format'] = output_format
-
-          result = alicanto(**alicanto_kwargs)
-
-          output_result = ""
-          if isinstance(result, dict):
-              if "integration" in result:
-                  output_result = result["integration"]
-              else:
-                  output_result = "Alicanto research completed. Full results available in returned data."
-          else:
-              output_result = str(result)
-
-          context['output'] = output_result
-          context['messages'] = output_messages
-          context['alicanto_result'] = result # Store full result in context
-      except Exception as e:
-          traceback.print_exc()
-          logging.error(f"Error during Alicanto research: {e}")
-          context['output'] = f"Error during Alicanto research: {e}"
-          context['messages'] = output_messages
+      print(f"""
+      █████╗ ██╗ ██╗ ██████╗ █████╗ ███╗ ██╗████████╗ ██████╗
+      ██╔══██╗██║ ██║██╔════╝██╔══██╗████╗ ██║╚══██╔══╝██╔═══██╗
+      ███████║██║ ██║██║ ███████║██╔██╗ ██║ ██║ ██║ ██║
+      ██╔══██║██║ ██║██║ ██╔══██║██║╚██╗██║ ██║ ██║ ██║
+      ██║ ██║███████╗██║╚██████╗██║ ██║██║ ╚████║ ██║ ╚██████╔╝
+      ╚═╝ ╚═╝╚══════╝╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═════╝
+
+      Deep Research Mode
+      Query: {query}
+      Perspectives: {num_npcs} | Depth: {depth} | Max Steps: {max_steps}
+      """)
+
+      # Generate research perspectives
+      perspectives_prompt = f"""Generate {num_npcs} distinct research perspectives for investigating: "{query}"
+
+      For each perspective, provide:
+      1. Name (a descriptive title)
+      2. Approach (how this perspective would investigate)
+      3. Key questions to explore
+
+      Return as a numbered list."""
+
+      print(colored("Generating research perspectives...", "cyan"))
+      resp = get_llm_response(
+          perspectives_prompt,
+          model=model,
+          provider=provider,
+          npc=npc
+      )
+      perspectives = str(resp.get('response', ''))
+      print(perspectives)
+
+      # Conduct web research if not skipped
+      research_findings = ""
+      if not skip_research:
+          print(colored("\nConducting web research...", "cyan"))
+          try:
+              search_results = search_web(query, n_results=5)
+              if search_results:
+                  research_findings = "\n\nWeb Research Findings:\n"
+                  for i, result in enumerate(search_results[:5], 1):
+                      title = result.get('title', 'No title')
+                      snippet = result.get('snippet', result.get('body', ''))[:200]
+                      research_findings += f"\n{i}. {title}\n {snippet}...\n"
+                  print(colored(f"Found {len(search_results)} sources", "green"))
+          except Exception as e:
+              print(colored(f"Web search error: {e}", "yellow"))
+
+      # Multi-step exploration from each perspective
+      all_insights = []
+      gold_insights = []  # Key valuable findings
+      cliff_warnings = []  # Potential pitfalls or caveats
+
+      for step in range(min(depth, max_steps)):
+          print(colored(f"\n--- Research Depth {step + 1}/{depth} ---", "cyan"))
+
+          explore_prompt = f"""Research query: "{query}"
+
+          Perspectives generated:
+          {perspectives}
+
+          {research_findings}
+
+          Previous insights: {all_insights[-3:] if all_insights else 'None yet'}
+
+          For depth level {step + 1}:
+          1. Explore deeper implications from each perspective
+          2. Identify GOLD insights (valuable, non-obvious findings) - mark with [GOLD]
+          3. Identify CLIFF warnings (pitfalls, caveats, risks) - mark with [CLIFF]
+          4. Connect insights across perspectives
+
+          Exploration factor: {exploration} (higher = more diverse exploration)
+          Creativity factor: {creativity} (higher = more novel connections)"""
+
+          resp = get_llm_response(
+              explore_prompt,
+              model=model,
+              provider=provider,
+              temperature=creativity,
+              npc=npc
+          )
+
+          step_insights = str(resp.get('response', ''))
+          print(step_insights)
+
+          # Extract gold and cliff markers
+          if '[GOLD]' in step_insights:
+              gold_insights.extend([line.strip() for line in step_insights.split('\n') if '[GOLD]' in line])
+          if '[CLIFF]' in step_insights:
+              cliff_warnings.extend([line.strip() for line in step_insights.split('\n') if '[CLIFF]' in line])
+
+          all_insights.append(step_insights)
+
+      # Generate final synthesis
+      print(colored("\n--- Synthesizing Research ---", "cyan"))
+
+      synthesis_prompt = f"""Synthesize research on: "{query}"
+
+      All insights gathered:
+      {chr(10).join(all_insights)}
+
+      Gold insights identified:
+      {chr(10).join(gold_insights) if gold_insights else 'None explicitly marked'}
+
+      Cliff warnings identified:
+      {chr(10).join(cliff_warnings) if cliff_warnings else 'None explicitly marked'}
+
+      Generate a {output_format} that:
+      1. Summarizes key findings
+      2. Highlights the most valuable insights (gold)
+      3. Notes important caveats and risks (cliffs)
+      4. Provides actionable conclusions"""
+
+      resp = get_llm_response(
+          synthesis_prompt,
+          model=model,
+          provider=provider,
+          npc=npc
+      )
+
+      final_report = str(resp.get('response', ''))
+      print("\n" + "="*60)
+      print(colored("ALICANTO RESEARCH REPORT", "green", attrs=['bold']))
+      print("="*60)
+      print(final_report)
+
+      context['output'] = final_report
+      context['messages'] = messages
+      context['alicanto_result'] = {
+          'query': query,
+          'perspectives': perspectives,
+          'insights': all_insights,
+          'gold': gold_insights,
+          'cliffs': cliff_warnings,
+          'report': final_report
+      }
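
The rewritten alicanto.jinx no longer delegates to npcsh.alicanto.alicanto(); it inlines the research loop and asks the model to tag findings with [GOLD] and [CLIFF] markers, which it then harvests with a plain line-by-line substring scan. A sketch of just that harvesting step, with made-up sample output standing in for a real LLM response:

    # Sketch of the [GOLD]/[CLIFF] extraction the new alicanto.jinx performs
    # on each step's LLM output. The sample text below is invented.
    step_insights = """\
    1. [GOLD] Error-corrected logical qubits recently crossed break-even.
    2. Vendors disagree on how to count qubits.
    3. [CLIFF] Headline qubit counts often ignore error rates entirely.
    """

    # A marker anywhere on a line claims the whole line
    gold_insights = [line.strip() for line in step_insights.split('\n') if '[GOLD]' in line]
    cliff_warnings = [line.strip() for line in step_insights.split('\n') if '[CLIFF]' in line]

    print(gold_insights)   # ['1. [GOLD] Error-corrected logical qubits ...']
    print(cliff_warnings)  # ['3. [CLIFF] Headline qubit counts ...']

Because the scan is line-based, a multi-line insight would only have its marked line collected; anything spilling onto following lines is dropped.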
npcsh/npc_team/jinxs/modes/corca.jinx
@@ -1,28 +1,249 @@
-jinx_name: "corca"
-description: "Enter the Corca MCP-powered agentic shell"
+jinx_name: corca
+description: MCP-powered agentic shell - LLM with tool use via MCP servers
 inputs:
-  - mcp_server_path: '~/.npcsh/npc_team/mcp_server.py'
-  - force_global: false
+  - mcp_server_path: null
   - initial_command: null
+  - model: null
+  - provider: null
+
 steps:
-  - name: "enter_corca"
-    engine: "python"
+  - name: corca_repl
+    engine: python
     code: |
-      from npcsh._state import setup_shell
-      from npcsh.corca import corca_session
-
+      import os
+      import sys
+      import asyncio
+      import json
+      from contextlib import AsyncExitStack
+      from termcolor import colored
+
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.npc_sysenv import render_markdown, get_system_message
+
+      # MCP imports
+      try:
+          from mcp import ClientSession, StdioServerParameters
+          from mcp.client.stdio import stdio_client
+          MCP_AVAILABLE = True
+      except ImportError:
+          MCP_AVAILABLE = False
+          print(colored("MCP not available. Install with: pip install mcp-client", "yellow"))
+
+      npc = context.get('npc')
+      team = context.get('team')
+      messages = context.get('messages', [])
       mcp_server_path = context.get('mcp_server_path')
-      force_global = context.get('force_global', False)
       initial_command = context.get('initial_command')
-
-      command_history, _, _ = setup_shell()
-
-      result = corca_session(
-          command_history=command_history,
-          mcp_server_path=mcp_server_path,
-          force_global=force_global,
-          initial_command=initial_command
-      )
-
-      context['output'] = result.get('output', 'Exited Corca mode.')
-      context['messages'] = result.get('messages', [])
+
+      model = context.get('model') or (npc.model if npc else None)
+      provider = context.get('provider') or (npc.provider if npc else None)
+
+      # Use shared_context for MCP state
+      shared_ctx = npc.shared_context if npc and hasattr(npc, 'shared_context') else {}
+
+      print("""
+      ██████╗ ██████╗ ██████╗ ██████╗ █████╗
+      ██╔════╝██╔═══██╗██╔══██╗██╔════╝██╔══██╗
+      ██║ ██║ ██║██████╔╝██║ ███████║
+      ██║ ██║ ██║██╔══██╗██║ ██╔══██╗
+      ╚██████╗╚██████╔╝██║ ██║╚██████╗██║ ██║
+      ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝
+      """)
+
+      npc_name = npc.name if npc else "corca"
+      print(f"Entering corca mode (NPC: {npc_name}). Type '/cq' to exit.")
+
+      # ========== MCP Connection Setup ==========
+      async def connect_mcp(server_path):
+          """Connect to MCP server and return tools"""
+          if not MCP_AVAILABLE:
+              return [], {}
+
+          abs_path = os.path.abspath(os.path.expanduser(server_path))
+          if not os.path.exists(abs_path):
+              print(colored(f"MCP server not found: {abs_path}", "red"))
+              return [], {}
+
+          try:
+              loop = asyncio.get_event_loop()
+          except RuntimeError:
+              loop = asyncio.new_event_loop()
+              asyncio.set_event_loop(loop)
+
+          exit_stack = AsyncExitStack()
+
+          if abs_path.endswith('.py'):
+              cmd_parts = [sys.executable, abs_path]
+          else:
+              cmd_parts = [abs_path]
+
+          server_params = StdioServerParameters(
+              command=cmd_parts[0],
+              args=[abs_path],
+              env=os.environ.copy()
+          )
+
+          stdio_transport = await exit_stack.enter_async_context(stdio_client(server_params))
+          session = await exit_stack.enter_async_context(ClientSession(*stdio_transport))
+          await session.initialize()
+
+          response = await session.list_tools()
+          tools_llm = []
+          tool_map = {}
+
+          if response.tools:
+              for mcp_tool in response.tools:
+                  tool_def = {
+                      "type": "function",
+                      "function": {
+                          "name": mcp_tool.name,
+                          "description": mcp_tool.description or f"MCP tool: {mcp_tool.name}",
+                          "parameters": getattr(mcp_tool, "inputSchema", {"type": "object", "properties": {}})
+                      }
+                  }
+                  tools_llm.append(tool_def)
+
+                  # Create sync wrapper for async tool call
+                  def make_tool_func(tool_name, sess, lp):
+                      async def call_tool(**kwargs):
+                          cleaned = {k: (None if v == 'None' else v) for k, v in kwargs.items()}
+                          result = await asyncio.wait_for(sess.call_tool(tool_name, cleaned), timeout=30.0)
+                          return result
+                      def sync_call(**kwargs):
+                          return lp.run_until_complete(call_tool(**kwargs))
+                      return sync_call
+
+                  tool_map[mcp_tool.name] = make_tool_func(mcp_tool.name, session, loop)
+
+          # Store in shared context
+          shared_ctx['mcp_client'] = session
+          shared_ctx['mcp_tools'] = tools_llm
+          shared_ctx['mcp_tool_map'] = tool_map
+          shared_ctx['_mcp_exit_stack'] = exit_stack
+          shared_ctx['_mcp_loop'] = loop
+
+          print(colored(f"Connected to MCP server. Tools: {', '.join(tool_map.keys())}", "green"))
+          return tools_llm, tool_map
+
+      # Try to connect if server path provided
+      tools_llm = shared_ctx.get('mcp_tools', [])
+      tool_map = shared_ctx.get('mcp_tool_map', {})
+
+      if mcp_server_path and not tools_llm:
+          try:
+              loop = asyncio.get_event_loop()
+          except RuntimeError:
+              loop = asyncio.new_event_loop()
+              asyncio.set_event_loop(loop)
+          tools_llm, tool_map = loop.run_until_complete(connect_mcp(mcp_server_path))
+
+      # Find default MCP server if none provided
+      if not tools_llm:
+          default_paths = [
+              os.path.expanduser("~/.npcsh/npc_team/mcp_server.py"),
+              os.path.join(team.team_path, "mcp_server.py") if team and hasattr(team, 'team_path') else None,
+          ]
+          for path in default_paths:
+              if path and os.path.exists(path):
+                  try:
+                      loop = asyncio.get_event_loop()
+                  except RuntimeError:
+                      loop = asyncio.new_event_loop()
+                      asyncio.set_event_loop(loop)
+                  tools_llm, tool_map = loop.run_until_complete(connect_mcp(path))
+                  if tools_llm:
+                      break
+
+      # Ensure system message
+      if not messages or messages[0].get("role") != "system":
+          sys_msg = get_system_message(npc) if npc else "You are an AI assistant with access to tools."
+          if tools_llm:
+              sys_msg += f"\n\nYou have access to these tools: {', '.join(t['function']['name'] for t in tools_llm)}"
+          messages.insert(0, {"role": "system", "content": sys_msg})
+
+      # Handle initial command if provided (one-shot mode)
+      if initial_command:
+          resp = get_llm_response(
+              initial_command,
+              model=model,
+              provider=provider,
+              messages=messages,
+              tools=tools_llm if tools_llm else None,
+              tool_map=tool_map if tool_map else None,
+              auto_process_tool_calls=True,
+              npc=npc
+          )
+          messages = resp.get('messages', messages)
+          render_markdown(str(resp.get('response', '')))
+          context['output'] = resp.get('response', 'Done.')
+          context['messages'] = messages
+          # Don't enter REPL for one-shot
+          exit()
+
+      # REPL loop
+      while True:
+          try:
+              prompt_str = f"{npc_name}:corca> "
+              user_input = input(prompt_str).strip()
+
+              if not user_input:
+                  continue
+
+              if user_input.lower() == "/cq":
+                  print("Exiting corca mode.")
+                  break
+
+              # Handle /tools to list available tools
+              if user_input.lower() == "/tools":
+                  if tools_llm:
+                      print(colored("Available MCP tools:", "cyan"))
+                      for t in tools_llm:
+                          print(f" - {t['function']['name']}: {t['function'].get('description', '')[:60]}")
+                  else:
+                      print(colored("No MCP tools connected.", "yellow"))
+                  continue
+
+              # Handle /connect to connect to new MCP server
+              if user_input.startswith("/connect "):
+                  new_path = user_input[9:].strip()
+                  try:
+                      loop = asyncio.get_event_loop()
+                  except RuntimeError:
+                      loop = asyncio.new_event_loop()
+                      asyncio.set_event_loop(loop)
+                  tools_llm, tool_map = loop.run_until_complete(connect_mcp(new_path))
+                  continue
+
+              # Get LLM response with tools
+              resp = get_llm_response(
+                  user_input,
+                  model=model,
+                  provider=provider,
+                  messages=messages,
+                  tools=tools_llm if tools_llm else None,
+                  tool_map=tool_map if tool_map else None,
+                  auto_process_tool_calls=True,
+                  stream=False,  # Tool calls don't work well with streaming
+                  npc=npc
+              )
+
+              messages = resp.get('messages', messages)
+              response_text = resp.get('response', '')
+              render_markdown(str(response_text))
+
+              # Track usage
+              if 'usage' in resp and npc and hasattr(npc, 'shared_context'):
+                  usage = resp['usage']
+                  npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+                  npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+                  npc.shared_context['turn_count'] += 1
+
+          except KeyboardInterrupt:
+              print("\nUse '/cq' to exit or continue.")
+              continue
+          except EOFError:
+              print("\nExiting corca mode.")
+              break
+
+      context['output'] = "Exited corca mode."
+      context['messages'] = messages
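
The core trick in the new corca.jinx is bridging the synchronous REPL to the async MCP client: each MCP tool is wrapped in a closure that runs the coroutine to completion on a captured event loop, so get_llm_response() can invoke tools as ordinary callables. A trimmed sketch of that wrapper, assuming an already-initialized mcp ClientSession (session) and event loop (loop) as set up earlier in the jinx:

    # Sync-over-async tool wrapper from corca.jinx, assuming `session` is a
    # connected mcp.ClientSession and `loop` is the event loop it lives on.
    import asyncio

    def make_tool_func(tool_name, sess, lp):
        async def call_tool(**kwargs):
            # The jinx normalizes the string 'None' back to a real None
            # before forwarding arguments to the MCP server.
            cleaned = {k: (None if v == 'None' else v) for k, v in kwargs.items()}
            return await asyncio.wait_for(sess.call_tool(tool_name, cleaned), timeout=30.0)

        def sync_call(**kwargs):
            # Blocks the REPL thread until the MCP call finishes or times out.
            return lp.run_until_complete(call_tool(**kwargs))

        return sync_call

    # tool_map entries built this way are passed to get_llm_response(...)
    # as tool_map={name: make_tool_func(name, session, loop), ...}.

Each sync_call blocks the REPL until the MCP server responds or the 30-second timeout fires, which fits the stream=False choice in the REPL loop: tool calls are resolved fully before the next turn renders.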