npcsh 1.0.16__py3-none-any.whl → 1.0.18__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- npcsh/_state.py +1541 -78
- npcsh/corca.py +709 -0
- npcsh/guac.py +1433 -596
- npcsh/mcp_server.py +64 -60
- npcsh/npc.py +5 -4
- npcsh/npcsh.py +27 -1334
- npcsh/pti.py +195 -215
- npcsh/routes.py +99 -26
- npcsh/spool.py +138 -144
- npcsh-1.0.18.dist-info/METADATA +483 -0
- npcsh-1.0.18.dist-info/RECORD +21 -0
- {npcsh-1.0.16.dist-info → npcsh-1.0.18.dist-info}/entry_points.txt +1 -1
- npcsh/mcp_npcsh.py +0 -822
- npcsh-1.0.16.dist-info/METADATA +0 -825
- npcsh-1.0.16.dist-info/RECORD +0 -21
- {npcsh-1.0.16.dist-info → npcsh-1.0.18.dist-info}/WHEEL +0 -0
- {npcsh-1.0.16.dist-info → npcsh-1.0.18.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.16.dist-info → npcsh-1.0.18.dist-info}/top_level.txt +0 -0
npcsh/spool.py
CHANGED
@@ -4,174 +4,126 @@ from npcpy.data.image import capture_screenshot
 from npcpy.data.text import rag_search
 
 import os
+import sys
 from npcpy.npc_sysenv import (
     print_and_process_stream_with_markdown,
+    get_system_message,
+    render_markdown,
 )
-from
-    get_system_message,
-    render_markdown,
-
-)
-from npcsh._state import (
+from npcsh._state import (
     orange,
-
-
-
-
-
+    ShellState,
+    execute_command,
+    get_multiline_input,
+    readline_safe_prompt,
+    setup_shell,
+    get_npc_path,
+    process_result,
+    initial_state,
 )
-from npcpy.llm_funcs import
-
+from npcpy.llm_funcs import get_llm_response
 from npcpy.npc_compiler import NPC
 from typing import Any, List, Dict, Union
 from npcsh.yap import enter_yap_mode
-
-
+from termcolor import colored
+def print_spool_ascii():
+    spool_art = """
+    ██████╗██████╗ ████████╗ ████████╗ ██╗
+    ██╔════╝██╔══██╗██╔🧵🧵🧵██ ██╔🧵🧵🧵██ ██║
+    ╚█████╗ ██████╔╝██║🧵🔴🧵██ ██║🧵🔴🧵██ ██║
+    ╚═══██╗██╔═══╝ ██║🧵🧵🧵██ ██║🧵🧵🧵██ ██║
+    ██████╔╝██║ ██╚══════██ ██ ══════██ ██║
+    ╚═════╝ ╚═╝ ╚═████████ ███████═╝ █████████╗
+    """
+    print(spool_art)
 def enter_spool_mode(
     npc: NPC = None,
     team = None,
     model: str = None,
     provider: str = None,
-
-
+    vmodel: str = None,
+    vprovider: str = None,
     attachments: List[str] = None,
     rag_similarity_threshold: float = 0.3,
     messages: List[Dict] = None,
     conversation_id: str = None,
-    stream: bool =
+    stream: bool = None,
     **kwargs,
 ) -> Dict:
+    print_spool_ascii()
+    # Initialize state using existing infrastructure
+    command_history, state_team, default_npc = setup_shell()
 
-
-
-
-
-
-
-
+    # Create spool state, inheriting from initial_state
+    spool_state = ShellState(
+        npc=npc or default_npc,
+        team=team or state_team,
+        messages=messages.copy() if messages else [],
+        conversation_id=conversation_id or start_new_conversation(),
+        current_path=os.getcwd(),
+        stream_output=stream if stream is not None else initial_state.stream_output,
+        attachments=None,
+    )
+
+    # Override models/providers if specified
+    if model:
+        spool_state.chat_model = model
+    if provider:
+        spool_state.chat_provider = provider
+    if vmodel:
+        spool_state.vision_model = vmodel
+    if vprovider:
+        spool_state.vision_provider = vprovider
+
+    npc_info = f" (NPC: {spool_state.npc.name})" if spool_state.npc else ""
+    print(f"🧵 Entering spool mode{npc_info}. Type '/sq' to exit spool mode.")
     print("💡 Tip: Press Ctrl+C during streaming to interrupt and continue with a new message.")
 
-
+    # Handle file loading
     loaded_chunks = {}
-
-
-
-
-    command_history = CommandHistory()
-
-    files_to_load = attachments
-    if files_to_load:
-        if isinstance(files_to_load, str):
-            files_to_load = [f.strip() for f in files_to_load.split(',')]
+    if attachments:
+        if isinstance(attachments, str):
+            attachments = [f.strip() for f in attachments.split(',')]
 
-        for file_path in
+        for file_path in attachments:
             file_path = os.path.expanduser(file_path)
             if not os.path.exists(file_path):
-                print(f"Error: File not found at {file_path}")
+                print(colored(f"Error: File not found at {file_path}", "red"))
                 continue
             try:
                 chunks = load_file_contents(file_path)
                 loaded_chunks[file_path] = chunks
-                print(f"Loaded {len(chunks)} chunks from: {file_path}")
+                print(colored(f"Loaded {len(chunks)} chunks from: {file_path}", "green"))
             except Exception as e:
-                print(f"Error loading {file_path}: {str(e)}")
-
-    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
-    if not spool_context or spool_context[0].get("role") != "system":
-        spool_context.insert(0, {"role": "system", "content": system_message})
-
-    if loaded_chunks:
-        initial_file_context = "\n\n--- The user has loaded the following files for this session ---\n"
-        for filename, chunks in loaded_chunks.items():
-            initial_file_context += f"\n\n--- Start of content from {filename} ---\n"
-            initial_file_context += "\n".join(chunks)
-            initial_file_context += f"\n--- End of content from {filename} ---\n"
-
-    def _handle_llm_interaction(
-        prompt,
-        current_context,
-        model_to_use,
-        provider_to_use,
-        images_to_use=None
-    ):
-
-        current_context.append({"role": "user", "content": prompt})
+                print(colored(f"Error loading {file_path}: {str(e)}", "red"))
 
-
-
-
-
-            prompt,
-            wd=os.getcwd(),
-            model=model_to_use,
-            provider=provider_to_use,
-            npc=npc.name if npc else None,
-            team=team.name if team else None,
-        )
-
-        assistant_reply = ""
-
-        try:
-            response = get_llm_response(
-                prompt,
-                model=model_to_use,
-                provider=provider_to_use,
-                messages=current_context,
-                images=images_to_use,
-                stream=stream,
-                npc=npc
-            )
-            assistant_reply = response.get('response')
-
-            if stream:
-                print(orange(f'{npc.name if npc else "🧵"}....> '), end='', flush=True)
-
-                # The streaming function now handles KeyboardInterrupt internally
-                assistant_reply = print_and_process_stream_with_markdown(
-                    assistant_reply,
-                    model=model_to_use,
-                    provider=provider_to_use
-                )
-            else:
-                render_markdown(assistant_reply)
-
-        except Exception as e:
-            assistant_reply = f"[Error during response generation: {str(e)}]"
-            print(f"\n❌ Error: {str(e)}")
-
-        current_context.append({"role": "assistant", "content": assistant_reply})
-
-        if assistant_reply and assistant_reply.count("```") % 2 != 0:
-            assistant_reply += "```"
-
-        save_conversation_message(
-            command_history,
-            conversation_id,
-            "assistant",
-            assistant_reply,
-            wd=os.getcwd(),
-            model=model_to_use,
-            provider=provider_to_use,
-            npc=npc.name if npc else None,
-            team=team.name if team else None,
-        )
-
-        return current_context
+    # Initialize context with system message if needed
+    if not spool_state.messages or spool_state.messages[0].get("role") != "system":
+        system_message = get_system_message(spool_state.npc) if spool_state.npc else "You are a helpful assistant."
+        spool_state.messages.insert(0, {"role": "system", "content": system_message})
 
     while True:
         try:
-
-
+            # Use consistent prompt styling with npcsh
+            npc_name = spool_state.npc.name if spool_state.npc else "chat"
+            display_model = spool_state.npc.model if spool_state.npc and spool_state.npc.model else spool_state.chat_model
+
+            prompt_str = f"{orange(npc_name)}:{display_model}🧵> "
+            prompt = readline_safe_prompt(prompt_str)
+            user_input = get_multiline_input(prompt).strip()
 
             if not user_input:
                 continue
+
             if user_input.lower() == "/sq":
                 print("Exiting spool mode.")
                 break
+
             if user_input.lower() == "/yap":
-
+                spool_state.messages = enter_yap_mode(spool_state.messages, spool_state.npc)
                 continue
 
+            # Handle vision commands
             if user_input.startswith("/ots"):
                 command_parts = user_input.split()
                 image_paths = []
@@ -179,26 +131,42 @@ def enter_spool_mode(
                 if len(command_parts) > 1:
                     for img_path in command_parts[1:]:
                         full_path = os.path.expanduser(img_path)
-                        if os.path.exists(full_path):
-
+                        if os.path.exists(full_path):
+                            image_paths.append(full_path)
+                        else:
+                            print(colored(f"Error: Image file not found at {full_path}", "red"))
                 else:
                     screenshot = capture_screenshot()
                     if screenshot and "file_path" in screenshot:
                         image_paths.append(screenshot["file_path"])
-                        print(f"Screenshot captured: {screenshot['filename']}")
+                        print(colored(f"Screenshot captured: {screenshot['filename']}", "green"))
 
-                if not image_paths:
+                if not image_paths:
+                    continue
 
                 vision_prompt = input("Prompt for image(s) (or press Enter): ").strip() or "Describe these images."
-
-
-
-
-
-
+
+                # Use vision models for image processing
+                response = get_llm_response(
+                    vision_prompt,
+                    model=spool_state.vision_model,
+                    provider=spool_state.vision_provider,
+                    messages=spool_state.messages,
+                    images=image_paths,
+                    stream=spool_state.stream_output,
+                    npc=spool_state.npc,
+                    **kwargs
+
                 )
+
+                spool_state.messages = response.get('messages', spool_state.messages)
+                output = response.get('response')
+
+                # Process and display the result
+                process_result(vision_prompt, spool_state, {'output': output}, command_history)
                 continue
 
+            # Handle RAG context if files are loaded
             current_prompt = user_input
             if loaded_chunks:
                 context_content = ""
@@ -214,24 +182,32 @@ def enter_spool_mode(
 
                 if context_content:
                     current_prompt += f"\n\n--- Relevant context from loaded files ---\n{context_content}"
-                    print(f'prepped context_content : {context_content}')
 
-
-
-
-
-
+            # Use standard LLM processing
+            response = get_llm_response(
+                current_prompt,
+                model=spool_state.npc.model if spool_state.npc and spool_state.npc.model else spool_state.chat_model,
+                provider=spool_state.npc.provider if spool_state.npc and spool_state.npc.provider else spool_state.chat_provider,
+                messages=spool_state.messages,
+                stream=spool_state.stream_output,
+                npc=spool_state.npc,
+                **kwargs
             )
+
+            spool_state.messages = response.get('messages', spool_state.messages)
+            output = response.get('response')
+
+            # Use existing result processing
+            process_result(current_prompt, spool_state, {'output': output}, command_history)
 
         except (EOFError,):
            print("\nExiting spool mode.")
             break
         except KeyboardInterrupt:
-            # This handles Ctrl+C at the input prompt (not during streaming)
             print("\n🔄 Use '/sq' to exit or continue with a new message.")
             continue
 
-    return {"messages":
+    return {"messages": spool_state.messages, "output": "Exited spool mode."}
 
 
 def main():
@@ -241,14 +217,32 @@ def main():
     parser.add_argument("--provider", help="Provider to use")
     parser.add_argument("--attachments", nargs="*", help="Files to load into context")
     parser.add_argument("--stream", default="true", help="Use streaming mode")
-    parser.add_argument("--npc", type=str,
+    parser.add_argument("--npc", type=str, help="NPC name or path to NPC file", default='sibiji',)
 
     args = parser.parse_args()
 
-
+    # Use existing infrastructure to get NPC
+    command_history, team, default_npc = setup_shell()
+
+    npc = None
+    if args.npc:
+        if os.path.exists(os.path.expanduser(args.npc)):
+            npc = NPC(file=args.npc)
+        elif team and args.npc in team.npcs:
+            npc = team.npcs[args.npc]
+        else:
+            try:
+                npc_path = get_npc_path(args.npc, command_history.db_path)
+                npc = NPC(file=npc_path)
+            except ValueError:
+                print(colored(f"NPC '{args.npc}' not found. Using default.", "yellow"))
+                npc = default_npc
+    else:
+        npc = default_npc
 
     enter_spool_mode(
         npc=npc,
+        team=team,
         model=args.model,
         provider=args.provider,
         attachments=args.attachments,
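
For orientation: the rewritten enter_spool_mode now routes all session state through ShellState (from npcsh._state) and adds separate vision-model overrides (vmodel, vprovider). Below is a minimal sketch of driving the new API from Python, based only on the signature and return value visible in the diff above; the NPC file path and the model/provider strings are illustrative placeholders, not npcsh defaults.

    # Sketch: call the new spool-mode API directly. Names and keyword arguments
    # are taken from the diff; values marked "placeholder" are assumptions.
    from npcsh.spool import enter_spool_mode
    from npcpy.npc_compiler import NPC

    npc = NPC(file="~/.npcsh/npc_team/sibiji.npc")  # hypothetical NPC file path

    result = enter_spool_mode(
        npc=npc,
        model="gpt-4o-mini",        # chat model override (placeholder)
        provider="openai",          # chat provider override (placeholder)
        vmodel="gpt-4o",            # vision model, added in this diff (placeholder)
        vprovider="openai",         # vision provider, added in this diff (placeholder)
        attachments=["notes.txt"],  # chunked into loaded_chunks for RAG context
        stream=True,                # None falls back to initial_state.stream_output
    )
    print(result["output"])  # "Exited spool mode." per the new return statement

The same options are exposed on the command line through main()'s argparse flags shown above (--model, --provider, --attachments, --stream, and --npc, which now defaults to 'sibiji').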