npcsh 1.0.11__py3-none-any.whl → 1.0.13__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- npcsh/_state.py +89 -1
- npcsh/alicanto.py +22 -7
- npcsh/npcsh.py +434 -492
- npcsh/plonk.py +300 -367
- npcsh/routes.py +367 -162
- npcsh/spool.py +162 -221
- npcsh-1.0.13.dist-info/METADATA +775 -0
- npcsh-1.0.13.dist-info/RECORD +21 -0
- npcsh-1.0.11.dist-info/METADATA +0 -596
- npcsh-1.0.11.dist-info/RECORD +0 -21
- {npcsh-1.0.11.dist-info → npcsh-1.0.13.dist-info}/WHEEL +0 -0
- {npcsh-1.0.11.dist-info → npcsh-1.0.13.dist-info}/entry_points.txt +0 -0
- {npcsh-1.0.11.dist-info → npcsh-1.0.13.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.11.dist-info → npcsh-1.0.13.dist-info}/top_level.txt +0 -0
npcsh/spool.py
CHANGED

````diff
@@ -1,12 +1,12 @@
 from npcpy.memory.command_history import CommandHistory, start_new_conversation, save_conversation_message
-from npcpy.data.load import
+from npcpy.data.load import load_file_contents
 from npcpy.data.image import capture_screenshot
 from npcpy.data.text import rag_search
 
 import os
 from npcpy.npc_sysenv import (
     print_and_process_stream_with_markdown,
-
+)
 from npcpy.npc_sysenv import (
     get_system_message,
     render_markdown,
````
````diff
@@ -28,25 +28,21 @@ from npcsh.yap import enter_yap_mode
 
 
 def enter_spool_mode(
-    npc = None,
+    npc: NPC = None,
     team = None,
-    model: str =
-    provider: str =
-    vision_model:str =
-    vision_provider:str =
-
+    model: str = None,
+    provider: str = None,
+    vision_model:str = None,
+    vision_provider:str = None,
+    attachments: List[str] = None,
     rag_similarity_threshold: float = 0.3,
     messages: List[Dict] = None,
     conversation_id: str = None,
     stream: bool = NPCSH_STREAM_OUTPUT,
+    **kwargs,
 ) -> Dict:
-
-
-
-
-
-    npc : Any : The NPC object.
-    files : List[str] : List of file paths to load into the context.
-    Returns:
-    Dict : The messages and output.
-    """
+
+    session_model = model or (npc.model if npc else NPCSH_CHAT_MODEL)
+    session_provider = provider or (npc.provider if npc else NPCSH_CHAT_PROVIDER)
+    session_vision_model = vision_model or NPCSH_VISION_MODEL
+    session_vision_provider = vision_provider or NPCSH_VISION_PROVIDER
````
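The new signature drops hard-coded defaults in favor of `None` sentinels resolved once at session start with `or`-chains. A small self-contained sketch of the precedence this produces (the `StubNPC` class and the constant values are illustrative stand-ins, not the package's real objects):

```python
from dataclasses import dataclass
from typing import Optional

# Illustrative stand-ins for the env-driven defaults in npcpy.npc_sysenv.
NPCSH_CHAT_MODEL = "llama3.2"
NPCSH_CHAT_PROVIDER = "ollama"

@dataclass
class StubNPC:
    model: Optional[str] = None
    provider: Optional[str] = None

def resolve_session(model=None, provider=None, npc=None):
    # Same or-chains as the diff: explicit argument wins, then the NPC's
    # own setting, then the global default.
    session_model = model or (npc.model if npc else NPCSH_CHAT_MODEL)
    session_provider = provider or (npc.provider if npc else NPCSH_CHAT_PROVIDER)
    return session_model, session_provider

print(resolve_session())                                      # ('llama3.2', 'ollama')
print(resolve_session(npc=StubNPC("gpt-4o-mini", "openai")))  # ('gpt-4o-mini', 'openai')
# Edge case the or-chain implies: an NPC with no provider set yields None
# rather than falling back to NPCSH_CHAT_PROVIDER.
print(resolve_session(npc=StubNPC(model="gpt-4o-mini")))      # ('gpt-4o-mini', None)
```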
````diff
@@ -53,32 +49,30 @@
 
     npc_info = f" (NPC: {npc.name})" if npc else ""
     print(f"Entering spool mode{npc_info}. Type '/sq' to exit spool mode.")
+    print("💡 Tip: Press Ctrl+C during streaming to interrupt and continue with a new message.")
 
-    spool_context = (
-
-    ) # Initialize context with messages
+    spool_context = messages.copy() if messages else []
+    loaded_chunks = {}
 
-    loaded_content = {} # New dictionary to hold loaded content
-
-    # Create conversation ID if not provided
     if not conversation_id:
         conversation_id = start_new_conversation()
 
     command_history = CommandHistory()
-
-
-
-
+
+    files_to_load = attachments
+    if files_to_load:
+        if isinstance(files_to_load, str):
+            files_to_load = [f.strip() for f in files_to_load.split(',')]
+
+        for file_path in files_to_load:
+            file_path = os.path.expanduser(file_path)
+            if not os.path.exists(file_path):
+                print(f"Error: File not found at {file_path}")
+                continue
             try:
-
-
-
-                    content = load_csv(file)
-                else:
-                    print(f"Unsupported file type: {file}")
-                    continue
-                loaded_content[file] = content
-                print(f"Loaded content from: {file}")
+                chunks = load_file_contents(file_path)
+                loaded_chunks[file_path] = chunks
+                print(f"Loaded {len(chunks)} chunks from: {file_path}")
             except Exception as e:
-                print(f"Error loading {
+                print(f"Error loading {file_path}: {str(e)}")
 
````
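The loading path replaces the old per-format branches (`load_csv` and friends) with a single `load_file_contents` call; judging from `len(chunks)` and the later `"\n".join(chunks)`, it is assumed to return a list of text chunks per file. The attachment normalization around it is fully visible in the diff and can be isolated as:

```python
import os

def normalize_attachments(attachments):
    # Mirrors the loop above: a comma-separated string becomes a list of
    # paths, a list passes through, and every entry is expanded and
    # existence-checked before any loading happens.
    if isinstance(attachments, str):
        attachments = [f.strip() for f in attachments.split(',')]
    usable = []
    for file_path in attachments or []:
        file_path = os.path.expanduser(file_path)
        if not os.path.exists(file_path):
            print(f"Error: File not found at {file_path}")
            continue
        usable.append(file_path)
    return usable

# Both call shapes that enter_spool_mode accepts end up equivalent:
print(normalize_attachments("~/notes.txt, ~/report.pdf"))
print(normalize_attachments(["~/notes.txt", "~/report.pdf"]))
```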
````diff
@@ -85,23 +79,82 @@
-    # Add system message to context
     system_message = get_system_message(npc) if npc else "You are a helpful assistant."
-    if
-
-        spool_context.insert(0, {"role": "system", "content": system_message})
-    else:
-        spool_context.append({"role": "system", "content": system_message})
-    # Inherit last n messages if specified
-    if npc is not None:
-        if model is None:
-            model = npc.model
-        if provider is None:
-            provider = npc.provider
+    if not spool_context or spool_context[0].get("role") != "system":
+        spool_context.insert(0, {"role": "system", "content": system_message})
 
-
-
-
-
+    if loaded_chunks:
+        initial_file_context = "\n\n--- The user has loaded the following files for this session ---\n"
+        for filename, chunks in loaded_chunks.items():
+            initial_file_context += f"\n\n--- Start of content from {filename} ---\n"
+            initial_file_context += "\n".join(chunks)
+            initial_file_context += f"\n--- End of content from {filename} ---\n"
 
+    def _handle_llm_interaction(
+        prompt,
+        current_context,
+        model_to_use,
+        provider_to_use,
+        images_to_use=None
+    ):
+
+        current_context.append({"role": "user", "content": prompt})
+
+        save_conversation_message(
+            command_history,
+            conversation_id,
+            "user",
+            prompt,
+            wd=os.getcwd(),
+            model=model_to_use,
+            provider=provider_to_use,
+            npc=npc.name if npc else None,
+            team=team.name if team else None,
+        )
+
+        assistant_reply = ""
+
         try:
-
-
-
+            response = get_llm_response(
+                prompt,
+                model=model_to_use,
+                provider=provider_to_use,
+                messages=current_context,
+                images=images_to_use,
+                stream=stream,
+                npc=npc
+            )
+            assistant_reply = response.get('response')
+
+            if stream:
+                print(orange(f'{npc.name if npc else "🧵"}....> '), end='', flush=True)
+
+                # The streaming function now handles KeyboardInterrupt internally
+                assistant_reply = print_and_process_stream_with_markdown(
+                    assistant_reply,
+                    model=model_to_use,
+                    provider=provider_to_use
+                )
+            else:
+                render_markdown(assistant_reply)
+
+        except Exception as e:
+            assistant_reply = f"[Error during response generation: {str(e)}]"
+            print(f"\n❌ Error: {str(e)}")
+
+        current_context.append({"role": "assistant", "content": assistant_reply})
+
+        if assistant_reply and assistant_reply.count("```") % 2 != 0:
+            assistant_reply += "```"
+
+        save_conversation_message(
+            command_history,
+            conversation_id,
+            "assistant",
+            assistant_reply,
+            wd=os.getcwd(),
+            model=model_to_use,
+            provider=provider_to_use,
+            npc=npc.name if npc else None,
+            team=team.name if team else None,
+        )
+
+        return current_context
+
````
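This nested helper is the heart of the rewrite: the plain-text path and the `/ots` vision path previously each carried their own copy of the save/generate/stream/save sequence, and both now funnel through `_handle_llm_interaction`. A dependency-free condensation of its contract (`llm` stands in for `get_llm_response`, `save` for `save_conversation_message`; streaming and rendering are omitted):

````python
def handle_interaction(prompt, context, llm, save=lambda role, text: None):
    # Record the user turn and persist it before generation, so the turn
    # survives even if the model call fails.
    context.append({"role": "user", "content": prompt})
    save("user", prompt)
    try:
        reply = llm(prompt, context)
    except Exception as e:
        # Failures become a visible assistant message instead of
        # crashing the REPL loop.
        reply = f"[Error during response generation: {e}]"
    context.append({"role": "assistant", "content": reply})
    if reply and reply.count("```") % 2 != 0:
        reply += "```"  # as in the diff, the fence repair reaches only the saved copy
    save("assistant", reply)
    return context

ctx = handle_interaction("hello", [], llm=lambda p, c: "hi there")
print(ctx[-1])  # {'role': 'assistant', 'content': 'hi there'}
````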
````diff
@@ -108,103 +161,40 @@
+    while True:
+        try:
+            prompt_text = orange(f"🧵:{npc.name if npc else 'chat'}:{session_model}> ")
+            user_input = input(prompt_text).strip()
+
+            if not user_input:
                 continue
             if user_input.lower() == "/sq":
                 print("Exiting spool mode.")
                 break
-
-
-            messages = enter_yap_mode(spool_context, npc)
+            if user_input.lower() == "/yap":
+                spool_context = enter_yap_mode(spool_context, npc)
                 continue
 
             if user_input.startswith("/ots"):
                 command_parts = user_input.split()
                 image_paths = []
-            print('using vision model: ', vision_model)
 
-            # Handle image loading/capturing
                 if len(command_parts) > 1:
-            # User provided image path(s)
                     for img_path in command_parts[1:]:
-                full_path = os.path.
-                if os.path.exists(full_path):
-
-                else:
-                    print(f"Error: Image file not found at {full_path}")
+                        full_path = os.path.expanduser(img_path)
+                        if os.path.exists(full_path): image_paths.append(full_path)
+                        else: print(f"Error: Image file not found at {full_path}")
                 else:
-
-
-
-
-            print(f"Screenshot captured: {output['filename']}")
+                    screenshot = capture_screenshot()
+                    if screenshot and "file_path" in screenshot:
+                        image_paths.append(screenshot["file_path"])
+                        print(f"Screenshot captured: {screenshot['filename']}")
 
-            if not image_paths:
-                print("No valid images provided.")
-                continue
+                if not image_paths: continue
 
-
-
-
+                vision_prompt = input("Prompt for image(s) (or press Enter): ").strip() or "Describe these images."
+                spool_context = _handle_llm_interaction(
+                    vision_prompt,
+                    spool_context,
+                    session_vision_model,
+                    session_vision_provider,
+                    images_to_use=image_paths
                 )
-            if not user_prompt:
-                user_prompt = "Please analyze these images."
-
-            model= vision_model
-            provider= vision_provider
-            # Save the user message
-            message_id = save_conversation_message(
-                command_history,
-                conversation_id,
-                "user",
-                user_prompt,
-                wd=os.getcwd(),
-                model=vision_model,
-                provider=vision_provider,
-                npc=npc.name if npc else None,
-                team=team.name if team else None,
-
-            )
-
-            # Process the request with our unified approach
-            response = get_llm_response(
-                user_prompt,
-                model=vision_model,
-                provider=provider,
-                messages=spool_context,
-                images=image_paths,
-                stream=stream,
-                **kwargs_to_pass
-            )
-
-            # Extract the assistant's response
-            assistant_reply = response['response']
-
-            spool_context = response['messages']
-
-            if stream:
-                print(orange(f'spool:{npc.name}:{vision_model}>'), end='', flush=True)
-
-                assistant_reply = print_and_process_stream_with_markdown(assistant_reply, model=model, provider=provider)
-
-            spool_context.append({"role": "assistant", "content": assistant_reply})
-            if assistant_reply.count("```") % 2 != 0:
-                assistant_reply = assistant_reply + "```"
-            # Save the assistant's response
-            save_conversation_message(
-                command_history,
-                conversation_id,
-                "assistant",
-                assistant_reply,
-                wd=os.getcwd(),
-                model=vision_model,
-                provider=vision_provider,
-                npc=npc.name if npc else None,
-                team=team.name if team else None,
-
-
-            )
-
-
-
-            # Display the response
-            if not stream:
-                render_markdown(assistant_reply)
-
                 continue
````
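The rebuilt `/ots` branch reads the screenshot result defensively: judging from the new guard, `capture_screenshot()` is assumed to return a dict with `file_path` and `filename` keys, or something falsy on failure (the old code read `output['filename']`, so the key names appear to have changed between versions). The consumption pattern in isolation, with a stub capture function:

```python
def capture_screenshot():
    # Stub standing in for npcpy.data.image.capture_screenshot;
    # the real return shape is inferred from the guard below.
    return {"file_path": "/tmp/screenshot_001.png",
            "filename": "screenshot_001.png"}

image_paths = []
screenshot = capture_screenshot()
if screenshot and "file_path" in screenshot:  # guard against a failed capture
    image_paths.append(screenshot["file_path"])
    print(f"Screenshot captured: {screenshot['filename']}")
if not image_paths:
    print("No image available; skipping the vision call.")
```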
````diff
@@ -211,46 +201,25 @@
-
-
 
-
-            if
+            current_prompt = user_input
+            if loaded_chunks:
                 context_content = ""
-                for filename,
+                for filename, chunks in loaded_chunks.items():
+                    full_content_str = "\n".join(chunks)
                     retrieved_docs = rag_search(
                         user_input,
-
+                        full_content_str,
                         similarity_threshold=rag_similarity_threshold,
                     )
                     if retrieved_docs:
-                        context_content += (
-                        f"\n\nLoaded content from: {filename}\n{content}\n\n"
-                        )
-                if len(context_content) > 0:
-                    user_input += f"""
-Here is the loaded content that may be relevant to your query:
-{context_content}
-Please reference it explicitly in your response and use it for answering.
-"""
-
-                # Save user message
-                message_id = save_conversation_message(
-                    command_history,
-                    conversation_id,
-                    "user",
-                    user_input,
-                    wd=os.getcwd(),
-                    model=model,
-                    provider=provider,
-                    npc=npc.name if npc else None,
-                    team=team.name if team else None,
+                        context_content += f"\n\nContext from: {filename}\n{retrieved_docs}\n"
 
-
+                if context_content:
+                    current_prompt += f"\n\n--- Relevant context from loaded files ---\n{context_content}"
+                    print(f'prepped context_content : {context_content}')
 
-
-
-
-
-
-                stream=stream,
-                **kwargs_to_pass
+            spool_context = _handle_llm_interaction(
+                current_prompt,
+                spool_context,
+                session_model,
+                session_provider
             )
 
````
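The context injection also changes substance, not just shape: the old loop pasted whole file contents into the prompt, while the new one re-joins each file's chunks and keeps only what `rag_search` retrieves above the similarity threshold. A runnable miniature of that flow, with a toy word-overlap retriever standing in for the real `rag_search` (whose internals the diff does not show):

```python
def toy_rag_search(query, corpus, similarity_threshold=0.3):
    # Stand-in for npcpy.data.text.rag_search: return matching passages,
    # or None when nothing clears the bar. The real function is
    # similarity-based, hence the threshold parameter.
    hits = [line for line in corpus.splitlines()
            if set(query.lower().split()) & set(line.lower().split())]
    return hits or None

loaded_chunks = {"notes.txt": ["apples are red", "the sky is blue"]}
user_input = "what color is the sky?"

current_prompt = user_input
context_content = ""
for filename, chunks in loaded_chunks.items():
    retrieved_docs = toy_rag_search(user_input, "\n".join(chunks))
    if retrieved_docs:
        context_content += f"\n\nContext from: {filename}\n{retrieved_docs}\n"
if context_content:
    current_prompt += f"\n\n--- Relevant context from loaded files ---\n{context_content}"
print(current_prompt)  # only 'the sky is blue' is injected, not the whole file
```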
````diff
@@ -257,61 +226,33 @@
-
-            if stream:
-                print(orange(f'{npc.name if npc else "spool"}:{npc.model if npc else model}>'), end='', flush=True)
-                assistant_reply = print_and_process_stream_with_markdown(assistant_reply, model=model, provider=provider)
-            # Save assistant message
-            save_conversation_message(
-                command_history,
-                conversation_id,
-                "assistant",
-                assistant_reply,
-                wd=os.getcwd(),
-                model=model,
-                provider=provider,
-                npc=npc.name if npc else None,
-                team=team.name if team else None,
-
-            )
-
-            # Fix unfinished markdown notation
-            if assistant_reply.count("```") % 2 != 0:
-                assistant_reply = assistant_reply + "```"
-
-            if not stream:
-                render_markdown(assistant_reply)
-
-        except (KeyboardInterrupt, EOFError):
+        except (EOFError,):
             print("\nExiting spool mode.")
             break
+        except KeyboardInterrupt:
+            # This handles Ctrl+C at the input prompt (not during streaming)
+            print("\n🔄 Use '/sq' to exit or continue with a new message.")
+            continue
+
+    return {"messages": spool_context, "output": "Exited spool mode."}
+
 
-    return {
-        "messages": spool_context,
-        "output": "\n".join(
-            [msg["content"] for msg in spool_context if msg["role"] == "assistant"]
-        ),
-    }
 def main():
-    # Example usage
     import argparse
     parser = argparse.ArgumentParser(description="Enter spool mode for chatting with an LLM")
-    parser.add_argument("--model",
-    parser.add_argument("--provider",
-    parser.add_argument("--
+    parser.add_argument("--model", help="Model to use")
+    parser.add_argument("--provider", help="Provider to use")
+    parser.add_argument("--attachments", nargs="*", help="Files to load into context")
     parser.add_argument("--stream", default="true", help="Use streaming mode")
     parser.add_argument("--npc", type=str, default=os.path.expanduser('~/.npcsh/npc_team/sibiji.npc'), help="Path to NPC file")
 
-
    args = parser.parse_args()
 
-    npc = NPC(file=args.npc)
-
-    print(args.stream)
-    # Enter spool mode
+    npc = NPC(file=args.npc) if os.path.exists(os.path.expanduser(args.npc)) else None
+
     enter_spool_mode(
         npc=npc,
         model=args.model,
         provider=args.provider,
-
-        stream=
+        attachments=args.attachments,
+        stream=args.stream.lower() == "true",
     )
 
 if __name__ == "__main__":
````
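The biggest behavioral change sits in the exception handlers: the old loop treated Ctrl+C and Ctrl+D identically (both exited), while the new one keeps only EOF as an exit and turns Ctrl+C at the prompt into "abandon this turn". A minimal REPL skeleton isolating just that control flow:

```python
def mini_repl():
    # Ctrl+D (EOFError) leaves the loop; Ctrl+C (KeyboardInterrupt) at the
    # prompt starts a fresh turn, matching the new spool-mode semantics.
    while True:
        try:
            user_input = input("> ").strip()
            if user_input == "/sq":
                break
        except EOFError:
            print("\nExiting spool mode.")
            break
        except KeyboardInterrupt:
            print("\nUse '/sq' to exit or continue with a new message.")
            continue
    return {"messages": [], "output": "Exited spool mode."}

if __name__ == "__main__":
    mini_repl()
```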