npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- npcsh/_state.py +942 -0
- npcsh/alicanto.py +1074 -0
- npcsh/guac.py +785 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_npcsh.py +822 -0
- npcsh/mcp_server.py +184 -0
- npcsh/npc.py +218 -0
- npcsh/npcsh.py +1161 -0
- npcsh/plonk.py +387 -269
- npcsh/pti.py +234 -0
- npcsh/routes.py +958 -0
- npcsh/spool.py +315 -0
- npcsh/wander.py +550 -0
- npcsh/yap.py +573 -0
- npcsh-1.0.0.dist-info/METADATA +596 -0
- npcsh-1.0.0.dist-info/RECORD +21 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
- npcsh-1.0.0.dist-info/entry_points.txt +9 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
- npcsh/audio.py +0 -210
- npcsh/cli.py +0 -545
- npcsh/command_history.py +0 -566
- npcsh/conversation.py +0 -291
- npcsh/data_models.py +0 -46
- npcsh/dataframes.py +0 -163
- npcsh/embeddings.py +0 -168
- npcsh/helpers.py +0 -641
- npcsh/image.py +0 -298
- npcsh/image_gen.py +0 -79
- npcsh/knowledge_graph.py +0 -1006
- npcsh/llm_funcs.py +0 -2027
- npcsh/load_data.py +0 -83
- npcsh/main.py +0 -5
- npcsh/model_runner.py +0 -189
- npcsh/npc_compiler.py +0 -2870
- npcsh/npc_sysenv.py +0 -383
- npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
- npcsh/npc_team/corca.npc +0 -13
- npcsh/npc_team/foreman.npc +0 -7
- npcsh/npc_team/npcsh.ctx +0 -11
- npcsh/npc_team/sibiji.npc +0 -4
- npcsh/npc_team/templates/analytics/celona.npc +0 -0
- npcsh/npc_team/templates/hr_support/raone.npc +0 -0
- npcsh/npc_team/templates/humanities/eriane.npc +0 -4
- npcsh/npc_team/templates/it_support/lineru.npc +0 -0
- npcsh/npc_team/templates/marketing/slean.npc +0 -4
- npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
- npcsh/npc_team/templates/sales/turnic.npc +0 -4
- npcsh/npc_team/templates/software/welxor.npc +0 -0
- npcsh/npc_team/tools/bash_executer.tool +0 -32
- npcsh/npc_team/tools/calculator.tool +0 -8
- npcsh/npc_team/tools/code_executor.tool +0 -16
- npcsh/npc_team/tools/generic_search.tool +0 -27
- npcsh/npc_team/tools/image_generation.tool +0 -25
- npcsh/npc_team/tools/local_search.tool +0 -149
- npcsh/npc_team/tools/npcsh_executor.tool +0 -9
- npcsh/npc_team/tools/screen_cap.tool +0 -27
- npcsh/npc_team/tools/sql_executor.tool +0 -26
- npcsh/response.py +0 -623
- npcsh/search.py +0 -248
- npcsh/serve.py +0 -1460
- npcsh/shell.py +0 -538
- npcsh/shell_helpers.py +0 -3529
- npcsh/stream.py +0 -700
- npcsh/video.py +0 -49
- npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
- npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
- npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
- npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
- npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
- npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
- npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
- npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
- npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
- npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
- npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
- npcsh-0.3.31.dist-info/METADATA +0 -1853
- npcsh-0.3.31.dist-info/RECORD +0 -76
- npcsh-0.3.31.dist-info/entry_points.txt +0 -3
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/spool.py
ADDED
@@ -0,0 +1,315 @@
from npcpy.memory.command_history import CommandHistory, start_new_conversation, save_conversation_message
from npcpy.data.load import load_pdf, load_csv, load_json, load_excel, load_txt
from npcpy.data.image import capture_screenshot
from npcpy.data.text import rag_search

import os
from npcpy.npc_sysenv import (
    print_and_process_stream_with_markdown,
)
from npcsh._state import (
    orange,
    get_system_message,
    render_markdown,
    NPCSH_VISION_MODEL,
    NPCSH_VISION_PROVIDER,
    NPCSH_CHAT_MODEL,
    NPCSH_CHAT_PROVIDER,
    NPCSH_STREAM_OUTPUT,
)
from npcpy.llm_funcs import get_llm_response

from npcpy.npc_compiler import NPC
from typing import Any, List, Dict, Union
from npcpy.modes.yap import enter_yap_mode

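# Note: the NPCSH_* names imported above are configuration defaults defined in
# npcsh._state; they fill in the chat/vision model, the provider, and the
# streaming flag whenever the caller does not pass overrides to enter_spool_mode.
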
def enter_spool_mode(
    npc=None,
    team=None,
    model: str = NPCSH_CHAT_MODEL,
    provider: str = NPCSH_CHAT_PROVIDER,
    vision_model: str = NPCSH_VISION_MODEL,
    vision_provider: str = NPCSH_VISION_PROVIDER,
    files: List[str] = None,
    rag_similarity_threshold: float = 0.3,
    messages: List[Dict] = None,
    conversation_id: str = None,
    stream: bool = NPCSH_STREAM_OUTPUT,
) -> Dict:
    """
    Function Description:
        Enters spool mode, an interactive chat loop in which files can be
        loaded into memory and used as retrieval context.
    Args:
        npc : Any : The NPC object.
        files : List[str] : List of file paths to load into the context.
    Returns:
        Dict : The conversation messages and the concatenated assistant output.
    """
    npc_info = f" (NPC: {npc.name})" if npc else ""
    print(f"Entering spool mode{npc_info}. Type '/sq' to exit spool mode.")

    spool_context = (
        messages.copy() if messages else []
    )  # Initialize the context with any inherited messages

    loaded_content = {}  # Raw content of loaded files, keyed by path

    # Create a conversation ID if not provided
    if not conversation_id:
        conversation_id = start_new_conversation()

    command_history = CommandHistory()

    # Load the specified files, if any
    if files:
        for file in files:
            extension = os.path.splitext(file)[1].lower()
            try:
                if extension == ".pdf":
                    content = load_pdf(file)["texts"].iloc[0]
                elif extension == ".csv":
                    content = load_csv(file)
                else:
                    print(f"Unsupported file type: {file}")
                    continue
                loaded_content[file] = content
                print(f"Loaded content from: {file}")
            except Exception as e:
                print(f"Error loading {file}: {str(e)}")

    # Add a system message to the context
    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
    if len(spool_context) > 0:
        if spool_context[0]["role"] != "system":
            spool_context.insert(0, {"role": "system", "content": system_message})
    else:
        spool_context.append({"role": "system", "content": system_message})

    # Fall back to the NPC's model/provider when none were given
    if npc is not None:
        if model is None:
            model = npc.model
        if provider is None:
            provider = npc.provider

    while True:
        kwargs_to_pass = {}
        if npc:
            kwargs_to_pass["npc"] = npc

        try:
            user_input = input("spool:in> ").strip()
            if len(user_input) == 0:
                continue
            if user_input.lower() == "/sq":
                print("Exiting spool mode.")
                break

            if user_input.lower() == "/whisper":  # Switch to voice (yap) mode
                spool_context = enter_yap_mode(spool_context, npc)
                continue

            if user_input.startswith("/ots"):
                command_parts = user_input.split()
                image_paths = []
                print('using vision model: ', vision_model)

                # Handle image loading/capturing
                if len(command_parts) > 1:
                    # User provided image path(s)
                    for img_path in command_parts[1:]:
                        full_path = os.path.join(os.getcwd(), img_path)
                        if os.path.exists(full_path):
                            image_paths.append(full_path)
                        else:
                            print(f"Error: Image file not found at {full_path}")
                else:
                    # No path given: capture a screenshot instead
                    output = capture_screenshot(npc=npc)
                    if output and "file_path" in output:
                        image_paths.append(output["file_path"])
                        print(f"Screenshot captured: {output['filename']}")

                if not image_paths:
                    print("No valid images provided.")
                    continue

                # Get a user prompt about the image(s)
                user_prompt = input(
                    "Enter a prompt for the LLM about these images (or press Enter to skip): "
                )
                if not user_prompt:
                    user_prompt = "Please analyze these images."

                # Switch to the vision model for this request
                model = vision_model
                provider = vision_provider

                # Save the user message
                message_id = save_conversation_message(
                    command_history,
                    conversation_id,
                    "user",
                    user_prompt,
                    wd=os.getcwd(),
                    model=vision_model,
                    provider=vision_provider,
                    npc=npc.name if npc else None,
                    team=team.name if team else None,
                )

                # Process the request with the unified response helper
                response = get_llm_response(
                    user_prompt,
                    model=vision_model,
                    provider=vision_provider,
                    messages=spool_context,
                    images=image_paths,
                    stream=stream,
                    **kwargs_to_pass,
                )

                # Extract the assistant's response
                assistant_reply = response['response']
                spool_context = response['messages']

                if stream:
                    print(orange(f'{npc.name if npc else "spool"}:{vision_model}>'), end='', flush=True)
                    assistant_reply = print_and_process_stream_with_markdown(
                        assistant_reply, model=model, provider=provider
                    )

                spool_context.append({"role": "assistant", "content": assistant_reply})

                # Close any unfinished markdown code fence
                if assistant_reply.count("```") % 2 != 0:
                    assistant_reply = assistant_reply + "```"

                # Save the assistant's response
                save_conversation_message(
                    command_history,
                    conversation_id,
                    "assistant",
                    assistant_reply,
                    wd=os.getcwd(),
                    model=vision_model,
                    provider=vision_provider,
                    npc=npc.name if npc else None,
                    team=team.name if team else None,
                )

                # Display the response
                if not stream:
                    render_markdown(assistant_reply)

                continue

            # Handle RAG context from loaded files
            if loaded_content:
                context_content = ""
                for filename, content in loaded_content.items():
                    retrieved_docs = rag_search(
                        user_input,
                        content,
                        similarity_threshold=rag_similarity_threshold,
                    )
                    if retrieved_docs:
                        context_content += (
                            f"\n\nLoaded content from: {filename}\n{content}\n\n"
                        )
                if len(context_content) > 0:
                    user_input += f"""
Here is the loaded content that may be relevant to your query:
{context_content}
Please reference it explicitly in your response and use it for answering.
"""

            # Save the user message
            message_id = save_conversation_message(
                command_history,
                conversation_id,
                "user",
                user_input,
                wd=os.getcwd(),
                model=model,
                provider=provider,
                npc=npc.name if npc else None,
                team=team.name if team else None,
            )

            response = get_llm_response(
                user_input,
                model=model,
                provider=provider,
                messages=spool_context,
                stream=stream,
                **kwargs_to_pass,
            )

            assistant_reply, spool_context = response['response'], response['messages']
            if stream:
                print(orange(f'{npc.name if npc else "spool"}:{npc.model if npc else model}>'), end='', flush=True)
                assistant_reply = print_and_process_stream_with_markdown(
                    assistant_reply, model=model, provider=provider
                )

            # Save the assistant message
            save_conversation_message(
                command_history,
                conversation_id,
                "assistant",
                assistant_reply,
                wd=os.getcwd(),
                model=model,
                provider=provider,
                npc=npc.name if npc else None,
                team=team.name if team else None,
            )

            # Close any unfinished markdown code fence
            if assistant_reply.count("```") % 2 != 0:
                assistant_reply = assistant_reply + "```"

            if not stream:
                render_markdown(assistant_reply)

        except (KeyboardInterrupt, EOFError):
            print("\nExiting spool mode.")
            break

    return {
        "messages": spool_context,
        "output": "\n".join(
            [msg["content"] for msg in spool_context if msg["role"] == "assistant"]
        ),
    }
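
# enter_spool_mode returns {"messages": [...], "output": "..."}: the full
# message transcript plus the newline-joined assistant replies, so callers can
# persist the conversation or pass "messages" back in to resume a session.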

def main():
    # Example usage
    import argparse
    parser = argparse.ArgumentParser(description="Enter spool mode for chatting with an LLM")
    parser.add_argument("--model", default=NPCSH_CHAT_MODEL, help="Model to use")
    parser.add_argument("--provider", default=NPCSH_CHAT_PROVIDER, help="Provider to use")
    parser.add_argument("--files", nargs="*", help="Files to load into context")
    parser.add_argument("--stream", default="true", help="Use streaming mode")
    parser.add_argument("--npc", type=str, default=os.path.expanduser('~/.npcsh/npc_team/sibiji.npc'), help="Path to NPC file")

    args = parser.parse_args()

    npc = NPC(file=args.npc)
    print('npc: ', args.npc)
    print(args.stream)

    # Enter spool mode
    enter_spool_mode(
        npc=npc,
        model=args.model,
        provider=args.provider,
        files=args.files,
        stream=args.stream.lower() == "true",
    )


if __name__ == "__main__":
    main()
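
For orientation, here is a minimal sketch of driving the new module programmatically rather than through main(). The sibiji.npc path is the same default the argparse setup above uses; the snippet assumes npcpy is installed, that this NPC file exists, and that notes.pdf is a stand-in file name:

import os
from npcpy.npc_compiler import NPC
from npcsh.spool import enter_spool_mode

# Load the default NPC and start an interactive spool session over a PDF
# (notes.pdf is a hypothetical placeholder).
npc = NPC(file=os.path.expanduser('~/.npcsh/npc_team/sibiji.npc'))
result = enter_spool_mode(npc=npc, files=['notes.pdf'])

# "output" holds every assistant reply from the session, newline-joined.
print(result['output'])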