npcsh 1.0.12__py3-none-any.whl → 1.0.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +88 -1
- npcsh/alicanto.py +22 -7
- npcsh/npcsh.py +239 -455
- npcsh/plonk.py +300 -367
- npcsh/routes.py +367 -162
- npcsh/spool.py +162 -221
- npcsh-1.0.13.dist-info/METADATA +775 -0
- npcsh-1.0.13.dist-info/RECORD +21 -0
- npcsh-1.0.12.dist-info/METADATA +0 -596
- npcsh-1.0.12.dist-info/RECORD +0 -21
- {npcsh-1.0.12.dist-info → npcsh-1.0.13.dist-info}/WHEEL +0 -0
- {npcsh-1.0.12.dist-info → npcsh-1.0.13.dist-info}/entry_points.txt +0 -0
- {npcsh-1.0.12.dist-info → npcsh-1.0.13.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.12.dist-info → npcsh-1.0.13.dist-info}/top_level.txt +0 -0
npcsh/routes.py
CHANGED
@@ -9,15 +9,7 @@ import time
 from datetime import datetime
 from sqlalchemy import create_engine
 import logging
-
-from npcsh._state import (
-    NPCSH_VISION_MODEL, NPCSH_VISION_PROVIDER, NPCSH_API_URL,
-    NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER, NPCSH_STREAM_OUTPUT,
-    NPCSH_IMAGE_GEN_MODEL, NPCSH_IMAGE_GEN_PROVIDER,
-    NPCSH_EMBEDDING_MODEL, NPCSH_EMBEDDING_PROVIDER,
-    NPCSH_REASONING_MODEL, NPCSH_REASONING_PROVIDER,
-    NPCSH_SEARCH_PROVIDER,
-)
+import json
 from npcpy.data.load import load_file_contents

 from npcpy.llm_funcs import (
@@ -28,24 +20,43 @@ from npcpy.llm_funcs import (
 )
 from npcpy.npc_compiler import NPC, Team, Jinx
 from npcpy.npc_compiler import initialize_npc_project
-
-
+from npcpy.npc_sysenv import render_markdown
 from npcpy.work.plan import execute_plan_command
 from npcpy.work.trigger import execute_trigger_command
 from npcpy.work.desktop import perform_action
-
-
 from npcpy.memory.search import execute_rag_command, execute_search_command, execute_brainblast_command
-from npcpy.memory.command_history import CommandHistory
-
-
-
-
+from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db
 from npcpy.serve import start_flask_server
+from npcpy.mix.debate import run_debate
+from npcpy.data.image import capture_screenshot
+from npcpy.npc_compiler import NPC, Team, Jinx
+from npcpy.npc_compiler import initialize_npc_project
+from npcpy.data.web import search_web
+from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process


+from npcsh._state import (
+    NPCSH_VISION_MODEL,
+    NPCSH_VISION_PROVIDER,
+    NPCSH_API_URL,
+    NPCSH_CHAT_MODEL,
+    NPCSH_CHAT_PROVIDER,
+    NPCSH_STREAM_OUTPUT,
+    NPCSH_IMAGE_GEN_MODEL,
+    NPCSH_IMAGE_GEN_PROVIDER,
+    NPCSH_VIDEO_GEN_MODEL,
+    NPCSH_VIDEO_GEN_PROVIDER,
+    NPCSH_EMBEDDING_MODEL,
+    NPCSH_EMBEDDING_PROVIDER,
+    NPCSH_REASONING_MODEL,
+    NPCSH_REASONING_PROVIDER,
+    NPCSH_SEARCH_PROVIDER,
+    CANONICAL_ARGS,
+    normalize_and_expand_flags,
+    get_argument_help
+)
 from npcsh.guac import enter_guac_mode
-from npcsh.plonk import execute_plonk_command
+from npcsh.plonk import execute_plonk_command, format_plonk_summary
 from npcsh.alicanto import alicanto
 from npcsh.spool import enter_spool_mode
 from npcsh.wander import enter_wander_mode
@@ -53,12 +64,6 @@ from npcsh.yap import enter_yap_mode



-from npcpy.mix.debate import run_debate
-from npcpy.data.image import capture_screenshot
-from npcpy.npc_compiler import NPC, Team, Jinx
-from npcpy.npc_compiler import initialize_npc_project
-from npcpy.data.web import search_web
-
 class CommandRouter:
     def __init__(self):
         self.routes = {}
@@ -97,7 +102,6 @@ class CommandRouter:
         return self.help_info

 router = CommandRouter()
-
 def get_help_text():
     commands = router.get_commands()
     help_info = router.help_info
@@ -107,30 +111,88 @@ def get_help_text():
     for cmd in commands:
         help_text = help_info.get(cmd, "")
         output += f"/{cmd} - {help_text}\n\n"
+
+    arg_help_map = get_argument_help()
+    if arg_help_map:
+        output += "## Common Command-Line Flags\n\n"
+        output += "The shortest unambiguous prefix works (e.g., `-t` for `--temperature`).\n\n"
+
+
+        output += "```\n"
+
+        all_args_to_show = CANONICAL_ARGS[:]
+        all_args_to_show.sort()
+
+
+        NUM_COLUMNS = 4
+        FLAG_WIDTH = 18
+        ALIAS_WIDTH = 12
+        COLUMN_SEPARATOR = " | "
+
+        rows_per_column = (len(all_args_to_show) + NUM_COLUMNS - 1) // NUM_COLUMNS
+        columns = [all_args_to_show[i:i + rows_per_column] for i in range(0, len(all_args_to_show), rows_per_column)]
+
+        def get_shortest_alias(arg):
+            if arg in arg_help_map and arg_help_map[arg]:
+                return min(arg_help_map[arg], key=len)
+            return ""
+
+        header_parts = []
+        for _ in range(NUM_COLUMNS):
+            flag_header = "Flag".ljust(FLAG_WIDTH)
+            alias_header = "Shorthand".ljust(ALIAS_WIDTH)
+            header_parts.append(f"{flag_header}{alias_header}")
+        output += COLUMN_SEPARATOR.join(header_parts) + "\n"
+
+        divider_parts = []
+        for _ in range(NUM_COLUMNS):
+
+            divider_part = "-" * (FLAG_WIDTH + ALIAS_WIDTH)
+            divider_parts.append(divider_part)
+        output += COLUMN_SEPARATOR.join(divider_parts) + "\n"
+
+
+        for i in range(rows_per_column):
+            row_parts = []
+            for col_idx in range(NUM_COLUMNS):
+                if col_idx < len(columns) and i < len(columns[col_idx]):
+                    arg = columns[col_idx][i]
+                    alias = get_shortest_alias(arg)
+                    alias_display = f"(-{alias})" if alias else ""
+
+                    flag_part = f"--{arg}".ljust(FLAG_WIDTH)
+                    alias_part = alias_display.ljust(ALIAS_WIDTH)
+                    row_parts.append(f"{flag_part}{alias_part}")
+                else:
+
+                    row_parts.append(" " * (FLAG_WIDTH + ALIAS_WIDTH))
+
+            output += COLUMN_SEPARATOR.join(row_parts) + "\n"
+
+
+        output += "```\n"
+
     output += """
-
+\n## Note
 - Bash commands and programs can be executed directly (try bash first, then LLM).
 - Use '/exit' or '/quit' to exit the current NPC mode or the npcsh shell.
 - Jinxs defined for the current NPC or Team can also be used like commands (e.g., /screenshot).
 """
     return output
-
 def safe_get(kwargs, key, default=None):
     return kwargs.get(key, default)

 @router.route("breathe", "Condense context on a regular cadence")
 def breathe_handler(command: str, **kwargs):
-
-
-
-        result
-
-
-    except
-
-
-        traceback.print_exc()
-        return {"output": f"Error during breathe: {e}", "messages": messages}
+    #try:
+    result = breathe(**kwargs)
+    if isinstance(result, dict):
+        return result
+    #except NameError:
+    #    return {"output": "Breathe function not available."}
+    #except Exception as e:
+    #    traceback.print_exc()
+    #    return {"output": f"Error during breathe: {e}"}

 @router.route("compile", "Compile NPC profiles")
 def compile_handler(command: str, **kwargs):
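The new flag table in `get_help_text()` lays the canonical flags out column-major across four fixed-width columns. Below is a minimal standalone sketch of that same layout logic; the flag names and shorthands are placeholders, since the real values come from `CANONICAL_ARGS` and `get_argument_help()` in `npcsh._state`.

```python
# Standalone sketch of the column layout used by the new get_help_text().
# Flag names and shorthands here are hypothetical placeholders.
flags = sorted(["model", "provider", "temperature", "stream",
                "vmodel", "vprovider", "emodel", "eprovider"])
aliases = {"model": ["m"], "provider": ["p"], "temperature": ["t"]}  # hypothetical shorthands

NUM_COLUMNS = 4
FLAG_WIDTH = 18
ALIAS_WIDTH = 12
SEP = " | "

# Split the sorted flag list into column-major chunks, as the handler does.
rows_per_column = (len(flags) + NUM_COLUMNS - 1) // NUM_COLUMNS
columns = [flags[i:i + rows_per_column] for i in range(0, len(flags), rows_per_column)]

lines = [SEP.join("Flag".ljust(FLAG_WIDTH) + "Shorthand".ljust(ALIAS_WIDTH)
                  for _ in range(NUM_COLUMNS)),
         SEP.join("-" * (FLAG_WIDTH + ALIAS_WIDTH) for _ in range(NUM_COLUMNS))]

for i in range(rows_per_column):
    row = []
    for col in range(NUM_COLUMNS):
        if col < len(columns) and i < len(columns[col]):
            arg = columns[col][i]
            short = min(aliases.get(arg, [""]), key=len)
            alias_display = f"(-{short})" if short else ""
            row.append(f"--{arg}".ljust(FLAG_WIDTH) + alias_display.ljust(ALIAS_WIDTH))
        else:
            row.append(" " * (FLAG_WIDTH + ALIAS_WIDTH))
    lines.append(SEP.join(row))

print("\n".join(lines))
```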
@@ -227,9 +289,58 @@ def guac_handler(command, **kwargs):
     return {"output": 'Exiting Guac Mode', "messages": safe_get(kwargs, "messages", [])}


-@router.route("help", "Show help
-def help_handler(command, **kwargs):
-
+@router.route("help", "Show help for commands, NPCs, or Jinxs. Usage: /help [topic]")
+def help_handler(command: str, **kwargs):
+    messages = safe_get(kwargs, "messages", [])
+    parts = shlex.split(command)
+    if len(parts) < 2:
+        return {"output": get_help_text(), "messages": messages}
+    target = parts[1].lstrip('/') # User might type /help /sample, so we clean it.
+    output = ""
+
+
+
+    if target in router.get_commands():
+        help_text = router.get_help(target).get(target, "No description available.")
+        output = f"## Help for Command: `/{target}`\n\n- **Description**: {help_text}"
+        return {"output": output, "messages": messages}
+
+    team = safe_get(kwargs, 'team')
+    if team and target in team.npcs:
+        npc_obj = team.npcs[target]
+        output = f"## Help for NPC: `{target}`\n\n"
+        output += f"- **Primary Directive**: {npc_obj.primary_directive}\n"
+        output += f"- **Default Model**: `{npc_obj.model}`\n"
+        output += f"- **Default Provider**: `{npc_obj.provider}`\n"
+        if hasattr(npc_obj, 'jinxs_dict') and npc_obj.jinxs_dict:
+            jinx_names = ", ".join([f"`{j}`" for j in npc_obj.jinxs_dict.keys()])
+            output += f"- **Associated Jinxs**: {jinx_names}\n"
+        return {"output": output, "messages": messages}
+
+    # 3. Is it a Jinx?
+    npc = safe_get(kwargs, 'npc')
+    jinx_obj = None
+    source = ""
+    if npc and hasattr(npc, 'jinxs_dict') and target in npc.jinxs_dict:
+        jinx_obj = npc.jinxs_dict[target]
+        source = f" (from NPC: `{npc.name}`)"
+    elif team and hasattr(team, 'jinxs_dict') and target in team.jinxs_dict:
+        jinx_obj = team.jinxs_dict[target]
+        source = f" (from Team: `{team.name}`)"
+
+    if jinx_obj:
+        output = f"## Help for Jinx: `/{target}`{source}\n\n"
+        output += f"- **Description**: {jinx_obj.description}\n"
+        if hasattr(jinx_obj, 'inputs') and jinx_obj.inputs:
+            inputs_str = json.dumps(jinx_obj.inputs, indent=2)
+            output += f"- **Inputs**:\n```json\n{inputs_str}\n```\n"
+        return {"output": output, "messages": messages}
+
+
+    return {"output": f"Sorry, no help topic found for `{target}`.", "messages": messages}
+
+
+

 @router.route("init", "Initialize NPC project")
 def init_handler(command: str, **kwargs):
@@ -262,20 +373,23 @@ def init_handler(command: str, **kwargs):



-@router.route("ots", "Take screenshot and
+@router.route("ots", "Take screenshot and analyze with vision model")
 def ots_handler(command: str, **kwargs):
     command_parts = command.split()
     image_paths = []
     npc = safe_get(kwargs, 'npc')
-    vision_model = safe_get(kwargs,
-
-
-
-
-
-
-
-
+    vision_model = safe_get(kwargs,
+                            'vmodel',
+                            NPCSH_VISION_MODEL)
+    vision_provider = safe_get(kwargs,
+                               'vprovider',
+                               NPCSH_VISION_PROVIDER)
+    messages = safe_get(kwargs,
+                        'messages',
+                        [])
+    stream = safe_get(kwargs,
+                      'stream',
+                      NPCSH_STREAM_OUTPUT)

     try:
         if len(command_parts) > 1:
@@ -341,46 +455,48 @@ def plan_handler(command: str, **kwargs):
         return {"output": f"Error executing plan: {e}", "messages": messages}

 @router.route("pti", "Use pardon-the-interruption mode to interact with the LLM")
-def
+def pti_handler(command: str, **kwargs):
     return

-@router.route("plonk", "Use vision model to interact with GUI")
+@router.route("plonk", "Use vision model to interact with GUI. Usage: /plonk <task description>")
 def plonk_handler(command: str, **kwargs):
     messages = safe_get(kwargs, "messages", [])
-
+
+    # FIXED: Use the pre-parsed positional arguments for the request,
+    # leaving flags to be handled by kwargs.
+    positional_args = safe_get(kwargs, 'positional_args', [])
+    request_str = " ".join(positional_args)
+
     if not request_str:
-        return {"output": "Usage: /plonk <task_description>", "messages": messages}
-
-    action_space = {
-        "click": {"x": "int (0-100)", "y": "int (0-100)"},
-        "type": {"text": "string"},
-        "scroll": {"direction": "up/down/left/right", "amount": "int"},
-        "bash": {"command": "string"},
-        "wait": {"duration": "int (seconds)"}
-    }
+        return {"output": "Usage: /plonk <task_description> [--vmodel model_name] [--vprovider provider_name]", "messages": messages}
+
     try:
-
+        plonk_context = safe_get(kwargs, 'plonk_context')
+
+        # This part now works automatically with CLI flags because they are in kwargs
+        summary_data = execute_plonk_command(
             request=request_str,
-
-
-
-
-        )
-
-
-
+            model=safe_get(kwargs, 'vmodel', NPCSH_VISION_MODEL),
+            provider=safe_get(kwargs, 'vprovider', NPCSH_VISION_PROVIDER),
+            npc=safe_get(kwargs, 'npc'),
+            plonk_context=plonk_context,
+            debug=True # Or could be controlled by a flag: safe_get(kwargs, 'debug', False)
+        )
+
+        if summary_data and isinstance(summary_data, list):
+            output_report = format_plonk_summary(summary_data)
+            return {"output": output_report, "messages": messages}
         else:
-            return {"output":
-
-            return {"output": "Plonk function (execute_plonk_command) not available.", "messages": messages}
+            return {"output": "Plonk command did not complete within the maximum number of iterations.", "messages": messages}
+
     except Exception as e:
         traceback.print_exc()
         return {"output": f"Error executing plonk command: {e}", "messages": messages}
+
+
 @router.route("brainblast", "Execute an advanced chunked search on command history")
 def brainblast_handler(command: str, **kwargs):
-    messages = safe_get(kwargs, "messages", [])
-
-    # Parse command to get the search query
+    messages = safe_get(kwargs, "messages", [])
     parts = shlex.split(command)
     search_query = " ".join(parts[1:]) if len(parts) > 1 else ""

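The reworked `/plonk` handler no longer parses its own command string: it expects the shell to hand it `positional_args` for the task text and normalized flags (`vmodel`, `vprovider`, ...) in `kwargs`, falling back to module defaults via `safe_get`. A minimal sketch of that calling convention, with stand-in names and assuming the upstream parser has already split the line:

```python
# Sketch of the handler argument pattern, assuming the shell has already
# split a line like "/plonk open the settings panel --vmodel some-vision-model"
# into positional_args plus flag kwargs. Names below are stand-ins.
DEFAULT_VISION_MODEL = "default-vision-model"  # stand-in for NPCSH_VISION_MODEL

def safe_get(kwargs, key, default=None):
    return kwargs.get(key, default)

def plonk_like_handler(command: str, **kwargs):
    positional_args = safe_get(kwargs, 'positional_args', [])
    request_str = " ".join(positional_args)                    # the task description
    model = safe_get(kwargs, 'vmodel', DEFAULT_VISION_MODEL)   # flag value or default
    if not request_str:
        return {"output": "Usage: /plonk <task_description> [--vmodel model_name]"}
    return {"output": f"would run {request_str!r} with vision model {model!r}"}

print(plonk_like_handler(
    "/plonk open the settings panel --vmodel some-vision-model",
    positional_args=["open", "the", "settings", "panel"],
    vmodel="some-vision-model",
))
```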
@@ -420,12 +536,10 @@ def brainblast_handler(command: str, **kwargs):
 def rag_handler(command: str, **kwargs):
     messages = safe_get(kwargs, "messages", [])

-    # Parse command with shlex to properly handle quoted strings
     parts = shlex.split(command)
     user_command = []
     file_paths = []

-    # Process arguments
     i = 1 # Skip the first element which is "rag"
     while i < len(parts):
         if parts[i] == "-f" or parts[i] == "--file":
@@ -443,8 +557,8 @@ def rag_handler(command: str, **kwargs):
     user_command = " ".join(user_command)

     vector_db_path = safe_get(kwargs, "vector_db_path", os.path.expanduser('~/npcsh_chroma.db'))
-    embedding_model = safe_get(kwargs, "
-    embedding_provider = safe_get(kwargs, "
+    embedding_model = safe_get(kwargs, "emodel", NPCSH_EMBEDDING_MODEL)
+    embedding_provider = safe_get(kwargs, "eprovider", NPCSH_EMBEDDING_PROVIDER)

     if not user_command and not file_paths:
         return {"output": "Usage: /rag [-f file_path] <query>", "messages": messages}
@@ -486,8 +600,8 @@ def roll_handler(command: str, **kwargs):
     try:
         result = gen_video(
             prompt=prompt,
-            model=safe_get(kwargs, '
-            provider=safe_get(kwargs, '
+            model=safe_get(kwargs, 'vgmodel', NPCSH_VIDEO_GEN_MODEL),
+            provider=safe_get(kwargs, 'vgprovider', NPCSH_VIDEO_GEN_PROVIDER),
             npc=safe_get(kwargs, 'npc'),
             num_frames = num_frames,
             width = width,
@@ -505,42 +619,59 @@ def roll_handler(command: str, **kwargs):
 @router.route("sample", "Send a prompt directly to the LLM")
 def sample_handler(command: str, **kwargs):
     messages = safe_get(kwargs, "messages", [])
-
+
+
+    positional_args = safe_get(kwargs, 'positional_args', [])
+    prompt = " ".join(positional_args)
+
     if not prompt:
-        return {"output": "Usage: /sample <your prompt>",
+        return {"output": "Usage: /sample <your prompt> [-m --model] model [-p --provider] provider",
+                "messages": messages}

     try:
         result = get_llm_response(
             prompt=prompt,
-
-            model=safe_get(kwargs, 'model'),
-            images=safe_get(kwargs, 'attachments'),
-            npc=safe_get(kwargs, 'npc'),
-            team=safe_get(kwargs, 'team'),
-            messages=messages,
-            api_url=safe_get(kwargs, 'api_url'),
-            api_key=safe_get(kwargs, 'api_key'),
-            context=safe_get(kwargs, 'context'),
-            stream=safe_get(kwargs, 'stream')
+            **kwargs
         )
-
+        if result and isinstance(result, dict):
+            return {
+                "output": result.get('response'),
+                "messages": result.get('messages', messages),
+                "model": kwargs.get('model'),
+                "provider":kwargs.get('provider'),
+                "npc":kwargs.get("npc"),
+            }
+        else:
+            # Handle cases where get_llm_response might fail and return something unexpected
+            return {"output": str(result), "messages": messages}
+
     except Exception as e:
         traceback.print_exc()
         return {"output": f"Error sampling LLM: {e}", "messages": messages}
-
 @router.route("search", "Execute a web search command")
 def search_handler(command: str, **kwargs):
     """
     Executes a search command.
     # search commands will bel ike :
-    # '/search
-    # '/search -
-    # '/search -
+    # '/search "search term" '
+    # '/search -sp perplexity ..
+    # '/search -sp google ..
     # extract provider if its there
     # check for either -p or --p
     """
     messages = safe_get(kwargs, "messages", [])
-
+
+    # The query is now in 'positional_args'
+    positional_args = safe_get(kwargs, 'positional_args', [])
+    query = " ".join(positional_args)
+
+    if not query:
+        return {"output": "Usage: /search [-sp name --sprovider name] query",
+                "messages": messages}
+    search_provider = safe_get(kwargs, 'sprovider', NPCSH_SEARCH_PROVIDER)
+    render_markdown(f'- Searching {search_provider} for "{query}"' )
+
+

     if not query:
         return {"output": "Usage: /search <query>", "messages": messages}
@@ -555,7 +686,7 @@ def search_handler(command: str, **kwargs):



-@router.route("serve", "
+@router.route("serve", "Serve an NPC Team")
 def serve_handler(command: str, **kwargs):
     #print('calling serve handler')
     #print(kwargs)
@@ -599,39 +730,134 @@ def set_handler(command: str, **kwargs):
         output = f"Error setting configuration '{key}': {e}"
     return {"output": output, "messages": messages}

-@router.route("sleep", "
+@router.route("sleep", "Evolve knowledge graph. Use --dream to also run creative synthesis.")
 def sleep_handler(command: str, **kwargs):
     messages = safe_get(kwargs, "messages", [])
-
+    npc = safe_get(kwargs, 'npc')
+    team = safe_get(kwargs, 'team')
+    model = safe_get(kwargs, 'model')
+    provider = safe_get(kwargs, 'provider')
+
+    is_dreaming = safe_get(kwargs, 'dream', False)
+    operations_str = safe_get(kwargs, 'ops')
+
+    operations_config = None
+    if operations_str and isinstance(operations_str, str):
+        operations_config = [op.strip() for op in operations_str.split(',')]
+
+    # Define the scope variables clearly at the start
+    team_name = team.name if team else "__none__"
+    npc_name = npc.name if isinstance(npc, NPC) else "__none__"
+    current_path = os.getcwd()
+    scope_str = f"Team: '{team_name}', NPC: '{npc_name}', Path: '{current_path}'"
+
+    # ADDED: Log the scope being checked for clarity
+    render_markdown(f"- Checking knowledge graph for scope: {scope_str}")
+
     try:
-
-
-
-        output = f"Slept for {seconds} seconds."
-    except (ValueError, IndexError):
-        output = "Usage: /sleep <seconds>"
+        db_path = os.getenv("NPCSH_DB_PATH", os.path.expanduser("~/npcsh_history.db"))
+        command_history = CommandHistory(db_path)
+        conn = command_history.conn
     except Exception as e:
+        return {"output": f"Error connecting to history database for KG access: {e}", "messages": messages}
+
+    try:
+        current_kg = load_kg_from_db(conn, team_name, npc_name, current_path)
+
+        # FIXED: Provide a detailed and helpful message when the KG is empty
+        if not current_kg or not current_kg.get('facts'):
+            output_msg = f"Knowledge graph for the current scope is empty. Nothing to process.\n"
+            output_msg += f" - Scope Checked: {scope_str}\n\n"
+            output_msg += "**Hint:** Have a conversation or run some commands first to build up knowledge in this specific context. The KG is unique to each combination of Team, NPC, and directory."
+            return {"output": output_msg, "messages": messages}
+
+        # Store initial stats for the final report
+        original_facts = len(current_kg.get('facts', []))
+        original_concepts = len(current_kg.get('concepts', []))
+
+        # --- SEQUENTIAL EXECUTION ---
+
+        # 1. Always run the sleep process for maintenance first.
+        process_type = "Sleep"
+        ops_display = f"with operations: {operations_config}" if operations_config else "with random operations"
+        render_markdown(f"- Initiating sleep process {ops_display}")
+
+        evolved_kg, _ = kg_sleep_process(
+            existing_kg=current_kg,
+            model=model,
+            provider=provider,
+            npc=npc,
+            operations_config=operations_config
+        )
+
+        # 2. If --dream is specified, run the dream process on the *result* of the sleep process.
+        if is_dreaming:
+            process_type += " & Dream"
+            render_markdown(f"- Initiating dream process on the evolved KG...")
+            evolved_kg, _ = kg_dream_process(
+                existing_kg=evolved_kg,
+                model=model,
+                provider=provider,
+                npc=npc
+            )
+
+        # 3. Save the final state of the KG back to the database
+        save_kg_to_db(conn, evolved_kg, team_name, npc_name, current_path)
+
+        # 4. Report the final, cumulative changes back to the user
+        new_facts = len(evolved_kg.get('facts', []))
+        new_concepts = len(evolved_kg.get('concepts', []))
+
+        output = f"{process_type} process complete.\n"
+        output += f"- Facts: {original_facts} -> {new_facts} ({new_facts - original_facts:+})\n"
+        output += f"- Concepts: {original_concepts} -> {new_concepts} ({new_concepts - original_concepts:+})"
+
+        print(evolved_kg.get('facts'))
+        print(evolved_kg.get('concepts'))
+
+        return {"output": output, "messages": messages}
+
+    except Exception as e:
+        import traceback
         traceback.print_exc()
-        output
-
+        return {"output": f"Error during KG evolution process: {e}", "messages": messages}
+    finally:
+        if 'command_history' in locals() and command_history:
+            command_history.close()
+
+
+

 @router.route("spool", "Enter interactive chat (spool) mode")
 def spool_handler(command: str, **kwargs):
     try:
+        # Handle NPC loading if npc is passed as a string (name)
+        npc = safe_get(kwargs, 'npc')
+        team = safe_get(kwargs, 'team')
+
+        # If npc is a string, try to load it from the team
+        if isinstance(npc, str) and team:
+            npc_name = npc
+            if npc_name in team.npcs:
+                npc = team.npcs[npc_name]
+            else:
+                return {"output": f"Error: NPC '{npc_name}' not found in team. Available NPCs: {', '.join(team.npcs.keys())}", "messages": safe_get(kwargs, "messages", [])}
+
         return enter_spool_mode(
             model=safe_get(kwargs, 'model', NPCSH_CHAT_MODEL),
             provider=safe_get(kwargs, 'provider', NPCSH_CHAT_PROVIDER),
-            npc=
+            npc=npc,
+            team=team,
             messages=safe_get(kwargs, 'messages'),
             conversation_id=safe_get(kwargs, 'conversation_id'),
             stream=safe_get(kwargs, 'stream', NPCSH_STREAM_OUTPUT),
-
+            attachments=safe_get(kwargs, 'attachments'),
+            rag_similarity_threshold = safe_get(kwargs, 'rag_similarity_threshold', 0.3),
         )
     except Exception as e:
         traceback.print_exc()
         return {"output": f"Error entering spool mode: {e}", "messages": safe_get(kwargs, "messages", [])}
-
-
+
 @router.route("jinxs", "Show available jinxs for the current NPC/Team")
 def jinxs_handler(command: str, **kwargs):
     npc = safe_get(kwargs, 'npc')
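The rewritten `/sleep` handler always runs the maintenance pass first and only chains the dream pass on top of its result when `--dream` is given, then reports the fact/concept deltas. A compressed sketch of that ordering is below, with the npcpy knowledge-graph functions replaced by stubs; the real `kg_sleep_process` and `kg_dream_process` live in `npcpy.memory.knowledge_graph`, and this only mirrors how the handler calls them.

```python
# Sketch of the /sleep control flow with kg_sleep_process / kg_dream_process
# replaced by stubs that mimic the (kg, extra) return shape used in the handler.
def stub_sleep(existing_kg, **_):
    kg = {k: list(v) for k, v in existing_kg.items()}
    kg["facts"].append("consolidated fact")           # maintenance pass
    return kg, None

def stub_dream(existing_kg, **_):
    kg = {k: list(v) for k, v in existing_kg.items()}
    kg["concepts"].append("speculative concept")      # creative synthesis pass
    return kg, None

current_kg = {"facts": ["fact one", "fact two"], "concepts": ["concept one"]}
original_facts, original_concepts = len(current_kg["facts"]), len(current_kg["concepts"])

is_dreaming = True                                    # i.e. /sleep --dream
evolved_kg, _ = stub_sleep(existing_kg=current_kg)    # 1. always sleep first
if is_dreaming:
    evolved_kg, _ = stub_dream(existing_kg=evolved_kg)  # 2. dream on the slept KG

print(f"Facts: {original_facts} -> {len(evolved_kg['facts'])}")
print(f"Concepts: {original_concepts} -> {len(evolved_kg['concepts'])}")
```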
@@ -681,46 +907,18 @@ def trigger_handler(command: str, **kwargs):
 @router.route("vixynt", "Generate images from text descriptions")
 def vixynt_handler(command: str, **kwargs):
     npc = safe_get(kwargs, 'npc')
-    model = safe_get(kwargs, '
-    provider = safe_get(kwargs, '
+    model = safe_get(kwargs, 'igmodel', NPCSH_IMAGE_GEN_MODEL)
+    provider = safe_get(kwargs, 'igprovider', NPCSH_IMAGE_GEN_PROVIDER)
     height = safe_get(kwargs, 'height', 1024)
     width = safe_get(kwargs, 'width', 1024)
-
-    attachments =
-    if model == NPCSH_CHAT_MODEL: model = NPCSH_IMAGE_GEN_MODEL
-    if provider == NPCSH_CHAT_PROVIDER: provider = NPCSH_IMAGE_GEN_PROVIDER
-
+    output_file = safe_get(kwargs, 'output_file')
+    attachments = safe_get(kwargs, 'attachments')
     messages = safe_get(kwargs, 'messages', [])

-
+    user_prompt = " ".join(safe_get(kwargs, 'positional_args', []))

-    prompt_parts = []
-    try:
-        parts = shlex.split(command)
-        for part in parts[1:]:
-            if part.startswith("filename="):
-                filename = part.split("=", 1)[1]
-            elif part.startswith("height="):
-                try:
-                    height = int(part.split("=", 1)[1])
-                except ValueError:
-                    pass
-            elif part.startswith("width="):
-                try:
-                    width = int(part.split("=", 1)[1])
-                except ValueError:
-                    pass
-            elif part.startswith("attachments="): # New parameter for image editing
-                # split at comma
-                attachments = part.split("=", 1)[1].split(",")
-
-            else:
-                prompt_parts.append(part)
-    except Exception as parse_err:
-        return {"output": f"Error parsing arguments: {parse_err}. Usage: /vixynt <prompt> [filename=...] [height=...] [width=...] [input=...for editing]", "messages": messages}
-    user_prompt = " ".join(prompt_parts)
     if not user_prompt:
-        return {"output": "Usage: /vixynt <prompt> [
+        return {"output": "Usage: /vixynt <prompt> [--output_file path] [--attachments path]", "messages": messages}

     try:
         image = gen_image(
@@ -730,28 +928,35 @@ def vixynt_handler(command: str, **kwargs):
             npc=npc,
             height=height,
             width=width,
-            input_images=attachments
+            input_images=attachments
         )
-
-
+
+        if output_file is None:
             os.makedirs(os.path.expanduser("~/.npcsh/images/"), exist_ok=True)
-
+            output_file = (
                 os.path.expanduser("~/.npcsh/images/")
                 + f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
-            )
-
+            )
+        else:
+            output_file = os.path.expanduser(output_file)
+
+        image.save(output_file)
         image.show()

         if attachments:
-            output = f"Image edited and saved to: {
+            output = f"Image edited and saved to: {output_file}"
         else:
-            output = f"Image generated and saved to: {
+            output = f"Image generated and saved to: {output_file}"
     except Exception as e:
         traceback.print_exc()
         output = f"Error {'editing' if attachments else 'generating'} image: {e}"

-    return {
-
+    return {
+        "output": output,
+        "messages": messages,
+        "model": model,
+        "provider": provider
+    }
 @router.route("wander", "Enter wander mode (experimental)")
 def wander_handler(command: str, **kwargs):
     messages = safe_get(kwargs, "messages", [])