npcsh 1.0.9__py3-none-any.whl → 1.0.11__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- npcsh/alicanto.py +3 -3
- npcsh/npcsh.py +38 -32
- npcsh/routes.py +9 -26
- {npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/METADATA +1 -1
- {npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/RECORD +9 -9
- {npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/WHEEL +0 -0
- {npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/entry_points.txt +0 -0
- {npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/top_level.txt +0 -0

npcsh/alicanto.py
CHANGED

@@ -14,11 +14,11 @@ import subprocess
 import networkx as nx
 
 from npcpy.npc_compiler import NPC
-from npcpy.llm_funcs import get_llm_response
+from npcpy.llm_funcs import get_llm_response, extract_facts, identify_groups, assign_groups_to_fact
 from npcsh._state import NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER
 from npcpy.npc_sysenv import print_and_process_stream_with_markdown
-
-
+
+
 
 def generate_random_npcs(num_npcs: int, model: str, provider: str, request: str) -> List[NPC]:
     """

npcsh/npcsh.py
CHANGED

@@ -54,8 +54,6 @@ from npcpy.memory.command_history import (
     CommandHistory,
     save_conversation_message,
 )
-from npcpy.memory.knowledge_graph import breathe
-from npcpy.memory.sleep import sleep, forget
 from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory
 from npcpy.llm_funcs import check_llm_command, get_llm_response, execute_llm_command
 from npcpy.gen.embeddings import get_embeddings
@@ -563,6 +561,15 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
     """Executes slash commands using the router or checking NPC/Team jinxs."""
     command_parts = command.split()
     command_name = command_parts[0].lstrip('/')
+
+    if command_name in ['n', 'npc']:
+        npc_to_switch_to = command_parts[1] if len(command_parts) > 1 else None
+        if npc_to_switch_to and state.team and npc_to_switch_to in state.team.npcs:
+            state.npc = state.team.npcs[npc_to_switch_to]
+            return state, f"Switched to NPC: {npc_to_switch_to}"
+        else:
+            available_npcs = list(state.team.npcs.keys()) if state.team else []
+            return state, colored(f"NPC '{npc_to_switch_to}' not found. Available NPCs: {', '.join(available_npcs)}", "red")
     handler = router.get_route(command_name)
     #print(handler)
     if handler:
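
The `/n` and `/npc` commands are now handled inline in `execute_slash_command`, before the router lookup, so the switch can mutate the shell state directly rather than returning a `SWITCH_NPC:` signal from a routes.py handler (that handler is removed later in this diff). Below is a minimal sketch of the behavior; `Team`, `ShellState`, and the NPC names are simplified stand-ins for illustration, not npcsh's real types.

```python
# Simplified stand-ins for the real ShellState/Team types (illustrative only).
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple


@dataclass
class Team:
    npcs: Dict[str, Any] = field(default_factory=dict)


@dataclass
class ShellState:
    team: Optional[Team] = None
    npc: Any = None


def switch_npc(state: ShellState, command: str) -> Tuple[ShellState, str]:
    """Mirrors the /n and /npc branch added to execute_slash_command."""
    parts = command.split()
    target = parts[1] if len(parts) > 1 else None
    if target and state.team and target in state.team.npcs:
        state.npc = state.team.npcs[target]
        return state, f"Switched to NPC: {target}"
    available = list(state.team.npcs.keys()) if state.team else []
    return state, f"NPC '{target}' not found. Available NPCs: {', '.join(available)}"


state = ShellState(team=Team(npcs={"sibiji": object(), "analyst": object()}))
print(switch_npc(state, "/npc analyst")[1])  # Switched to NPC: analyst
print(switch_npc(state, "/n missing")[1])    # NPC 'missing' not found. ...
```
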
@@ -585,10 +592,8 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
         result_dict = handler(command, **handler_kwargs)
 
         if isinstance(result_dict, dict):
-            #some respond with output, some with response, needs to be fixed upstream
-            output = result_dict.get("output") or result_dict.get("response")
             state.messages = result_dict.get("messages", state.messages)
-            return state,
+            return state, result_dict
         else:
             return state, result_dict
 
@@ -702,7 +707,7 @@ def process_pipeline_command(
         images=state.attachments,
         stream=stream_final,
         context=info,
-
+
     )
     if isinstance(llm_result, dict):
         state.messages = llm_result.get("messages", state.messages)
@@ -859,8 +864,6 @@ def execute_command(
         try:
             bash_state, bash_output = handle_bash_command(cmd_parts, command, None, state)
             return bash_state, bash_output
-        except CommandNotFoundError:
-            return state, colored(f"Command not found: {command_name}", "red")
         except Exception as bash_err:
             return state, colored(f"Bash execution failed: {bash_err}", "red")
     except Exception:
@@ -1137,12 +1140,13 @@ def execute_todo_item(todo: Dict[str, Any], ride_state: RideState, shell_state:
         team=shell_state.team,
         messages=[],
         stream=shell_state.stream_output,
-
+
     )
 
     output_payload = result.get("output", "")
     output_str = ""
 
+
     if isgenerator(output_payload):
         output_str = print_and_process_stream_with_markdown(output_payload, shell_state.chat_model, shell_state.chat_provider)
     elif isinstance(output_payload, dict):
@@ -1252,7 +1256,7 @@ def print_welcome_message():
     print(
         """
 Welcome to \033[1;94mnpc\033[0m\033[1;38;5;202msh\033[0m!
-\033[1;94m \033[0m\033[1;38;5;202m
+\033[1;94m \033[0m\033[1;38;5;202m _ \\\\
 \033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
 \033[1;94m| '_ \\ | ' \\ / __|\033[0m\033[1;38;5;202m / __/ | |_ _| \\\\
 \033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m \\_ \\ | | | | //
@@ -1397,35 +1401,32 @@ def process_result(
     final_output_str = None
     if user_input =='/help':
         render_markdown(output)
+
     elif result_state.stream_output:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    elif isinstance(output, dict):
-        if 'output' in output:
-            final_output_str = output['output']
-            render_markdown(final_output_str)
+
+        if isinstance(output, dict):
+            output_gen = output.get('output')
+            model = output.get('model', result_state.chat_model)
+            provider = output.get('provider', result_state.chat_provider)
+        else:
+            output_gen = output
+            model = result_state.chat_model
+            provider = result_state.chat_provider
+        print('processing stream output with markdown...')
+
+        final_output_str = print_and_process_stream_with_markdown(output_gen,
+                                                                  model,
+                                                                  provider)
 
     elif output is not None:
         final_output_str = str(output)
-        render_markdown(final_output_str)
+        render_markdown( final_output_str)
     if final_output_str and result_state.messages and result_state.messages[-1].get("role") != "assistant":
         result_state.messages.append({"role": "assistant", "content": final_output_str})
 
     #print(result_state.messages)
 
-
+
 
     if final_output_str:
         save_conversation_message(
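
The rewritten stream branch above accepts either a raw generator or a dict that carries `output`, `model`, and `provider`, falling back to the shell's chat settings when the handler did not specify them. A rough sketch of that normalization follows; the default model and provider values here are placeholders, not npcsh's actual configuration.

```python
# Sketch of the dict-or-generator normalization done in process_result's
# stream branch; defaults stand in for the shell state's chat settings.
from typing import Any, Iterator, Tuple, Union


def normalize_stream_result(
    output: Union[dict, Iterator[str], Any],
    default_model: str,
    default_provider: str,
) -> Tuple[Any, str, str]:
    if isinstance(output, dict):
        # Handlers such as the updated ots_handler now attach model/provider.
        return (
            output.get("output"),
            output.get("model", default_model),
            output.get("provider", default_provider),
        )
    # Plain generators keep using the shell's chat model/provider.
    return output, default_model, default_provider


gen, model, provider = normalize_stream_result(
    {"output": iter(["hello ", "world"]), "model": "llava", "provider": "ollama"},
    default_model="llama3.2",
    default_provider="ollama",
)
print(model, provider)  # llava ollama
print("".join(gen))     # hello world
```
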
@@ -1443,8 +1444,13 @@ def process_result(
 def run_repl(command_history: CommandHistory, initial_state: ShellState):
     state = initial_state
     print_welcome_message()
-
-
+
+
+    render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd, /chat, or /ride to switch to other modes')
+    render_markdown(f'- To switch to a different NPC, type /npc <npc_name> or /n <npc_name> to switch to that NPC.')
+    render_markdown('\n- Here are the current NPCs available in your team: ' + ', '.join([npc_name for npc_name in state.team.npcs.keys()]))
+
+
     is_windows = platform.system().lower().startswith("win")
     try:
         completer = make_completer(state)

npcsh/routes.py
CHANGED

@@ -24,6 +24,7 @@ from npcpy.llm_funcs import (
     get_llm_response,
     gen_image,
     gen_video,
+    breathe,
 )
 from npcpy.npc_compiler import NPC, Team, Jinx
 from npcpy.npc_compiler import initialize_npc_project
@@ -38,8 +39,8 @@ from npcpy.memory.search import execute_rag_command, execute_search_command, exe
 from npcpy.memory.command_history import CommandHistory
 
 
-
-
+
+
 from npcpy.serve import start_flask_server
 
 
@@ -257,27 +258,7 @@ def init_handler(command: str, **kwargs):
         traceback.print_exc()
         output = f"Error initializing project: {e}"
     return {"output": output, "messages": messages}
-
-@router.route("n")
-@router.route("npc")
-def switch_npc_handler(command: str, **kwargs) -> dict:
-    """Switch to a different NPC"""
-    team = kwargs.get('team')
-    parts = command.split()
-
-    if len(parts) < 2:
-        if team:
-            available_npcs = list(team.npcs.keys())
-            return {"output": f"Available NPCs: {', '.join(available_npcs)}"}
-        return {"output": "No team loaded or no NPC specified"}
-
-    npc_name = parts[1]
-    if team and npc_name in team.npcs:
-        # We can't directly modify the state here, so return a special signal
-        return {"output": f"SWITCH_NPC:{npc_name}"}
-    else:
-        available_npcs = list(team.npcs.keys()) if team else []
-        return {"output": f"NPC '{npc_name}' not found. Available: {', '.join(available_npcs)}"}
+
 
 
 
@@ -288,8 +269,10 @@ def ots_handler(command: str, **kwargs):
     npc = safe_get(kwargs, 'npc')
     vision_model = safe_get(kwargs, 'model', NPCSH_VISION_MODEL)
     vision_provider = safe_get(kwargs, 'provider', NPCSH_VISION_PROVIDER)
-    if vision_model == NPCSH_CHAT_MODEL:
-
+    if vision_model == NPCSH_CHAT_MODEL:
+        vision_model = NPCSH_VISION_MODEL
+    if vision_provider == NPCSH_CHAT_PROVIDER:
+        vision_provider = NPCSH_VISION_PROVIDER
 
     messages = safe_get(kwargs, 'messages', [])
     stream = safe_get(kwargs, 'stream', NPCSH_STREAM_OUTPUT)
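
The added lines only swap in the configured vision settings when the caller passed the chat defaults, so an explicit `model=` or `provider=` argument still wins. A small sketch of that fallback, with illustrative constant values that are not npcsh's real configuration:

```python
# Illustrative defaults, not npcsh's actual settings.
NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER = "llama3.2", "ollama"
NPCSH_VISION_MODEL, NPCSH_VISION_PROVIDER = "llava", "ollama"


def resolve_vision_settings(model: str, provider: str) -> tuple:
    # Only replace values that collide with the chat defaults.
    if model == NPCSH_CHAT_MODEL:
        model = NPCSH_VISION_MODEL
    if provider == NPCSH_CHAT_PROVIDER:
        provider = NPCSH_VISION_PROVIDER
    return model, provider


print(resolve_vision_settings("llama3.2", "ollama"))     # ('llava', 'ollama')
print(resolve_vision_settings("gpt-4o-mini", "openai"))  # explicit choice kept
```
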
@@ -336,7 +319,7 @@ def ots_handler(command: str, **kwargs):
             api_url=safe_get(kwargs, 'api_url'),
             api_key=safe_get(kwargs, 'api_key')
         )
-        return {"output": response_data.get('response'), "messages": response_data.get('messages')}
+        return {"output": response_data.get('response'), "messages": response_data.get('messages'), "model": vision_model, "provider": vision_provider}
 
     except Exception as e:
         traceback.print_exc()

{npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/RECORD
CHANGED

@@ -1,21 +1,21 @@
 npcsh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcsh/_state.py,sha256=GCMUIwgIBlS7LEBLYlfBiPNKVaK19ZyxT833NFU-djU,31109
-npcsh/alicanto.py,sha256=
+npcsh/alicanto.py,sha256=F-zZGjBTo3a_PQHvPC8-DNF6t4mJELo_zx7gBGvDehg,44611
 npcsh/guac.py,sha256=Ocmk_c4NUtGsC3JOtmkbgLvD6u-XtBPRFRYcckpgUJU,33099
 npcsh/mcp_helpers.py,sha256=Ktd2yXuBnLL2P7OMalgGLj84PXJSzaucjqmJVvWx6HA,12723
 npcsh/mcp_npcsh.py,sha256=SfmplH62GS9iI6q4vuQLVUS6tkrok6L7JxODx_iH7ps,36158
 npcsh/mcp_server.py,sha256=l2Ra0lpFrUu334pvp0Q9ajF2n73KvZswFi0FgbDhh9k,5884
 npcsh/npc.py,sha256=7ujKrMQFgkeGJ4sX5Kn_dB5tjrPN58xeC91PNt453aM,7827
-npcsh/npcsh.py,sha256=
+npcsh/npcsh.py,sha256=_vpphATMKlM2FtUZR3RrwKoRh-eg9QiAp7axIIDAITg,59986
 npcsh/plonk.py,sha256=U2e9yUJZN95Girzzvgrh-40zOdl5zO3AHPsIjoyLv2M,15261
 npcsh/pti.py,sha256=jGHGE5SeIcDkV8WlOEHCKQCnYAL4IPS-kUBHrUz0oDA,10019
-npcsh/routes.py,sha256=
+npcsh/routes.py,sha256=5u23bFTbdXXJ3V7I8BJMq42wWUZFeMbzItwBf8WHlpY,36917
 npcsh/spool.py,sha256=GhnSFX9uAtrB4m_ijuyA5tufH12DrWdABw0z8FmiCHc,11497
 npcsh/wander.py,sha256=BiN6eYyFnEsFzo8MFLRkdZ8xS9sTKkQpjiCcy9chMcc,23225
 npcsh/yap.py,sha256=h5KNt9sNOrDPhGe_zfn_yFIeQhizX09zocjcPWH7m3k,20905
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
+npcsh-1.0.11.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
+npcsh-1.0.11.dist-info/METADATA,sha256=kv7abgprmCXu31NjmmaYZhfLz_x1zIVlzUZAIb1RliI,22748
+npcsh-1.0.11.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+npcsh-1.0.11.dist-info/entry_points.txt,sha256=qxOYTm3ym3JWyWf2nv2Mk71uMcJIdUoNEJ8VYMkyHiY,214
+npcsh-1.0.11.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
+npcsh-1.0.11.dist-info/RECORD,,
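
If you want to check the updated RECORD entries against a downloaded wheel, the hashes follow the standard wheel convention: urlsafe base64 of the sha256 digest with trailing `=` stripped. The sketch below assumes a locally downloaded wheel file; the path is hypothetical and not part of this diff.

```python
# Spot-check a wheel's RECORD entries against the archive contents.
import base64
import csv
import hashlib
import zipfile

wheel_path = "npcsh-1.0.11-py3-none-any.whl"  # hypothetical local path

with zipfile.ZipFile(wheel_path) as wheel:
    record_name = "npcsh-1.0.11.dist-info/RECORD"
    rows = csv.reader(wheel.read(record_name).decode().splitlines())
    for path, hash_spec, size in rows:
        if not hash_spec:  # the RECORD file itself carries no hash
            continue
        algo, _, expected = hash_spec.partition("=")
        digest = hashlib.new(algo, wheel.read(path)).digest()
        actual = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
        status = "ok" if actual == expected else "MISMATCH"
        print(f"{status:8} {path}")
```
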

{npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/WHEEL
File without changes

{npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/entry_points.txt
File without changes

{npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/licenses/LICENSE
File without changes

{npcsh-1.0.9.dist-info → npcsh-1.0.11.dist-info}/top_level.txt
File without changes