npcsh 1.0.11__py3-none-any.whl → 1.0.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +89 -1
- npcsh/alicanto.py +22 -7
- npcsh/npcsh.py +434 -492
- npcsh/plonk.py +300 -367
- npcsh/routes.py +367 -162
- npcsh/spool.py +162 -221
- npcsh-1.0.13.dist-info/METADATA +775 -0
- npcsh-1.0.13.dist-info/RECORD +21 -0
- npcsh-1.0.11.dist-info/METADATA +0 -596
- npcsh-1.0.11.dist-info/RECORD +0 -21
- {npcsh-1.0.11.dist-info → npcsh-1.0.13.dist-info}/WHEEL +0 -0
- {npcsh-1.0.11.dist-info → npcsh-1.0.13.dist-info}/entry_points.txt +0 -0
- {npcsh-1.0.11.dist-info → npcsh-1.0.13.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.11.dist-info → npcsh-1.0.13.dist-info}/top_level.txt +0 -0
npcsh/npcsh.py
CHANGED
@@ -10,10 +10,6 @@ import importlib.metadata
 import textwrap
 from typing import Optional, List, Dict, Any, Tuple, Union
 from dataclasses import dataclass, field
-try:
-    from inspect import isgenerator
-except:
-    pass
 import platform
 try:
     from termcolor import colored
@@ -25,10 +21,11 @@ try:
 except ImportError:
     chromadb = None
 import shutil
-
+import json
+import sqlite3
+import copy
 import yaml
 
-# Local Application Imports
 from npcsh._state import (
     setup_npcsh_config,
     initial_state,
@@ -39,7 +36,9 @@ from npcsh._state import (
     interactive_commands,
     BASH_COMMANDS,
     start_interactive_session,
-    validate_bash_command
+    validate_bash_command,
+    normalize_and_expand_flags,
+
 )
 
 from npcpy.npc_sysenv import (
@@ -47,16 +46,29 @@ from npcpy.npc_sysenv import (
     render_markdown,
     get_locally_available_models,
     get_model_and_provider,
+    lookup_provider
 )
 from npcsh.routes import router
 from npcpy.data.image import capture_screenshot
 from npcpy.memory.command_history import (
     CommandHistory,
     save_conversation_message,
+    load_kg_from_db,
+    save_kg_to_db,
 )
 from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory
-from npcpy.llm_funcs import
+from npcpy.llm_funcs import (
+    check_llm_command,
+    get_llm_response,
+    execute_llm_command,
+    breathe
+)
+from npcpy.memory.knowledge_graph import (
+    kg_initial,
+    kg_evolve_incremental
+)
 from npcpy.gen.embeddings import get_embeddings
+
 try:
     import readline
 except:
@@ -201,7 +213,7 @@ def get_slash_commands(state: ShellState) -> List[str]:
     completion_logger.debug(f"NPC commands: {npc_cmds}")
 
     # Mode switching commands
-    mode_cmds = ['/cmd', '/agent', '/chat'
+    mode_cmds = ['/cmd', '/agent', '/chat']
     commands.extend(mode_cmds)
     completion_logger.debug(f"Mode commands: {mode_cmds}")
 
@@ -557,13 +569,107 @@ def handle_bash_command(
     except PermissionError:
         return False, f"Permission denied: {cmd_str}"
 
+def _try_convert_type(value: str) -> Union[str, int, float, bool]:
+    """Helper to convert string values to appropriate types."""
+    if value.lower() in ['true', 'yes']:
+        return True
+    if value.lower() in ['false', 'no']:
+        return False
+    try:
+        return int(value)
+    except (ValueError, TypeError):
+        pass
+    try:
+        return float(value)
+    except (ValueError, TypeError):
+        pass
+    return value
+
+def parse_generic_command_flags(parts: List[str]) -> Tuple[Dict[str, Any], List[str]]:
+    """
+    Parses a list of command parts into a dictionary of keyword arguments and a list of positional arguments.
+    Handles: -f val, --flag val, --flag=val, flag=val, --boolean-flag
+    """
+    parsed_kwargs = {}
+    positional_args = []
+    i = 0
+    while i < len(parts):
+        part = parts[i]
+
+        if part.startswith('--'):
+            key_part = part[2:]
+            if '=' in key_part:
+                key, value = key_part.split('=', 1)
+                parsed_kwargs[key] = _try_convert_type(value)
+            else:
+                # Look ahead for a value
+                if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                    parsed_kwargs[key_part] = _try_convert_type(parts[i + 1])
+                    i += 1  # Consume the value
+                else:
+                    parsed_kwargs[key_part] = True  # Boolean flag
+
+        elif part.startswith('-'):
+            key = part[1:]
+            # Look ahead for a value
+            if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                parsed_kwargs[key] = _try_convert_type(parts[i + 1])
+                i += 1  # Consume the value
+            else:
+                parsed_kwargs[key] = True  # Boolean flag
+
+        elif '=' in part and not part.startswith('-'):
+            key, value = part.split('=', 1)
+            parsed_kwargs[key] = _try_convert_type(value)
+
+        else:
+            positional_args.append(part)
+
+        i += 1
+
+    return parsed_kwargs, positional_args
+
+
+def should_skip_kg_processing(user_input: str, assistant_output: str) -> bool:
+    """Determine if this interaction is too trivial for KG processing"""
+
+    # Skip if user input is too short or trivial
+    trivial_inputs = {
+        '/sq', '/exit', '/quit', 'exit', 'quit', 'hey', 'hi', 'hello',
+        'fwah!', 'test', 'ping', 'ok', 'thanks', 'ty'
+    }
+
+    if user_input.lower().strip() in trivial_inputs:
+        return True
+
+    # Skip if user input is very short (less than 10 chars)
+    if len(user_input.strip()) < 10:
+        return True
+
+    # Skip simple bash commands
+    simple_bash = {'ls', 'pwd', 'cd', 'mkdir', 'touch', 'rm', 'mv', 'cp'}
+    first_word = user_input.strip().split()[0] if user_input.strip() else ""
+    if first_word in simple_bash:
+        return True
+
+    # Skip if assistant output is very short (less than 20 chars)
+    if len(assistant_output.strip()) < 20:
+        return True
+
+    # Skip if it's just a mode exit message
+    if "exiting" in assistant_output.lower() or "exited" in assistant_output.lower():
+        return True
+
+    return False
+
+
+
 def execute_slash_command(command: str, stdin_input: Optional[str], state: ShellState, stream: bool) -> Tuple[ShellState, Any]:
     """Executes slash commands using the router or checking NPC/Team jinxs."""
-
-    command_name =
-
+    all_command_parts = shlex.split(command)
+    command_name = all_command_parts[0].lstrip('/')
     if command_name in ['n', 'npc']:
-        npc_to_switch_to =
+        npc_to_switch_to = all_command_parts[1] if len(all_command_parts) > 1 else None
         if npc_to_switch_to and state.team and npc_to_switch_to in state.team.npcs:
             state.npc = state.team.npcs[npc_to_switch_to]
             return state, f"Switched to NPC: {npc_to_switch_to}"
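As a rough illustration of what the new flag parser produces (the command-line values below are made up, not taken from this diff):

    # Hypothetical input to the helpers added above
    kwargs, positional = parse_generic_command_flags(
        ['--verbose', '--model=llama3.2', '-n', '3', 'depth=2', 'summarize the repo']
    )
    # kwargs     -> {'verbose': True, 'model': 'llama3.2', 'n': 3, 'depth': 2}
    # positional -> ['summarize the repo']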
@@ -571,38 +677,75 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
         available_npcs = list(state.team.npcs.keys()) if state.team else []
         return state, colored(f"NPC '{npc_to_switch_to}' not found. Available NPCs: {', '.join(available_npcs)}", "red")
     handler = router.get_route(command_name)
-    #print(handler)
     if handler:
-
+        parsed_flags, positional_args = parse_generic_command_flags(all_command_parts[1:])
+
+        normalized_flags = normalize_and_expand_flags(parsed_flags)
+
         handler_kwargs = {
             'stream': stream,
-            'npc': state.npc,
             'team': state.team,
             'messages': state.messages,
-            'model': state.chat_model,
-            'provider': state.chat_provider,
             'api_url': state.api_url,
             'api_key': state.api_key,
+            'stdin_input': stdin_input,
+            'positional_args': positional_args,
+            'plonk_context': state.team.shared_context.get('PLONK_CONTEXT') if state.team and hasattr(state.team, 'shared_context') else None,
+
+            # Default chat model/provider
+            'model': state.npc.model if isinstance(state.npc, NPC) and state.npc.model else state.chat_model,
+            'provider': state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else state.chat_provider,
+            'npc': state.npc,
+
+            # All other specific defaults
+            'sprovider': state.search_provider,
+            'emodel': state.embedding_model,
+            'eprovider': state.embedding_provider,
+            'igmodel': state.image_gen_model,
+            'igprovider': state.image_gen_provider,
+            'vgmodel': state.video_gen_model,
+            'vgprovider':state.video_gen_provider,
+            'vmodel': state.vision_model,
+            'vprovider': state.vision_provider,
+            'rmodel': state.reasoning_model,
+            'rprovider': state.reasoning_provider,
         }
-
-        if
-
-
+
+        if len(normalized_flags)>0:
+            kwarg_part = 'with kwargs: \n -' + '\n -'.join(f'{key}={item}' for key, item in normalized_flags.items())
+        else:
+            kwarg_part = ''
+
+        # 4. Merge the clean, normalized flags. This will correctly overwrite defaults.
+        render_markdown(f'- Calling {command_name} handler {kwarg_part} ')
+        if 'model' in normalized_flags and 'provider' not in normalized_flags:
+            # Call your existing, centralized lookup_provider function
+            inferred_provider = lookup_provider(normalized_flags['model'])
+            if inferred_provider:
+                # Update the provider that will be used for this command.
+                handler_kwargs['provider'] = inferred_provider
+                print(colored(f"Info: Inferred provider '{inferred_provider}' for model '{normalized_flags['model']}'.", "cyan"))
+        if 'provider' in normalized_flags and 'model' not in normalized_flags:
+            # loop up mhandler_kwargs model's provider
+            current_provider = lookup_provider(handler_kwargs['model'])
+            if current_provider != normalized_flags['provider']:
+                print(f'Please specify a model for the provider: {normalized_flags['provider']}')
+        handler_kwargs.update(normalized_flags)
+
+
         try:
-            result_dict = handler(command, **handler_kwargs)
-
+            result_dict = handler(command=command, **handler_kwargs)
+            # add the output model and provider for the print_and_process_stream downstream processing
             if isinstance(result_dict, dict):
                 state.messages = result_dict.get("messages", state.messages)
                 return state, result_dict
             else:
                 return state, result_dict
-
         except Exception as e:
             import traceback
             print(f"Error executing slash command '{command_name}':", file=sys.stderr)
             traceback.print_exc()
             return state, colored(f"Error executing slash command '{command_name}': {e}", "red")
-
     active_npc = state.npc if isinstance(state.npc, NPC) else None
     jinx_to_execute = None
     executor = None
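In rough terms, the normalization block above means that passing only a model flag to a slash command (command and model names here are illustrative) lets the provider be filled in automatically:

    # Sketch only; lookup_provider is imported from npcpy.npc_sysenv in this diff
    normalized_flags = {'model': 'llama3.2'}             # e.g. parsed from '/spool --model llama3.2'
    if 'model' in normalized_flags and 'provider' not in normalized_flags:
        inferred = lookup_provider(normalized_flags['model'])   # provider guess, may be None
        if inferred:
            handler_kwargs['provider'] = inferred
    handler_kwargs.update(normalized_flags)              # flags override the state defaults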
@@ -636,6 +779,7 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
 
     return state, colored(f"Unknown slash command or jinx: {command_name}", "red")
 
+
 def process_pipeline_command(
     cmd_segment: str,
     stdin_input: Optional[str],
@@ -648,6 +792,7 @@ def process_pipeline_command(
 
     available_models_all = get_locally_available_models(state.current_path)
     available_models_all_list = [item for key, item in available_models_all.items()]
+
     model_override, provider_override, cmd_cleaned = get_model_and_provider(
         cmd_segment, available_models_all_list
     )
@@ -655,8 +800,11 @@ def process_pipeline_command(
     if not cmd_to_process:
         return state, stdin_input
 
-
-
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+
+    exec_model = model_override or npc_model or state.chat_model
+    exec_provider = provider_override or npc_provider or state.chat_provider
 
     if cmd_to_process.startswith("/"):
         return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
@@ -665,23 +813,25 @@ def process_pipeline_command(
     if not cmd_parts:
         return state, stdin_input
 
-
-
-
-
-
-
+    command_name = cmd_parts[0]
+
+    if command_name == "cd":
+        return handle_cd_command(cmd_parts, state)
+
+    if command_name in interactive_commands:
+        return handle_interactive_command(cmd_parts, state)
 
+    if validate_bash_command(cmd_parts):
         success, result = handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
         if success:
             return state, result
         else:
-            print(colored(f"Bash command failed. Asking LLM for a fix
+            print(colored(f"Bash command failed: {result}. Asking LLM for a fix...", "yellow"), file=sys.stderr)
             fixer_prompt = f"The command '{cmd_to_process}' failed with the error: '{result}'. Provide the correct command."
             response = execute_llm_command(
                 fixer_prompt,
-                model=exec_model,
-                provider=exec_provider,
+                model=exec_model,
+                provider=exec_provider,
                 npc=state.npc,
                 stream=stream_final,
                 messages=state.messages
@@ -697,8 +847,8 @@ def process_pipeline_command(
 
     llm_result = check_llm_command(
         full_llm_cmd,
-        model=exec_model,
-        provider=exec_provider,
+        model=exec_model,
+        provider=exec_provider,
         api_url=state.api_url,
         api_key=state.api_key,
         npc=state.npc,
@@ -707,7 +857,6 @@ def process_pipeline_command(
         images=state.attachments,
         stream=stream_final,
         context=info,
-
     )
     if isinstance(llm_result, dict):
         state.messages = llm_result.get("messages", state.messages)
@@ -716,7 +865,7 @@ def process_pipeline_command(
     else:
         return state, llm_result
 def check_mode_switch(command:str , state: ShellState):
-    if command in ['/cmd', '/agent', '/chat',
+    if command in ['/cmd', '/agent', '/chat',]:
         state.current_mode = command[1:]
         return True, state
 
@@ -737,6 +886,10 @@ def execute_command(
     stdin_for_next = None
     final_output = None
     current_state = state
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+    active_model = npc_model or state.chat_model
+    active_provider = npc_provider or state.chat_provider
 
     if state.current_mode == 'agent':
         for i, cmd_segment in enumerate(commands):
@@ -753,26 +906,25 @@ def execute_command(
 
                 if is_last_command:
                     final_output = output # Capture the output of the last command
-
                 if isinstance(output, str):
                     stdin_for_next = output
-                elif
-
-                    full_stream_output =
+                elif not isinstance(output, str):
+                    try:
+                        full_stream_output = print_and_process_stream_with_markdown(output,
+                                                                                    state.npc.model,
+                                                                                    state.npc.provider)
                         stdin_for_next = full_stream_output
                         if is_last_command:
                             final_output = full_stream_output
-
-
-
-
-
-
-
-
-
-                else: # Output was None
-                    stdin_for_next = None
+                    except:
+                        if output is not None: # Try converting other types to string
+                            try:
+                                stdin_for_next = str(output)
+                            except Exception:
+                                print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
+                                stdin_for_next = None
+                else: # Output was None
+                    stdin_for_next = None
 
 
         except Exception as pipeline_error:
@@ -783,7 +935,7 @@ def execute_command(
         return current_state, error_msg
 
     # Store embeddings using the final state
-    if final_output is not None and
+    if final_output is not None and isinstance(final_output,str):
         store_command_embeddings(original_command_for_embedding, final_output, current_state)
 
     # Return the final state and the final output
@@ -821,8 +973,8 @@ def execute_command(
         # Otherwise, treat as chat (LLM)
         response = get_llm_response(
             command,
-            model=
-            provider=
+            model=active_model,
+            provider=active_provider,
             npc=state.npc,
             stream=state.stream_output,
             messages=state.messages
@@ -833,14 +985,17 @@ def execute_command(
     elif state.current_mode == 'cmd':
 
         response = execute_llm_command(command,
-
-
+                                       model=active_model,
+                                       provider=active_provider,
                                        npc = state.npc,
                                        stream = state.stream_output,
                                        messages = state.messages)
        state.messages = response['messages']
        return state, response['response']
 
+    """
+    # to be replaced with a standalone corca mode
+
    elif state.current_mode == 'ride':
        # Allow bash commands in /ride mode
        cmd_parts = parse_command_safely(command)
@@ -871,379 +1026,8 @@ def execute_command(
 
         # Otherwise, run the agentic ride loop
         return agentic_ride_loop(command, state)
-@dataclass
-class RideState:
-    """Lightweight state tracking for /ride mode"""
-    todos: List[Dict[str, Any]] = field(default_factory=list)
-    constraints: List[str] = field(default_factory=list)
-    facts: List[str] = field(default_factory=list)
-    mistakes: List[str] = field(default_factory=list)
-    successes: List[str] = field(default_factory=list)
-    current_todo_index: int = 0
-    current_subtodo_index: int = 0
-
-    def get_context_summary(self) -> str:
-        """Generate lightweight context for LLM prompts"""
-        context = []
-        if self.facts:
-            context.append(f"Facts: {'; '.join(self.facts[:5])}") # Limit to 5 most recent
-        if self.mistakes:
-            context.append(f"Recent mistakes: {'; '.join(self.mistakes[-3:])}")
-        if self.successes:
-            context.append(f"Recent successes: {'; '.join(self.successes[-3:])}")
-        return "\n".join(context)
-
-def interactive_edit_list(items: List[str], item_type: str) -> List[str]:
-    """Interactive REPL for editing lists of items with regeneration options"""
-    while True:
-        print(f"\nCurrent {item_type}:")
-        for i, item in enumerate(items, 1):
-            print(f"{i}. {item}")
-
-        choice = input(f"\nEdit {item_type} (e<num> to edit, d<num> to delete, a to add, r to regenerate, c to add context, ok to continue): ").strip()
-
-        if choice.lower() == 'ok':
-            break
-        elif choice.lower() == 'r':
-            print("Regenerating list...")
-            return "REGENERATE" # Special signal to regenerate
-        elif choice.lower() == 'c':
-            additional_context = input("Add more context: ").strip()
-            if additional_context:
-                return {"ADD_CONTEXT": additional_context, "items": items}
-        elif choice.lower() == 'a':
-            new_item = input(f"Enter new {item_type[:-1]}: ").strip()
-            if new_item:
-                items.append(new_item)
-        elif choice.lower().startswith('e'):
-            try:
-                idx = int(choice[1:]) - 1
-                if 0 <= idx < len(items):
-                    print(f"Current: {items[idx]}")
-                    new_item = input("New version: ").strip()
-                    if new_item:
-                        items[idx] = new_item
-            except ValueError:
-                print("Invalid format. Use e<number>")
-        elif choice.lower().startswith('d'):
-            try:
-                idx = int(choice[1:]) - 1
-                if 0 <= idx < len(items):
-                    items.pop(idx)
-            except ValueError:
-                print("Invalid format. Use d<number>")
-        else:
-            print("Invalid choice. Use: e<num>, d<num>, a, r (regenerate), c (add context), or ok")
-
-    return items
-def generate_todos(user_goal: str, state: ShellState, additional_context: str = "") -> List[Dict[str, Any]]:
-    """Generate high-level todos for the user's goal"""
-    path_cmd = 'The current working directory is: ' + state.current_path
-    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
-    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
-    info = path_cmd + '\n' + ls_files + '\n' + platform_info
-
-
-
-    high_level_planning_instruction = """
-    You are a high-level project planner. When a user asks to work on a file or code,
-    structure your plan using a simple, high-level software development lifecycle:
-    1. First, understand the current state (e.g., read the relevant file).
-    2. Second, make the required changes based on the user's goal.
-    3. Third, verify the changes work as intended (e.g., test the code).
-    Your generated todos should reflect this high-level thinking.
-
-
-
-    """
-
-    prompt = f"""
-    {high_level_planning_instruction}
-
-    User goal: {user_goal}
-
-    {additional_context}
-
-    Generate a list of 3 todos to accomplish this goal. Use specific actionable language based on the user request.
-    Do not make assumptions about user needs.
-    Every todo must be directly sourced from the user's request.
-    If users request specific files to be incorporated, you MUST include the full path to the file in the todo.
-    Here is some relevant information for the current folder and working directory that may be relevant:
-    {info}
-
-    For example, if the user says "I need to add a new function to calculate the average of a list of numbers my research.py script" and the current working directory is /home/user/projects and one
-    of the available files in the current directory is /home/user/projects/research.py then one of the todos should be:
-    - "Add a new function to /home/user/projects/research.py to calculate the average of a list of numbers"
-    Do not truncate paths. Do not additional paths. Use them exactly as they are provided here.
-
-    Each todo should be:
-    - Specific and actionable
-    - Independent where possible
-    - Focused on a single major component
-
-    Remember, it is critical to provide as much relevant information as possible. Even if the user only refers to a file or something by a relative path, it is
-    critical for operation that you provide the full path to the file in the todo item.
-
-    Return JSON with format:
-    {{
-        "todos": [
-            todo1, todo2, todo3,
-        ]
-    }}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    todos_data = response.get("response", {}).get("todos", [])
-    return todos_data
-
-
-def generate_constraints(todos: List[Dict[str, Any]], user_goal: str, state: ShellState) -> List[str]:
-    """Generate constraints and requirements that define relationships between todos"""
-    prompt = f"""
-    User goal: {user_goal}
-
-    Todos to accomplish:
-    {chr(10).join([f"- {todo}" for todo in todos])}
-
-    Based ONLY on what the user explicitly stated in their goal, identify any constraints or requirements they mentioned.
-    Do NOT invent new constraints. Only extract constraints that are directly stated or clearly implied by the user's request.
-
-    Examples of valid constraints:
-    - If user says "without breaking existing functionality" -> "Maintain existing functionality"
-    - If user says "must be fast" -> "Performance must be optimized"
-    - If user says "should integrate with X" -> "Must integrate with X"
-
-    If the user didn't specify any constraints, return an empty list.
-
-    Return JSON with format:
-    {{
-        "constraints": ["constraint 1", "constraint 2", ...]
-    }}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    constraints_data = response.get("response", {})
-
-    if isinstance(constraints_data, dict):
-        constraints = constraints_data.get("constraints", [])
-        # Make sure we're getting strings, not dicts
-        cleaned_constraints = []
-        for c in constraints:
-            if isinstance(c, str):
-                cleaned_constraints.append(c)
-        return cleaned_constraints
-    else:
-        return []
-def should_break_down_todo(todo, state: ShellState) -> bool:
-    """Ask LLM if a todo needs breakdown, then ask user for confirmation"""
-    prompt = f"""
-    Todo: {todo}
-
-
-    Does this todo need to be broken down into smaller, more atomic components?
-    Consider:
-    - Is it complex enough to warrant breakdown?
-    - Would breaking it down make execution clearer?
-    - Are there multiple distinct steps involved?
-
-    Return JSON: {{"should_break_down": true/false, "reason": "explanation"}}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    result = response.get("response", {})
-    llm_suggests = result.get("should_break_down", False)
-    reason = result.get("reason", "No reason provided")
-
-    if llm_suggests:
-        print(f"\nLLM suggests breaking down: '{todo}'")
-        print(f"Reason: {reason}")
-        user_choice = input("Break it down? [y/N]: ").strip().lower()
-        return user_choice in ['y', 'yes']
-
-    return False
-
-def generate_subtodos(todo, state: ShellState) -> List[Dict[str, Any]]:
-    """Generate atomic subtodos for a complex todo"""
-    prompt = f"""
-    Parent todo: {todo}
-
-    Break this down into atomic, executable subtodos. Each subtodo should be:
-    - A single, concrete action
-    - Executable in one step
-    - Clear and unambiguous
-
-    Return JSON with format:
-    {{
-        "subtodos": [
-            "subtodo description",
-            ...
-        ]
-    }}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    return response.get("response", {}).get("subtodos", [])
-def execute_todo_item(todo: Dict[str, Any], ride_state: RideState, shell_state: ShellState) -> bool:
-    """Execute a single todo item using the existing jinx system"""
-    path_cmd = 'The current working directory is: ' + shell_state.current_path
-    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(shell_state.current_path, f) for f in os.listdir(shell_state.current_path)]) if os.path.exists(shell_state.current_path) else 'No files found in the current directory.'
-    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
-    info = path_cmd + '\n' + ls_files + '\n' + platform_info
-
-    command = f"""
-
-    General information:
-    {info}
-
-    Execute this todo: {todo}
-
-    Constraints to follow:
-    {chr(10).join([f"- {c}" for c in ride_state.constraints])}
-    """
-
-    print(f"\nExecuting: {todo}")
-
-
-    result = check_llm_command(
-        command,
-        model=shell_state.chat_model,
-        provider=shell_state.chat_provider,
-        npc=shell_state.npc,
-        team=shell_state.team,
-        messages=[],
-        stream=shell_state.stream_output,
-
-    )
-
-    output_payload = result.get("output", "")
-    output_str = ""
-
-
-    if isgenerator(output_payload):
-        output_str = print_and_process_stream_with_markdown(output_payload, shell_state.chat_model, shell_state.chat_provider)
-    elif isinstance(output_payload, dict):
-        output_str = output_payload.get('output', str(output_payload))
-        if 'output' in output_str:
-            output_str = output_payload['output']
-        elif 'response' in output_str:
-            output_str = output_payload['response']
-        render_markdown(output_str)
-    elif output_payload:
-        output_str = str(output_payload)
-        render_markdown(output_str)
-
-    user_feedback = input(f"\nTodo completed successfully? [y/N/notes]: ").strip()
-
-    if user_feedback.lower() in ['y', 'yes']:
-        return True, output_str
-    elif user_feedback.lower() in ['n', 'no']:
-        mistake = input("What went wrong? ").strip()
-        ride_state.mistakes.append(f"Failed {todo}: {mistake}")
-        return False, output_str
-    else:
-        ride_state.facts.append(f"Re: {todo}: {user_feedback}")
-        success = input("Mark as completed? [y/N]: ").strip().lower() in ['y', 'yes']
-        return success, output_str
-
-def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
-    """
-    New /ride mode: hierarchical planning with human-in-the-loop control
     """
-    ride_state = RideState()
-
-    # 1. Generate high-level todos
-    print("🚀 Generating high-level todos...")
-    todos = generate_todos(user_goal, state)
-
-    # 2. User reviews/edits todos
-    print("\n📋 Review and edit todos:")
-    todo_descriptions = [todo for todo in todos]
-    edited_descriptions = interactive_edit_list(todo_descriptions, "todos")
-
 
-    ride_state.todos = edited_descriptions
-
-    # 3. Generate constraints
-    print("\n🔒 Generating constraints...")
-    constraints = generate_constraints(edited_descriptions, user_goal, state)
-
-    # 4. User reviews/edits constraints
-    print("\n📐 Review and edit constraints:")
-    edited_constraints = interactive_edit_list(constraints, "constraints")
-    ride_state.constraints = edited_constraints
-
-    # 5. Execution loop
-    print("\n⚡ Starting execution...")
-
-    for i, todo in enumerate(edited_descriptions):
-        print(f"\n--- Todo {i+1}/{len(todos)}: {todo} ---")
-
-        def attempt_execution(current_todo):
-            # This inner function handles the execution and retry logic
-            success, output_str = execute_todo_item(current_todo, ride_state, state)
-            if not success:
-                retry = input("Retry this todo? [y/N]: ").strip().lower()
-                if retry in ['y', 'yes']:
-                    success, output_str = execute_todo_item(current_todo, ride_state, state)
-            return success, output_str
-
-        if should_break_down_todo(todo, state):
-            print("Breaking down todo...")
-            subtodos = generate_subtodos(todo, state)
-            subtodo_descriptions = [st for st in subtodos]
-            edited_subtodos = interactive_edit_list(subtodo_descriptions, "subtodos")
-
-            for j, subtodo_desc in enumerate(edited_subtodos):
-                subtodo = {"description": subtodo_desc, "type": "atomic"}
-                success, output = attempt_execution(subtodo)
-                if success:
-                    ride_state.successes.append({"description": subtodo_desc, "output": output})
-                else:
-                    print("Subtodo failed. Continuing to next...")
-        else:
-            success, output = attempt_execution(todo)
-            if success:
-                ride_state.successes.append({"description": todo, "output": output})
-    # 6. Final summary
-    print("\n🎯 Execution Summary:")
-    print(f"Successes: {len(ride_state.successes)}")
-    print(f"Mistakes: {len(ride_state.mistakes)}")
-    print(f"Facts learned: {len(ride_state.facts)}")
-
-    return state, {
-        "todos_completed": len(ride_state.successes),
-        "ride_state": ride_state,
-        "final_context": ride_state.get_context_summary()
-    }
-# --- Main Application Logic ---
 
 def check_deprecation_warnings():
     if os.getenv("NPCSH_MODEL"):
@@ -1279,6 +1063,7 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     os.makedirs(os.path.dirname(db_path), exist_ok=True)
     command_history = CommandHistory(db_path)
 
+
     try:
         history_file = setup_readline()
         atexit.register(save_readline_history)
@@ -1324,7 +1109,7 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     if use_jinxs == "c":
         global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
         if os.path.exists(global_jinxs_dir):
-            shutil.copytree(global_jinxs_dir,
+            shutil.copytree(global_jinxs_dir, team_dir, dirs_exist_ok=True)
     else:
         team_ctx_data["use_global_jinxs"] = True
 
@@ -1352,7 +1137,7 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
         print(f"Warning: Could not load context file {filename}: {e}")
 
     forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
-
+    #render_markdown(f"- Using forenpc: {forenpc_name}")
 
     if team_ctx.get("use_global_jinxs", False):
         jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
@@ -1364,89 +1149,196 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
 
     forenpc_obj = None
     forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
-    #print('forenpc_path', forenpc_path)
-    #print('jinx list', jinxs_list)
-    if os.path.exists(forenpc_path):
 
-
+
+    #render_markdown('- Loaded team context'+ json.dumps(team_ctx, indent=2))
+
+
+
+    if os.path.exists(forenpc_path):
+        forenpc_obj = NPC(file = forenpc_path,
+                          jinxs=jinxs_list)
+        if forenpc_obj.model is None:
+            forenpc_obj.model= team_ctx.get("model", initial_state.chat_model)
+        if forenpc_obj.provider is None:
+            forenpc_obj.provider=team_ctx.get('provider', initial_state.chat_provider)
+
     else:
         print(f"Warning: Forenpc file '{forenpc_name}.npc' not found in {team_dir}.")
 
-    team = Team(team_path=team_dir,
+    team = Team(team_path=team_dir,
+                forenpc=forenpc_obj,
+                jinxs=jinxs_dict)
+
+    for npc_name, npc_obj in team.npcs.items():
+        if not npc_obj.model:
+            npc_obj.model = initial_state.chat_model
+        if not npc_obj.provider:
+            npc_obj.provider = initial_state.chat_provider
+
+    # Also apply to the forenpc specifically
+    if team.forenpc and isinstance(team.forenpc, NPC):
+        if not team.forenpc.model:
+            team.forenpc.model = initial_state.chat_model
+        if not team.forenpc.provider:
+            team.forenpc.provider = initial_state.chat_provider
+    team_name_from_ctx = team_ctx.get("name")
+    if team_name_from_ctx:
+        team.name = team_name_from_ctx
+    elif team_dir and os.path.basename(team_dir) != 'npc_team':
+        team.name = os.path.basename(team_dir)
+    else:
+        team.name = "global_team" # fallback for ~/.npcsh/npc_team
+
     return command_history, team, forenpc_obj
 
+# In your main npcsh.py file
+
 def process_result(
     user_input: str,
     result_state: ShellState,
     output: Any,
-    command_history: CommandHistory
+    command_history: CommandHistory
+    ):
+    # --- Part 1: Save Conversation & Determine Output ---
+
+    # Define team and NPC names early for consistent logging
+    team_name = result_state.team.name if result_state.team else "__none__"
+    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
+
+    # Determine the actual NPC object to use for this turn's operations
+    active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
+        name="default",
+        model=result_state.chat_model,
+        provider=result_state.chat_provider
+    )
 
-    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else result_state.npc
-    team_name = result_state.team.name if isinstance(result_state.team, Team) else result_state.team
     save_conversation_message(
         command_history,
         result_state.conversation_id,
         "user",
         user_input,
         wd=result_state.current_path,
-        model=
-        provider=
+        model=active_npc.model,
+        provider=active_npc.provider,
         npc=npc_name,
         team=team_name,
         attachments=result_state.attachments,
     )
-
-    result_state.attachments = None # Clear attachments after logging user message
+    result_state.attachments = None
 
     final_output_str = None
+    output_content = output.get('output') if isinstance(output, dict) else output
+    model_for_stream = output.get('model', active_npc.model) if isinstance(output, dict) else active_npc.model
+    provider_for_stream = output.get('provider', active_npc.provider) if isinstance(output, dict) else active_npc.provider
+
+    print('\n')
     if user_input =='/help':
-        render_markdown(output)
-
+        render_markdown(output.get('output'))
     elif result_state.stream_output:
-
-        if isinstance(output, dict):
-            output_gen = output.get('output')
-            model = output.get('model', result_state.chat_model)
-            provider = output.get('provider', result_state.chat_provider)
-        else:
-            output_gen = output
-            model = result_state.chat_model
-            provider = result_state.chat_provider
-        print('processing stream output with markdown...')
-
-        final_output_str = print_and_process_stream_with_markdown(output_gen,
-                                                                  model,
-                                                                  provider)
-
-    elif output is not None:
-        final_output_str = str(output)
-        render_markdown( final_output_str)
-    if final_output_str and result_state.messages and result_state.messages[-1].get("role") != "assistant":
-        result_state.messages.append({"role": "assistant", "content": final_output_str})
-
-    #print(result_state.messages)
 
 
+        final_output_str = print_and_process_stream_with_markdown(output_content, model_for_stream, provider_for_stream)
+    elif output_content is not None:
+        final_output_str = str(output_content)
+        render_markdown(final_output_str)
 
     if final_output_str:
+
+        if result_state.messages and (not result_state.messages or result_state.messages[-1].get("role") != "assistant"):
+            result_state.messages.append({"role": "assistant", "content": final_output_str})
         save_conversation_message(
             command_history,
             result_state.conversation_id,
             "assistant",
             final_output_str,
             wd=result_state.current_path,
-            model=
-            provider=
+            model=active_npc.model,
+            provider=active_npc.provider,
            npc=npc_name,
            team=team_name,
        )
 
+        conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
+        conn = command_history.conn
+
+        try:
+            if not should_skip_kg_processing(user_input, final_output_str):
+
+                npc_kg = load_kg_from_db(conn, team_name, npc_name, result_state.current_path)
+                evolved_npc_kg, _ = kg_evolve_incremental(
+                    existing_kg=npc_kg,
+                    new_content_text=conversation_turn_text,
+                    model=active_npc.model,
+                    provider=active_npc.provider,
+                    get_concepts=True,
+                    link_concepts_facts = False,
+                    link_concepts_concepts = False,
+                    link_facts_facts = False,
+
+
+                )
+                save_kg_to_db(conn,
+                              evolved_npc_kg,
+                              team_name,
+                              npc_name,
+                              result_state.current_path)
+        except Exception as e:
+            print(colored(f"Error during real-time KG evolution: {e}", "red"))
+
+    # --- Part 3: Periodic Team Context Suggestions ---
+    result_state.turn_count += 1
+    if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
+        print(colored("\nChecking for potential team improvements...", "cyan"))
+        try:
+            summary = breathe(messages=result_state.messages[-20:],
+                              npc=active_npc)
+            characterization = summary.get('output')
+
+            if characterization and result_state.team:
+                team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+                ctx_data = {}
+                if os.path.exists(team_ctx_path):
+                    with open(team_ctx_path, 'r') as f:
+                        ctx_data = yaml.safe_load(f) or {}
+                current_context = ctx_data.get('context', '')
+
+                prompt = f"""Based on this characterization: {characterization},
+
+                suggest changes (additions, deletions, edits) to the team's context.
+                Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.
+
+                Current Context: "{current_context}".
+
+                Respond with JSON: {{"suggestion": "Your sentence."
+                }}"""
+                response = get_llm_response(prompt, npc=active_npc, format="json")
+                suggestion = response.get("response", {}).get("suggestion")
+
+                if suggestion:
+                    new_context = (current_context + " " + suggestion).strip()
+                    print(colored("AI suggests updating team context:", "yellow"))
+                    print(f" - OLD: {current_context}\n + NEW: {new_context}")
+                    if input("Apply? [y/N]: ").strip().lower() == 'y':
+                        ctx_data['context'] = new_context
+                        with open(team_ctx_path, 'w') as f:
+                            yaml.dump(ctx_data, f)
+                        print(colored("Team context updated.", "green"))
+                else:
+                    print("Suggestion declined.")
+        except Exception as e:
+            import traceback
+            print(colored(f"Could not generate team suggestions: {e}", "yellow"))
+            traceback.print_exc()
+
+
+
 def run_repl(command_history: CommandHistory, initial_state: ShellState):
     state = initial_state
     print_welcome_message()
 
 
-    render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd,
+    render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd, or /chat to switch to other modes')
     render_markdown(f'- To switch to a different NPC, type /npc <npc_name> or /n <npc_name> to switch to that NPC.')
     render_markdown('\n- Here are the current NPCs available in your team: ' + ', '.join([npc_name for npc_name in state.team.npcs.keys()]))
 
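The per-turn knowledge-graph update added to process_result above reduces to a load/evolve/save cycle; a condensed sketch (signatures assumed from the npcpy imports earlier in this diff):

    # turn_text stands in for the "User: ...\nAssistant: ..." string built above
    if not should_skip_kg_processing(user_input, final_output_str):
        kg = load_kg_from_db(conn, team_name, npc_name, result_state.current_path)
        kg, _ = kg_evolve_incremental(existing_kg=kg,
                                      new_content_text=turn_text,
                                      model=active_npc.model,
                                      provider=active_npc.provider,
                                      get_concepts=True)
        save_kg_to_db(conn, kg, team_name, npc_name, result_state.current_path)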
@@ -1457,23 +1349,68 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
         readline.set_completer(completer)
     except:
         pass
+    session_scopes = set()
 
-
+
+    def exit_shell(current_state: ShellState):
+        """
+        On exit, iterates through all active scopes from the session and
+        creates/updates the specific knowledge graph for each one.
+        """
         print("\nGoodbye!")
-
+        print(colored("Processing and archiving all session knowledge...", "cyan"))
+
+        conn = command_history.conn
+        integrator_npc = NPC(name="integrator", model=current_state.chat_model, provider=current_state.chat_provider)
 
+        # Process each unique scope that was active during the session
+        for team_name, npc_name, path in session_scopes:
+            try:
+                print(f"  -> Archiving knowledge for: T='{team_name}', N='{npc_name}', P='{path}'")
+
+                # Get all messages for the current conversation that happened in this specific path
+                convo_id = current_state.conversation_id
+                all_messages = command_history.get_conversations_by_id(convo_id)
+
+                scope_messages = [
+                    m for m in all_messages
+                    if m.get('directory_path') == path and m.get('team') == team_name and m.get('npc') == npc_name
+                ]
+
+                full_text = "\n".join([f"{m['role']}: {m['content']}" for m in scope_messages if m.get('content')])
 
+                if not full_text.strip():
+                    print("     ...No content for this scope, skipping.")
+                    continue
 
+                # Load the existing KG for this specific, real scope
+                current_kg = load_kg_from_db(conn, team_name, npc_name, path)
+
+                # Evolve it with the full text from the session for this scope
+                evolved_kg, _ = kg_evolve_incremental(
+                    existing_kg=current_kg,
+                    new_content_text=full_text,
+                    model=integrator_npc.model,
+                    provider=integrator_npc.provider,
+                    get_concepts=True,
+                    link_concepts_facts = True,
+                    link_concepts_concepts = True,
+                    link_facts_facts = True,
 
+                )
+
+                # Save the updated KG back to the database under the same exact scope
+                save_kg_to_db(conn, evolved_kg, team_name, npc_name, path)
+
+            except Exception as e:
+                import traceback
+                print(colored(f"Failed to process KG for scope ({team_name}, {npc_name}, {path}): {e}", "red"))
+                traceback.print_exc()
 
-        #print('beginning knowledge consolidation')
-        #try:
-        #    breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
-        #    print(breathe_result)
-        #except KeyboardInterrupt:
-        #    print("Knowledge consolidation interrupted. Exiting immediately.")
         sys.exit(0)
 
+
+
     while True:
         try:
             try:
@@ -1482,17 +1419,21 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
             except:
                 pass
 
+            display_model = state.chat_model
+            if isinstance(state.npc, NPC) and state.npc.model:
+                display_model = state.npc.model
+
             if is_windows:
                 cwd_part = os.path.basename(state.current_path)
                 if isinstance(state.npc, NPC):
-                    prompt_end = f":{state.npc.name}> "
+                    prompt_end = f":{state.npc.name}:{display_model}> "
                 else:
                     prompt_end = ":npcsh> "
                 prompt = f"{cwd_part}{prompt_end}"
             else:
                 cwd_colored = colored(os.path.basename(state.current_path), "blue")
                 if isinstance(state.npc, NPC):
-                    prompt_end = f":🤖{orange(state.npc.name)}:{
+                    prompt_end = f":🤖{orange(state.npc.name)}:{display_model}> "
                 else:
                     prompt_end = f":🤖{colored('npc', 'blue', attrs=['bold'])}{colored('sh', 'yellow')}> "
                 prompt = readline_safe_prompt(f"{cwd_colored}{prompt_end}")
@@ -1512,11 +1453,15 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
                     continue
                 else:
                     exit_shell(state)
+            team_name = state.team.name if state.team else "__none__"
+            npc_name = state.npc.name if isinstance(state.npc, NPC) else "__none__"
+            session_scopes.add((team_name, npc_name, state.current_path))
 
-            state.current_path = os.getcwd()
             state, output = execute_command(user_input, state)
-            process_result(user_input, state,
-
+            process_result(user_input, state,
+                           output,
+                           command_history)
+
         except KeyboardInterrupt:
             if is_windows:
                 # On Windows, Ctrl+C cancels the current input line, show prompt again
@@ -1528,8 +1473,6 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
         except EOFError:
             # Ctrl+D: exit shell cleanly
             exit_shell(state)
-
-
 def main() -> None:
     parser = argparse.ArgumentParser(description="npcsh - An NPC-powered shell.")
     parser.add_argument(
@@ -1544,8 +1487,6 @@ def main() -> None:
 
     initial_state.npc = default_npc
     initial_state.team = team
-    #import pdb
-    #pdb.set_trace()
 
     # add a -g global command to indicate if to use the global or project, otherwise go thru normal flow
 
@@ -1553,8 +1494,9 @@ def main() -> None:
         state = initial_state
         state.current_path = os.getcwd()
         final_state, output = execute_command(args.command, state)
-        if final_state.stream_output
-            for chunk in output:
+        if final_state.stream_output:
+            for chunk in output:
+                print(str(chunk), end='')
             print()
         elif output is not None:
             print(output)