npcsh 1.0.12__py3-none-any.whl → 1.0.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +110 -41
- npcsh/alicanto.py +22 -7
- npcsh/npcsh.py +270 -469
- npcsh/plonk.py +300 -367
- npcsh/routes.py +369 -170
- npcsh/spool.py +162 -221
- npcsh/yap.py +115 -106
- npcsh-1.0.14.dist-info/METADATA +777 -0
- npcsh-1.0.14.dist-info/RECORD +21 -0
- npcsh-1.0.12.dist-info/METADATA +0 -596
- npcsh-1.0.12.dist-info/RECORD +0 -21
- {npcsh-1.0.12.dist-info → npcsh-1.0.14.dist-info}/WHEEL +0 -0
- {npcsh-1.0.12.dist-info → npcsh-1.0.14.dist-info}/entry_points.txt +0 -0
- {npcsh-1.0.12.dist-info → npcsh-1.0.14.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.12.dist-info → npcsh-1.0.14.dist-info}/top_level.txt +0 -0
npcsh/npcsh.py
CHANGED
@@ -10,10 +10,6 @@ import importlib.metadata
 import textwrap
 from typing import Optional, List, Dict, Any, Tuple, Union
 from dataclasses import dataclass, field
-try:
-    from inspect import isgenerator
-except:
-    pass
 import platform
 try:
     from termcolor import colored
@@ -30,7 +26,6 @@ import sqlite3
 import copy
 import yaml
 
-# Local Application Imports
 from npcsh._state import (
     setup_npcsh_config,
     initial_state,
@@ -40,8 +35,11 @@ from npcsh._state import (
     ShellState,
     interactive_commands,
     BASH_COMMANDS,
+
     start_interactive_session,
-    validate_bash_command
+    validate_bash_command,
+    normalize_and_expand_flags,
+
 )
 
 from npcpy.npc_sysenv import (
@@ -49,6 +47,7 @@ from npcpy.npc_sysenv import (
     render_markdown,
     get_locally_available_models,
     get_model_and_provider,
+    lookup_provider
 )
 from npcsh.routes import router
 from npcpy.data.image import capture_screenshot
@@ -215,7 +214,7 @@ def get_slash_commands(state: ShellState) -> List[str]:
     completion_logger.debug(f"NPC commands: {npc_cmds}")
 
     # Mode switching commands
-    mode_cmds = ['/cmd', '/agent', '/chat'
+    mode_cmds = ['/cmd', '/agent', '/chat']
     commands.extend(mode_cmds)
     completion_logger.debug(f"Mode commands: {mode_cmds}")
 
@@ -571,13 +570,107 @@ def handle_bash_command(
     except PermissionError:
         return False, f"Permission denied: {cmd_str}"
 
+def _try_convert_type(value: str) -> Union[str, int, float, bool]:
+    """Helper to convert string values to appropriate types."""
+    if value.lower() in ['true', 'yes']:
+        return True
+    if value.lower() in ['false', 'no']:
+        return False
+    try:
+        return int(value)
+    except (ValueError, TypeError):
+        pass
+    try:
+        return float(value)
+    except (ValueError, TypeError):
+        pass
+    return value
+
+def parse_generic_command_flags(parts: List[str]) -> Tuple[Dict[str, Any], List[str]]:
+    """
+    Parses a list of command parts into a dictionary of keyword arguments and a list of positional arguments.
+    Handles: -f val, --flag val, --flag=val, flag=val, --boolean-flag
+    """
+    parsed_kwargs = {}
+    positional_args = []
+    i = 0
+    while i < len(parts):
+        part = parts[i]
+
+        if part.startswith('--'):
+            key_part = part[2:]
+            if '=' in key_part:
+                key, value = key_part.split('=', 1)
+                parsed_kwargs[key] = _try_convert_type(value)
+            else:
+                # Look ahead for a value
+                if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                    parsed_kwargs[key_part] = _try_convert_type(parts[i + 1])
+                    i += 1 # Consume the value
+                else:
+                    parsed_kwargs[key_part] = True # Boolean flag
+
+        elif part.startswith('-'):
+            key = part[1:]
+            # Look ahead for a value
+            if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                parsed_kwargs[key] = _try_convert_type(parts[i + 1])
+                i += 1 # Consume the value
+            else:
+                parsed_kwargs[key] = True # Boolean flag
+
+        elif '=' in part and not part.startswith('-'):
+            key, value = part.split('=', 1)
+            parsed_kwargs[key] = _try_convert_type(value)
+
+        else:
+            positional_args.append(part)
+
+        i += 1
+
+    return parsed_kwargs, positional_args
+
+
+def should_skip_kg_processing(user_input: str, assistant_output: str) -> bool:
+    """Determine if this interaction is too trivial for KG processing"""
+
+    # Skip if user input is too short or trivial
+    trivial_inputs = {
+        '/sq', '/exit', '/quit', 'exit', 'quit', 'hey', 'hi', 'hello',
+        'fwah!', 'test', 'ping', 'ok', 'thanks', 'ty'
+    }
+
+    if user_input.lower().strip() in trivial_inputs:
+        return True
+
+    # Skip if user input is very short (less than 10 chars)
+    if len(user_input.strip()) < 10:
+        return True
+
+    # Skip simple bash commands
+    simple_bash = {'ls', 'pwd', 'cd', 'mkdir', 'touch', 'rm', 'mv', 'cp'}
+    first_word = user_input.strip().split()[0] if user_input.strip() else ""
+    if first_word in simple_bash:
+        return True
+
+    # Skip if assistant output is very short (less than 20 chars)
+    if len(assistant_output.strip()) < 20:
+        return True
+
+    # Skip if it's just a mode exit message
+    if "exiting" in assistant_output.lower() or "exited" in assistant_output.lower():
+        return True
+
+    return False
+
+
+
 def execute_slash_command(command: str, stdin_input: Optional[str], state: ShellState, stream: bool) -> Tuple[ShellState, Any]:
     """Executes slash commands using the router or checking NPC/Team jinxs."""
-
-    command_name =
-
+    all_command_parts = shlex.split(command)
+    command_name = all_command_parts[0].lstrip('/')
     if command_name in ['n', 'npc']:
-        npc_to_switch_to =
+        npc_to_switch_to = all_command_parts[1] if len(all_command_parts) > 1 else None
         if npc_to_switch_to and state.team and npc_to_switch_to in state.team.npcs:
             state.npc = state.team.npcs[npc_to_switch_to]
             return state, f"Switched to NPC: {npc_to_switch_to}"
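The new parse_generic_command_flags helper accepts --flag value, --flag=value, -f value, bare key=value pairs, and trailing boolean flags in a single pass, with _try_convert_type coercing values to bool/int/float where possible. A minimal illustration of the intended behavior (the /sample command and its flags are hypothetical; the helpers are the ones added above):

    import shlex
    from npcsh.npcsh import parse_generic_command_flags

    parts = shlex.split("/sample --model gpt-4o-mini -t 0.7 fmt=json report.txt --verbose")[1:]
    kwargs, positional = parse_generic_command_flags(parts)
    # kwargs     -> {'model': 'gpt-4o-mini', 't': 0.7, 'fmt': 'json', 'verbose': True}
    # positional -> ['report.txt']
    # Note: a flag followed by a non-flag token is treated as "flag value",
    # so boolean flags are only inferred when no value candidate follows.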
@@ -585,38 +678,75 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
         available_npcs = list(state.team.npcs.keys()) if state.team else []
         return state, colored(f"NPC '{npc_to_switch_to}' not found. Available NPCs: {', '.join(available_npcs)}", "red")
     handler = router.get_route(command_name)
-    #print(handler)
     if handler:
-
+        parsed_flags, positional_args = parse_generic_command_flags(all_command_parts[1:])
+
+        normalized_flags = normalize_and_expand_flags(parsed_flags)
+
         handler_kwargs = {
             'stream': stream,
-            'npc': state.npc,
             'team': state.team,
             'messages': state.messages,
-            'model': state.chat_model,
-            'provider': state.chat_provider,
             'api_url': state.api_url,
             'api_key': state.api_key,
+            'stdin_input': stdin_input,
+            'positional_args': positional_args,
+            'plonk_context': state.team.shared_context.get('PLONK_CONTEXT') if state.team and hasattr(state.team, 'shared_context') else None,
+
+            # Default chat model/provider
+            'model': state.npc.model if isinstance(state.npc, NPC) and state.npc.model else state.chat_model,
+            'provider': state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else state.chat_provider,
+            'npc': state.npc,
+
+            # All other specific defaults
+            'sprovider': state.search_provider,
+            'emodel': state.embedding_model,
+            'eprovider': state.embedding_provider,
+            'igmodel': state.image_gen_model,
+            'igprovider': state.image_gen_provider,
+            'vgmodel': state.video_gen_model,
+            'vgprovider':state.video_gen_provider,
+            'vmodel': state.vision_model,
+            'vprovider': state.vision_provider,
+            'rmodel': state.reasoning_model,
+            'rprovider': state.reasoning_provider,
         }
-
-        if
-
-
+
+        if len(normalized_flags)>0:
+            kwarg_part = 'with kwargs: \n -' + '\n -'.join(f'{key}={item}' for key, item in normalized_flags.items())
+        else:
+            kwarg_part = ''
+
+        # 4. Merge the clean, normalized flags. This will correctly overwrite defaults.
+        render_markdown(f'- Calling {command_name} handler {kwarg_part} ')
+        if 'model' in normalized_flags and 'provider' not in normalized_flags:
+            # Call your existing, centralized lookup_provider function
+            inferred_provider = lookup_provider(normalized_flags['model'])
+            if inferred_provider:
+                # Update the provider that will be used for this command.
+                handler_kwargs['provider'] = inferred_provider
+                print(colored(f"Info: Inferred provider '{inferred_provider}' for model '{normalized_flags['model']}'.", "cyan"))
+        if 'provider' in normalized_flags and 'model' not in normalized_flags:
+            # loop up mhandler_kwargs model's provider
+            current_provider = lookup_provider(handler_kwargs['model'])
+            if current_provider != normalized_flags['provider']:
+                print(f'Please specify a model for the provider: {normalized_flags['provider']}')
+        handler_kwargs.update(normalized_flags)
+
+
         try:
-            result_dict = handler(command, **handler_kwargs)
-
+            result_dict = handler(command=command, **handler_kwargs)
+            # add the output model and provider for the print_and_process_stream downstream processing
            if isinstance(result_dict, dict):
                 state.messages = result_dict.get("messages", state.messages)
                 return state, result_dict
             else:
                 return state, result_dict
-
         except Exception as e:
             import traceback
             print(f"Error executing slash command '{command_name}':", file=sys.stderr)
             traceback.print_exc()
             return state, colored(f"Error executing slash command '{command_name}': {e}", "red")
-
     active_npc = state.npc if isinstance(state.npc, NPC) else None
     jinx_to_execute = None
     executor = None
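Because handler_kwargs is seeded with defaults from ShellState and then updated with normalized_flags, a per-command flag always wins over the shell default while untouched keys keep their state values; the lookup_provider step exists so that a model override does not silently run against the old provider. A rough sketch of that precedence (all values hypothetical):

    handler_kwargs = {'model': 'llama3.2', 'provider': 'ollama', 'stream': True}   # state defaults
    normalized_flags = {'model': 'gpt-4o-mini'}                                    # from e.g. "/search --model gpt-4o-mini"

    # provider inference would set handler_kwargs['provider'] here, e.g. to 'openai'
    handler_kwargs.update(normalized_flags)
    # -> {'model': 'gpt-4o-mini', 'provider': 'openai', 'stream': True}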
@@ -650,6 +780,7 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
 
     return state, colored(f"Unknown slash command or jinx: {command_name}", "red")
 
+
 def process_pipeline_command(
     cmd_segment: str,
     stdin_input: Optional[str],
@@ -662,6 +793,7 @@ def process_pipeline_command(
 
     available_models_all = get_locally_available_models(state.current_path)
     available_models_all_list = [item for key, item in available_models_all.items()]
+
     model_override, provider_override, cmd_cleaned = get_model_and_provider(
         cmd_segment, available_models_all_list
     )
@@ -669,14 +801,11 @@ def process_pipeline_command(
     if not cmd_to_process:
         return state, stdin_input
 
-    # --- Corrected Model Resolution ---
-    # Priority: 1. Inline Override, 2. NPC Model, 3. Global Model
     npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
     npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
 
     exec_model = model_override or npc_model or state.chat_model
     exec_provider = provider_override or npc_provider or state.chat_provider
-    # --- End of Correction ---
 
     if cmd_to_process.startswith("/"):
         return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
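Model resolution for non-slash pipeline segments keeps a simple precedence: inline override first, then the active NPC's own model, then the global chat model (and likewise for the provider). A tiny sketch with hypothetical values:

    model_override = None          # nothing specified inline in this segment
    npc_model = 'qwen2.5'          # set on the active NPC
    chat_model = 'llama3.2'        # global ShellState default

    exec_model = model_override or npc_model or chat_model   # -> 'qwen2.5'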
@@ -685,23 +814,25 @@ def process_pipeline_command(
     if not cmd_parts:
         return state, stdin_input
 
-
-
-
-
-
-
+    command_name = cmd_parts[0]
+
+    if command_name == "cd":
+        return handle_cd_command(cmd_parts, state)
+
+    if command_name in interactive_commands:
+        return handle_interactive_command(cmd_parts, state)
 
+    if validate_bash_command(cmd_parts):
         success, result = handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
         if success:
             return state, result
         else:
-            print(colored(f"Bash command failed. Asking LLM for a fix
+            print(colored(f"Bash command failed: {result}. Asking LLM for a fix...", "yellow"), file=sys.stderr)
             fixer_prompt = f"The command '{cmd_to_process}' failed with the error: '{result}'. Provide the correct command."
             response = execute_llm_command(
                 fixer_prompt,
-                model=exec_model,
-                provider=exec_provider,
+                model=exec_model,
+                provider=exec_provider,
                 npc=state.npc,
                 stream=stream_final,
                 messages=state.messages
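When a bash segment fails, the failure text is folded into a fixer prompt and routed to the LLM instead of aborting the pipeline. A condensed sketch of that flow (the failing command and error string are hypothetical; execute_llm_command is the same helper called in the hunk above):

    success, result = handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
    if not success:
        # e.g. result == "ls: cannot access 'missng_dir': No such file or directory"
        fixer_prompt = (f"The command '{cmd_to_process}' failed with the error: "
                        f"'{result}'. Provide the correct command.")
        response = execute_llm_command(fixer_prompt, model=exec_model, provider=exec_provider,
                                       npc=state.npc, stream=stream_final, messages=state.messages)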
@@ -717,8 +848,8 @@ def process_pipeline_command(
 
     llm_result = check_llm_command(
         full_llm_cmd,
-        model=exec_model,
-        provider=exec_provider,
+        model=exec_model,
+        provider=exec_provider,
         api_url=state.api_url,
         api_key=state.api_key,
         npc=state.npc,
@@ -727,17 +858,15 @@ def process_pipeline_command(
         images=state.attachments,
         stream=stream_final,
         context=info,
-
     )
     if isinstance(llm_result, dict):
         state.messages = llm_result.get("messages", state.messages)
         output = llm_result.get("output")
         return state, output
     else:
-        return state, llm_result
-
+        return state, llm_result
 def check_mode_switch(command:str , state: ShellState):
-    if command in ['/cmd', '/agent', '/chat',
+    if command in ['/cmd', '/agent', '/chat',]:
         state.current_mode = command[1:]
         return True, state
 
@@ -764,9 +893,13 @@ def execute_command(
     active_provider = npc_provider or state.chat_provider
 
     if state.current_mode == 'agent':
+        print(len(commands), commands)
         for i, cmd_segment in enumerate(commands):
+
+            render_markdown(f'- executing command {i+1}/{len(commands)}')
             is_last_command = (i == len(commands) - 1)
-
+
+            stream_this_segment = state.stream_output and not is_last_command
 
             try:
                 current_state, output = process_pipeline_command(
@@ -777,27 +910,27 @@ def execute_command(
                 )
 
                 if is_last_command:
-
-
+                    return current_state, output
                 if isinstance(output, str):
                     stdin_for_next = output
-                elif
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                elif not isinstance(output, str):
+                    try:
+                        if stream_this_segment:
+                            full_stream_output = print_and_process_stream_with_markdown(output,
+                                                                                        state.npc.model,
+                                                                                        state.npc.provider)
+                            stdin_for_next = full_stream_output
+                            if is_last_command:
+                                final_output = full_stream_output
+                    except:
+                        if output is not None: # Try converting other types to string
+                            try:
+                                stdin_for_next = str(output)
+                            except Exception:
+                                print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
+                                stdin_for_next = None
+                        else: # Output was None
+                            stdin_for_next = None
 
 
             except Exception as pipeline_error:
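In agent mode, a non-string segment output (typically a response generator) is drained to plain text before it can be piped into the next segment as stdin. A self-contained stand-in for that behavior (drain_stream is a hypothetical local helper used only for illustration; the real code calls print_and_process_stream_with_markdown):

    from typing import Iterable

    def drain_stream(chunks: Iterable) -> str:
        # Print chunks as they arrive and return the concatenated text
        pieces = []
        for chunk in chunks:
            text = str(chunk)
            print(text, end='')
            pieces.append(text)
        return ''.join(pieces)

    stream = (word + ' ' for word in ['files', 'summarized', 'below'])   # hypothetical stream
    stdin_for_next = drain_stream(stream)                                # -> 'files summarized below '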
@@ -808,7 +941,7 @@ def execute_command(
                 return current_state, error_msg
 
         # Store embeddings using the final state
-        if final_output is not None and
+        if final_output is not None and isinstance(final_output,str):
            store_command_embeddings(original_command_for_embedding, final_output, current_state)
 
         # Return the final state and the final output
@@ -866,6 +999,9 @@ def execute_command(
             state.messages = response['messages']
             return state, response['response']
 
+    """
+    # to be replaced with a standalone corca mode
+
     elif state.current_mode == 'ride':
         # Allow bash commands in /ride mode
         cmd_parts = parse_command_safely(command)
@@ -896,379 +1032,8 @@ def execute_command(
 
         # Otherwise, run the agentic ride loop
         return agentic_ride_loop(command, state)
-@dataclass
-class RideState:
-    """Lightweight state tracking for /ride mode"""
-    todos: List[Dict[str, Any]] = field(default_factory=list)
-    constraints: List[str] = field(default_factory=list)
-    facts: List[str] = field(default_factory=list)
-    mistakes: List[str] = field(default_factory=list)
-    successes: List[str] = field(default_factory=list)
-    current_todo_index: int = 0
-    current_subtodo_index: int = 0
-
-    def get_context_summary(self) -> str:
-        """Generate lightweight context for LLM prompts"""
-        context = []
-        if self.facts:
-            context.append(f"Facts: {'; '.join(self.facts[:5])}") # Limit to 5 most recent
-        if self.mistakes:
-            context.append(f"Recent mistakes: {'; '.join(self.mistakes[-3:])}")
-        if self.successes:
-            context.append(f"Recent successes: {'; '.join(self.successes[-3:])}")
-        return "\n".join(context)
-
-def interactive_edit_list(items: List[str], item_type: str) -> List[str]:
-    """Interactive REPL for editing lists of items with regeneration options"""
-    while True:
-        print(f"\nCurrent {item_type}:")
-        for i, item in enumerate(items, 1):
-            print(f"{i}. {item}")
-
-        choice = input(f"\nEdit {item_type} (e<num> to edit, d<num> to delete, a to add, r to regenerate, c to add context, ok to continue): ").strip()
-
-        if choice.lower() == 'ok':
-            break
-        elif choice.lower() == 'r':
-            print("Regenerating list...")
-            return "REGENERATE" # Special signal to regenerate
-        elif choice.lower() == 'c':
-            additional_context = input("Add more context: ").strip()
-            if additional_context:
-                return {"ADD_CONTEXT": additional_context, "items": items}
-        elif choice.lower() == 'a':
-            new_item = input(f"Enter new {item_type[:-1]}: ").strip()
-            if new_item:
-                items.append(new_item)
-        elif choice.lower().startswith('e'):
-            try:
-                idx = int(choice[1:]) - 1
-                if 0 <= idx < len(items):
-                    print(f"Current: {items[idx]}")
-                    new_item = input("New version: ").strip()
-                    if new_item:
-                        items[idx] = new_item
-            except ValueError:
-                print("Invalid format. Use e<number>")
-        elif choice.lower().startswith('d'):
-            try:
-                idx = int(choice[1:]) - 1
-                if 0 <= idx < len(items):
-                    items.pop(idx)
-            except ValueError:
-                print("Invalid format. Use d<number>")
-        else:
-            print("Invalid choice. Use: e<num>, d<num>, a, r (regenerate), c (add context), or ok")
-
-    return items
-def generate_todos(user_goal: str, state: ShellState, additional_context: str = "") -> List[Dict[str, Any]]:
-    """Generate high-level todos for the user's goal"""
-    path_cmd = 'The current working directory is: ' + state.current_path
-    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
-    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
-    info = path_cmd + '\n' + ls_files + '\n' + platform_info
-
-
-
-    high_level_planning_instruction = """
-    You are a high-level project planner. When a user asks to work on a file or code,
-    structure your plan using a simple, high-level software development lifecycle:
-    1. First, understand the current state (e.g., read the relevant file).
-    2. Second, make the required changes based on the user's goal.
-    3. Third, verify the changes work as intended (e.g., test the code).
-    Your generated todos should reflect this high-level thinking.
-
-
-
-    """
-
-    prompt = f"""
-    {high_level_planning_instruction}
-
-    User goal: {user_goal}
-
-    {additional_context}
-
-    Generate a list of 3 todos to accomplish this goal. Use specific actionable language based on the user request.
-    Do not make assumptions about user needs.
-    Every todo must be directly sourced from the user's request.
-    If users request specific files to be incorporated, you MUST include the full path to the file in the todo.
-    Here is some relevant information for the current folder and working directory that may be relevant:
-    {info}
-
-    For example, if the user says "I need to add a new function to calculate the average of a list of numbers my research.py script" and the current working directory is /home/user/projects and one
-    of the available files in the current directory is /home/user/projects/research.py then one of the todos should be:
-    - "Add a new function to /home/user/projects/research.py to calculate the average of a list of numbers"
-    Do not truncate paths. Do not additional paths. Use them exactly as they are provided here.
-
-    Each todo should be:
-    - Specific and actionable
-    - Independent where possible
-    - Focused on a single major component
-
-    Remember, it is critical to provide as much relevant information as possible. Even if the user only refers to a file or something by a relative path, it is
-    critical for operation that you provide the full path to the file in the todo item.
-
-    Return JSON with format:
-    {{
-        "todos": [
-            todo1, todo2, todo3,
-        ]
-    }}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    todos_data = response.get("response", {}).get("todos", [])
-    return todos_data
-
-
-def generate_constraints(todos: List[Dict[str, Any]], user_goal: str, state: ShellState) -> List[str]:
-    """Generate constraints and requirements that define relationships between todos"""
-    prompt = f"""
-    User goal: {user_goal}
-
-    Todos to accomplish:
-    {chr(10).join([f"- {todo}" for todo in todos])}
-
-    Based ONLY on what the user explicitly stated in their goal, identify any constraints or requirements they mentioned.
-    Do NOT invent new constraints. Only extract constraints that are directly stated or clearly implied by the user's request.
-
-    Examples of valid constraints:
-    - If user says "without breaking existing functionality" -> "Maintain existing functionality"
-    - If user says "must be fast" -> "Performance must be optimized"
-    - If user says "should integrate with X" -> "Must integrate with X"
-
-    If the user didn't specify any constraints, return an empty list.
-
-    Return JSON with format:
-    {{
-        "constraints": ["constraint 1", "constraint 2", ...]
-    }}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    constraints_data = response.get("response", {})
-
-    if isinstance(constraints_data, dict):
-        constraints = constraints_data.get("constraints", [])
-        # Make sure we're getting strings, not dicts
-        cleaned_constraints = []
-        for c in constraints:
-            if isinstance(c, str):
-                cleaned_constraints.append(c)
-        return cleaned_constraints
-    else:
-        return []
-def should_break_down_todo(todo, state: ShellState) -> bool:
-    """Ask LLM if a todo needs breakdown, then ask user for confirmation"""
-    prompt = f"""
-    Todo: {todo}
-
-
-    Does this todo need to be broken down into smaller, more atomic components?
-    Consider:
-    - Is it complex enough to warrant breakdown?
-    - Would breaking it down make execution clearer?
-    - Are there multiple distinct steps involved?
-
-    Return JSON: {{"should_break_down": true/false, "reason": "explanation"}}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    result = response.get("response", {})
-    llm_suggests = result.get("should_break_down", False)
-    reason = result.get("reason", "No reason provided")
-
-    if llm_suggests:
-        print(f"\nLLM suggests breaking down: '{todo}'")
-        print(f"Reason: {reason}")
-        user_choice = input("Break it down? [y/N]: ").strip().lower()
-        return user_choice in ['y', 'yes']
-
-    return False
-
-def generate_subtodos(todo, state: ShellState) -> List[Dict[str, Any]]:
-    """Generate atomic subtodos for a complex todo"""
-    prompt = f"""
-    Parent todo: {todo}
-
-    Break this down into atomic, executable subtodos. Each subtodo should be:
-    - A single, concrete action
-    - Executable in one step
-    - Clear and unambiguous
-
-    Return JSON with format:
-    {{
-        "subtodos": [
-            "subtodo description",
-            ...
-        ]
-    }}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    return response.get("response", {}).get("subtodos", [])
-def execute_todo_item(todo: Dict[str, Any], ride_state: RideState, shell_state: ShellState) -> bool:
-    """Execute a single todo item using the existing jinx system"""
-    path_cmd = 'The current working directory is: ' + shell_state.current_path
-    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(shell_state.current_path, f) for f in os.listdir(shell_state.current_path)]) if os.path.exists(shell_state.current_path) else 'No files found in the current directory.'
-    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
-    info = path_cmd + '\n' + ls_files + '\n' + platform_info
-
-    command = f"""
-
-    General information:
-    {info}
-
-    Execute this todo: {todo}
-
-    Constraints to follow:
-    {chr(10).join([f"- {c}" for c in ride_state.constraints])}
     """
-
-    print(f"\nExecuting: {todo}")
-
 
-    result = check_llm_command(
-        command,
-        model=shell_state.chat_model,
-        provider=shell_state.chat_provider,
-        npc=shell_state.npc,
-        team=shell_state.team,
-        messages=[],
-        stream=shell_state.stream_output,
-
-    )
-
-    output_payload = result.get("output", "")
-    output_str = ""
-
-
-    if isgenerator(output_payload):
-        output_str = print_and_process_stream_with_markdown(output_payload, shell_state.chat_model, shell_state.chat_provider)
-    elif isinstance(output_payload, dict):
-        output_str = output_payload.get('output', str(output_payload))
-        if 'output' in output_str:
-            output_str = output_payload['output']
-        elif 'response' in output_str:
-            output_str = output_payload['response']
-        render_markdown(output_str)
-    elif output_payload:
-        output_str = str(output_payload)
-        render_markdown(output_str)
-
-    user_feedback = input(f"\nTodo completed successfully? [y/N/notes]: ").strip()
-
-    if user_feedback.lower() in ['y', 'yes']:
-        return True, output_str
-    elif user_feedback.lower() in ['n', 'no']:
-        mistake = input("What went wrong? ").strip()
-        ride_state.mistakes.append(f"Failed {todo}: {mistake}")
-        return False, output_str
-    else:
-        ride_state.facts.append(f"Re: {todo}: {user_feedback}")
-        success = input("Mark as completed? [y/N]: ").strip().lower() in ['y', 'yes']
-        return success, output_str
-
-def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
-    """
-    New /ride mode: hierarchical planning with human-in-the-loop control
-    """
-    ride_state = RideState()
-
-    # 1. Generate high-level todos
-    print("🚀 Generating high-level todos...")
-    todos = generate_todos(user_goal, state)
-
-    # 2. User reviews/edits todos
-    print("\n📋 Review and edit todos:")
-    todo_descriptions = [todo for todo in todos]
-    edited_descriptions = interactive_edit_list(todo_descriptions, "todos")
-
-
-    ride_state.todos = edited_descriptions
-
-    # 3. Generate constraints
-    print("\n🔒 Generating constraints...")
-    constraints = generate_constraints(edited_descriptions, user_goal, state)
-
-    # 4. User reviews/edits constraints
-    print("\n📐 Review and edit constraints:")
-    edited_constraints = interactive_edit_list(constraints, "constraints")
-    ride_state.constraints = edited_constraints
-
-    # 5. Execution loop
-    print("\n⚡ Starting execution...")
-
-    for i, todo in enumerate(edited_descriptions):
-        print(f"\n--- Todo {i+1}/{len(todos)}: {todo} ---")
-
-        def attempt_execution(current_todo):
-            # This inner function handles the execution and retry logic
-            success, output_str = execute_todo_item(current_todo, ride_state, state)
-            if not success:
-                retry = input("Retry this todo? [y/N]: ").strip().lower()
-                if retry in ['y', 'yes']:
-                    success, output_str = execute_todo_item(current_todo, ride_state, state)
-            return success, output_str
-
-        if should_break_down_todo(todo, state):
-            print("Breaking down todo...")
-            subtodos = generate_subtodos(todo, state)
-            subtodo_descriptions = [st for st in subtodos]
-            edited_subtodos = interactive_edit_list(subtodo_descriptions, "subtodos")
-
-            for j, subtodo_desc in enumerate(edited_subtodos):
-                subtodo = {"description": subtodo_desc, "type": "atomic"}
-                success, output = attempt_execution(subtodo)
-                if success:
-                    ride_state.successes.append({"description": subtodo_desc, "output": output})
-                else:
-                    print("Subtodo failed. Continuing to next...")
-        else:
-            success, output = attempt_execution(todo)
-            if success:
-                ride_state.successes.append({"description": todo, "output": output})
-    # 6. Final summary
-    print("\n🎯 Execution Summary:")
-    print(f"Successes: {len(ride_state.successes)}")
-    print(f"Mistakes: {len(ride_state.mistakes)}")
-    print(f"Facts learned: {len(ride_state.facts)}")
-
-    return state, {
-        "todos_completed": len(ride_state.successes),
-        "ride_state": ride_state,
-        "final_context": ride_state.get_context_summary()
-    }
-# --- Main Application Logic ---
 
 def check_deprecation_warnings():
     if os.getenv("NPCSH_MODEL"):
@@ -1305,6 +1070,13 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     command_history = CommandHistory(db_path)
 
 
+    if not is_npcsh_initialized():
+        print("Initializing NPCSH...")
+        initialize_base_npcs_if_needed(db_path)
+        print("NPCSH initialization complete. Restart or source ~/.npcshrc.")
+
+
+
     try:
         history_file = setup_readline()
         atexit.register(save_readline_history)
@@ -1470,20 +1242,24 @@ def process_result(
 
     final_output_str = None
     output_content = output.get('output') if isinstance(output, dict) else output
-
-
-
+    model_for_stream = output.get('model', active_npc.model) if isinstance(output, dict) else active_npc.model
+    provider_for_stream = output.get('provider', active_npc.provider) if isinstance(output, dict) else active_npc.provider
+
+    print('\n')
+    if user_input =='/help':
+        render_markdown(output.get('output'))
+    elif result_state.stream_output:
+
+
+        final_output_str = print_and_process_stream_with_markdown(output_content, model_for_stream, provider_for_stream)
     elif output_content is not None:
         final_output_str = str(output_content)
         render_markdown(final_output_str)
 
-    # --- Part 2: Process Output and Evolve Knowledge ---
     if final_output_str:
-
+
         if result_state.messages and (not result_state.messages or result_state.messages[-1].get("role") != "assistant"):
             result_state.messages.append({"role": "assistant", "content": final_output_str})
-
-        # Save assistant message to the database
         save_conversation_message(
             command_history,
             result_state.conversation_id,
@@ -1496,50 +1272,68 @@ def process_result(
             team=team_name,
         )
 
-        # --- Hierarchical Knowledge Graph Evolution ---
         conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
-
+        engine = command_history.engine
 
-        try:
 
-
-
-
-
-
-
-
+        if result_state.build_kg:
+            try:
+                if not should_skip_kg_processing(user_input, final_output_str):
+
+                    npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
+                    evolved_npc_kg, _ = kg_evolve_incremental(
+                        existing_kg=npc_kg,
+                        new_content_text=conversation_turn_text,
+                        model=active_npc.model,
+                        provider=active_npc.provider,
+                        get_concepts=True,
+                        link_concepts_facts = False,
+                        link_concepts_concepts = False,
+                        link_facts_facts = False,
+
+
+                    )
+                    save_kg_to_db(engine,
+                                  evolved_npc_kg,
+                                  team_name,
+                                  npc_name,
+                                  result_state.current_path)
+            except Exception as e:
+                print(colored(f"Error during real-time KG evolution: {e}", "red"))
 
     # --- Part 3: Periodic Team Context Suggestions ---
     result_state.turn_count += 1
+
     if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
         print(colored("\nChecking for potential team improvements...", "cyan"))
         try:
-            summary = breathe(messages=result_state.messages[-20:],
-
+            summary = breathe(messages=result_state.messages[-20:],
+                              npc=active_npc)
+            characterization = summary.get('output')
 
-            if
+            if characterization and result_state.team:
                 team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
                 ctx_data = {}
                 if os.path.exists(team_ctx_path):
                     with open(team_ctx_path, 'r') as f:
-
+                        ctx_data = yaml.safe_load(f) or {}
                 current_context = ctx_data.get('context', '')
 
-                prompt = f"""Based on
+                prompt = f"""Based on this characterization: {characterization},
+
 suggest changes (additions, deletions, edits) to the team's context.
 Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.
 
 Current Context: "{current_context}".
 
-Respond with JSON: {{"suggestion": "Your sentence."
+Respond with JSON: {{"suggestion": "Your sentence."
+}}"""
                 response = get_llm_response(prompt, npc=active_npc, format="json")
                 suggestion = response.get("response", {}).get("suggestion")
 
                 if suggestion:
                     new_context = (current_context + " " + suggestion).strip()
-                    print(colored("
+                    print(colored(f"{result_state.npc.name} suggests updating team context:", "yellow"))
                     print(f" - OLD: {current_context}\n + NEW: {new_context}")
                     if input("Apply? [y/N]: ").strip().lower() == 'y':
                         ctx_data['context'] = new_context
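Per-turn knowledge-graph updates are now gated on result_state.build_kg and should_skip_kg_processing, and the per-turn pass disables the cross-linking options that the end-of-session pass (further below) re-enables. A reduced sketch of the same pattern, using the call signatures as they appear in this diff (the team, NPC, and path values are hypothetical):

    turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
    if result_state.build_kg and not should_skip_kg_processing(user_input, final_output_str):
        kg = load_kg_from_db(engine, 'my_team', 'assistant_npc', '/home/user/project')
        kg, _ = kg_evolve_incremental(
            existing_kg=kg,
            new_content_text=turn_text,
            model=active_npc.model,
            provider=active_npc.provider,
            get_concepts=True,
            link_concepts_facts=False,      # cheap per-turn pass
            link_concepts_concepts=False,
            link_facts_facts=False,
        )
        save_kg_to_db(engine, kg, 'my_team', 'assistant_npc', '/home/user/project')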
@@ -1560,7 +1354,7 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
     print_welcome_message()
 
 
-    render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd,
+    render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd, or /chat to switch to other modes')
     render_markdown(f'- To switch to a different NPC, type /npc <npc_name> or /n <npc_name> to switch to that NPC.')
     render_markdown('\n- Here are the current NPCs available in your team: ' + ', '.join([npc_name for npc_name in state.team.npcs.keys()]))
 
@@ -1582,7 +1376,7 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
         print("\nGoodbye!")
         print(colored("Processing and archiving all session knowledge...", "cyan"))
 
-
+        engine = command_history.engine
         integrator_npc = NPC(name="integrator", model=current_state.chat_model, provider=current_state.chat_provider)
 
         # Process each unique scope that was active during the session
@@ -1606,18 +1400,23 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
                 continue
 
             # Load the existing KG for this specific, real scope
-            current_kg = load_kg_from_db(
+            current_kg = load_kg_from_db(engine, team_name, npc_name, path)
 
             # Evolve it with the full text from the session for this scope
             evolved_kg, _ = kg_evolve_incremental(
                 existing_kg=current_kg,
                 new_content_text=full_text,
                 model=integrator_npc.model,
-                provider=integrator_npc.provider
+                provider=integrator_npc.provider,
+                get_concepts=True,
+                link_concepts_facts = True,
+                link_concepts_concepts = True,
+                link_facts_facts = True,
+
             )
 
             # Save the updated KG back to the database under the same exact scope
-            save_kg_to_db(
+            save_kg_to_db(engine, evolved_kg, team_name, npc_name, path)
 
         except Exception as e:
             import traceback
@@ -1675,7 +1474,9 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
             session_scopes.add((team_name, npc_name, state.current_path))
 
             state, output = execute_command(user_input, state)
-            process_result(user_input, state,
+            process_result(user_input, state,
+                           output,
+                           command_history)
 
         except KeyboardInterrupt:
             if is_windows:
@@ -1702,8 +1503,7 @@ def main() -> None:
 
     initial_state.npc = default_npc
     initial_state.team = team
-
-    #pdb.set_trace()
+
 
     # add a -g global command to indicate if to use the global or project, otherwise go thru normal flow
 
@@ -1711,8 +1511,9 @@ def main() -> None:
         state = initial_state
         state.current_path = os.getcwd()
         final_state, output = execute_command(args.command, state)
-        if final_state.stream_output
-            for chunk in output:
+        if final_state.stream_output:
+            for chunk in output:
+                print(str(chunk), end='')
             print()
         elif output is not None:
             print(output)