npcsh 1.0.12 → 1.0.13 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcsh/npcsh.py CHANGED
@@ -10,10 +10,6 @@ import importlib.metadata
 import textwrap
 from typing import Optional, List, Dict, Any, Tuple, Union
 from dataclasses import dataclass, field
-try:
-    from inspect import isgenerator
-except:
-    pass
 import platform
 try:
     from termcolor import colored
@@ -30,7 +26,6 @@ import sqlite3
 import copy
 import yaml

-# Local Application Imports
 from npcsh._state import (
     setup_npcsh_config,
     initial_state,
@@ -41,7 +36,9 @@ from npcsh._state import (
     interactive_commands,
     BASH_COMMANDS,
     start_interactive_session,
-    validate_bash_command
+    validate_bash_command,
+    normalize_and_expand_flags,
+
 )

 from npcpy.npc_sysenv import (
@@ -49,6 +46,7 @@ from npcpy.npc_sysenv import (
     render_markdown,
     get_locally_available_models,
     get_model_and_provider,
+    lookup_provider
 )
 from npcsh.routes import router
 from npcpy.data.image import capture_screenshot
@@ -215,7 +213,7 @@ def get_slash_commands(state: ShellState) -> List[str]:
     completion_logger.debug(f"NPC commands: {npc_cmds}")

     # Mode switching commands
-    mode_cmds = ['/cmd', '/agent', '/chat', '/ride']
+    mode_cmds = ['/cmd', '/agent', '/chat']
     commands.extend(mode_cmds)
     completion_logger.debug(f"Mode commands: {mode_cmds}")

@@ -571,13 +569,107 @@ def handle_bash_command(
     except PermissionError:
         return False, f"Permission denied: {cmd_str}"

+def _try_convert_type(value: str) -> Union[str, int, float, bool]:
+    """Helper to convert string values to appropriate types."""
+    if value.lower() in ['true', 'yes']:
+        return True
+    if value.lower() in ['false', 'no']:
+        return False
+    try:
+        return int(value)
+    except (ValueError, TypeError):
+        pass
+    try:
+        return float(value)
+    except (ValueError, TypeError):
+        pass
+    return value
+
+def parse_generic_command_flags(parts: List[str]) -> Tuple[Dict[str, Any], List[str]]:
+    """
+    Parses a list of command parts into a dictionary of keyword arguments and a list of positional arguments.
+    Handles: -f val, --flag val, --flag=val, flag=val, --boolean-flag
+    """
+    parsed_kwargs = {}
+    positional_args = []
+    i = 0
+    while i < len(parts):
+        part = parts[i]
+
+        if part.startswith('--'):
+            key_part = part[2:]
+            if '=' in key_part:
+                key, value = key_part.split('=', 1)
+                parsed_kwargs[key] = _try_convert_type(value)
+            else:
+                # Look ahead for a value
+                if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                    parsed_kwargs[key_part] = _try_convert_type(parts[i + 1])
+                    i += 1  # Consume the value
+                else:
+                    parsed_kwargs[key_part] = True  # Boolean flag
+
+        elif part.startswith('-'):
+            key = part[1:]
+            # Look ahead for a value
+            if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                parsed_kwargs[key] = _try_convert_type(parts[i + 1])
+                i += 1  # Consume the value
+            else:
+                parsed_kwargs[key] = True  # Boolean flag
+
+        elif '=' in part and not part.startswith('-'):
+            key, value = part.split('=', 1)
+            parsed_kwargs[key] = _try_convert_type(value)
+
+        else:
+            positional_args.append(part)
+
+        i += 1
+
+    return parsed_kwargs, positional_args
+
+
+def should_skip_kg_processing(user_input: str, assistant_output: str) -> bool:
+    """Determine if this interaction is too trivial for KG processing"""
+
+    # Skip if user input is too short or trivial
+    trivial_inputs = {
+        '/sq', '/exit', '/quit', 'exit', 'quit', 'hey', 'hi', 'hello',
+        'fwah!', 'test', 'ping', 'ok', 'thanks', 'ty'
+    }
+
+    if user_input.lower().strip() in trivial_inputs:
+        return True
+
+    # Skip if user input is very short (less than 10 chars)
+    if len(user_input.strip()) < 10:
+        return True
+
+    # Skip simple bash commands
+    simple_bash = {'ls', 'pwd', 'cd', 'mkdir', 'touch', 'rm', 'mv', 'cp'}
+    first_word = user_input.strip().split()[0] if user_input.strip() else ""
+    if first_word in simple_bash:
+        return True
+
+    # Skip if assistant output is very short (less than 20 chars)
+    if len(assistant_output.strip()) < 20:
+        return True
+
+    # Skip if it's just a mode exit message
+    if "exiting" in assistant_output.lower() or "exited" in assistant_output.lower():
+        return True
+
+    return False
+
+
+
 def execute_slash_command(command: str, stdin_input: Optional[str], state: ShellState, stream: bool) -> Tuple[ShellState, Any]:
     """Executes slash commands using the router or checking NPC/Team jinxs."""
-    command_parts = command.split()
-    command_name = command_parts[0].lstrip('/')
-
+    all_command_parts = shlex.split(command)
+    command_name = all_command_parts[0].lstrip('/')
     if command_name in ['n', 'npc']:
-        npc_to_switch_to = command_parts[1] if len(command_parts) > 1 else None
+        npc_to_switch_to = all_command_parts[1] if len(all_command_parts) > 1 else None
         if npc_to_switch_to and state.team and npc_to_switch_to in state.team.npcs:
             state.npc = state.team.npcs[npc_to_switch_to]
             return state, f"Switched to NPC: {npc_to_switch_to}"
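The added helpers centralize flag handling for slash commands: parse_generic_command_flags splits raw command parts into typed keyword arguments and positionals, and _try_convert_type coerces 'true'/'false', integers, and floats. A minimal sketch of the expected behavior (the import path and the /sample command with its values are illustrative assumptions, not taken from the diff):

```python
# Illustrative only: exercises the flag-parsing rules added in this release.
import shlex
from npcsh.npcsh import parse_generic_command_flags  # assumed import path

parts = shlex.split("/sample notes.md --model=llama3.2 -t 0.2 verbose=true extra")
kwargs, positional = parse_generic_command_flags(parts[1:])

assert kwargs == {'model': 'llama3.2', 't': 0.2, 'verbose': True}
assert positional == ['notes.md', 'extra']
```

Note that a flag only consumes the following token as its value when that token does not itself begin with a dash, so a negative number passed after a short flag would be read as a separate boolean flag rather than as the value.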
@@ -585,38 +677,75 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
             available_npcs = list(state.team.npcs.keys()) if state.team else []
             return state, colored(f"NPC '{npc_to_switch_to}' not found. Available NPCs: {', '.join(available_npcs)}", "red")
     handler = router.get_route(command_name)
-    #print(handler)
     if handler:
-        # Prepare kwargs for the handler
+        parsed_flags, positional_args = parse_generic_command_flags(all_command_parts[1:])
+
+        normalized_flags = normalize_and_expand_flags(parsed_flags)
+
         handler_kwargs = {
             'stream': stream,
-            'npc': state.npc,
             'team': state.team,
             'messages': state.messages,
-            'model': state.chat_model,
-            'provider': state.chat_provider,
             'api_url': state.api_url,
             'api_key': state.api_key,
+            'stdin_input': stdin_input,
+            'positional_args': positional_args,
+            'plonk_context': state.team.shared_context.get('PLONK_CONTEXT') if state.team and hasattr(state.team, 'shared_context') else None,
+
+            # Default chat model/provider
+            'model': state.npc.model if isinstance(state.npc, NPC) and state.npc.model else state.chat_model,
+            'provider': state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else state.chat_provider,
+            'npc': state.npc,
+
+            # All other specific defaults
+            'sprovider': state.search_provider,
+            'emodel': state.embedding_model,
+            'eprovider': state.embedding_provider,
+            'igmodel': state.image_gen_model,
+            'igprovider': state.image_gen_provider,
+            'vgmodel': state.video_gen_model,
+            'vgprovider':state.video_gen_provider,
+            'vmodel': state.vision_model,
+            'vprovider': state.vision_provider,
+            'rmodel': state.reasoning_model,
+            'rprovider': state.reasoning_provider,
         }
-        #print(handler_kwargs, command)
-        if stdin_input is not None:
-            handler_kwargs['stdin_input'] = stdin_input
-
+
+        if len(normalized_flags)>0:
+            kwarg_part = 'with kwargs: \n -' + '\n -'.join(f'{key}={item}' for key, item in normalized_flags.items())
+        else:
+            kwarg_part = ''
+
+        # 4. Merge the clean, normalized flags. This will correctly overwrite defaults.
+        render_markdown(f'- Calling {command_name} handler {kwarg_part} ')
+        if 'model' in normalized_flags and 'provider' not in normalized_flags:
+            # Call your existing, centralized lookup_provider function
+            inferred_provider = lookup_provider(normalized_flags['model'])
+            if inferred_provider:
+                # Update the provider that will be used for this command.
+                handler_kwargs['provider'] = inferred_provider
+                print(colored(f"Info: Inferred provider '{inferred_provider}' for model '{normalized_flags['model']}'.", "cyan"))
+        if 'provider' in normalized_flags and 'model' not in normalized_flags:
+            # loop up mhandler_kwargs model's provider
+            current_provider = lookup_provider(handler_kwargs['model'])
+            if current_provider != normalized_flags['provider']:
+                print(f'Please specify a model for the provider: {normalized_flags['provider']}')
+        handler_kwargs.update(normalized_flags)
+
+
        try:
-            result_dict = handler(command, **handler_kwargs)
-
+            result_dict = handler(command=command, **handler_kwargs)
+            # add the output model and provider for the print_and_process_stream downstream processing
            if isinstance(result_dict, dict):
                state.messages = result_dict.get("messages", state.messages)
                return state, result_dict
            else:
                return state, result_dict
-
        except Exception as e:
            import traceback
            print(f"Error executing slash command '{command_name}':", file=sys.stderr)
            traceback.print_exc()
            return state, colored(f"Error executing slash command '{command_name}': {e}", "red")
-
    active_npc = state.npc if isinstance(state.npc, NPC) else None
    jinx_to_execute = None
    executor = None
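Model and provider resolution for slash-command handlers now follows a fixed precedence: an explicit flag wins, then the active NPC's own model/provider, then the global chat defaults, with lookup_provider filling in a provider when only a model is named. A minimal sketch of that precedence (the resolve helper and sample model names are illustrative assumptions, not part of the package):

```python
# Illustrative sketch of the precedence used when building handler_kwargs.
def resolve(flags, npc_model, npc_provider, chat_model, chat_provider, lookup_provider):
    # Defaults: NPC-level settings first, then the global chat model/provider.
    model = flags.get('model') or npc_model or chat_model
    provider = flags.get('provider') or npc_provider or chat_provider
    # A model passed without a provider gets its provider inferred.
    if 'model' in flags and 'provider' not in flags:
        inferred = lookup_provider(flags['model'])
        if inferred:
            provider = inferred
    return model, provider

model, provider = resolve(
    flags={'model': 'llama3.2'},
    npc_model=None, npc_provider=None,
    chat_model='gpt-4o-mini', chat_provider='openai',
    lookup_provider=lambda m: 'ollama' if m.startswith('llama') else None,
)
assert (model, provider) == ('llama3.2', 'ollama')
```

The flag overrides the chat default, and the provider is inferred from the model name; a provider flag without a model only produces a prompt to pick a matching model.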
@@ -650,6 +779,7 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell

    return state, colored(f"Unknown slash command or jinx: {command_name}", "red")

+
 def process_pipeline_command(
     cmd_segment: str,
     stdin_input: Optional[str],
@@ -662,6 +792,7 @@ def process_pipeline_command(

    available_models_all = get_locally_available_models(state.current_path)
    available_models_all_list = [item for key, item in available_models_all.items()]
+
    model_override, provider_override, cmd_cleaned = get_model_and_provider(
        cmd_segment, available_models_all_list
    )
@@ -669,14 +800,11 @@ def process_pipeline_command(
    if not cmd_to_process:
        return state, stdin_input

-    # --- Corrected Model Resolution ---
-    # Priority: 1. Inline Override, 2. NPC Model, 3. Global Model
    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None

    exec_model = model_override or npc_model or state.chat_model
    exec_provider = provider_override or npc_provider or state.chat_provider
-    # --- End of Correction ---

    if cmd_to_process.startswith("/"):
        return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
@@ -685,23 +813,25 @@ def process_pipeline_command(
    if not cmd_parts:
        return state, stdin_input

-    if validate_bash_command(cmd_parts):
-        command_name = cmd_parts[0]
-        if command_name in interactive_commands:
-            return handle_interactive_command(cmd_parts, state)
-        if command_name == "cd":
-            return handle_cd_command(cmd_parts, state)
+    command_name = cmd_parts[0]
+
+    if command_name == "cd":
+        return handle_cd_command(cmd_parts, state)
+
+    if command_name in interactive_commands:
+        return handle_interactive_command(cmd_parts, state)

+    if validate_bash_command(cmd_parts):
        success, result = handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
        if success:
            return state, result
        else:
-            print(colored(f"Bash command failed. Asking LLM for a fix: {result}", "yellow"), file=sys.stderr)
+            print(colored(f"Bash command failed: {result}. Asking LLM for a fix...", "yellow"), file=sys.stderr)
            fixer_prompt = f"The command '{cmd_to_process}' failed with the error: '{result}'. Provide the correct command."
            response = execute_llm_command(
                fixer_prompt,
-                model=exec_model, # Uses corrected model
-                provider=exec_provider, # Uses corrected provider
+                model=exec_model,
+                provider=exec_provider,
                npc=state.npc,
                stream=stream_final,
                messages=state.messages
@@ -717,8 +847,8 @@ def process_pipeline_command(

    llm_result = check_llm_command(
        full_llm_cmd,
-        model=exec_model, # Uses corrected model
-        provider=exec_provider, # Uses corrected provider
+        model=exec_model,
+        provider=exec_provider,
        api_url=state.api_url,
        api_key=state.api_key,
        npc=state.npc,
@@ -727,17 +857,15 @@ def process_pipeline_command(
        images=state.attachments,
        stream=stream_final,
        context=info,
-
    )
    if isinstance(llm_result, dict):
        state.messages = llm_result.get("messages", state.messages)
        output = llm_result.get("output")
        return state, output
    else:
-        return state, llm_result
-
+        return state, llm_result
 def check_mode_switch(command:str , state: ShellState):
-    if command in ['/cmd', '/agent', '/chat', '/ride']:
+    if command in ['/cmd', '/agent', '/chat',]:
        state.current_mode = command[1:]
        return True, state

@@ -778,26 +906,25 @@ def execute_command(

            if is_last_command:
                final_output = output # Capture the output of the last command
-
            if isinstance(output, str):
                stdin_for_next = output
-            elif isgenerator(output):
-                if not stream_this_segment: # If intermediate output is a stream, consume for piping
-                    full_stream_output = "".join(map(str, output))
+            elif not isinstance(output, str):
+                try:
+                    full_stream_output = print_and_process_stream_with_markdown(output,
+                                                                                state.npc.model,
+                                                                                state.npc.provider)
                    stdin_for_next = full_stream_output
                    if is_last_command:
                        final_output = full_stream_output
-                else: # Final output is a stream, don't consume, can't pipe
-                    stdin_for_next = None
-                    final_output = output
-            elif output is not None: # Try converting other types to string
-                try:
-                    stdin_for_next = str(output)
-                except Exception:
-                    print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
-                    stdin_for_next = None
-            else: # Output was None
-                stdin_for_next = None
+                except:
+                    if output is not None: # Try converting other types to string
+                        try:
+                            stdin_for_next = str(output)
+                        except Exception:
+                            print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
+                            stdin_for_next = None
+                    else: # Output was None
+                        stdin_for_next = None


@@ -808,7 +935,7 @@ def execute_command(
            return current_state, error_msg

    # Store embeddings using the final state
-    if final_output is not None and not (isgenerator(final_output) and current_state.stream_output):
+    if final_output is not None and isinstance(final_output,str):
        store_command_embeddings(original_command_for_embedding, final_output, current_state)

    # Return the final state and the final output
@@ -866,6 +993,9 @@ def execute_command(
        state.messages = response['messages']
        return state, response['response']

+    """
+    # to be replaced with a standalone corca mode
+
    elif state.current_mode == 'ride':
        # Allow bash commands in /ride mode
        cmd_parts = parse_command_safely(command)
@@ -896,380 +1026,9 @@ def execute_command(

        # Otherwise, run the agentic ride loop
        return agentic_ride_loop(command, state)
-@dataclass
-class RideState:
-    """Lightweight state tracking for /ride mode"""
-    todos: List[Dict[str, Any]] = field(default_factory=list)
-    constraints: List[str] = field(default_factory=list)
-    facts: List[str] = field(default_factory=list)
-    mistakes: List[str] = field(default_factory=list)
-    successes: List[str] = field(default_factory=list)
-    current_todo_index: int = 0
-    current_subtodo_index: int = 0
-
-    def get_context_summary(self) -> str:
-        """Generate lightweight context for LLM prompts"""
-        context = []
-        if self.facts:
-            context.append(f"Facts: {'; '.join(self.facts[:5])}") # Limit to 5 most recent
-        if self.mistakes:
-            context.append(f"Recent mistakes: {'; '.join(self.mistakes[-3:])}")
-        if self.successes:
-            context.append(f"Recent successes: {'; '.join(self.successes[-3:])}")
-        return "\n".join(context)
-
-def interactive_edit_list(items: List[str], item_type: str) -> List[str]:
-    """Interactive REPL for editing lists of items with regeneration options"""
-    while True:
-        print(f"\nCurrent {item_type}:")
-        for i, item in enumerate(items, 1):
-            print(f"{i}. {item}")
-
-        choice = input(f"\nEdit {item_type} (e<num> to edit, d<num> to delete, a to add, r to regenerate, c to add context, ok to continue): ").strip()
-
-        if choice.lower() == 'ok':
-            break
-        elif choice.lower() == 'r':
-            print("Regenerating list...")
-            return "REGENERATE" # Special signal to regenerate
-        elif choice.lower() == 'c':
-            additional_context = input("Add more context: ").strip()
-            if additional_context:
-                return {"ADD_CONTEXT": additional_context, "items": items}
-        elif choice.lower() == 'a':
-            new_item = input(f"Enter new {item_type[:-1]}: ").strip()
-            if new_item:
-                items.append(new_item)
-        elif choice.lower().startswith('e'):
-            try:
-                idx = int(choice[1:]) - 1
-                if 0 <= idx < len(items):
-                    print(f"Current: {items[idx]}")
-                    new_item = input("New version: ").strip()
-                    if new_item:
-                        items[idx] = new_item
-            except ValueError:
-                print("Invalid format. Use e<number>")
-        elif choice.lower().startswith('d'):
-            try:
-                idx = int(choice[1:]) - 1
-                if 0 <= idx < len(items):
-                    items.pop(idx)
-            except ValueError:
-                print("Invalid format. Use d<number>")
-        else:
-            print("Invalid choice. Use: e<num>, d<num>, a, r (regenerate), c (add context), or ok")
-
-    return items
-def generate_todos(user_goal: str, state: ShellState, additional_context: str = "") -> List[Dict[str, Any]]:
-    """Generate high-level todos for the user's goal"""
-    path_cmd = 'The current working directory is: ' + state.current_path
-    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
-    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
-    info = path_cmd + '\n' + ls_files + '\n' + platform_info
-
-
-
-    high_level_planning_instruction = """
-    You are a high-level project planner. When a user asks to work on a file or code,
-    structure your plan using a simple, high-level software development lifecycle:
-    1. First, understand the current state (e.g., read the relevant file).
-    2. Second, make the required changes based on the user's goal.
-    3. Third, verify the changes work as intended (e.g., test the code).
-    Your generated todos should reflect this high-level thinking.
-
-
-
-    """
-
-    prompt = f"""
-    {high_level_planning_instruction}
-
-    User goal: {user_goal}
-
-    {additional_context}
-
-    Generate a list of 3 todos to accomplish this goal. Use specific actionable language based on the user request.
-    Do not make assumptions about user needs.
-    Every todo must be directly sourced from the user's request.
-    If users request specific files to be incorporated, you MUST include the full path to the file in the todo.
-    Here is some relevant information for the current folder and working directory that may be relevant:
-    {info}
-
-    For example, if the user says "I need to add a new function to calculate the average of a list of numbers my research.py script" and the current working directory is /home/user/projects and one
-    of the available files in the current directory is /home/user/projects/research.py then one of the todos should be:
-    - "Add a new function to /home/user/projects/research.py to calculate the average of a list of numbers"
-    Do not truncate paths. Do not additional paths. Use them exactly as they are provided here.
-
-    Each todo should be:
-    - Specific and actionable
-    - Independent where possible
-    - Focused on a single major component
-
-    Remember, it is critical to provide as much relevant information as possible. Even if the user only refers to a file or something by a relative path, it is
-    critical for operation that you provide the full path to the file in the todo item.
-
-    Return JSON with format:
-    {{
-        "todos": [
-            todo1, todo2, todo3,
-        ]
-    }}
    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    todos_data = response.get("response", {}).get("todos", [])
-    return todos_data


-def generate_constraints(todos: List[Dict[str, Any]], user_goal: str, state: ShellState) -> List[str]:
-    """Generate constraints and requirements that define relationships between todos"""
-    prompt = f"""
-    User goal: {user_goal}
-
-    Todos to accomplish:
-    {chr(10).join([f"- {todo}" for todo in todos])}
-
-    Based ONLY on what the user explicitly stated in their goal, identify any constraints or requirements they mentioned.
-    Do NOT invent new constraints. Only extract constraints that are directly stated or clearly implied by the user's request.
-
-    Examples of valid constraints:
-    - If user says "without breaking existing functionality" -> "Maintain existing functionality"
-    - If user says "must be fast" -> "Performance must be optimized"
-    - If user says "should integrate with X" -> "Must integrate with X"
-
-    If the user didn't specify any constraints, return an empty list.
-
-    Return JSON with format:
-    {{
-        "constraints": ["constraint 1", "constraint 2", ...]
-    }}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    constraints_data = response.get("response", {})
-
-    if isinstance(constraints_data, dict):
-        constraints = constraints_data.get("constraints", [])
-        # Make sure we're getting strings, not dicts
-        cleaned_constraints = []
-        for c in constraints:
-            if isinstance(c, str):
-                cleaned_constraints.append(c)
-        return cleaned_constraints
-    else:
-        return []
-def should_break_down_todo(todo, state: ShellState) -> bool:
-    """Ask LLM if a todo needs breakdown, then ask user for confirmation"""
-    prompt = f"""
-    Todo: {todo}
-
-
-    Does this todo need to be broken down into smaller, more atomic components?
-    Consider:
-    - Is it complex enough to warrant breakdown?
-    - Would breaking it down make execution clearer?
-    - Are there multiple distinct steps involved?
-
-    Return JSON: {{"should_break_down": true/false, "reason": "explanation"}}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    result = response.get("response", {})
-    llm_suggests = result.get("should_break_down", False)
-    reason = result.get("reason", "No reason provided")
-
-    if llm_suggests:
-        print(f"\nLLM suggests breaking down: '{todo}'")
-        print(f"Reason: {reason}")
-        user_choice = input("Break it down? [y/N]: ").strip().lower()
-        return user_choice in ['y', 'yes']
-
-    return False
-
-def generate_subtodos(todo, state: ShellState) -> List[Dict[str, Any]]:
-    """Generate atomic subtodos for a complex todo"""
-    prompt = f"""
-    Parent todo: {todo}
-
-    Break this down into atomic, executable subtodos. Each subtodo should be:
-    - A single, concrete action
-    - Executable in one step
-    - Clear and unambiguous
-
-    Return JSON with format:
-    {{
-        "subtodos": [
-            "subtodo description",
-            ...
-        ]
-    }}
-    """
-
-    response = get_llm_response(
-        prompt,
-        model=state.chat_model,
-        provider=state.chat_provider,
-        npc=state.npc,
-        format="json"
-    )
-
-    return response.get("response", {}).get("subtodos", [])
-def execute_todo_item(todo: Dict[str, Any], ride_state: RideState, shell_state: ShellState) -> bool:
-    """Execute a single todo item using the existing jinx system"""
-    path_cmd = 'The current working directory is: ' + shell_state.current_path
-    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(shell_state.current_path, f) for f in os.listdir(shell_state.current_path)]) if os.path.exists(shell_state.current_path) else 'No files found in the current directory.'
-    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
-    info = path_cmd + '\n' + ls_files + '\n' + platform_info
-
-    command = f"""
-
-    General information:
-    {info}
-
-    Execute this todo: {todo}
-
-    Constraints to follow:
-    {chr(10).join([f"- {c}" for c in ride_state.constraints])}
-    """
-
-    print(f"\nExecuting: {todo}")
-
-
-    result = check_llm_command(
-        command,
-        model=shell_state.chat_model,
-        provider=shell_state.chat_provider,
-        npc=shell_state.npc,
-        team=shell_state.team,
-        messages=[],
-        stream=shell_state.stream_output,
-
-    )
-
-    output_payload = result.get("output", "")
-    output_str = ""
-
-
-    if isgenerator(output_payload):
-        output_str = print_and_process_stream_with_markdown(output_payload, shell_state.chat_model, shell_state.chat_provider)
-    elif isinstance(output_payload, dict):
-        output_str = output_payload.get('output', str(output_payload))
-        if 'output' in output_str:
-            output_str = output_payload['output']
-        elif 'response' in output_str:
-            output_str = output_payload['response']
-        render_markdown(output_str)
-    elif output_payload:
-        output_str = str(output_payload)
-        render_markdown(output_str)
-
-    user_feedback = input(f"\nTodo completed successfully? [y/N/notes]: ").strip()
-
-    if user_feedback.lower() in ['y', 'yes']:
-        return True, output_str
-    elif user_feedback.lower() in ['n', 'no']:
-        mistake = input("What went wrong? ").strip()
-        ride_state.mistakes.append(f"Failed {todo}: {mistake}")
-        return False, output_str
-    else:
-        ride_state.facts.append(f"Re: {todo}: {user_feedback}")
-        success = input("Mark as completed? [y/N]: ").strip().lower() in ['y', 'yes']
-        return success, output_str
-
-def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
-    """
-    New /ride mode: hierarchical planning with human-in-the-loop control
-    """
-    ride_state = RideState()
-
-    # 1. Generate high-level todos
-    print("🚀 Generating high-level todos...")
-    todos = generate_todos(user_goal, state)
-
-    # 2. User reviews/edits todos
-    print("\n📋 Review and edit todos:")
-    todo_descriptions = [todo for todo in todos]
-    edited_descriptions = interactive_edit_list(todo_descriptions, "todos")
-
-
-    ride_state.todos = edited_descriptions
-
-    # 3. Generate constraints
-    print("\n🔒 Generating constraints...")
-    constraints = generate_constraints(edited_descriptions, user_goal, state)
-
-    # 4. User reviews/edits constraints
-    print("\n📐 Review and edit constraints:")
-    edited_constraints = interactive_edit_list(constraints, "constraints")
-    ride_state.constraints = edited_constraints
-
-    # 5. Execution loop
-    print("\n⚡ Starting execution...")
-
-    for i, todo in enumerate(edited_descriptions):
-        print(f"\n--- Todo {i+1}/{len(todos)}: {todo} ---")
-
-        def attempt_execution(current_todo):
-            # This inner function handles the execution and retry logic
-            success, output_str = execute_todo_item(current_todo, ride_state, state)
-            if not success:
-                retry = input("Retry this todo? [y/N]: ").strip().lower()
-                if retry in ['y', 'yes']:
-                    success, output_str = execute_todo_item(current_todo, ride_state, state)
-            return success, output_str
-
-        if should_break_down_todo(todo, state):
-            print("Breaking down todo...")
-            subtodos = generate_subtodos(todo, state)
-            subtodo_descriptions = [st for st in subtodos]
-            edited_subtodos = interactive_edit_list(subtodo_descriptions, "subtodos")
-
-            for j, subtodo_desc in enumerate(edited_subtodos):
-                subtodo = {"description": subtodo_desc, "type": "atomic"}
-                success, output = attempt_execution(subtodo)
-                if success:
-                    ride_state.successes.append({"description": subtodo_desc, "output": output})
-                else:
-                    print("Subtodo failed. Continuing to next...")
-        else:
-            success, output = attempt_execution(todo)
-            if success:
-                ride_state.successes.append({"description": todo, "output": output})
-    # 6. Final summary
-    print("\n🎯 Execution Summary:")
-    print(f"Successes: {len(ride_state.successes)}")
-    print(f"Mistakes: {len(ride_state.mistakes)}")
-    print(f"Facts learned: {len(ride_state.facts)}")
-
-    return state, {
-        "todos_completed": len(ride_state.successes),
-        "ride_state": ride_state,
-        "final_context": ride_state.get_context_summary()
-    }
-# --- Main Application Logic ---
-
 def check_deprecation_warnings():
    if os.getenv("NPCSH_MODEL"):
        cprint(
@@ -1470,20 +1229,24 @@ def process_result(

    final_output_str = None
    output_content = output.get('output') if isinstance(output, dict) else output
-
-    if result_state.stream_output and isgenerator(output_content):
-        final_output_str = print_and_process_stream_with_markdown(output_content, active_npc.model, active_npc.provider)
+    model_for_stream = output.get('model', active_npc.model) if isinstance(output, dict) else active_npc.model
+    provider_for_stream = output.get('provider', active_npc.provider) if isinstance(output, dict) else active_npc.provider
+
+    print('\n')
+    if user_input =='/help':
+        render_markdown(output.get('output'))
+    elif result_state.stream_output:
+
+
+        final_output_str = print_and_process_stream_with_markdown(output_content, model_for_stream, provider_for_stream)
    elif output_content is not None:
        final_output_str = str(output_content)
        render_markdown(final_output_str)

-    # --- Part 2: Process Output and Evolve Knowledge ---
    if final_output_str:
-        # Append assistant message to state for context continuity
+
        if result_state.messages and (not result_state.messages or result_state.messages[-1].get("role") != "assistant"):
            result_state.messages.append({"role": "assistant", "content": final_output_str})
-
-        # Save assistant message to the database
        save_conversation_message(
            command_history,
            result_state.conversation_id,
@@ -1496,18 +1259,30 @@ def process_result(
            team=team_name,
        )

-        # --- Hierarchical Knowledge Graph Evolution ---
        conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
        conn = command_history.conn

        try:
+            if not should_skip_kg_processing(user_input, final_output_str):
+
+                npc_kg = load_kg_from_db(conn, team_name, npc_name, result_state.current_path)
+                evolved_npc_kg, _ = kg_evolve_incremental(
+                    existing_kg=npc_kg,
+                    new_content_text=conversation_turn_text,
+                    model=active_npc.model,
+                    provider=active_npc.provider,
+                    get_concepts=True,
+                    link_concepts_facts = False,
+                    link_concepts_concepts = False,
+                    link_facts_facts = False,

-            npc_kg = load_kg_from_db(conn, team_name, npc_name, "__npc_global__")
-            evolved_npc_kg, _ = kg_evolve_incremental(
-                existing_kg=npc_kg, new_content_text=conversation_turn_text,
-                model=active_npc.model, provider=active_npc.provider
-            )
-            save_kg_to_db(conn, evolved_npc_kg, team_name, npc_name, result_state.current_path)
+
+                )
+                save_kg_to_db(conn,
+                              evolved_npc_kg,
+                              team_name,
+                              npc_name,
+                              result_state.current_path)
        except Exception as e:
            print(colored(f"Error during real-time KG evolution: {e}", "red"))

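Per-turn knowledge-graph evolution is now gated by should_skip_kg_processing, so trivial exchanges never trigger an LLM pass, and the incremental call extracts concepts while deferring fact/concept linking to the periodic integration step later in run_repl. A rough check of the gate's behavior (the import path and the sample strings are illustrative assumptions):

```python
# Illustrative only: exercises the triviality gate added in this release.
from npcsh.npcsh import should_skip_kg_processing  # assumed import path

assert should_skip_kg_processing("ls", "file_a.txt  file_b.txt")        # short, simple bash command
assert should_skip_kg_processing("thanks", "You're welcome!")            # trivial input
assert should_skip_kg_processing("switch me back to the chat mode",
                                 "Exited ride mode and returned to chat.")  # mode-exit message
assert not should_skip_kg_processing(
    "Refactor the flag parser in /home/user/proj/cli.py to accept --key=value pairs",
    "Updated parse_generic_command_flags to split each token on the first '=' and keep the rest as positionals.",
)
print("gate behaves as expected")
```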
@@ -1516,10 +1291,11 @@ def process_result(
    if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
        print(colored("\nChecking for potential team improvements...", "cyan"))
        try:
-            summary = breathe(messages=result_state.messages[-20:], npc=active_npc)
-            key_facts = summary.get('output', {}).get('facts', [])
+            summary = breathe(messages=result_state.messages[-20:],
+                              npc=active_npc)
+            characterization = summary.get('output')

-            if key_facts and result_state.team:
+            if characterization and result_state.team:
                team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
                ctx_data = {}
                if os.path.exists(team_ctx_path):
@@ -1527,13 +1303,15 @@ def process_result(
                        ctx_data = yaml.safe_load(f) or {}
                current_context = ctx_data.get('context', '')

-                prompt = f"""Based on these key topics: {key_facts},
+                prompt = f"""Based on this characterization: {characterization},
+
                suggest changes (additions, deletions, edits) to the team's context.
                Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.

                Current Context: "{current_context}".

-                Respond with JSON: {{"suggestion": "Your sentence."}}"""
+                Respond with JSON: {{"suggestion": "Your sentence."
+                }}"""
                response = get_llm_response(prompt, npc=active_npc, format="json")
                suggestion = response.get("response", {}).get("suggestion")

@@ -1560,7 +1338,7 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
    print_welcome_message()


-    render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd, /chat, or /ride to switch to other modes')
+    render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd, or /chat to switch to other modes')
    render_markdown(f'- To switch to a different NPC, type /npc <npc_name> or /n <npc_name> to switch to that NPC.')
    render_markdown('\n- Here are the current NPCs available in your team: ' + ', '.join([npc_name for npc_name in state.team.npcs.keys()]))

@@ -1613,7 +1391,12 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
                existing_kg=current_kg,
                new_content_text=full_text,
                model=integrator_npc.model,
-                provider=integrator_npc.provider
+                provider=integrator_npc.provider,
+                get_concepts=True,
+                link_concepts_facts = True,
+                link_concepts_concepts = True,
+                link_facts_facts = True,
+
            )

            # Save the updated KG back to the database under the same exact scope
@@ -1675,7 +1458,9 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
            session_scopes.add((team_name, npc_name, state.current_path))

            state, output = execute_command(user_input, state)
-            process_result(user_input, state, output, command_history)
+            process_result(user_input, state,
+                           output,
+                           command_history)

        except KeyboardInterrupt:
            if is_windows:
@@ -1702,8 +1487,6 @@ def main() -> None:

    initial_state.npc = default_npc
    initial_state.team = team
-    #import pdb
-    #pdb.set_trace()

    # add a -g global command to indicate if to use the global or project, otherwise go thru normal flow

@@ -1711,8 +1494,9 @@ def main() -> None:
        state = initial_state
        state.current_path = os.getcwd()
        final_state, output = execute_command(args.command, state)
-        if final_state.stream_output and isgenerator(output):
-            for chunk in output: print(str(chunk), end='')
+        if final_state.stream_output:
+            for chunk in output:
+                print(str(chunk), end='')
            print()
        elif output is not None:
            print(output)