npcsh 1.0.2__tar.gz → 1.0.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: npcsh
- Version: 1.0.2
- Summary: npcsh is a command-line toolkit for using AI agents.
+ Version: 1.0.4
+ Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
  Home-page: https://github.com/NPC-Worldwide/npcsh
  Author: Christopher Agostino
  Author-email: info@npcworldwi.de
@@ -402,7 +402,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)
 
 
  ## NPC Studio
- There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables (soon) at [our website](https://www.npcworldwi.de/npc-studio).
+ There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).
 
 
  ## Mailing List
@@ -307,7 +307,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)
 
 
  ## NPC Studio
- There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables (soon) at [our website](https://www.npcworldwi.de/npc-studio).
+ There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).
 
 
  ## Mailing List
@@ -607,11 +607,30 @@ def execute_guac_command(command: str, state: GuacState) -> Tuple[GuacState, Any
  state.command_history.add_command(nl_input_for_llm, [history_output], "", state.current_path)
 
  elif state.current_mode == "cmd":
+ locals_context_string = "Current Python environment variables and functions:\n"
+ if state.locals:
+ for k, v in state.locals.items():
+ if not k.startswith('__'): # Exclude Python built-ins and internal vars
+ try:
+ # Use repr() for a developer-friendly representation
+ value_repr = repr(v)
+ # Truncate long representations to prevent context window bloat
+ if len(value_repr) > 200:
+ value_repr = value_repr[:197] + "..."
+ locals_context_string += f"- {k} (type: {type(v).__name__}) = {value_repr}\n"
+ except Exception:
+ locals_context_string += f"- {k} (type: {type(v).__name__}) = <unrepresentable>\n"
+ # Add a clear separator for LLM to distinguish this context
+ locals_context_string += "\n--- End of Environment Context ---\n"
+ else:
+ locals_context_string += "(Environment is empty)\n"
+
  prompt_cmd = (
  f"User input for Python CMD mode: '{nl_input_for_llm}'.\n"
  f"Generate ONLY executable Python code required to fulfill this.\n"
  f"Do not include any explanations, leading markdown like ```python, or any text other than the Python code itself.\n"
  )
+
  llm_response = get_llm_response(
  prompt_cmd,
  model=state.chat_model,
@@ -1,4 +1,3 @@
- # Standard Library Imports
  import os
  import sys
  import atexit
@@ -548,60 +547,91 @@ def process_pipeline_command(
  if not cmd_to_process:
  return state, stdin_input
 
- exec_model = model_override or state.chat_model
- exec_provider = provider_override or state.chat_provider
+ exec_model = model_override or state.chat_model
+ exec_provider = provider_override or state.chat_provider
 
  if cmd_to_process.startswith("/"):
- #print(cmd_to_process)
  return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
- else:
- try:
- cmd_parts = parse_command_safely(cmd_to_process)
- if not cmd_parts:
- return state, stdin_input
+
+ try:
+ cmd_parts = parse_command_safely(cmd_to_process)
+ if not cmd_parts:
+ return state, stdin_input
 
- command_name = cmd_parts[0]
+ command_name = cmd_parts[0]
 
+ is_unambiguous_bash = (
+ command_name in BASH_COMMANDS or
+ command_name in interactive_commands or
+ command_name == "cd" or
+ cmd_to_process.startswith("./")
+ )
+
+ if is_unambiguous_bash:
  if command_name in interactive_commands:
  return handle_interactive_command(cmd_parts, state)
  elif command_name == "cd":
  return handle_cd_command(cmd_parts, state)
  else:
- try:
- bash_state, bash_output = handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
- return bash_state, bash_output
- except CommandNotFoundError:
- full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
-
- llm_result = check_llm_command(
- command = full_llm_cmd,
- model = exec_model,
- provider = exec_provider,
- api_url = state.api_url,
- api_key = state.api_key,
- npc = state.npc,
- team = state.team,
- messages = state.messages,
- images = state.attachments,
- stream = stream_final,
- context = None ,
- shell = True,
-
- )
- if isinstance(llm_result, dict):
- state.messages = llm_result.get("messages", state.messages)
- output = llm_result.get("output")
- return state, output
- else:
- return state, llm_result
-
- except Exception as bash_err:
- return state, colored(f"Bash execution failed: {bash_err}", "red")
-
- except Exception as e:
- import traceback
- traceback.print_exc()
- return state, colored(f"Error processing command '{cmd_segment[:50]}...': {e}", "red")
+ return handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
+ else:
+ full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+
+ path_cmd = 'The current working directory is: ' + state.current_path
+ ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+ platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+ info = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n'
+
+ llm_result = check_llm_command(
+ full_llm_cmd,
+ model=exec_model,
+ provider=exec_provider,
+ api_url=state.api_url,
+ api_key=state.api_key,
+ npc=state.npc,
+ team=state.team,
+ messages=state.messages,
+ images=state.attachments,
+ stream=stream_final,
+ context=info,
+ shell=True,
+ )
+ if isinstance(llm_result, dict):
+ state.messages = llm_result.get("messages", state.messages)
+ output = llm_result.get("output")
+ return state, output
+ else:
+ return state, llm_result
+
+ except CommandNotFoundError as e:
+ print(colored(f"Command not found, falling back to LLM: {e}", "yellow"), file=sys.stderr)
+ full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+ llm_result = check_llm_command(
+ full_llm_cmd,
+ model=exec_model,
+ provider=exec_provider,
+ api_url=state.api_url,
+ api_key=state.api_key,
+ npc=state.npc,
+ team=state.team,
+ messages=state.messages,
+ images=state.attachments,
+ stream=stream_final,
+ context=None,
+ shell=True
+ )
+ if isinstance(llm_result, dict):
+ state.messages = llm_result.get("messages", state.messages)
+ output = llm_result.get("output")
+ return state, output
+ else:
+ return state, llm_result
+
+ except Exception as e:
+ import traceback
+ traceback.print_exc()
+ return state, colored(f"Error processing command '{cmd_segment[:50]}...': {e}", "red")
+
  def check_mode_switch(command:str , state: ShellState):
  if command in ['/cmd', '/agent', '/chat', '/ride']:
  state.current_mode = command[1:]
@@ -624,6 +654,7 @@ def execute_command(
  stdin_for_next = None
  final_output = None
  current_state = state
+
  if state.current_mode == 'agent':
  for i, cmd_segment in enumerate(commands):
  is_last_command = (i == len(commands) - 1)
@@ -646,12 +677,14 @@ def execute_command(
  if not stream_this_segment: # If intermediate output is a stream, consume for piping
  full_stream_output = "".join(map(str, output))
  stdin_for_next = full_stream_output
- if is_last_command: final_output = full_stream_output
+ if is_last_command:
+ final_output = full_stream_output
  else: # Final output is a stream, don't consume, can't pipe
  stdin_for_next = None
  final_output = output
  elif output is not None: # Try converting other types to string
- try: stdin_for_next = str(output)
+ try:
+ stdin_for_next = str(output)
  except Exception:
  print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
  stdin_for_next = None
@@ -759,91 +792,377 @@ def execute_command(
 
  # Otherwise, run the agentic ride loop
  return agentic_ride_loop(command, state)
+ @dataclass
+ class RideState:
+ """Lightweight state tracking for /ride mode"""
+ todos: List[Dict[str, Any]] = field(default_factory=list)
+ constraints: List[str] = field(default_factory=list)
+ facts: List[str] = field(default_factory=list)
+ mistakes: List[str] = field(default_factory=list)
+ successes: List[str] = field(default_factory=list)
+ current_todo_index: int = 0
+ current_subtodo_index: int = 0
+
+ def get_context_summary(self) -> str:
+ """Generate lightweight context for LLM prompts"""
+ context = []
+ if self.facts:
+ context.append(f"Facts: {'; '.join(self.facts[:5])}") # Limit to 5 most recent
+ if self.mistakes:
+ context.append(f"Recent mistakes: {'; '.join(self.mistakes[-3:])}")
+ if self.successes:
+ context.append(f"Recent successes: {'; '.join(self.successes[-3:])}")
+ return "\n".join(context)
+
+ def interactive_edit_list(items: List[str], item_type: str) -> List[str]:
+ """Interactive REPL for editing lists of items with regeneration options"""
+ while True:
+ print(f"\nCurrent {item_type}:")
+ for i, item in enumerate(items, 1):
+ print(f"{i}. {item}")
+
+ choice = input(f"\nEdit {item_type} (e<num> to edit, d<num> to delete, a to add, r to regenerate, c to add context, ok to continue): ").strip()
+
+ if choice.lower() == 'ok':
+ break
+ elif choice.lower() == 'r':
+ print("Regenerating list...")
+ return "REGENERATE" # Special signal to regenerate
+ elif choice.lower() == 'c':
+ additional_context = input("Add more context: ").strip()
+ if additional_context:
+ return {"ADD_CONTEXT": additional_context, "items": items}
+ elif choice.lower() == 'a':
+ new_item = input(f"Enter new {item_type[:-1]}: ").strip()
+ if new_item:
+ items.append(new_item)
+ elif choice.lower().startswith('e'):
+ try:
+ idx = int(choice[1:]) - 1
+ if 0 <= idx < len(items):
+ print(f"Current: {items[idx]}")
+ new_item = input("New version: ").strip()
+ if new_item:
+ items[idx] = new_item
+ except ValueError:
+ print("Invalid format. Use e<number>")
+ elif choice.lower().startswith('d'):
+ try:
+ idx = int(choice[1:]) - 1
+ if 0 <= idx < len(items):
+ items.pop(idx)
+ except ValueError:
+ print("Invalid format. Use d<number>")
+ else:
+ print("Invalid choice. Use: e<num>, d<num>, a, r (regenerate), c (add context), or ok")
+
+ return items
+ def generate_todos(user_goal: str, state: ShellState, additional_context: str = "") -> List[Dict[str, Any]]:
+ """Generate high-level todos for the user's goal"""
+ path_cmd = 'The current working directory is: ' + state.current_path
+ ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+ platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+ info = path_cmd + '\n' + ls_files + '\n' + platform_info
 
 
- def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
- """
- /ride mode: orchestrate via team, then LLM suggests 3 next steps, user picks or provides alternative input
- repeat until quit.
+
+ high_level_planning_instruction = """
+ You are a high-level project planner. When a user asks to work on a file or code,
+ structure your plan using a simple, high-level software development lifecycle:
+ 1. First, understand the current state (e.g., read the relevant file).
+ 2. Second, make the required changes based on the user's goal.
+ 3. Third, verify the changes work as intended (e.g., test the code).
+ Your generated todos should reflect this high-level thinking.
 
+
+
  """
- if not hasattr(state, "team") or state.team is None:
- raise ValueError("No team found in shell state for orchestration.")
+
+ prompt = f"""
+ {high_level_planning_instruction}
 
- request = user_goal
- all_results = []
+ User goal: {user_goal}
+
+ {additional_context}
+
+ Generate a list of 3 todos to accomplish this goal. Use specific actionable language based on the user request.
+ Do not make assumptions about user needs.
+ Every todo must be directly sourced from the user's request.
+ If users request specific files to be incorporated, you MUST include the full path to the file in the todo.
+ Here is some relevant information for the current folder and working directory that may be relevant:
+ {info}
+
+ For example, if the user says "I need to add a new function to calculate the average of a list of numbers my research.py script" and the current working directory is /home/user/projects and one
+ of the available files in the current directory is /home/user/projects/research.py then one of the todos should be:
+ - "Add a new function to /home/user/projects/research.py to calculate the average of a list of numbers"
+ Do not truncate paths. Do not additional paths. Use them exactly as they are provided here.
+
+ Each todo should be:
+ - Specific and actionable
+ - Independent where possible
+ - Focused on a single major component
+
+ Remember, it is critical to provide as much relevant information as possible. Even if the user only refers to a file or something by a relative path, it is
+ critical for operation that you provide the full path to the file in the todo item.
+
+ Return JSON with format:
+ {{
+ "todos": [
+ todo1, todo2, todo3,
+ ]
+ }}
+ """
+
+ response = get_llm_response(
+ prompt,
+ model=state.chat_model,
+ provider=state.chat_provider,
+ npc=state.npc,
+ format="json"
+ )
+
+ todos_data = response.get("response", {}).get("todos", [])
+ return todos_data
 
- while True:
- # 1. Orchestrate the current request
- result = state.team.orchestrate(request)
- all_results.append(result)
- render_markdown("# Orchestration Result")
- render_markdown(f"- Request: {request}")
- render_markdown(f"- Final response: {result.get('output')}")
-
- render_markdown('- Summary: '+result['debrief']['summary'])
- recommendations = result['debrief']['recommendations']
- render_markdown(f'- Recommendations: {recommendations}')
-
-
- # 2. Ask LLM for three next possible steps
- suggestion_prompt = f"""
- Given the following user goal and orchestration result, suggest three new
- avenues to go down that are related but distinct from the original goal and from each other.
-
- Be concise. Each step should be a single actionable instruction or question.
-
- User goal: {user_goal}
- Orchestration result: {result}
-
- Return a JSON object with a "steps" key, whose value is a list of three strings, each string being a next step.
- Return only the JSON object.
- """
- suggestions = get_llm_response(
- suggestion_prompt,
- model=state.chat_model,
- provider=state.chat_provider,
- api_url=state.api_url,
- api_key=state.api_key,
- npc=state.npc,
- format="json"
- )
- # No custom parsing: just use the parsed output
- steps = suggestions.get("response", {}).get("steps", [])
- if not steps or len(steps) < 1:
- print("No further steps suggested by LLM. Exiting.")
- break
 
- print("\nNext possible steps:")
- for idx, step in enumerate(steps, 1):
- print(f"{idx}. {step}")
+ def generate_constraints(todos: List[Dict[str, Any]], user_goal: str, state: ShellState) -> List[str]:
+ """Generate constraints and requirements that define relationships between todos"""
+ prompt = f"""
+ User goal: {user_goal}
+
+ Todos to accomplish:
+ {chr(10).join([f"- {todo}" for todo in todos])}
+
+ Based ONLY on what the user explicitly stated in their goal, identify any constraints or requirements they mentioned.
+ Do NOT invent new constraints. Only extract constraints that are directly stated or clearly implied by the user's request.
+
+ Examples of valid constraints:
+ - If user says "without breaking existing functionality" -> "Maintain existing functionality"
+ - If user says "must be fast" -> "Performance must be optimized"
+ - If user says "should integrate with X" -> "Must integrate with X"
+
+ If the user didn't specify any constraints, return an empty list.
+
+ Return JSON with format:
+ {{
+ "constraints": ["constraint 1", "constraint 2", ...]
+ }}
+ """
+
+ response = get_llm_response(
+ prompt,
+ model=state.chat_model,
+ provider=state.chat_provider,
+ npc=state.npc,
+ format="json"
+ )
+
+ constraints_data = response.get("response", {})
+
+ if isinstance(constraints_data, dict):
+ constraints = constraints_data.get("constraints", [])
+ # Make sure we're getting strings, not dicts
+ cleaned_constraints = []
+ for c in constraints:
+ if isinstance(c, str):
+ cleaned_constraints.append(c)
+ return cleaned_constraints
+ else:
+ return []
+ def should_break_down_todo(todo, state: ShellState) -> bool:
+ """Ask LLM if a todo needs breakdown, then ask user for confirmation"""
+ prompt = f"""
+ Todo: {todo}
+
+
+ Does this todo need to be broken down into smaller, more atomic components?
+ Consider:
+ - Is it complex enough to warrant breakdown?
+ - Would breaking it down make execution clearer?
+ - Are there multiple distinct steps involved?
+
+ Return JSON: {{"should_break_down": true/false, "reason": "explanation"}}
+ """
+
+ response = get_llm_response(
+ prompt,
+ model=state.chat_model,
+ provider=state.chat_provider,
+ npc=state.npc,
+ format="json"
+ )
+
+ result = response.get("response", {})
+ llm_suggests = result.get("should_break_down", False)
+ reason = result.get("reason", "No reason provided")
+
+ if llm_suggests:
+ print(f"\nLLM suggests breaking down: '{todo}'")
+ print(f"Reason: {reason}")
+ user_choice = input("Break it down? [y/N]: ").strip().lower()
+ return user_choice in ['y', 'yes']
+
+ return False
 
- user_input = input("\nChoose next step (1/2/3) or q to quit: ").strip().lower()
- if user_input in ("q", "quit", "exit"):
- print("Exiting /ride agentic loop.")
- break
- try:
+ def generate_subtodos(todo, state: ShellState) -> List[Dict[str, Any]]:
+ """Generate atomic subtodos for a complex todo"""
+ prompt = f"""
+ Parent todo: {todo}
+
+ Break this down into atomic, executable subtodos. Each subtodo should be:
+ - A single, concrete action
+ - Executable in one step
+ - Clear and unambiguous
+
+ Return JSON with format:
+ {{
+ "subtodos": [
+ "subtodo description",
+ ...
+ ]
+ }}
+ """
+
+ response = get_llm_response(
+ prompt,
+ model=state.chat_model,
+ provider=state.chat_provider,
+ npc=state.npc,
+ format="json"
+ )
+
+ return response.get("response", {}).get("subtodos", [])
+ def execute_todo_item(todo: Dict[str, Any], ride_state: RideState, shell_state: ShellState) -> bool:
+ """Execute a single todo item using the existing jinx system"""
+ path_cmd = 'The current working directory is: ' + shell_state.current_path
+ ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(shell_state.current_path, f) for f in os.listdir(shell_state.current_path)]) if os.path.exists(shell_state.current_path) else 'No files found in the current directory.'
+ platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+ info = path_cmd + '\n' + ls_files + '\n' + platform_info
+
+ command = f"""
+
+ General information:
+ {info}
+
+ Execute this todo: {todo}
+
+ Constraints to follow:
+ {chr(10).join([f"- {c}" for c in ride_state.constraints])}
+ """
+
+ print(f"\nExecuting: {todo}")
+
 
- choice = int(user_input)
- if 1 <= choice <= len(steps):
- request = f"""
- My initial goal was: {user_goal}
- The orchestration result was: {result.get('output')}
- I have chosen to pursue the next step: {steps[choice - 1]}
- Now work on this next problem.
- """
- else:
- print("Invalid choice, please enter 1, 2, 3, or q.")
- continue
- except Exception:
- # assume it is natural language input from the user on what to do next, not a number,
-
- request = user_input
- print("Invalid input, please enter 1, 2, 3, or q.")
- continue
+ result = check_llm_command(
+ command,
+ model=shell_state.chat_model,
+ provider=shell_state.chat_provider,
+ npc=shell_state.npc,
+ team=shell_state.team,
+ messages=[],
+ stream=shell_state.stream_output,
+ shell=True,
+ )
+
+ output_payload = result.get("output", "")
+ output_str = ""
+
+ if isgenerator(output_payload):
+ output_str = print_and_process_stream_with_markdown(output_payload, shell_state.chat_model, shell_state.chat_provider)
+ elif isinstance(output_payload, dict):
+ output_str = output_payload.get('output', str(output_payload))
+ if 'output' in output_str:
+ output_str = output_payload['output']
+ elif 'response' in output_str:
+ output_str = output_payload['response']
+ render_markdown(output_str)
+ elif output_payload:
+ output_str = str(output_payload)
+ render_markdown(output_str)
+
+ user_feedback = input(f"\nTodo completed successfully? [y/N/notes]: ").strip()
+
+ if user_feedback.lower() in ['y', 'yes']:
+ return True, output_str
+ elif user_feedback.lower() in ['n', 'no']:
+ mistake = input("What went wrong? ").strip()
+ ride_state.mistakes.append(f"Failed {todo}: {mistake}")
+ return False, output_str
+ else:
+ ride_state.facts.append(f"Re: {todo}: {user_feedback}")
+ success = input("Mark as completed? [y/N]: ").strip().lower() in ['y', 'yes']
+ return success, output_str
 
- return state, all_results
+ def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
+ """
+ New /ride mode: hierarchical planning with human-in-the-loop control
+ """
+ ride_state = RideState()
+
+ # 1. Generate high-level todos
+ print("🚀 Generating high-level todos...")
+ todos = generate_todos(user_goal, state)
+
+ # 2. User reviews/edits todos
+ print("\n📋 Review and edit todos:")
+ todo_descriptions = [todo for todo in todos]
+ edited_descriptions = interactive_edit_list(todo_descriptions, "todos")
+
 
+ ride_state.todos = edited_descriptions
+
+ # 3. Generate constraints
+ print("\n🔒 Generating constraints...")
+ constraints = generate_constraints(edited_descriptions, user_goal, state)
+
+ # 4. User reviews/edits constraints
+ print("\n📐 Review and edit constraints:")
+ edited_constraints = interactive_edit_list(constraints, "constraints")
+ ride_state.constraints = edited_constraints
+
+ # 5. Execution loop
+ print("\n⚡ Starting execution...")
+
+ for i, todo in enumerate(edited_descriptions):
+ print(f"\n--- Todo {i+1}/{len(todos)}: {todo} ---")
+
+ def attempt_execution(current_todo):
+ # This inner function handles the execution and retry logic
+ success, output_str = execute_todo_item(current_todo, ride_state, state)
+ if not success:
+ retry = input("Retry this todo? [y/N]: ").strip().lower()
+ if retry in ['y', 'yes']:
+ success, output_str = execute_todo_item(current_todo, ride_state, state)
+ return success, output_str
+
+ if should_break_down_todo(todo, state):
+ print("Breaking down todo...")
+ subtodos = generate_subtodos(todo, state)
+ subtodo_descriptions = [st for st in subtodos]
+ edited_subtodos = interactive_edit_list(subtodo_descriptions, "subtodos")
+
+ for j, subtodo_desc in enumerate(edited_subtodos):
+ subtodo = {"description": subtodo_desc, "type": "atomic"}
+ success, output = attempt_execution(subtodo)
+ if success:
+ ride_state.successes.append({"description": subtodo_desc, "output": output})
+ else:
+ print("Subtodo failed. Continuing to next...")
+ else:
+ success, output = attempt_execution(todo)
+ if success:
+ ride_state.successes.append({"description": todo, "output": output})
+ # 6. Final summary
+ print("\n🎯 Execution Summary:")
+ print(f"Successes: {len(ride_state.successes)}")
+ print(f"Mistakes: {len(ride_state.mistakes)}")
+ print(f"Facts learned: {len(ride_state.facts)}")
+
+ return state, {
+ "todos_completed": len(ride_state.successes),
+ "ride_state": ride_state,
+ "final_context": ride_state.get_context_summary()
+ }
  # --- Main Application Logic ---
 
  def check_deprecation_warnings():
@@ -857,12 +1176,12 @@ def print_welcome_message():
  print(
  """
  Welcome to \033[1;94mnpc\033[0m\033[1;38;5;202msh\033[0m!
- \033[1;94m \033[0m\033[1;38;5;202m \\\\
- \033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
- \033[1;94m| '_ \ | '_ \ / __|\033[0m\033[1;38;5;202m/ __/ | |_ _| \\\\
- \033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m\_ \ | | | | //
- \033[1;94m|_| |_|| .__/ \___|\033[0m\033[1;38;5;202m|___/ |_| |_| //
- \033[1;94m| | \033[0m\033[1;38;5;202m //
+ \033[1;94m \033[0m\033[1;38;5;202m \\\\
+ \033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
+ \033[1;94m| '_ \\ | ' \\ / __|\033[0m\033[1;38;5;202m / __/ | |_ _| \\\\
+ \033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m \\_ \\ | | | | //
+ \033[1;94m|_| |_|| .__/ \\___|\033[0m\033[1;38;5;202m |___/ |_| |_| //
+ \033[1;94m| | \033[0m\033[1;38;5;202m //
  \033[1;94m| |
  \033[1;94m|_|
 
@@ -871,7 +1190,6 @@ Begin by asking a question, issuing a bash command, or typing '/help' for more i
  """
  )
 
-
  def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
  check_deprecation_warnings()
  setup_npcsh_config()
@@ -892,89 +1210,87 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
  project_team_path = os.path.abspath(PROJECT_NPC_TEAM_PATH)
  global_team_path = os.path.expanduser(DEFAULT_NPC_TEAM_PATH)
  team_dir = None
- forenpc_obj = None
- team_ctx = {}
+ default_forenpc_name = None
 
- # --- Always prefer local/project team first ---
  if os.path.exists(project_team_path):
  team_dir = project_team_path
- forenpc_name = "forenpc"
+ default_forenpc_name = "forenpc"
  else:
  resp = input(f"No npc_team found in {os.getcwd()}. Create a new team here? [Y/n]: ").strip().lower()
  if resp in ("", "y", "yes"):
  team_dir = project_team_path
  os.makedirs(team_dir, exist_ok=True)
- forenpc_name = "forenpc"
+ default_forenpc_name = "forenpc"
  forenpc_directive = input(
- f"Enter a primary directive for {forenpc_name} (default: 'You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests.'): "
+ f"Enter a primary directive for {default_forenpc_name} (default: 'You are the forenpc of the team...'): "
  ).strip() or "You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests."
  forenpc_model = input("Enter a model for your forenpc (default: llama3.2): ").strip() or "llama3.2"
  forenpc_provider = input("Enter a provider for your forenpc (default: ollama): ").strip() or "ollama"
- forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
- if not os.path.exists(forenpc_path):
- with open(forenpc_path, "w") as f:
- yaml.dump({
- "name": forenpc_name,
- "primary_directive": forenpc_directive,
- "model": forenpc_model,
- "provider": forenpc_provider
- }, f)
+
+ with open(os.path.join(team_dir, f"{default_forenpc_name}.npc"), "w") as f:
+ yaml.dump({
+ "name": default_forenpc_name, "primary_directive": forenpc_directive,
+ "model": forenpc_model, "provider": forenpc_provider
+ }, f)
+
  ctx_path = os.path.join(team_dir, "team.ctx")
- folder_context = input("Enter a short description or context for this project/team (optional): ").strip()
- team_ctx = {
- "forenpc": forenpc_name,
- "model": forenpc_model,
- "provider": forenpc_provider,
- "api_key": None,
- "api_url": None,
+ folder_context = input("Enter a short description for this project/team (optional): ").strip()
+ team_ctx_data = {
+ "forenpc": default_forenpc_name, "model": forenpc_model,
+ "provider": forenpc_provider, "api_key": None, "api_url": None,
  "context": folder_context if folder_context else None
  }
- use_jinxs = input("Do you want to copy jinxs from the global folder to this project (c), or use them from the global folder (g)? [c/g, default: g]: ").strip().lower()
- global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
- project_jinxs_dir = os.path.join(team_dir, "jinxs")
+ use_jinxs = input("Use global jinxs folder (g) or copy to this project (c)? [g/c, default: g]: ").strip().lower()
  if use_jinxs == "c":
+ global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
  if os.path.exists(global_jinxs_dir):
- shutil.copytree(global_jinxs_dir, project_jinxs_dir, dirs_exist_ok=True)
- print(f"Copied jinxs from {global_jinxs_dir} to {project_jinxs_dir}")
- else:
- print(f"No global jinxs found at {global_jinxs_dir}")
+ shutil.copytree(global_jinxs_dir, os.path.join(team_dir, "jinxs"), dirs_exist_ok=True)
  else:
- team_ctx["use_global_jinxs"] = True
+ team_ctx_data["use_global_jinxs"] = True
 
  with open(ctx_path, "w") as f:
- yaml.dump(team_ctx, f)
+ yaml.dump(team_ctx_data, f)
  elif os.path.exists(global_team_path):
  team_dir = global_team_path
- forenpc_name = "sibiji"
+ default_forenpc_name = "sibiji"
  else:
  print("No global npc_team found. Please run 'npcpy init' or create a team first.")
  sys.exit(1)
 
- # --- Load team context if it exists ---
- ctx_path = os.path.join(team_dir, "team.ctx")
- if os.path.exists(ctx_path):
- with open(ctx_path, "r") as f:
- team_ctx = yaml.safe_load(f) or team_ctx
+ team_ctx = {}
+ for filename in os.listdir(team_dir):
+ if filename.endswith(".ctx"):
+ try:
+ with open(os.path.join(team_dir, filename), "r") as f:
+ team_ctx = yaml.safe_load(f) or {}
+ break
+ except Exception as e:
+ print(f"Warning: Could not load context file {filename}: {e}")
 
- # --- Load the forenpc_obj ---
- forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
- if os.path.exists(forenpc_path):
- forenpc_obj = NPC(forenpc_path)
- else:
- forenpc_obj = None
+ forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
+ print(f"Using forenpc: {forenpc_name}")
 
- # --- Decide which jinxs directory to use ---
  if team_ctx.get("use_global_jinxs", False):
  jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
  else:
  jinxs_dir = os.path.join(team_dir, "jinxs")
- from npcpy.npc_compiler import load_jinxs_from_directory
+
  jinxs_list = load_jinxs_from_directory(jinxs_dir)
  jinxs_dict = {jinx.jinx_name: jinx for jinx in jinxs_list}
 
+ forenpc_obj = None
+ forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
+ #print('forenpc_path', forenpc_path)
+ #print('jinx list', jinxs_list)
+ if os.path.exists(forenpc_path):
+
+ forenpc_obj = NPC(file = forenpc_path, jinxs=jinxs_list)
+ else:
+ print(f"Warning: Forenpc file '{forenpc_name}.npc' not found in {team_dir}.")
+
  team = Team(team_path=team_dir, forenpc=forenpc_obj, jinxs=jinxs_dict)
  return command_history, team, forenpc_obj
-
+
  def process_result(
  user_input: str,
  result_state: ShellState,
@@ -1002,6 +1318,7 @@ def process_result(
  if user_input =='/help':
  render_markdown(output)
  elif result_state.stream_output:
+
  try:
  final_output_str = print_and_process_stream_with_markdown(output, result_state.chat_model, result_state.chat_provider)
  except AttributeError as e:
@@ -1009,6 +1326,16 @@ def process_result(
  if len(output) > 0:
  final_output_str = output
  render_markdown(final_output_str)
+ except TypeError as e:
+
+ if isinstance(output, str):
+ if len(output) > 0:
+ final_output_str = output
+ render_markdown(final_output_str)
+ elif isinstance(output, dict):
+ if 'output' in output:
+ final_output_str = output['output']
+ render_markdown(final_output_str)
 
  elif output is not None:
  final_output_str = str(output)
@@ -1042,12 +1369,12 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
 
  def exit_shell(state):
  print("\nGoodbye!")
- print('beginning knowledge consolidation')
- try:
- breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
- print(breathe_result)
- except KeyboardInterrupt:
- print("Knowledge consolidation interrupted. Exiting immediately.")
+ #print('beginning knowledge consolidation')
+ #try:
+ # breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
+ # print(breathe_result)
+ #except KeyboardInterrupt:
+ # print("Knowledge consolidation interrupted. Exiting immediately.")
  sys.exit(0)
 
  while True:
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: npcsh
- Version: 1.0.2
- Summary: npcsh is a command-line toolkit for using AI agents.
+ Version: 1.0.4
+ Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
  Home-page: https://github.com/NPC-Worldwide/npcsh
  Author: Christopher Agostino
  Author-email: info@npcworldwi.de
@@ -402,7 +402,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)
 
 
  ## NPC Studio
- There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables (soon) at [our website](https://www.npcworldwi.de/npc-studio).
+ There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).
 
 
  ## Mailing List
@@ -84,7 +84,7 @@ extra_files = package_files("npcpy/npc_team/")
 
  setup(
  name="npcsh",
- version="1.0.2",
+ version="1.0.4",
  packages=find_packages(exclude=["tests*"]),
  install_requires=base_requirements, # Only install base requirements by default
  extras_require={
@@ -108,7 +108,7 @@ setup(
  },
  author="Christopher Agostino",
  author_email="info@npcworldwi.de",
- description="npcsh is a command-line toolkit for using AI agents.",
+ description="npcsh is a command-line toolkit for using AI agents in novel ways.",
  long_description=open("README.md").read(),
  long_description_content_type="text/markdown",
  url="https://github.com/NPC-Worldwide/npcsh",
17 files without changes