npcsh 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcsh/guac.py CHANGED
@@ -20,7 +20,7 @@ import importlib.util
 from npcpy.memory.command_history import CommandHistory
 from npcpy.npc_compiler import Team, NPC
 from npcpy.llm_funcs import get_llm_response, check_llm_command, execute_llm_command
- from npcpy.modes._state import initial_state as npcsh_initial_state
+ from npcsh._state import initial_state as npcsh_initial_state
 from npcpy.npc_sysenv import render_markdown, print_and_process_stream_with_markdown

 try:
@@ -607,11 +607,30 @@ def execute_guac_command(command: str, state: GuacState) -> Tuple[GuacState, Any
 state.command_history.add_command(nl_input_for_llm, [history_output], "", state.current_path)

 elif state.current_mode == "cmd":
+ locals_context_string = "Current Python environment variables and functions:\n"
+ if state.locals:
+ for k, v in state.locals.items():
+ if not k.startswith('__'): # Exclude Python built-ins and internal vars
+ try:
+ # Use repr() for a developer-friendly representation
+ value_repr = repr(v)
+ # Truncate long representations to prevent context window bloat
+ if len(value_repr) > 200:
+ value_repr = value_repr[:197] + "..."
+ locals_context_string += f"- {k} (type: {type(v).__name__}) = {value_repr}\n"
+ except Exception:
+ locals_context_string += f"- {k} (type: {type(v).__name__}) = <unrepresentable>\n"
+ # Add a clear separator for LLM to distinguish this context
+ locals_context_string += "\n--- End of Environment Context ---\n"
+ else:
+ locals_context_string += "(Environment is empty)\n"
+
 prompt_cmd = (
 f"User input for Python CMD mode: '{nl_input_for_llm}'.\n"
 f"Generate ONLY executable Python code required to fulfill this.\n"
 f"Do not include any explanations, leading markdown like ```python, or any text other than the Python code itself.\n"
 )
+
 llm_response = get_llm_response(
 prompt_cmd,
 model=state.chat_model,
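Note: the added cmd-mode block above snapshots the guac session's Python locals into the prompt, truncating each repr() at 200 characters. A minimal standalone sketch of that truncation pattern, with an invented helper name and sample values rather than package code:

```python
# Illustrative sketch of the repr-truncation pattern above; not part of npcsh.
def summarize_locals(env: dict, limit: int = 200) -> str:
    lines = []
    for name, value in env.items():
        if name.startswith("__"):            # skip dunder/internal names
            continue
        try:
            text = repr(value)
        except Exception:
            text = "<unrepresentable>"
        if len(text) > limit:
            text = text[:limit - 3] + "..."  # keep the entry within the limit
        lines.append(f"- {name} (type: {type(value).__name__}) = {text}")
    return "\n".join(lines) or "(Environment is empty)"

print(summarize_locals({"x": 42, "data": list(range(1000))}))
```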
npcsh/npcsh.py CHANGED
@@ -1,4 +1,3 @@
- # Standard Library Imports
 import os
 import sys
 import atexit
@@ -28,6 +27,7 @@ except ImportError:
 import shutil

 import yaml
+
 # Local Application Imports
 from npcsh._state import (
 setup_npcsh_config,
@@ -36,17 +36,17 @@ from npcsh._state import (
 orange,
 interactive_commands,
 BASH_COMMANDS,
- log_action
+ start_interactive_session,
+
 )

 from npcpy.npc_sysenv import (
 print_and_process_stream_with_markdown,
 render_markdown,
 get_locally_available_models,
- start_interactive_session,
 get_model_and_provider,
 )
- from npcpy.routes import router
+ from npcsh.routes import router
 from npcpy.data.image import capture_screenshot
 from npcpy.memory.command_history import (
 CommandHistory,
@@ -86,7 +86,7 @@ class CommandNotFoundError(Exception):
 pass


- from npcpy.modes._state import initial_state, ShellState
+ from npcsh._state import initial_state, ShellState

 def readline_safe_prompt(prompt: str) -> str:
 ansi_escape = re.compile(r"(\033\[[0-9;]*[a-zA-Z])")
@@ -547,60 +547,90 @@ def process_pipeline_command(
 if not cmd_to_process:
 return state, stdin_input

- exec_model = model_override or state.chat_model
- exec_provider = provider_override or state.chat_provider
+ exec_model = model_override or state.chat_model
+ exec_provider = provider_override or state.chat_provider

 if cmd_to_process.startswith("/"):
- #print(cmd_to_process)
 return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
- else:
- try:
- cmd_parts = parse_command_safely(cmd_to_process)
- if not cmd_parts:
- return state, stdin_input
+
+ try:
+ cmd_parts = parse_command_safely(cmd_to_process)
+ if not cmd_parts:
+ return state, stdin_input

- command_name = cmd_parts[0]
+ command_name = cmd_parts[0]

+ is_unambiguous_bash = (
+ command_name in BASH_COMMANDS or
+ command_name in interactive_commands or
+ command_name == "cd" or
+ cmd_to_process.startswith("./")
+ )
+
+ if is_unambiguous_bash:
 if command_name in interactive_commands:
 return handle_interactive_command(cmd_parts, state)
 elif command_name == "cd":
 return handle_cd_command(cmd_parts, state)
 else:
- try:
- bash_state, bash_output = handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
- return bash_state, bash_output
- except CommandNotFoundError:
- full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
-
- llm_result = check_llm_command(
- command = full_llm_cmd,
- model = exec_model,
- provider = exec_provider,
- api_url = state.api_url,
- api_key = state.api_key,
- npc = state.npc,
- team = state.team,
- messages = state.messages,
- images = state.attachments,
- stream = stream_final,
- context = None ,
- shell = True,
-
- )
- if isinstance(llm_result, dict):
- state.messages = llm_result.get("messages", state.messages)
- output = llm_result.get("output")
- return state, output
- else:
- return state, llm_result
-
- except Exception as bash_err:
- return state, colored(f"Bash execution failed: {bash_err}", "red")
-
- except Exception as e:
- import traceback
- traceback.print_exc()
- return state, colored(f"Error processing command '{cmd_segment[:50]}...': {e}", "red")
+ return handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
+ else:
+ full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+
+ path_cmd = 'The current working directory is: ' + state.current_path
+ ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+ platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+ full_llm_cmd = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n' + full_llm_cmd
+ llm_result = check_llm_command(
+ full_llm_cmd,
+ model=exec_model,
+ provider=exec_provider,
+ api_url=state.api_url,
+ api_key=state.api_key,
+ npc=state.npc,
+ team=state.team,
+ messages=state.messages,
+ images=state.attachments,
+ stream=stream_final,
+ context=None,
+ shell=True,
+ )
+ if isinstance(llm_result, dict):
+ state.messages = llm_result.get("messages", state.messages)
+ output = llm_result.get("output")
+ return state, output
+ else:
+ return state, llm_result
+
+ except CommandNotFoundError as e:
+ print(colored(f"Command not found, falling back to LLM: {e}", "yellow"), file=sys.stderr)
+ full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+ llm_result = check_llm_command(
+ full_llm_cmd,
+ model=exec_model,
+ provider=exec_provider,
+ api_url=state.api_url,
+ api_key=state.api_key,
+ npc=state.npc,
+ team=state.team,
+ messages=state.messages,
+ images=state.attachments,
+ stream=stream_final,
+ context=None,
+ shell=True
+ )
+ if isinstance(llm_result, dict):
+ state.messages = llm_result.get("messages", state.messages)
+ output = llm_result.get("output")
+ return state, output
+ else:
+ return state, llm_result
+
+ except Exception as e:
+ import traceback
+ traceback.print_exc()
+ return state, colored(f"Error processing command '{cmd_segment[:50]}...': {e}", "red")
+

 def check_mode_switch(command:str , state: ShellState):
 if command in ['/cmd', '/agent', '/chat', '/ride']:
 state.current_mode = command[1:]
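Note: the rewritten routing above runs unambiguous shell input (known bash commands, interactive commands, cd, or ./ scripts) directly and sends everything else to check_llm_command with working-directory and platform context prepended. A rough sketch of that decision under assumed placeholder command sets, not the real BASH_COMMANDS or interactive_commands from npcsh._state:

```python
# Sketch of the bash-vs-LLM routing; the sets below are placeholders, not npcsh's.
BASH_COMMANDS = {"ls", "cat", "grep", "echo", "git"}
INTERACTIVE_COMMANDS = {"vim", "nano", "top"}

def route(command: str) -> str:
    parts = command.split()
    name = parts[0] if parts else ""
    if command.startswith("/"):
        return "slash-command handler"
    if name in BASH_COMMANDS or name in INTERACTIVE_COMMANDS or name == "cd" or command.startswith("./"):
        return "bash execution"
    return "LLM fallback"

print(route("ls -la"))               # bash execution
print(route("summarize this repo"))  # LLM fallback
```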
@@ -623,6 +653,7 @@ def execute_command(
 stdin_for_next = None
 final_output = None
 current_state = state
+
 if state.current_mode == 'agent':
 for i, cmd_segment in enumerate(commands):
 is_last_command = (i == len(commands) - 1)
@@ -645,12 +676,14 @@
 if not stream_this_segment: # If intermediate output is a stream, consume for piping
 full_stream_output = "".join(map(str, output))
 stdin_for_next = full_stream_output
- if is_last_command: final_output = full_stream_output
+ if is_last_command:
+ final_output = full_stream_output
 else: # Final output is a stream, don't consume, can't pipe
 stdin_for_next = None
 final_output = output
 elif output is not None: # Try converting other types to string
- try: stdin_for_next = str(output)
+ try:
+ stdin_for_next = str(output)
 except Exception:
 print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
 stdin_for_next = None
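Note: in the piping hunk above, a streamed output that is not the final segment is joined into a single string so the next pipeline segment can consume it as stdin; only the last segment may stay a live stream. A tiny illustration of that join, with invented generator contents:

```python
# Illustration of consuming an intermediate stream so it can be piped as text.
chunks = (f"chunk-{i}\n" for i in range(3))   # stand-in for a streamed LLM response
stdin_for_next = "".join(map(str, chunks))    # consume the generator into plain text
print(stdin_for_next)
```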
@@ -758,91 +791,377 @@ def execute_command(

 # Otherwise, run the agentic ride loop
 return agentic_ride_loop(command, state)
+ @dataclass
+ class RideState:
+ """Lightweight state tracking for /ride mode"""
+ todos: List[Dict[str, Any]] = field(default_factory=list)
+ constraints: List[str] = field(default_factory=list)
+ facts: List[str] = field(default_factory=list)
+ mistakes: List[str] = field(default_factory=list)
+ successes: List[str] = field(default_factory=list)
+ current_todo_index: int = 0
+ current_subtodo_index: int = 0
+
+ def get_context_summary(self) -> str:
+ """Generate lightweight context for LLM prompts"""
+ context = []
+ if self.facts:
+ context.append(f"Facts: {'; '.join(self.facts[:5])}") # Limit to 5 most recent
+ if self.mistakes:
+ context.append(f"Recent mistakes: {'; '.join(self.mistakes[-3:])}")
+ if self.successes:
+ context.append(f"Recent successes: {'; '.join(self.successes[-3:])}")
+ return "\n".join(context)
+
+ def interactive_edit_list(items: List[str], item_type: str) -> List[str]:
+ """Interactive REPL for editing lists of items with regeneration options"""
+ while True:
+ print(f"\nCurrent {item_type}:")
+ for i, item in enumerate(items, 1):
+ print(f"{i}. {item}")
+
+ choice = input(f"\nEdit {item_type} (e<num> to edit, d<num> to delete, a to add, r to regenerate, c to add context, ok to continue): ").strip()
+
+ if choice.lower() == 'ok':
+ break
+ elif choice.lower() == 'r':
+ print("Regenerating list...")
+ return "REGENERATE" # Special signal to regenerate
+ elif choice.lower() == 'c':
+ additional_context = input("Add more context: ").strip()
+ if additional_context:
+ return {"ADD_CONTEXT": additional_context, "items": items}
+ elif choice.lower() == 'a':
+ new_item = input(f"Enter new {item_type[:-1]}: ").strip()
+ if new_item:
+ items.append(new_item)
+ elif choice.lower().startswith('e'):
+ try:
+ idx = int(choice[1:]) - 1
+ if 0 <= idx < len(items):
+ print(f"Current: {items[idx]}")
+ new_item = input("New version: ").strip()
+ if new_item:
+ items[idx] = new_item
+ except ValueError:
+ print("Invalid format. Use e<number>")
+ elif choice.lower().startswith('d'):
+ try:
+ idx = int(choice[1:]) - 1
+ if 0 <= idx < len(items):
+ items.pop(idx)
+ except ValueError:
+ print("Invalid format. Use d<number>")
+ else:
+ print("Invalid choice. Use: e<num>, d<num>, a, r (regenerate), c (add context), or ok")
+
+ return items
+ def generate_todos(user_goal: str, state: ShellState, additional_context: str = "") -> List[Dict[str, Any]]:
+ """Generate high-level todos for the user's goal"""
+ path_cmd = 'The current working directory is: ' + state.current_path
+ ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+ platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+ info = path_cmd + '\n' + ls_files + '\n' + platform_info


- def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
+
+ high_level_planning_instruction = """
+ You are a high-level project planner. When a user asks to work on a file or code,
+ structure your plan using a simple, high-level software development lifecycle:
+ 1. First, understand the current state (e.g., read the relevant file).
+ 2. Second, make the required changes based on the user's goal.
+ 3. Third, verify the changes work as intended (e.g., test the code).
+ Your generated todos should reflect this high-level thinking.
+
+
+
 """
- /ride mode: orchestrate via team, then LLM suggests 3 next steps, user picks or provides alternative input
- repeat until quit.
+
+ prompt = f"""
+ {high_level_planning_instruction}

+ User goal: {user_goal}
+
+ {additional_context}
+
+ Generate a list of 3 todos to accomplish this goal. Use specific actionable language based on the user request.
+ Do not make assumptions about user needs.
+ Every todo must be directly sourced from the user's request.
+ If users request specific files to be incorporated, you MUST include the full path to the file in the todo.
+ Here is some relevant information for the current folder and working directory that may be relevant:
+ {info}
+
+ For example, if the user says "I need to add a new function to calculate the average of a list of numbers my research.py script" and the current working directory is /home/user/projects and one
+ of the available files in the current directory is /home/user/projects/research.py then one of the todos should be:
+ - "Add a new function to /home/user/projects/research.py to calculate the average of a list of numbers"
+ Do not truncate paths. Do not additional paths. Use them exactly as they are provided here.
+
+ Each todo should be:
+ - Specific and actionable
+ - Independent where possible
+ - Focused on a single major component
+
+ Remember, it is critical to provide as much relevant information as possible. Even if the user only refers to a file or something by a relative path, it is
+ critical for operation that you provide the full path to the file in the todo item.
+
+ Return JSON with format:
+ {{
+ "todos": [
+ todo1, todo2, todo3,
+ ]
+ }}
 """
- if not hasattr(state, "team") or state.team is None:
- raise ValueError("No team found in shell state for orchestration.")
+
+ response = get_llm_response(
+ prompt,
+ model=state.chat_model,
+ provider=state.chat_provider,
+ npc=state.npc,
+ format="json"
+ )
+
+ todos_data = response.get("response", {}).get("todos", [])
+ return todos_data

- request = user_goal
- all_results = []

- while True:
- # 1. Orchestrate the current request
- result = state.team.orchestrate(request)
- all_results.append(result)
- render_markdown("# Orchestration Result")
- render_markdown(f"- Request: {request}")
- render_markdown(f"- Final response: {result.get('output')}")
-
- render_markdown('- Summary: '+result['debrief']['summary'])
- recommendations = result['debrief']['recommendations']
- render_markdown(f'- Recommendations: {recommendations}')
-
-
- # 2. Ask LLM for three next possible steps
- suggestion_prompt = f"""
- Given the following user goal and orchestration result, suggest three new
- avenues to go down that are related but distinct from the original goal and from each other.
-
- Be concise. Each step should be a single actionable instruction or question.
-
- User goal: {user_goal}
- Orchestration result: {result}
-
- Return a JSON object with a "steps" key, whose value is a list of three strings, each string being a next step.
- Return only the JSON object.
- """
- suggestions = get_llm_response(
- suggestion_prompt,
- model=state.chat_model,
- provider=state.chat_provider,
- api_url=state.api_url,
- api_key=state.api_key,
- npc=state.npc,
- format="json"
- )
- # No custom parsing: just use the parsed output
- steps = suggestions.get("response", {}).get("steps", [])
- if not steps or len(steps) < 1:
- print("No further steps suggested by LLM. Exiting.")
- break
-
- print("\nNext possible steps:")
- for idx, step in enumerate(steps, 1):
- print(f"{idx}. {step}")
+ def generate_constraints(todos: List[Dict[str, Any]], user_goal: str, state: ShellState) -> List[str]:
+ """Generate constraints and requirements that define relationships between todos"""
+ prompt = f"""
+ User goal: {user_goal}
+
+ Todos to accomplish:
+ {chr(10).join([f"- {todo}" for todo in todos])}
+
+ Based ONLY on what the user explicitly stated in their goal, identify any constraints or requirements they mentioned.
+ Do NOT invent new constraints. Only extract constraints that are directly stated or clearly implied by the user's request.
+
+ Examples of valid constraints:
+ - If user says "without breaking existing functionality" -> "Maintain existing functionality"
+ - If user says "must be fast" -> "Performance must be optimized"
+ - If user says "should integrate with X" -> "Must integrate with X"
+
+ If the user didn't specify any constraints, return an empty list.
+
+ Return JSON with format:
+ {{
+ "constraints": ["constraint 1", "constraint 2", ...]
+ }}
+ """
+
+ response = get_llm_response(
+ prompt,
+ model=state.chat_model,
+ provider=state.chat_provider,
+ npc=state.npc,
+ format="json"
+ )
+
+ constraints_data = response.get("response", {})
+
+ if isinstance(constraints_data, dict):
+ constraints = constraints_data.get("constraints", [])
+ # Make sure we're getting strings, not dicts
+ cleaned_constraints = []
+ for c in constraints:
+ if isinstance(c, str):
+ cleaned_constraints.append(c)
+ return cleaned_constraints
+ else:
+ return []
+ def should_break_down_todo(todo, state: ShellState) -> bool:
+ """Ask LLM if a todo needs breakdown, then ask user for confirmation"""
+ prompt = f"""
+ Todo: {todo}
+
+
+ Does this todo need to be broken down into smaller, more atomic components?
+ Consider:
+ - Is it complex enough to warrant breakdown?
+ - Would breaking it down make execution clearer?
+ - Are there multiple distinct steps involved?
+
+ Return JSON: {{"should_break_down": true/false, "reason": "explanation"}}
+ """
+
+ response = get_llm_response(
+ prompt,
+ model=state.chat_model,
+ provider=state.chat_provider,
+ npc=state.npc,
+ format="json"
+ )
+
+ result = response.get("response", {})
+ llm_suggests = result.get("should_break_down", False)
+ reason = result.get("reason", "No reason provided")
+
+ if llm_suggests:
+ print(f"\nLLM suggests breaking down: '{todo}'")
+ print(f"Reason: {reason}")
+ user_choice = input("Break it down? [y/N]: ").strip().lower()
+ return user_choice in ['y', 'yes']
+
+ return False

- user_input = input("\nChoose next step (1/2/3) or q to quit: ").strip().lower()
- if user_input in ("q", "quit", "exit"):
- print("Exiting /ride agentic loop.")
- break
- try:
+ def generate_subtodos(todo, state: ShellState) -> List[Dict[str, Any]]:
+ """Generate atomic subtodos for a complex todo"""
+ prompt = f"""
+ Parent todo: {todo}
+
+ Break this down into atomic, executable subtodos. Each subtodo should be:
+ - A single, concrete action
+ - Executable in one step
+ - Clear and unambiguous
+
+ Return JSON with format:
+ {{
+ "subtodos": [
+ "subtodo description",
+ ...
+ ]
+ }}
+ """
+
+ response = get_llm_response(
+ prompt,
+ model=state.chat_model,
+ provider=state.chat_provider,
+ npc=state.npc,
+ format="json"
+ )
+
+ return response.get("response", {}).get("subtodos", [])
+ def execute_todo_item(todo: Dict[str, Any], ride_state: RideState, shell_state: ShellState) -> bool:
+ """Execute a single todo item using the existing jinx system"""
+ path_cmd = 'The current working directory is: ' + shell_state.current_path
+ ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(shell_state.current_path, f) for f in os.listdir(shell_state.current_path)]) if os.path.exists(shell_state.current_path) else 'No files found in the current directory.'
+ platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+ info = path_cmd + '\n' + ls_files + '\n' + platform_info
+
+ command = f"""
+
+ General information:
+ {info}
+
+ Execute this todo: {todo}
+
+ Constraints to follow:
+ {chr(10).join([f"- {c}" for c in ride_state.constraints])}
+ """
+
+ print(f"\nExecuting: {todo}")
+

- choice = int(user_input)
- if 1 <= choice <= len(steps):
- request = f"""
- My initial goal was: {user_goal}
- The orchestration result was: {result.get('output')}
- I have chosen to pursue the next step: {steps[choice - 1]}
- Now work on this next problem.
- """
- else:
- print("Invalid choice, please enter 1, 2, 3, or q.")
- continue
- except Exception:
- # assume it is natural language input from the user on what to do next, not a number,
-
- request = user_input
- print("Invalid input, please enter 1, 2, 3, or q.")
- continue
+ result = check_llm_command(
+ command,
+ model=shell_state.chat_model,
+ provider=shell_state.chat_provider,
+ npc=shell_state.npc,
+ team=shell_state.team,
+ messages=[],
+ stream=shell_state.stream_output,
+ shell=True,
+ )
+
+ output_payload = result.get("output", "")
+ output_str = ""
+
+ if isgenerator(output_payload):
+ output_str = print_and_process_stream_with_markdown(output_payload, shell_state.chat_model, shell_state.chat_provider)
+ elif isinstance(output_payload, dict):
+ output_str = output_payload.get('output', str(output_payload))
+ if 'output' in output_str:
+ output_str = output_payload['output']
+ elif 'response' in output_str:
+ output_str = output_payload['response']
+ render_markdown(output_str)
+ elif output_payload:
+ output_str = str(output_payload)
+ render_markdown(output_str)
+
+ user_feedback = input(f"\nTodo completed successfully? [y/N/notes]: ").strip()
+
+ if user_feedback.lower() in ['y', 'yes']:
+ return True, output_str
+ elif user_feedback.lower() in ['n', 'no']:
+ mistake = input("What went wrong? ").strip()
+ ride_state.mistakes.append(f"Failed {todo}: {mistake}")
+ return False, output_str
+ else:
+ ride_state.facts.append(f"Re: {todo}: {user_feedback}")
+ success = input("Mark as completed? [y/N]: ").strip().lower() in ['y', 'yes']
+ return success, output_str

- return state, all_results
+ def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
+ """
+ New /ride mode: hierarchical planning with human-in-the-loop control
+ """
+ ride_state = RideState()
+
+ # 1. Generate high-level todos
+ print("🚀 Generating high-level todos...")
+ todos = generate_todos(user_goal, state)
+
+ # 2. User reviews/edits todos
+ print("\n📋 Review and edit todos:")
+ todo_descriptions = [todo for todo in todos]
+ edited_descriptions = interactive_edit_list(todo_descriptions, "todos")
+

+ ride_state.todos = edited_descriptions
+
+ # 3. Generate constraints
+ print("\n🔒 Generating constraints...")
+ constraints = generate_constraints(edited_descriptions, user_goal, state)
+
+ # 4. User reviews/edits constraints
+ print("\n📐 Review and edit constraints:")
+ edited_constraints = interactive_edit_list(constraints, "constraints")
+ ride_state.constraints = edited_constraints
+
+ # 5. Execution loop
+ print("\n⚡ Starting execution...")
+
+ for i, todo in enumerate(edited_descriptions):
+ print(f"\n--- Todo {i+1}/{len(todos)}: {todo} ---")
+
+ def attempt_execution(current_todo):
+ # This inner function handles the execution and retry logic
+ success, output_str = execute_todo_item(current_todo, ride_state, state)
+ if not success:
+ retry = input("Retry this todo? [y/N]: ").strip().lower()
+ if retry in ['y', 'yes']:
+ success, output_str = execute_todo_item(current_todo, ride_state, state)
+ return success, output_str
+
+ if should_break_down_todo(todo, state):
+ print("Breaking down todo...")
+ subtodos = generate_subtodos(todo, state)
+ subtodo_descriptions = [st for st in subtodos]
+ edited_subtodos = interactive_edit_list(subtodo_descriptions, "subtodos")
+
+ for j, subtodo_desc in enumerate(edited_subtodos):
+ subtodo = {"description": subtodo_desc, "type": "atomic"}
+ success, output = attempt_execution(subtodo)
+ if success:
+ ride_state.successes.append({"description": subtodo_desc, "output": output})
+ else:
+ print("Subtodo failed. Continuing to next...")
+ else:
+ success, output = attempt_execution(todo)
+ if success:
+ ride_state.successes.append({"description": todo, "output": output})
+ # 6. Final summary
+ print("\n🎯 Execution Summary:")
+ print(f"Successes: {len(ride_state.successes)}")
+ print(f"Mistakes: {len(ride_state.mistakes)}")
+ print(f"Facts learned: {len(ride_state.facts)}")
+
+ return state, {
+ "todos_completed": len(ride_state.successes),
+ "ride_state": ride_state,
+ "final_context": ride_state.get_context_summary()
+ }
 # --- Main Application Logic ---

 def check_deprecation_warnings():
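Note: the new /ride mode above keeps bounded lists of facts, mistakes, and successes on RideState and folds them into a short prompt context. A small usage sketch of the get_context_summary pattern as defined above, with made-up sample data:

```python
# Usage sketch of the RideState context-summary pattern; sample data is invented.
from dataclasses import dataclass, field
from typing import List

@dataclass
class RideStateSketch:
    facts: List[str] = field(default_factory=list)
    mistakes: List[str] = field(default_factory=list)
    successes: List[str] = field(default_factory=list)

    def get_context_summary(self) -> str:
        context = []
        if self.facts:
            context.append(f"Facts: {'; '.join(self.facts[:5])}")
        if self.mistakes:
            context.append(f"Recent mistakes: {'; '.join(self.mistakes[-3:])}")
        if self.successes:
            context.append(f"Recent successes: {'; '.join(self.successes[-3:])}")
        return "\n".join(context)

state = RideStateSketch(facts=["tests live in tests/"], mistakes=["edited wrong file"])
print(state.get_context_summary())
```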
@@ -856,12 +1175,12 @@ def print_welcome_message():
 print(
 """
 Welcome to \033[1;94mnpc\033[0m\033[1;38;5;202msh\033[0m!
- \033[1;94m \033[0m\033[1;38;5;202m \\\\
- \033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
- \033[1;94m| '_ \ | '_ \ / __|\033[0m\033[1;38;5;202m/ __/ | |_ _| \\\\
- \033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m\_ \ | | | | //
- \033[1;94m|_| |_|| .__/ \___|\033[0m\033[1;38;5;202m|___/ |_| |_| //
- \033[1;94m| | \033[0m\033[1;38;5;202m //
+ \033[1;94m \033[0m\033[1;38;5;202m \\\\
+ \033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
+ \033[1;94m| '_ \\ | ' \\ / __|\033[0m\033[1;38;5;202m / __/ | |_ _| \\\\
+ \033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m \\_ \\ | | | | //
+ \033[1;94m|_| |_|| .__/ \\___|\033[0m\033[1;38;5;202m |___/ |_| |_| //
+ \033[1;94m| | \033[0m\033[1;38;5;202m //
 \033[1;94m| |
 \033[1;94m|_|

@@ -870,7 +1189,6 @@ Begin by asking a question, issuing a bash command, or typing '/help' for more i
 """
 )

-
 def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
 check_deprecation_warnings()
 setup_npcsh_config()
@@ -891,89 +1209,87 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
 project_team_path = os.path.abspath(PROJECT_NPC_TEAM_PATH)
 global_team_path = os.path.expanduser(DEFAULT_NPC_TEAM_PATH)
 team_dir = None
- forenpc_obj = None
- team_ctx = {}
+ default_forenpc_name = None

- # --- Always prefer local/project team first ---
 if os.path.exists(project_team_path):
 team_dir = project_team_path
- forenpc_name = "forenpc"
+ default_forenpc_name = "forenpc"
 else:
 resp = input(f"No npc_team found in {os.getcwd()}. Create a new team here? [Y/n]: ").strip().lower()
 if resp in ("", "y", "yes"):
 team_dir = project_team_path
 os.makedirs(team_dir, exist_ok=True)
- forenpc_name = "forenpc"
+ default_forenpc_name = "forenpc"
 forenpc_directive = input(
- f"Enter a primary directive for {forenpc_name} (default: 'You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests.'): "
+ f"Enter a primary directive for {default_forenpc_name} (default: 'You are the forenpc of the team...'): "
 ).strip() or "You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests."
 forenpc_model = input("Enter a model for your forenpc (default: llama3.2): ").strip() or "llama3.2"
 forenpc_provider = input("Enter a provider for your forenpc (default: ollama): ").strip() or "ollama"
- forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
- if not os.path.exists(forenpc_path):
- with open(forenpc_path, "w") as f:
- yaml.dump({
- "name": forenpc_name,
- "primary_directive": forenpc_directive,
- "model": forenpc_model,
- "provider": forenpc_provider
- }, f)
+
+ with open(os.path.join(team_dir, f"{default_forenpc_name}.npc"), "w") as f:
+ yaml.dump({
+ "name": default_forenpc_name, "primary_directive": forenpc_directive,
+ "model": forenpc_model, "provider": forenpc_provider
+ }, f)
+
 ctx_path = os.path.join(team_dir, "team.ctx")
- folder_context = input("Enter a short description or context for this project/team (optional): ").strip()
- team_ctx = {
- "forenpc": forenpc_name,
- "model": forenpc_model,
- "provider": forenpc_provider,
- "api_key": None,
- "api_url": None,
+ folder_context = input("Enter a short description for this project/team (optional): ").strip()
+ team_ctx_data = {
+ "forenpc": default_forenpc_name, "model": forenpc_model,
+ "provider": forenpc_provider, "api_key": None, "api_url": None,
 "context": folder_context if folder_context else None
 }
- use_jinxs = input("Do you want to copy jinxs from the global folder to this project (c), or use them from the global folder (g)? [c/g, default: g]: ").strip().lower()
- global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
- project_jinxs_dir = os.path.join(team_dir, "jinxs")
+ use_jinxs = input("Use global jinxs folder (g) or copy to this project (c)? [g/c, default: g]: ").strip().lower()
 if use_jinxs == "c":
+ global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
 if os.path.exists(global_jinxs_dir):
- shutil.copytree(global_jinxs_dir, project_jinxs_dir, dirs_exist_ok=True)
- print(f"Copied jinxs from {global_jinxs_dir} to {project_jinxs_dir}")
- else:
- print(f"No global jinxs found at {global_jinxs_dir}")
+ shutil.copytree(global_jinxs_dir, os.path.join(team_dir, "jinxs"), dirs_exist_ok=True)
 else:
- team_ctx["use_global_jinxs"] = True
+ team_ctx_data["use_global_jinxs"] = True

 with open(ctx_path, "w") as f:
- yaml.dump(team_ctx, f)
+ yaml.dump(team_ctx_data, f)
 elif os.path.exists(global_team_path):
 team_dir = global_team_path
- forenpc_name = "sibiji"
+ default_forenpc_name = "sibiji"
 else:
 print("No global npc_team found. Please run 'npcpy init' or create a team first.")
 sys.exit(1)

- # --- Load team context if it exists ---
- ctx_path = os.path.join(team_dir, "team.ctx")
- if os.path.exists(ctx_path):
- with open(ctx_path, "r") as f:
- team_ctx = yaml.safe_load(f) or team_ctx
+ team_ctx = {}
+ for filename in os.listdir(team_dir):
+ if filename.endswith(".ctx"):
+ try:
+ with open(os.path.join(team_dir, filename), "r") as f:
+ team_ctx = yaml.safe_load(f) or {}
+ break
+ except Exception as e:
+ print(f"Warning: Could not load context file {filename}: {e}")

- # --- Load the forenpc_obj ---
- forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
- if os.path.exists(forenpc_path):
- forenpc_obj = NPC(forenpc_path)
- else:
- forenpc_obj = None
+ forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
+ print(f"Using forenpc: {forenpc_name}")

- # --- Decide which jinxs directory to use ---
 if team_ctx.get("use_global_jinxs", False):
 jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
 else:
 jinxs_dir = os.path.join(team_dir, "jinxs")
- from npcpy.npc_compiler import load_jinxs_from_directory
+
 jinxs_list = load_jinxs_from_directory(jinxs_dir)
 jinxs_dict = {jinx.jinx_name: jinx for jinx in jinxs_list}

+ forenpc_obj = None
+ forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
+ #print('forenpc_path', forenpc_path)
+ #print('jinx list', jinxs_list)
+ if os.path.exists(forenpc_path):
+
+ forenpc_obj = NPC(file = forenpc_path, jinxs=jinxs_list)
+ else:
+ print(f"Warning: Forenpc file '{forenpc_name}.npc' not found in {team_dir}.")
+
 team = Team(team_path=team_dir, forenpc=forenpc_obj, jinxs=jinxs_dict)
 return command_history, team, forenpc_obj
-
+
 def process_result(
 user_input: str,
 result_state: ShellState,
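Note: setup_shell now discovers the first *.ctx file in the team directory instead of hard-coding team.ctx, and falls back to a default forenpc name when the key is absent. A hedged sketch of that lookup; the directory path and keys shown here are illustrative:

```python
# Sketch of the *.ctx discovery pattern shown above; paths and keys are illustrative.
import os
import yaml

def load_team_ctx(team_dir: str) -> dict:
    for filename in os.listdir(team_dir):
        if filename.endswith(".ctx"):
            try:
                with open(os.path.join(team_dir, filename), "r") as f:
                    return yaml.safe_load(f) or {}
            except Exception as e:
                print(f"Warning: could not load {filename}: {e}")
    return {}

# Example (assumed default team directory):
# ctx = load_team_ctx(os.path.expanduser("~/.npcsh/npc_team"))
# forenpc_name = ctx.get("forenpc", "sibiji")
```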
@@ -1001,6 +1317,7 @@ def process_result(
 if user_input =='/help':
 render_markdown(output)
 elif result_state.stream_output:
+
 try:
 final_output_str = print_and_process_stream_with_markdown(output, result_state.chat_model, result_state.chat_provider)
 except AttributeError as e:
@@ -1008,6 +1325,16 @@
 if len(output) > 0:
 final_output_str = output
 render_markdown(final_output_str)
+ except TypeError as e:
+
+ if isinstance(output, str):
+ if len(output) > 0:
+ final_output_str = output
+ render_markdown(final_output_str)
+ elif isinstance(output, dict):
+ if 'output' in output:
+ final_output_str = output['output']
+ render_markdown(final_output_str)

 elif output is not None:
 final_output_str = str(output)
@@ -1041,12 +1368,12 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):

 def exit_shell(state):
 print("\nGoodbye!")
- print('beginning knowledge consolidation')
- try:
- breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
- print(breathe_result)
- except KeyboardInterrupt:
- print("Knowledge consolidation interrupted. Exiting immediately.")
+ #print('beginning knowledge consolidation')
+ #try:
+ # breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
+ # print(breathe_result)
+ #except KeyboardInterrupt:
+ # print("Knowledge consolidation interrupted. Exiting immediately.")
 sys.exit(0)

 while True:
npcsh/routes.py CHANGED
@@ -10,8 +10,7 @@ from datetime import datetime
 from sqlalchemy import create_engine
 import logging

- from npcpy.npc_sysenv import (
- render_code_block, render_markdown,
+ from npcsh._state import (
 NPCSH_VISION_MODEL, NPCSH_VISION_PROVIDER, NPCSH_API_URL,
 NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER, NPCSH_STREAM_OUTPUT,
 NPCSH_IMAGE_GEN_MODEL, NPCSH_IMAGE_GEN_PROVIDER,
@@ -41,14 +40,15 @@ from npcpy.memory.command_history import CommandHistory

 from npcpy.memory.knowledge_graph import breathe
 from npcpy.memory.sleep import sleep, forget
+ from npcpy.serve import start_flask_server

- from npcpy.modes.guac import enter_guac_mode
- from npcpy.modes.plonk import execute_plonk_command
- from npcpy.modes.serve import start_flask_server
- from npcpy.modes.alicanto import alicanto
- from npcpy.modes.spool import enter_spool_mode
- from npcpy.modes.wander import enter_wander_mode
- from npcpy.modes.yap import enter_yap_mode
+
+ from npcsh.guac import enter_guac_mode
+ from npcsh.plonk import execute_plonk_command
+ from npcsh.alicanto import alicanto
+ from npcsh.spool import enter_spool_mode
+ from npcsh.wander import enter_wander_mode
+ from npcsh.yap import enter_yap_mode
npcsh/spool.py CHANGED
@@ -7,10 +7,13 @@ import os
 from npcpy.npc_sysenv import (
 print_and_process_stream_with_markdown,
 )
+ from npcpy.npc_sysenv import (
+ get_system_message,
+ render_markdown,
+
+ )
 from npcsh._state import (
 orange,
- get_system_message,
- render_markdown,
 NPCSH_VISION_MODEL,
 NPCSH_VISION_PROVIDER,
 NPCSH_CHAT_MODEL,
@@ -21,7 +24,7 @@ from npcpy.llm_funcs import (get_llm_response,)

 from npcpy.npc_compiler import NPC
 from typing import Any, List, Dict, Union
- from npcpy.modes.yap import enter_yap_mode
+ from npcsh.yap import enter_yap_mode


 def enter_spool_mode(
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: npcsh
- Version: 1.0.1
- Summary: npcsh is a command-line toolkit for using AI agents.
+ Version: 1.0.3
+ Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
 Author-email: info@npcworldwi.de
@@ -402,7 +402,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)


 ## NPC Studio
- There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables (soon) at [our website](https://www.npcworldwi.de/npc-studio).
+ There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).


 ## Mailing List
@@ -1,21 +1,21 @@
 npcsh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcsh/_state.py,sha256=ScwAYt01ZCksd3fuFBVa_VmgFzd3lR5Pg3q__sFmJQc,27450
 npcsh/alicanto.py,sha256=zJF5YwSNvtbK2EUKXzG45WOCMsSFu5cek5jCR7FgiuE,44709
- npcsh/guac.py,sha256=qvuaJWP1RKnYIf6s7_VIbKdwM7R--EtOdUbN_kjioLQ,32029
+ npcsh/guac.py,sha256=Ocmk_c4NUtGsC3JOtmkbgLvD6u-XtBPRFRYcckpgUJU,33099
 npcsh/mcp_helpers.py,sha256=Ktd2yXuBnLL2P7OMalgGLj84PXJSzaucjqmJVvWx6HA,12723
 npcsh/mcp_npcsh.py,sha256=SfmplH62GS9iI6q4vuQLVUS6tkrok6L7JxODx_iH7ps,36158
 npcsh/mcp_server.py,sha256=l2Ra0lpFrUu334pvp0Q9ajF2n73KvZswFi0FgbDhh9k,5884
 npcsh/npc.py,sha256=JEP0nqbqRGvAthj9uT0ZfbGc322g3Ge3rRDKbpIdI1s,7907
- npcsh/npcsh.py,sha256=37E4SJTwokiL0V8XoI3KOaF_1vj-oK9SPmvBFuLHrXg,44661
+ npcsh/npcsh.py,sha256=brkRTACeuKOWWUooLLVMLXk3NzqqajhY038z-ax1UZ8,57437
 npcsh/plonk.py,sha256=U2e9yUJZN95Girzzvgrh-40zOdl5zO3AHPsIjoyLv2M,15261
 npcsh/pti.py,sha256=jGHGE5SeIcDkV8WlOEHCKQCnYAL4IPS-kUBHrUz0oDA,10019
- npcsh/routes.py,sha256=7OUsGhoxD_IPcYMlhkOFEUmFz4ESOwvOR0bY4niXe3w,37293
- npcsh/spool.py,sha256=k5L2My_nFkyVdgovOcxyijy2r1tq0N9Gv58m1Qec09k,11461
+ npcsh/routes.py,sha256=ufQVc6aqgC14_YHV88iwV53TN1Pk095NB6gFDqQqfB4,37208
+ npcsh/spool.py,sha256=GhnSFX9uAtrB4m_ijuyA5tufH12DrWdABw0z8FmiCHc,11497
 npcsh/wander.py,sha256=BiN6eYyFnEsFzo8MFLRkdZ8xS9sTKkQpjiCcy9chMcc,23225
 npcsh/yap.py,sha256=h5KNt9sNOrDPhGe_zfn_yFIeQhizX09zocjcPWH7m3k,20905
- npcsh-1.0.1.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
- npcsh-1.0.1.dist-info/METADATA,sha256=vAD4k3ZVEXAoz21Icu1X2NiJP72cFieOam9lAO-sMDM,22747
- npcsh-1.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- npcsh-1.0.1.dist-info/entry_points.txt,sha256=qlhb6CZt5rJug9ByxZTvS5fO9_aFPBtV-J2sh6GU7GQ,214
- npcsh-1.0.1.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
- npcsh-1.0.1.dist-info/RECORD,,
+ npcsh-1.0.3.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
+ npcsh-1.0.3.dist-info/METADATA,sha256=9XxVGZ81JI7bDibWnSq4Wjc4XoecuuEuEUopSVhVh0w,22747
+ npcsh-1.0.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ npcsh-1.0.3.dist-info/entry_points.txt,sha256=qxOYTm3ym3JWyWf2nv2Mk71uMcJIdUoNEJ8VYMkyHiY,214
+ npcsh-1.0.3.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
+ npcsh-1.0.3.dist-info/RECORD,,
@@ -0,0 +1,9 @@
+ [console_scripts]
+ guac = npcsh.guac:main
+ npc = npcsh.npc:main
+ npcsh = npcsh.npcsh:main
+ npcsh-mcp = npcsh.mcp_npcsh:main
+ pti = npcsh.pti:main
+ spool = npcsh.spool:main
+ wander = npcsh.wander:main
+ yap = npcsh.yap:main
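Note: the console scripts now point at npcsh.* modules rather than npcpy.*. For orientation, an entry like `npcsh = npcsh.npcsh:main` behaves roughly like this hand-written launcher (illustrative only, not the wrapper setuptools actually generates):

```python
# Roughly what the `npcsh = npcsh.npcsh:main` console script does when invoked.
import sys

if __name__ == "__main__":
    from npcsh.npcsh import main
    sys.exit(main())
```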
@@ -1,9 +0,0 @@
- [console_scripts]
- guac = npcpy.guac:main
- npc = npcpy.npc:main
- npcsh = npcpy.npcsh:main
- npcsh-mcp = npcpy.mcp_npcsh:main
- pti = npcpy.pti:main
- spool = npcpy.spool:main
- wander = npcpy.wander:main
- yap = npcpy.yap:main