npcsh 1.0.2__tar.gz → 1.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcsh-1.0.2 → npcsh-1.0.3}/PKG-INFO +3 -3
- {npcsh-1.0.2 → npcsh-1.0.3}/README.md +1 -1
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/guac.py +19 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/npcsh.py +506 -180
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh.egg-info/PKG-INFO +3 -3
- {npcsh-1.0.2 → npcsh-1.0.3}/setup.py +2 -2
- {npcsh-1.0.2 → npcsh-1.0.3}/LICENSE +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/__init__.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/_state.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/alicanto.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/mcp_helpers.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/mcp_npcsh.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/mcp_server.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/npc.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/plonk.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/pti.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/routes.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/spool.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/wander.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh/yap.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh.egg-info/SOURCES.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh.egg-info/dependency_links.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh.egg-info/entry_points.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh.egg-info/requires.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/npcsh.egg-info/top_level.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.3}/setup.cfg +0 -0
```diff
--- npcsh-1.0.2/PKG-INFO
+++ npcsh-1.0.3/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.2
-Summary: npcsh is a command-line toolkit for using AI agents.
+Version: 1.0.3
+Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
 Author-email: info@npcworldwi.de
```
```diff
@@ -402,7 +402,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)
 
 
 ## NPC Studio
-There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables
+There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).
 
 
 ## Mailing List
```
```diff
--- npcsh-1.0.2/README.md
+++ npcsh-1.0.3/README.md
@@ -307,7 +307,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)
 
 
 ## NPC Studio
-There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables
+There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).
 
 
 ## Mailing List
```
````diff
--- npcsh-1.0.2/npcsh/guac.py
+++ npcsh-1.0.3/npcsh/guac.py
@@ -607,11 +607,30 @@ def execute_guac_command(command: str, state: GuacState) -> Tuple[GuacState, Any
         state.command_history.add_command(nl_input_for_llm, [history_output], "", state.current_path)
 
     elif state.current_mode == "cmd":
+        locals_context_string = "Current Python environment variables and functions:\n"
+        if state.locals:
+            for k, v in state.locals.items():
+                if not k.startswith('__'): # Exclude Python built-ins and internal vars
+                    try:
+                        # Use repr() for a developer-friendly representation
+                        value_repr = repr(v)
+                        # Truncate long representations to prevent context window bloat
+                        if len(value_repr) > 200:
+                            value_repr = value_repr[:197] + "..."
+                        locals_context_string += f"- {k} (type: {type(v).__name__}) = {value_repr}\n"
+                    except Exception:
+                        locals_context_string += f"- {k} (type: {type(v).__name__}) = <unrepresentable>\n"
+            # Add a clear separator for LLM to distinguish this context
+            locals_context_string += "\n--- End of Environment Context ---\n"
+        else:
+            locals_context_string += "(Environment is empty)\n"
+
         prompt_cmd = (
             f"User input for Python CMD mode: '{nl_input_for_llm}'.\n"
             f"Generate ONLY executable Python code required to fulfill this.\n"
             f"Do not include any explanations, leading markdown like ```python, or any text other than the Python code itself.\n"
         )
+
         llm_response = get_llm_response(
             prompt_cmd,
             model=state.chat_model,
````
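The block added to guac.py above snapshots the interpreter's locals into a plain-text summary (dunder names skipped, each repr truncated to 200 characters) before the cmd-mode prompt is built. Below is a standalone sketch of that summarization logic; the helper name `summarize_locals` is hypothetical and not part of guac.py.

```python
# Illustrative sketch of the locals-summary logic added in guac.py 1.0.3;
# the function name and defaults here are assumptions for demonstration.
def summarize_locals(local_vars: dict, max_repr: int = 200) -> str:
    lines = ["Current Python environment variables and functions:"]
    if not local_vars:
        lines.append("(Environment is empty)")
        return "\n".join(lines)
    for name, value in local_vars.items():
        if name.startswith("__"):           # skip dunders / interpreter internals
            continue
        try:
            value_repr = repr(value)
            if len(value_repr) > max_repr:  # truncate to limit prompt size
                value_repr = value_repr[: max_repr - 3] + "..."
        except Exception:
            value_repr = "<unrepresentable>"
        lines.append(f"- {name} (type: {type(value).__name__}) = {value_repr}")
    lines.append("--- End of Environment Context ---")
    return "\n".join(lines)

print(summarize_locals({"df_rows": 12, "paths": ["a.csv", "b.csv"]}))
```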
```diff
--- npcsh-1.0.2/npcsh/npcsh.py
+++ npcsh-1.0.3/npcsh/npcsh.py
@@ -1,4 +1,3 @@
-# Standard Library Imports
 import os
 import sys
 import atexit
```
```diff
@@ -548,60 +547,90 @@ def process_pipeline_command(
     if not cmd_to_process:
         return state, stdin_input
 
-    exec_model = model_override or state.chat_model
-    exec_provider = provider_override or state.chat_provider
+    exec_model = model_override or state.chat_model
+    exec_provider = provider_override or state.chat_provider
 
     if cmd_to_process.startswith("/"):
-        #print(cmd_to_process)
         return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
-
-
-
-
-
+
+    try:
+        cmd_parts = parse_command_safely(cmd_to_process)
+        if not cmd_parts:
+            return state, stdin_input
 
-
+        command_name = cmd_parts[0]
 
+        is_unambiguous_bash = (
+            command_name in BASH_COMMANDS or
+            command_name in interactive_commands or
+            command_name == "cd" or
+            cmd_to_process.startswith("./")
+        )
+
+        if is_unambiguous_bash:
             if command_name in interactive_commands:
                 return handle_interactive_command(cmd_parts, state)
             elif command_name == "cd":
                 return handle_cd_command(cmd_parts, state)
             else:
-                [... 35 removed lines not shown in this diff view ...]
+                return handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
+        else:
+            full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+
+            path_cmd = 'The current working directory is: ' + state.current_path
+            ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+            platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+            full_llm_cmd = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n' + full_llm_cmd
+            llm_result = check_llm_command(
+                full_llm_cmd,
+                model=exec_model,
+                provider=exec_provider,
+                api_url=state.api_url,
+                api_key=state.api_key,
+                npc=state.npc,
+                team=state.team,
+                messages=state.messages,
+                images=state.attachments,
+                stream=stream_final,
+                context=None,
+                shell=True,
+            )
+            if isinstance(llm_result, dict):
+                state.messages = llm_result.get("messages", state.messages)
+                output = llm_result.get("output")
+                return state, output
+            else:
+                return state, llm_result
+
+    except CommandNotFoundError as e:
+        print(colored(f"Command not found, falling back to LLM: {e}", "yellow"), file=sys.stderr)
+        full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+        llm_result = check_llm_command(
+            full_llm_cmd,
+            model=exec_model,
+            provider=exec_provider,
+            api_url=state.api_url,
+            api_key=state.api_key,
+            npc=state.npc,
+            team=state.team,
+            messages=state.messages,
+            images=state.attachments,
+            stream=stream_final,
+            context=None,
+            shell=True
+        )
+        if isinstance(llm_result, dict):
+            state.messages = llm_result.get("messages", state.messages)
+            output = llm_result.get("output")
+            return state, output
+        else:
+            return state, llm_result
+
+    except Exception as e:
+        import traceback
+        traceback.print_exc()
+        return state, colored(f"Error processing command '{cmd_segment[:50]}...': {e}", "red")
+
 def check_mode_switch(command:str , state: ShellState):
     if command in ['/cmd', '/agent', '/chat', '/ride']:
         state.current_mode = command[1:]
```
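With this rewrite, process_pipeline_command only treats a segment as shell input when it is unambiguously bash (a known bash command, an interactive command, `cd`, or a `./` invocation); everything else is prefixed with the working directory, file listing, and platform info and routed to `check_llm_command`, and a `CommandNotFoundError` raised on the bash path falls back to the LLM as well. The following is a simplified, self-contained sketch of that routing, using stub command sets and hypothetical `run_bash`/`ask_llm` stand-ins rather than the real npcsh helpers:

```python
import platform

BASH_COMMANDS = {"ls", "cat", "grep", "echo"}    # stub subset, for illustration only
INTERACTIVE_COMMANDS = {"vim", "python", "ssh"}  # stub subset, for illustration only

class CommandNotFoundError(Exception):
    pass

def run_bash(cmd: str) -> str:        # hypothetical stand-in for the bash handler
    raise CommandNotFoundError(cmd)

def ask_llm(prompt: str) -> str:      # hypothetical stand-in for check_llm_command
    return f"[LLM answer for: {prompt.splitlines()[-1]}]"

def route(cmd: str, cwd: str = ".") -> str:
    name = cmd.split()[0]
    is_unambiguous_bash = (
        name in BASH_COMMANDS or name in INTERACTIVE_COMMANDS
        or name == "cd" or cmd.startswith("./")
    )
    if is_unambiguous_bash:
        try:
            return run_bash(cmd)
        except CommandNotFoundError:
            pass  # fall through to the LLM, mirroring the new except branch
    # Prepend the same kind of environment context the real code builds.
    context = (f"The current working directory is: {cwd}\n"
               f"Platform: {platform.system()} {platform.release()}\n")
    return ask_llm(context + cmd)

print(route("summarize the csv files here"))  # routed to the LLM
print(route("ls -la"))                        # bash path, falls back on failure
```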
```diff
@@ -624,6 +653,7 @@ def execute_command(
     stdin_for_next = None
     final_output = None
     current_state = state
+
     if state.current_mode == 'agent':
         for i, cmd_segment in enumerate(commands):
             is_last_command = (i == len(commands) - 1)
```
```diff
@@ -646,12 +676,14 @@ def execute_command(
                 if not stream_this_segment: # If intermediate output is a stream, consume for piping
                     full_stream_output = "".join(map(str, output))
                     stdin_for_next = full_stream_output
-                    if is_last_command:
+                    if is_last_command:
+                        final_output = full_stream_output
                 else: # Final output is a stream, don't consume, can't pipe
                     stdin_for_next = None
                     final_output = output
             elif output is not None: # Try converting other types to string
-                try:
+                try:
+                    stdin_for_next = str(output)
                 except Exception:
                     print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
                     stdin_for_next = None
```
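The piping fix above keeps a consumed intermediate stream usable: when a segment's streamed output is joined into a string for the next segment's stdin, that string is now also retained as the final output if the segment happens to be the last one. A minimal illustration of the join step, with a fake generator standing in for a streamed LLM response:

```python
def fake_stream():
    # Stand-in for a streamed response from one pipeline segment.
    yield "part one, "
    yield "part two"

output = fake_stream()
full_stream_output = "".join(map(str, output))  # consume the stream once
stdin_for_next = full_stream_output             # pipe it to the next segment
final_output = full_stream_output               # and, if this is the last segment, keep it

print(stdin_for_next)
```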
```diff
@@ -759,91 +791,377 @@ def execute_command(
 
     # Otherwise, run the agentic ride loop
     return agentic_ride_loop(command, state)
+@dataclass
+class RideState:
+    """Lightweight state tracking for /ride mode"""
+    todos: List[Dict[str, Any]] = field(default_factory=list)
+    constraints: List[str] = field(default_factory=list)
+    facts: List[str] = field(default_factory=list)
+    mistakes: List[str] = field(default_factory=list)
+    successes: List[str] = field(default_factory=list)
+    current_todo_index: int = 0
+    current_subtodo_index: int = 0
+
+    def get_context_summary(self) -> str:
+        """Generate lightweight context for LLM prompts"""
+        context = []
+        if self.facts:
+            context.append(f"Facts: {'; '.join(self.facts[:5])}") # Limit to 5 most recent
+        if self.mistakes:
+            context.append(f"Recent mistakes: {'; '.join(self.mistakes[-3:])}")
+        if self.successes:
+            context.append(f"Recent successes: {'; '.join(self.successes[-3:])}")
+        return "\n".join(context)
+
+def interactive_edit_list(items: List[str], item_type: str) -> List[str]:
+    """Interactive REPL for editing lists of items with regeneration options"""
+    while True:
+        print(f"\nCurrent {item_type}:")
+        for i, item in enumerate(items, 1):
+            print(f"{i}. {item}")
+
+        choice = input(f"\nEdit {item_type} (e<num> to edit, d<num> to delete, a to add, r to regenerate, c to add context, ok to continue): ").strip()
+
+        if choice.lower() == 'ok':
+            break
+        elif choice.lower() == 'r':
+            print("Regenerating list...")
+            return "REGENERATE" # Special signal to regenerate
+        elif choice.lower() == 'c':
+            additional_context = input("Add more context: ").strip()
+            if additional_context:
+                return {"ADD_CONTEXT": additional_context, "items": items}
+        elif choice.lower() == 'a':
+            new_item = input(f"Enter new {item_type[:-1]}: ").strip()
+            if new_item:
+                items.append(new_item)
+        elif choice.lower().startswith('e'):
+            try:
+                idx = int(choice[1:]) - 1
+                if 0 <= idx < len(items):
+                    print(f"Current: {items[idx]}")
+                    new_item = input("New version: ").strip()
+                    if new_item:
+                        items[idx] = new_item
+            except ValueError:
+                print("Invalid format. Use e<number>")
+        elif choice.lower().startswith('d'):
+            try:
+                idx = int(choice[1:]) - 1
+                if 0 <= idx < len(items):
+                    items.pop(idx)
+            except ValueError:
+                print("Invalid format. Use d<number>")
+        else:
+            print("Invalid choice. Use: e<num>, d<num>, a, r (regenerate), c (add context), or ok")
+
+    return items
+def generate_todos(user_goal: str, state: ShellState, additional_context: str = "") -> List[Dict[str, Any]]:
+    """Generate high-level todos for the user's goal"""
+    path_cmd = 'The current working directory is: ' + state.current_path
+    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+    info = path_cmd + '\n' + ls_files + '\n' + platform_info
 
 
-
-    """
-
-
+
+    high_level_planning_instruction = """
+    You are a high-level project planner. When a user asks to work on a file or code,
+    structure your plan using a simple, high-level software development lifecycle:
+    1. First, understand the current state (e.g., read the relevant file).
+    2. Second, make the required changes based on the user's goal.
+    3. Third, verify the changes work as intended (e.g., test the code).
+    Your generated todos should reflect this high-level thinking.
 
+
+
     """
-
-
+
+    prompt = f"""
+    {high_level_planning_instruction}
 
-
-
+    User goal: {user_goal}
+
+    {additional_context}
+
+    Generate a list of 3 todos to accomplish this goal. Use specific actionable language based on the user request.
+    Do not make assumptions about user needs.
+    Every todo must be directly sourced from the user's request.
+    If users request specific files to be incorporated, you MUST include the full path to the file in the todo.
+    Here is some relevant information for the current folder and working directory that may be relevant:
+    {info}
+
+    For example, if the user says "I need to add a new function to calculate the average of a list of numbers my research.py script" and the current working directory is /home/user/projects and one
+    of the available files in the current directory is /home/user/projects/research.py then one of the todos should be:
+    - "Add a new function to /home/user/projects/research.py to calculate the average of a list of numbers"
+    Do not truncate paths. Do not additional paths. Use them exactly as they are provided here.
+
+    Each todo should be:
+    - Specific and actionable
+    - Independent where possible
+    - Focused on a single major component
+
+    Remember, it is critical to provide as much relevant information as possible. Even if the user only refers to a file or something by a relative path, it is
+    critical for operation that you provide the full path to the file in the todo item.
+
+    Return JSON with format:
+    {{
+        "todos": [
+            todo1, todo2, todo3,
+        ]
+    }}
+    """
+
+    response = get_llm_response(
+        prompt,
+        model=state.chat_model,
+        provider=state.chat_provider,
+        npc=state.npc,
+        format="json"
+    )
+
+    todos_data = response.get("response", {}).get("todos", [])
+    return todos_data
 
-    while True:
-        # 1. Orchestrate the current request
-        result = state.team.orchestrate(request)
-        all_results.append(result)
-        render_markdown("# Orchestration Result")
-        render_markdown(f"- Request: {request}")
-        render_markdown(f"- Final response: {result.get('output')}")
-
-        render_markdown('- Summary: '+result['debrief']['summary'])
-        recommendations = result['debrief']['recommendations']
-        render_markdown(f'- Recommendations: {recommendations}')
-
-
-        # 2. Ask LLM for three next possible steps
-        suggestion_prompt = f"""
-        Given the following user goal and orchestration result, suggest three new
-        avenues to go down that are related but distinct from the original goal and from each other.
-
-        Be concise. Each step should be a single actionable instruction or question.
-
-        User goal: {user_goal}
-        Orchestration result: {result}
-
-        Return a JSON object with a "steps" key, whose value is a list of three strings, each string being a next step.
-        Return only the JSON object.
-        """
-        suggestions = get_llm_response(
-            suggestion_prompt,
-            model=state.chat_model,
-            provider=state.chat_provider,
-            api_url=state.api_url,
-            api_key=state.api_key,
-            npc=state.npc,
-            format="json"
-        )
-        # No custom parsing: just use the parsed output
-        steps = suggestions.get("response", {}).get("steps", [])
-        if not steps or len(steps) < 1:
-            print("No further steps suggested by LLM. Exiting.")
-            break
 
-
-
-
+def generate_constraints(todos: List[Dict[str, Any]], user_goal: str, state: ShellState) -> List[str]:
+    """Generate constraints and requirements that define relationships between todos"""
+    prompt = f"""
+    User goal: {user_goal}
+
+    Todos to accomplish:
+    {chr(10).join([f"- {todo}" for todo in todos])}
+
+    Based ONLY on what the user explicitly stated in their goal, identify any constraints or requirements they mentioned.
+    Do NOT invent new constraints. Only extract constraints that are directly stated or clearly implied by the user's request.
+
+    Examples of valid constraints:
+    - If user says "without breaking existing functionality" -> "Maintain existing functionality"
+    - If user says "must be fast" -> "Performance must be optimized"
+    - If user says "should integrate with X" -> "Must integrate with X"
+
+    If the user didn't specify any constraints, return an empty list.
+
+    Return JSON with format:
+    {{
+        "constraints": ["constraint 1", "constraint 2", ...]
+    }}
+    """
+
+    response = get_llm_response(
+        prompt,
+        model=state.chat_model,
+        provider=state.chat_provider,
+        npc=state.npc,
+        format="json"
+    )
+
+    constraints_data = response.get("response", {})
+
+    if isinstance(constraints_data, dict):
+        constraints = constraints_data.get("constraints", [])
+        # Make sure we're getting strings, not dicts
+        cleaned_constraints = []
+        for c in constraints:
+            if isinstance(c, str):
+                cleaned_constraints.append(c)
+        return cleaned_constraints
+    else:
+        return []
+def should_break_down_todo(todo, state: ShellState) -> bool:
+    """Ask LLM if a todo needs breakdown, then ask user for confirmation"""
+    prompt = f"""
+    Todo: {todo}
+
+
+    Does this todo need to be broken down into smaller, more atomic components?
+    Consider:
+    - Is it complex enough to warrant breakdown?
+    - Would breaking it down make execution clearer?
+    - Are there multiple distinct steps involved?
+
+    Return JSON: {{"should_break_down": true/false, "reason": "explanation"}}
+    """
+
+    response = get_llm_response(
+        prompt,
+        model=state.chat_model,
+        provider=state.chat_provider,
+        npc=state.npc,
+        format="json"
+    )
+
+    result = response.get("response", {})
+    llm_suggests = result.get("should_break_down", False)
+    reason = result.get("reason", "No reason provided")
+
+    if llm_suggests:
+        print(f"\nLLM suggests breaking down: '{todo}'")
+        print(f"Reason: {reason}")
+        user_choice = input("Break it down? [y/N]: ").strip().lower()
+        return user_choice in ['y', 'yes']
+
+    return False
 
-    [... 5 removed lines not shown in this diff view ...]
+def generate_subtodos(todo, state: ShellState) -> List[Dict[str, Any]]:
+    """Generate atomic subtodos for a complex todo"""
+    prompt = f"""
+    Parent todo: {todo}
+
+    Break this down into atomic, executable subtodos. Each subtodo should be:
+    - A single, concrete action
+    - Executable in one step
+    - Clear and unambiguous
+
+    Return JSON with format:
+    {{
+        "subtodos": [
+            "subtodo description",
+            ...
+        ]
+    }}
+    """
+
+    response = get_llm_response(
+        prompt,
+        model=state.chat_model,
+        provider=state.chat_provider,
+        npc=state.npc,
+        format="json"
+    )
+
+    return response.get("response", {}).get("subtodos", [])
+def execute_todo_item(todo: Dict[str, Any], ride_state: RideState, shell_state: ShellState) -> bool:
+    """Execute a single todo item using the existing jinx system"""
+    path_cmd = 'The current working directory is: ' + shell_state.current_path
+    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(shell_state.current_path, f) for f in os.listdir(shell_state.current_path)]) if os.path.exists(shell_state.current_path) else 'No files found in the current directory.'
+    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+    info = path_cmd + '\n' + ls_files + '\n' + platform_info
+
+    command = f"""
+
+    General information:
+    {info}
+
+    Execute this todo: {todo}
+
+    Constraints to follow:
+    {chr(10).join([f"- {c}" for c in ride_state.constraints])}
+    """
+
+    print(f"\nExecuting: {todo}")
+
 
-    [... 17 removed lines not shown in this diff view ...]
+    result = check_llm_command(
+        command,
+        model=shell_state.chat_model,
+        provider=shell_state.chat_provider,
+        npc=shell_state.npc,
+        team=shell_state.team,
+        messages=[],
+        stream=shell_state.stream_output,
+        shell=True,
+    )
+
+    output_payload = result.get("output", "")
+    output_str = ""
+
+    if isgenerator(output_payload):
+        output_str = print_and_process_stream_with_markdown(output_payload, shell_state.chat_model, shell_state.chat_provider)
+    elif isinstance(output_payload, dict):
+        output_str = output_payload.get('output', str(output_payload))
+        if 'output' in output_str:
+            output_str = output_payload['output']
+        elif 'response' in output_str:
+            output_str = output_payload['response']
+        render_markdown(output_str)
+    elif output_payload:
+        output_str = str(output_payload)
+        render_markdown(output_str)
+
+    user_feedback = input(f"\nTodo completed successfully? [y/N/notes]: ").strip()
+
+    if user_feedback.lower() in ['y', 'yes']:
+        return True, output_str
+    elif user_feedback.lower() in ['n', 'no']:
+        mistake = input("What went wrong? ").strip()
+        ride_state.mistakes.append(f"Failed {todo}: {mistake}")
+        return False, output_str
+    else:
+        ride_state.facts.append(f"Re: {todo}: {user_feedback}")
+        success = input("Mark as completed? [y/N]: ").strip().lower() in ['y', 'yes']
+        return success, output_str
 
-
+def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
+    """
+    New /ride mode: hierarchical planning with human-in-the-loop control
+    """
+    ride_state = RideState()
+
+    # 1. Generate high-level todos
+    print("🚀 Generating high-level todos...")
+    todos = generate_todos(user_goal, state)
+
+    # 2. User reviews/edits todos
+    print("\n📋 Review and edit todos:")
+    todo_descriptions = [todo for todo in todos]
+    edited_descriptions = interactive_edit_list(todo_descriptions, "todos")
+
 
+    ride_state.todos = edited_descriptions
+
+    # 3. Generate constraints
+    print("\n🔒 Generating constraints...")
+    constraints = generate_constraints(edited_descriptions, user_goal, state)
+
+    # 4. User reviews/edits constraints
+    print("\n📐 Review and edit constraints:")
+    edited_constraints = interactive_edit_list(constraints, "constraints")
+    ride_state.constraints = edited_constraints
+
+    # 5. Execution loop
+    print("\n⚡ Starting execution...")
+
+    for i, todo in enumerate(edited_descriptions):
+        print(f"\n--- Todo {i+1}/{len(todos)}: {todo} ---")
+
+        def attempt_execution(current_todo):
+            # This inner function handles the execution and retry logic
+            success, output_str = execute_todo_item(current_todo, ride_state, state)
+            if not success:
+                retry = input("Retry this todo? [y/N]: ").strip().lower()
+                if retry in ['y', 'yes']:
+                    success, output_str = execute_todo_item(current_todo, ride_state, state)
+            return success, output_str
+
+        if should_break_down_todo(todo, state):
+            print("Breaking down todo...")
+            subtodos = generate_subtodos(todo, state)
+            subtodo_descriptions = [st for st in subtodos]
+            edited_subtodos = interactive_edit_list(subtodo_descriptions, "subtodos")
+
+            for j, subtodo_desc in enumerate(edited_subtodos):
+                subtodo = {"description": subtodo_desc, "type": "atomic"}
+                success, output = attempt_execution(subtodo)
+                if success:
+                    ride_state.successes.append({"description": subtodo_desc, "output": output})
+                else:
+                    print("Subtodo failed. Continuing to next...")
+        else:
+            success, output = attempt_execution(todo)
+            if success:
+                ride_state.successes.append({"description": todo, "output": output})
+    # 6. Final summary
+    print("\n🎯 Execution Summary:")
+    print(f"Successes: {len(ride_state.successes)}")
+    print(f"Mistakes: {len(ride_state.mistakes)}")
+    print(f"Facts learned: {len(ride_state.facts)}")
+
+    return state, {
+        "todos_completed": len(ride_state.successes),
+        "ride_state": ride_state,
+        "final_context": ride_state.get_context_summary()
+    }
 # --- Main Application Logic ---
 
 def check_deprecation_warnings():
```
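Taken together, the new /ride helpers plan with a RideState scratchpad: todos and constraints are generated and user-edited, each todo is executed through check_llm_command, and facts, mistakes, and successes accumulate for later prompts, with get_context_summary capping what is replayed (five facts, last three mistakes and successes). A small usage sketch of the dataclass as defined above, trimmed to the fields the summary actually reads:

```python
from dataclasses import dataclass, field
from typing import Any, Dict, List

@dataclass
class RideState:  # mirrors the class added above, reduced for illustration
    todos: List[Dict[str, Any]] = field(default_factory=list)
    constraints: List[str] = field(default_factory=list)
    facts: List[str] = field(default_factory=list)
    mistakes: List[str] = field(default_factory=list)
    successes: List[str] = field(default_factory=list)

    def get_context_summary(self) -> str:
        context = []
        if self.facts:
            context.append(f"Facts: {'; '.join(self.facts[:5])}")
        if self.mistakes:
            context.append(f"Recent mistakes: {'; '.join(self.mistakes[-3:])}")
        if self.successes:
            context.append(f"Recent successes: {'; '.join(self.successes[-3:])}")
        return "\n".join(context)

# Example values are hypothetical; they show how the /ride loop fills the scratchpad.
state = RideState(constraints=["Maintain existing functionality"])
state.facts.append("research.py lives at /home/user/projects/research.py")
state.mistakes.append("Failed 'run tests': pytest not installed")
print(state.get_context_summary())
```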
```diff
@@ -857,12 +1175,12 @@ def print_welcome_message():
     print(
         """
 Welcome to \033[1;94mnpc\033[0m\033[1;38;5;202msh\033[0m!
-\033[1;94m \033[0m\033[1;38;5;202m
-\033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m
-\033[1;94m| '_
-\033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m
-\033[1;94m|_| |_|| .__/
-\033[1;94m| | \033[0m\033[1;38;5;202m
+\033[1;94m \033[0m\033[1;38;5;202m \\\\
+\033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
+\033[1;94m| '_ \\ | ' \\ / __|\033[0m\033[1;38;5;202m / __/ | |_ _| \\\\
+\033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m \\_ \\ | | | | //
+\033[1;94m|_| |_|| .__/ \\___|\033[0m\033[1;38;5;202m |___/ |_| |_| //
+\033[1;94m| | \033[0m\033[1;38;5;202m //
 \033[1;94m| |
 \033[1;94m|_|
 
```
```diff
@@ -871,7 +1189,6 @@ Begin by asking a question, issuing a bash command, or typing '/help' for more i
     """
     )
 
-
 def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     check_deprecation_warnings()
     setup_npcsh_config()
```
```diff
@@ -892,89 +1209,87 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     project_team_path = os.path.abspath(PROJECT_NPC_TEAM_PATH)
     global_team_path = os.path.expanduser(DEFAULT_NPC_TEAM_PATH)
     team_dir = None
-
-    team_ctx = {}
+    default_forenpc_name = None
 
-    # --- Always prefer local/project team first ---
     if os.path.exists(project_team_path):
         team_dir = project_team_path
-
+        default_forenpc_name = "forenpc"
     else:
         resp = input(f"No npc_team found in {os.getcwd()}. Create a new team here? [Y/n]: ").strip().lower()
         if resp in ("", "y", "yes"):
             team_dir = project_team_path
             os.makedirs(team_dir, exist_ok=True)
-
+            default_forenpc_name = "forenpc"
             forenpc_directive = input(
-                f"Enter a primary directive for {
+                f"Enter a primary directive for {default_forenpc_name} (default: 'You are the forenpc of the team...'): "
             ).strip() or "You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests."
             forenpc_model = input("Enter a model for your forenpc (default: llama3.2): ").strip() or "llama3.2"
             forenpc_provider = input("Enter a provider for your forenpc (default: ollama): ").strip() or "ollama"
-            [... 7 removed lines not shown in this diff view ...]
-                "provider": forenpc_provider
-            }, f)
+
+            with open(os.path.join(team_dir, f"{default_forenpc_name}.npc"), "w") as f:
+                yaml.dump({
+                    "name": default_forenpc_name, "primary_directive": forenpc_directive,
+                    "model": forenpc_model, "provider": forenpc_provider
+                }, f)
+
             ctx_path = os.path.join(team_dir, "team.ctx")
-            folder_context = input("Enter a short description
-
-                "forenpc":
-                "
-                "provider": forenpc_provider,
-                "api_key": None,
-                "api_url": None,
+            folder_context = input("Enter a short description for this project/team (optional): ").strip()
+            team_ctx_data = {
+                "forenpc": default_forenpc_name, "model": forenpc_model,
+                "provider": forenpc_provider, "api_key": None, "api_url": None,
                 "context": folder_context if folder_context else None
             }
-            use_jinxs = input("
-            global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
-            project_jinxs_dir = os.path.join(team_dir, "jinxs")
+            use_jinxs = input("Use global jinxs folder (g) or copy to this project (c)? [g/c, default: g]: ").strip().lower()
             if use_jinxs == "c":
+                global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
                 if os.path.exists(global_jinxs_dir):
-                    shutil.copytree(global_jinxs_dir,
-                    print(f"Copied jinxs from {global_jinxs_dir} to {project_jinxs_dir}")
-                else:
-                    print(f"No global jinxs found at {global_jinxs_dir}")
+                    shutil.copytree(global_jinxs_dir, os.path.join(team_dir, "jinxs"), dirs_exist_ok=True)
             else:
-
+                team_ctx_data["use_global_jinxs"] = True
 
             with open(ctx_path, "w") as f:
-                yaml.dump(
+                yaml.dump(team_ctx_data, f)
     elif os.path.exists(global_team_path):
         team_dir = global_team_path
-
+        default_forenpc_name = "sibiji"
     else:
         print("No global npc_team found. Please run 'npcpy init' or create a team first.")
         sys.exit(1)
 
-    [... 5 removed lines not shown in this diff view ...]
+    team_ctx = {}
+    for filename in os.listdir(team_dir):
+        if filename.endswith(".ctx"):
+            try:
+                with open(os.path.join(team_dir, filename), "r") as f:
+                    team_ctx = yaml.safe_load(f) or {}
+                break
+            except Exception as e:
+                print(f"Warning: Could not load context file {filename}: {e}")
 
-
-
-    if os.path.exists(forenpc_path):
-        forenpc_obj = NPC(forenpc_path)
-    else:
-        forenpc_obj = None
+    forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
+    print(f"Using forenpc: {forenpc_name}")
 
-    # --- Decide which jinxs directory to use ---
     if team_ctx.get("use_global_jinxs", False):
         jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
     else:
         jinxs_dir = os.path.join(team_dir, "jinxs")
-
+
     jinxs_list = load_jinxs_from_directory(jinxs_dir)
     jinxs_dict = {jinx.jinx_name: jinx for jinx in jinxs_list}
 
+    forenpc_obj = None
+    forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
+    #print('forenpc_path', forenpc_path)
+    #print('jinx list', jinxs_list)
+    if os.path.exists(forenpc_path):
+
+        forenpc_obj = NPC(file = forenpc_path, jinxs=jinxs_list)
+    else:
+        print(f"Warning: Forenpc file '{forenpc_name}.npc' not found in {team_dir}.")
+
     team = Team(team_path=team_dir, forenpc=forenpc_obj, jinxs=jinxs_dict)
     return command_history, team, forenpc_obj
-
+
 def process_result(
     user_input: str,
     result_state: ShellState,
```
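The bootstrap path above now writes the collected forenpc settings to `<team_dir>/team.ctx` and later reads whatever `.ctx` file it finds back into `team_ctx`. Assuming the interactive defaults are accepted and the global jinxs folder is kept, the written context would look roughly like the dictionary below; the literal values are the prompt defaults shown above, reproduced here only for illustration (PyYAML assumed installed, as the code already uses it):

```python
import yaml

# Approximation of what setup_shell() writes to <team_dir>/team.ctx with defaults.
team_ctx_data = {
    "forenpc": "forenpc",
    "model": "llama3.2",
    "provider": "ollama",
    "api_key": None,
    "api_url": None,
    "context": None,
    "use_global_jinxs": True,  # set when the global jinxs folder is chosen
}
print(yaml.dump(team_ctx_data))
```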
```diff
@@ -1002,6 +1317,7 @@ def process_result(
     if user_input =='/help':
         render_markdown(output)
     elif result_state.stream_output:
+
         try:
             final_output_str = print_and_process_stream_with_markdown(output, result_state.chat_model, result_state.chat_provider)
         except AttributeError as e:
```
```diff
@@ -1009,6 +1325,16 @@ def process_result(
             if len(output) > 0:
                 final_output_str = output
                 render_markdown(final_output_str)
+        except TypeError as e:
+
+            if isinstance(output, str):
+                if len(output) > 0:
+                    final_output_str = output
+                    render_markdown(final_output_str)
+            elif isinstance(output, dict):
+                if 'output' in output:
+                    final_output_str = output['output']
+                    render_markdown(final_output_str)
 
     elif output is not None:
         final_output_str = str(output)
```
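The new TypeError branch in process_result gives streamed-output handling a fallback when the value is not actually a generator: plain strings are rendered directly and dicts fall back to their 'output' key. A compact sketch of that normalization in isolation (the helper name is hypothetical):

```python
def normalize_output(output):
    # Mirrors the fallback above: prefer strings, then a dict's 'output' key.
    if isinstance(output, str):
        return output if output else None
    if isinstance(output, dict):
        return output.get("output")
    return str(output) if output is not None else None

print(normalize_output("already a string"))
print(normalize_output({"output": "from a dict", "messages": []}))
```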
```diff
@@ -1042,12 +1368,12 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
 
 def exit_shell(state):
     print("\nGoodbye!")
-    print('beginning knowledge consolidation')
-    try:
-
-
-    except KeyboardInterrupt:
-
+    #print('beginning knowledge consolidation')
+    #try:
+    #    breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
+    #    print(breathe_result)
+    #except KeyboardInterrupt:
+    #    print("Knowledge consolidation interrupted. Exiting immediately.")
     sys.exit(0)
 
     while True:
```
```diff
--- npcsh-1.0.2/npcsh.egg-info/PKG-INFO
+++ npcsh-1.0.3/npcsh.egg-info/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.2
-Summary: npcsh is a command-line toolkit for using AI agents.
+Version: 1.0.3
+Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
 Author-email: info@npcworldwi.de
```
```diff
@@ -402,7 +402,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)
 
 
 ## NPC Studio
-There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables
+There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).
 
 
 ## Mailing List
```
```diff
--- npcsh-1.0.2/setup.py
+++ npcsh-1.0.3/setup.py
@@ -84,7 +84,7 @@ extra_files = package_files("npcpy/npc_team/")
 
 setup(
     name="npcsh",
-    version="1.0.2",
+    version="1.0.3",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements, # Only install base requirements by default
     extras_require={
```
```diff
@@ -108,7 +108,7 @@ setup(
     },
     author="Christopher Agostino",
     author_email="info@npcworldwi.de",
-    description="npcsh is a command-line toolkit for using AI agents.",
+    description="npcsh is a command-line toolkit for using AI agents in novel ways.",
     long_description=open("README.md").read(),
     long_description_content_type="text/markdown",
     url="https://github.com/NPC-Worldwide/npcsh",
```