npcsh 1.0.2__tar.gz → 1.0.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcsh-1.0.2 → npcsh-1.0.4}/PKG-INFO +3 -3
- {npcsh-1.0.2 → npcsh-1.0.4}/README.md +1 -1
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/guac.py +19 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/npcsh.py +507 -180
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh.egg-info/PKG-INFO +3 -3
- {npcsh-1.0.2 → npcsh-1.0.4}/setup.py +2 -2
- {npcsh-1.0.2 → npcsh-1.0.4}/LICENSE +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/__init__.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/_state.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/alicanto.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/mcp_helpers.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/mcp_npcsh.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/mcp_server.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/npc.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/plonk.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/pti.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/routes.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/spool.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/wander.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh/yap.py +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh.egg-info/SOURCES.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh.egg-info/dependency_links.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh.egg-info/entry_points.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh.egg-info/requires.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/npcsh.egg-info/top_level.txt +0 -0
- {npcsh-1.0.2 → npcsh-1.0.4}/setup.cfg +0 -0
{npcsh-1.0.2 → npcsh-1.0.4}/PKG-INFO

@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.2
-Summary: npcsh is a command-line toolkit for using AI agents.
+Version: 1.0.4
+Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
 Author-email: info@npcworldwi.de

@@ -402,7 +402,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)
 
 
 ## NPC Studio
-There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables
+There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).
 
 
 ## Mailing List

{npcsh-1.0.2 → npcsh-1.0.4}/README.md

@@ -307,7 +307,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)
 
 
 ## NPC Studio
-There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables
+There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).
 
 
 ## Mailing List

{npcsh-1.0.2 → npcsh-1.0.4}/npcsh/guac.py

@@ -607,11 +607,30 @@ def execute_guac_command(command: str, state: GuacState) -> Tuple[GuacState, Any
         state.command_history.add_command(nl_input_for_llm, [history_output], "", state.current_path)
 
     elif state.current_mode == "cmd":
+        locals_context_string = "Current Python environment variables and functions:\n"
+        if state.locals:
+            for k, v in state.locals.items():
+                if not k.startswith('__'): # Exclude Python built-ins and internal vars
+                    try:
+                        # Use repr() for a developer-friendly representation
+                        value_repr = repr(v)
+                        # Truncate long representations to prevent context window bloat
+                        if len(value_repr) > 200:
+                            value_repr = value_repr[:197] + "..."
+                        locals_context_string += f"- {k} (type: {type(v).__name__}) = {value_repr}\n"
+                    except Exception:
+                        locals_context_string += f"- {k} (type: {type(v).__name__}) = <unrepresentable>\n"
+            # Add a clear separator for LLM to distinguish this context
+            locals_context_string += "\n--- End of Environment Context ---\n"
+        else:
+            locals_context_string += "(Environment is empty)\n"
+
         prompt_cmd = (
             f"User input for Python CMD mode: '{nl_input_for_llm}'.\n"
             f"Generate ONLY executable Python code required to fulfill this.\n"
             f"Do not include any explanations, leading markdown like ```python, or any text other than the Python code itself.\n"
         )
+
         llm_response = get_llm_response(
             prompt_cmd,
             model=state.chat_model,

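Note: the block added above builds a plain-text snapshot of the REPL's locals to prepend to the LLM prompt. As a rough, standalone illustration of the same truncated-repr pattern (the sample dictionary below is invented for illustration and is not part of the package):

# Standalone sketch of the environment-summary logic shown in the hunk above.
def summarize_locals(env: dict, max_repr_len: int = 200) -> str:
    summary = "Current Python environment variables and functions:\n"
    if not env:
        return summary + "(Environment is empty)\n"
    for name, value in env.items():
        if name.startswith("__"):                 # skip dunder/internal names
            continue
        try:
            value_repr = repr(value)
            if len(value_repr) > max_repr_len:    # keep the prompt small
                value_repr = value_repr[:max_repr_len - 3] + "..."
            summary += f"- {name} (type: {type(value).__name__}) = {value_repr}\n"
        except Exception:
            summary += f"- {name} (type: {type(value).__name__}) = <unrepresentable>\n"
    return summary + "\n--- End of Environment Context ---\n"

print(summarize_locals({"df_rows": list(range(500)), "threshold": 0.25}))
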
{npcsh-1.0.2 → npcsh-1.0.4}/npcsh/npcsh.py

@@ -1,4 +1,3 @@
-# Standard Library Imports
 import os
 import sys
 import atexit

@@ -548,60 +547,91 @@ def process_pipeline_command(
     if not cmd_to_process:
         return state, stdin_input
 
-    exec_model = model_override or state.chat_model
-    exec_provider = provider_override or state.chat_provider
+    exec_model = model_override or state.chat_model
+    exec_provider = provider_override or state.chat_provider
 
     if cmd_to_process.startswith("/"):
-        #print(cmd_to_process)
         return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
 
+    try:
+        cmd_parts = parse_command_safely(cmd_to_process)
+        if not cmd_parts:
+            return state, stdin_input
 
+        command_name = cmd_parts[0]
 
+        is_unambiguous_bash = (
+            command_name in BASH_COMMANDS or
+            command_name in interactive_commands or
+            command_name == "cd" or
+            cmd_to_process.startswith("./")
+        )
+
+        if is_unambiguous_bash:
             if command_name in interactive_commands:
                 return handle_interactive_command(cmd_parts, state)
             elif command_name == "cd":
                 return handle_cd_command(cmd_parts, state)
             else:
-                ... (35 lines removed here, old 570-604; their content is not preserved in this diff view)
+                return handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
+        else:
+            full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+
+            path_cmd = 'The current working directory is: ' + state.current_path
+            ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+            platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+            info = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n'
+
+            llm_result = check_llm_command(
+                full_llm_cmd,
+                model=exec_model,
+                provider=exec_provider,
+                api_url=state.api_url,
+                api_key=state.api_key,
+                npc=state.npc,
+                team=state.team,
+                messages=state.messages,
+                images=state.attachments,
+                stream=stream_final,
+                context=info,
+                shell=True,
+            )
+            if isinstance(llm_result, dict):
+                state.messages = llm_result.get("messages", state.messages)
+                output = llm_result.get("output")
+                return state, output
+            else:
+                return state, llm_result
+
+    except CommandNotFoundError as e:
+        print(colored(f"Command not found, falling back to LLM: {e}", "yellow"), file=sys.stderr)
+        full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+        llm_result = check_llm_command(
+            full_llm_cmd,
+            model=exec_model,
+            provider=exec_provider,
+            api_url=state.api_url,
+            api_key=state.api_key,
+            npc=state.npc,
+            team=state.team,
+            messages=state.messages,
+            images=state.attachments,
+            stream=stream_final,
+            context=None,
+            shell=True
+        )
+        if isinstance(llm_result, dict):
+            state.messages = llm_result.get("messages", state.messages)
+            output = llm_result.get("output")
+            return state, output
+        else:
+            return state, llm_result
+
+    except Exception as e:
+        import traceback
+        traceback.print_exc()
+        return state, colored(f"Error processing command '{cmd_segment[:50]}...': {e}", "red")
+
 def check_mode_switch(command:str , state: ShellState):
     if command in ['/cmd', '/agent', '/chat', '/ride']:
         state.current_mode = command[1:]

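Note: the heart of this rework is the routing rule — a first token found in BASH_COMMANDS or interactive_commands (or "cd", or a "./" prefix) runs as a shell command, anything else falls through to check_llm_command. A minimal sketch of that predicate, with small placeholder command sets standing in for the package's real definitions:

# Sketch of the bash-vs-LLM routing rule from the hunk above.
# BASH_COMMANDS / INTERACTIVE_COMMANDS here are illustrative placeholders.
BASH_COMMANDS = {"ls", "grep", "cat", "echo"}
INTERACTIVE_COMMANDS = {"vim", "less", "top"}

def route(cmd: str) -> str:
    parts = cmd.split()
    if not parts:
        return "noop"
    name = parts[0]
    is_unambiguous_bash = (
        name in BASH_COMMANDS
        or name in INTERACTIVE_COMMANDS
        or name == "cd"
        or cmd.startswith("./")
    )
    return "bash" if is_unambiguous_bash else "llm"

assert route("ls -la") == "bash"
assert route("summarize the csv files here") == "llm"
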
@@ -624,6 +654,7 @@ def execute_command(
     stdin_for_next = None
     final_output = None
     current_state = state
+
     if state.current_mode == 'agent':
         for i, cmd_segment in enumerate(commands):
             is_last_command = (i == len(commands) - 1)

@@ -646,12 +677,14 @@ def execute_command(
                 if not stream_this_segment: # If intermediate output is a stream, consume for piping
                     full_stream_output = "".join(map(str, output))
                     stdin_for_next = full_stream_output
-                    if is_last_command:
+                    if is_last_command:
+                        final_output = full_stream_output
                 else: # Final output is a stream, don't consume, can't pipe
                     stdin_for_next = None
                     final_output = output
             elif output is not None: # Try converting other types to string
-                try:
+                try:
+                    stdin_for_next = str(output)
                 except Exception:
                     print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
                     stdin_for_next = None

@@ -759,91 +792,377 @@ def execute_command(
 
     # Otherwise, run the agentic ride loop
     return agentic_ride_loop(command, state)
+@dataclass
+class RideState:
+    """Lightweight state tracking for /ride mode"""
+    todos: List[Dict[str, Any]] = field(default_factory=list)
+    constraints: List[str] = field(default_factory=list)
+    facts: List[str] = field(default_factory=list)
+    mistakes: List[str] = field(default_factory=list)
+    successes: List[str] = field(default_factory=list)
+    current_todo_index: int = 0
+    current_subtodo_index: int = 0
+
+    def get_context_summary(self) -> str:
+        """Generate lightweight context for LLM prompts"""
+        context = []
+        if self.facts:
+            context.append(f"Facts: {'; '.join(self.facts[:5])}") # Limit to 5 most recent
+        if self.mistakes:
+            context.append(f"Recent mistakes: {'; '.join(self.mistakes[-3:])}")
+        if self.successes:
+            context.append(f"Recent successes: {'; '.join(self.successes[-3:])}")
+        return "\n".join(context)
+
+def interactive_edit_list(items: List[str], item_type: str) -> List[str]:
+    """Interactive REPL for editing lists of items with regeneration options"""
+    while True:
+        print(f"\nCurrent {item_type}:")
+        for i, item in enumerate(items, 1):
+            print(f"{i}. {item}")
+
+        choice = input(f"\nEdit {item_type} (e<num> to edit, d<num> to delete, a to add, r to regenerate, c to add context, ok to continue): ").strip()
+
+        if choice.lower() == 'ok':
+            break
+        elif choice.lower() == 'r':
+            print("Regenerating list...")
+            return "REGENERATE" # Special signal to regenerate
+        elif choice.lower() == 'c':
+            additional_context = input("Add more context: ").strip()
+            if additional_context:
+                return {"ADD_CONTEXT": additional_context, "items": items}
+        elif choice.lower() == 'a':
+            new_item = input(f"Enter new {item_type[:-1]}: ").strip()
+            if new_item:
+                items.append(new_item)
+        elif choice.lower().startswith('e'):
+            try:
+                idx = int(choice[1:]) - 1
+                if 0 <= idx < len(items):
+                    print(f"Current: {items[idx]}")
+                    new_item = input("New version: ").strip()
+                    if new_item:
+                        items[idx] = new_item
+            except ValueError:
+                print("Invalid format. Use e<number>")
+        elif choice.lower().startswith('d'):
+            try:
+                idx = int(choice[1:]) - 1
+                if 0 <= idx < len(items):
+                    items.pop(idx)
+            except ValueError:
+                print("Invalid format. Use d<number>")
+        else:
+            print("Invalid choice. Use: e<num>, d<num>, a, r (regenerate), c (add context), or ok")
+
+    return items
+def generate_todos(user_goal: str, state: ShellState, additional_context: str = "") -> List[Dict[str, Any]]:
+    """Generate high-level todos for the user's goal"""
+    path_cmd = 'The current working directory is: ' + state.current_path
+    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+    info = path_cmd + '\n' + ls_files + '\n' + platform_info
 
 
-    """
+
+    high_level_planning_instruction = """
+    You are a high-level project planner. When a user asks to work on a file or code,
+    structure your plan using a simple, high-level software development lifecycle:
+    1. First, understand the current state (e.g., read the relevant file).
+    2. Second, make the required changes based on the user's goal.
+    3. Third, verify the changes work as intended (e.g., test the code).
+    Your generated todos should reflect this high-level thinking.
 
+
+
     """
-
+
+    prompt = f"""
+    {high_level_planning_instruction}
 
-
+    User goal: {user_goal}
+
+    {additional_context}
+
+    Generate a list of 3 todos to accomplish this goal. Use specific actionable language based on the user request.
+    Do not make assumptions about user needs.
+    Every todo must be directly sourced from the user's request.
+    If users request specific files to be incorporated, you MUST include the full path to the file in the todo.
+    Here is some relevant information for the current folder and working directory that may be relevant:
+    {info}
+
+    For example, if the user says "I need to add a new function to calculate the average of a list of numbers my research.py script" and the current working directory is /home/user/projects and one
+    of the available files in the current directory is /home/user/projects/research.py then one of the todos should be:
+    - "Add a new function to /home/user/projects/research.py to calculate the average of a list of numbers"
+    Do not truncate paths. Do not additional paths. Use them exactly as they are provided here.
+
+    Each todo should be:
+    - Specific and actionable
+    - Independent where possible
+    - Focused on a single major component
+
+    Remember, it is critical to provide as much relevant information as possible. Even if the user only refers to a file or something by a relative path, it is
+    critical for operation that you provide the full path to the file in the todo item.
+
+    Return JSON with format:
+    {{
+        "todos": [
+            todo1, todo2, todo3,
+        ]
+    }}
+    """
+
+    response = get_llm_response(
+        prompt,
+        model=state.chat_model,
+        provider=state.chat_provider,
+        npc=state.npc,
+        format="json"
+    )
+
+    todos_data = response.get("response", {}).get("todos", [])
+    return todos_data
 
-    while True:
-        # 1. Orchestrate the current request
-        result = state.team.orchestrate(request)
-        all_results.append(result)
-        render_markdown("# Orchestration Result")
-        render_markdown(f"- Request: {request}")
-        render_markdown(f"- Final response: {result.get('output')}")
-
-        render_markdown('- Summary: '+result['debrief']['summary'])
-        recommendations = result['debrief']['recommendations']
-        render_markdown(f'- Recommendations: {recommendations}')
-
-
-        # 2. Ask LLM for three next possible steps
-        suggestion_prompt = f"""
-        Given the following user goal and orchestration result, suggest three new
-        avenues to go down that are related but distinct from the original goal and from each other.
-
-        Be concise. Each step should be a single actionable instruction or question.
-
-        User goal: {user_goal}
-        Orchestration result: {result}
-
-        Return a JSON object with a "steps" key, whose value is a list of three strings, each string being a next step.
-        Return only the JSON object.
-        """
-        suggestions = get_llm_response(
-            suggestion_prompt,
-            model=state.chat_model,
-            provider=state.chat_provider,
-            api_url=state.api_url,
-            api_key=state.api_key,
-            npc=state.npc,
-            format="json"
-        )
-        # No custom parsing: just use the parsed output
-        steps = suggestions.get("response", {}).get("steps", [])
-        if not steps or len(steps) < 1:
-            print("No further steps suggested by LLM. Exiting.")
-            break
 
+def generate_constraints(todos: List[Dict[str, Any]], user_goal: str, state: ShellState) -> List[str]:
+    """Generate constraints and requirements that define relationships between todos"""
+    prompt = f"""
+    User goal: {user_goal}
+
+    Todos to accomplish:
+    {chr(10).join([f"- {todo}" for todo in todos])}
+
+    Based ONLY on what the user explicitly stated in their goal, identify any constraints or requirements they mentioned.
+    Do NOT invent new constraints. Only extract constraints that are directly stated or clearly implied by the user's request.
+
+    Examples of valid constraints:
+    - If user says "without breaking existing functionality" -> "Maintain existing functionality"
+    - If user says "must be fast" -> "Performance must be optimized"
+    - If user says "should integrate with X" -> "Must integrate with X"
+
+    If the user didn't specify any constraints, return an empty list.
+
+    Return JSON with format:
+    {{
+        "constraints": ["constraint 1", "constraint 2", ...]
+    }}
+    """
+
+    response = get_llm_response(
+        prompt,
+        model=state.chat_model,
+        provider=state.chat_provider,
+        npc=state.npc,
+        format="json"
+    )
+
+    constraints_data = response.get("response", {})
+
+    if isinstance(constraints_data, dict):
+        constraints = constraints_data.get("constraints", [])
+        # Make sure we're getting strings, not dicts
+        cleaned_constraints = []
+        for c in constraints:
+            if isinstance(c, str):
+                cleaned_constraints.append(c)
+        return cleaned_constraints
+    else:
+        return []
+def should_break_down_todo(todo, state: ShellState) -> bool:
+    """Ask LLM if a todo needs breakdown, then ask user for confirmation"""
+    prompt = f"""
+    Todo: {todo}
+
+
+    Does this todo need to be broken down into smaller, more atomic components?
+    Consider:
+    - Is it complex enough to warrant breakdown?
+    - Would breaking it down make execution clearer?
+    - Are there multiple distinct steps involved?
+
+    Return JSON: {{"should_break_down": true/false, "reason": "explanation"}}
+    """
+
+    response = get_llm_response(
+        prompt,
+        model=state.chat_model,
+        provider=state.chat_provider,
+        npc=state.npc,
+        format="json"
+    )
+
+    result = response.get("response", {})
+    llm_suggests = result.get("should_break_down", False)
+    reason = result.get("reason", "No reason provided")
+
+    if llm_suggests:
+        print(f"\nLLM suggests breaking down: '{todo}'")
+        print(f"Reason: {reason}")
+        user_choice = input("Break it down? [y/N]: ").strip().lower()
+        return user_choice in ['y', 'yes']
+
+    return False
 
+def generate_subtodos(todo, state: ShellState) -> List[Dict[str, Any]]:
+    """Generate atomic subtodos for a complex todo"""
+    prompt = f"""
+    Parent todo: {todo}
+
+    Break this down into atomic, executable subtodos. Each subtodo should be:
+    - A single, concrete action
+    - Executable in one step
+    - Clear and unambiguous
+
+    Return JSON with format:
+    {{
+        "subtodos": [
+            "subtodo description",
+            ...
+        ]
+    }}
+    """
+
+    response = get_llm_response(
+        prompt,
+        model=state.chat_model,
+        provider=state.chat_provider,
+        npc=state.npc,
+        format="json"
+    )
+
+    return response.get("response", {}).get("subtodos", [])
+def execute_todo_item(todo: Dict[str, Any], ride_state: RideState, shell_state: ShellState) -> bool:
+    """Execute a single todo item using the existing jinx system"""
+    path_cmd = 'The current working directory is: ' + shell_state.current_path
+    ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(shell_state.current_path, f) for f in os.listdir(shell_state.current_path)]) if os.path.exists(shell_state.current_path) else 'No files found in the current directory.'
+    platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+    info = path_cmd + '\n' + ls_files + '\n' + platform_info
+
+    command = f"""
+
+    General information:
+    {info}
+
+    Execute this todo: {todo}
+
+    Constraints to follow:
+    {chr(10).join([f"- {c}" for c in ride_state.constraints])}
+    """
+
+    print(f"\nExecuting: {todo}")
+
 
+    result = check_llm_command(
+        command,
+        model=shell_state.chat_model,
+        provider=shell_state.chat_provider,
+        npc=shell_state.npc,
+        team=shell_state.team,
+        messages=[],
+        stream=shell_state.stream_output,
+        shell=True,
+    )
+
+    output_payload = result.get("output", "")
+    output_str = ""
+
+    if isgenerator(output_payload):
+        output_str = print_and_process_stream_with_markdown(output_payload, shell_state.chat_model, shell_state.chat_provider)
+    elif isinstance(output_payload, dict):
+        output_str = output_payload.get('output', str(output_payload))
+        if 'output' in output_str:
+            output_str = output_payload['output']
+        elif 'response' in output_str:
+            output_str = output_payload['response']
+        render_markdown(output_str)
+    elif output_payload:
+        output_str = str(output_payload)
+        render_markdown(output_str)
+
+    user_feedback = input(f"\nTodo completed successfully? [y/N/notes]: ").strip()
+
+    if user_feedback.lower() in ['y', 'yes']:
+        return True, output_str
+    elif user_feedback.lower() in ['n', 'no']:
+        mistake = input("What went wrong? ").strip()
+        ride_state.mistakes.append(f"Failed {todo}: {mistake}")
+        return False, output_str
+    else:
+        ride_state.facts.append(f"Re: {todo}: {user_feedback}")
+        success = input("Mark as completed? [y/N]: ").strip().lower() in ['y', 'yes']
+        return success, output_str
 
-
+def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
+    """
+    New /ride mode: hierarchical planning with human-in-the-loop control
+    """
+    ride_state = RideState()
+
+    # 1. Generate high-level todos
+    print("🚀 Generating high-level todos...")
+    todos = generate_todos(user_goal, state)
+
+    # 2. User reviews/edits todos
+    print("\n📋 Review and edit todos:")
+    todo_descriptions = [todo for todo in todos]
+    edited_descriptions = interactive_edit_list(todo_descriptions, "todos")
+
 
+    ride_state.todos = edited_descriptions
+
+    # 3. Generate constraints
+    print("\n🔒 Generating constraints...")
+    constraints = generate_constraints(edited_descriptions, user_goal, state)
+
+    # 4. User reviews/edits constraints
+    print("\n📐 Review and edit constraints:")
+    edited_constraints = interactive_edit_list(constraints, "constraints")
+    ride_state.constraints = edited_constraints
+
+    # 5. Execution loop
+    print("\n⚡ Starting execution...")
+
+    for i, todo in enumerate(edited_descriptions):
+        print(f"\n--- Todo {i+1}/{len(todos)}: {todo} ---")
+
+        def attempt_execution(current_todo):
+            # This inner function handles the execution and retry logic
+            success, output_str = execute_todo_item(current_todo, ride_state, state)
+            if not success:
+                retry = input("Retry this todo? [y/N]: ").strip().lower()
+                if retry in ['y', 'yes']:
+                    success, output_str = execute_todo_item(current_todo, ride_state, state)
+            return success, output_str
+
+        if should_break_down_todo(todo, state):
+            print("Breaking down todo...")
+            subtodos = generate_subtodos(todo, state)
+            subtodo_descriptions = [st for st in subtodos]
+            edited_subtodos = interactive_edit_list(subtodo_descriptions, "subtodos")
+
+            for j, subtodo_desc in enumerate(edited_subtodos):
+                subtodo = {"description": subtodo_desc, "type": "atomic"}
+                success, output = attempt_execution(subtodo)
+                if success:
+                    ride_state.successes.append({"description": subtodo_desc, "output": output})
+                else:
+                    print("Subtodo failed. Continuing to next...")
+        else:
+            success, output = attempt_execution(todo)
+            if success:
+                ride_state.successes.append({"description": todo, "output": output})
+    # 6. Final summary
+    print("\n🎯 Execution Summary:")
+    print(f"Successes: {len(ride_state.successes)}")
+    print(f"Mistakes: {len(ride_state.mistakes)}")
+    print(f"Facts learned: {len(ride_state.facts)}")
+
+    return state, {
+        "todos_completed": len(ride_state.successes),
+        "ride_state": ride_state,
+        "final_context": ride_state.get_context_summary()
+    }
 # --- Main Application Logic ---
 
 def check_deprecation_warnings():

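Note: taken together, these additions implement a plan → review → execute loop for /ride. The sketch below mirrors that control flow with stubbed-out planning and execution steps; all of the stub names are invented for illustration, while the real functions above call the LLM and prompt the user for edits and feedback.

# Control-flow sketch of the /ride loop added above, with stubbed steps.
from dataclasses import dataclass, field
from typing import List

@dataclass
class MiniRideState:
    todos: List[str] = field(default_factory=list)
    successes: List[str] = field(default_factory=list)
    mistakes: List[str] = field(default_factory=list)

def plan(goal: str) -> List[str]:              # stand-in for generate_todos
    return [f"understand current state for: {goal}",
            f"make the change for: {goal}",
            f"verify the change for: {goal}"]

def execute(todo: str) -> bool:                # stand-in for execute_todo_item
    print(f"executing: {todo}")
    return True

def mini_ride(goal: str) -> MiniRideState:
    state = MiniRideState(todos=plan(goal))
    for todo in state.todos:                   # human review/editing omitted here
        (state.successes if execute(todo) else state.mistakes).append(todo)
    print(f"completed {len(state.successes)}/{len(state.todos)} todos")
    return state

mini_ride("add an averaging helper to research.py")
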
@@ -857,12 +1176,12 @@ def print_welcome_message():
     print(
         """
 Welcome to \033[1;94mnpc\033[0m\033[1;38;5;202msh\033[0m!
-\033[1;94m \033[0m\033[1;38;5;202m
-\033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m
-\033[1;94m| '_
-\033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m
-\033[1;94m|_| |_|| .__/
-\033[1;94m| | \033[0m\033[1;38;5;202m
+\033[1;94m \033[0m\033[1;38;5;202m \\\\
+\033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
+\033[1;94m| '_ \\ | ' \\ / __|\033[0m\033[1;38;5;202m / __/ | |_ _| \\\\
+\033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m \\_ \\ | | | | //
+\033[1;94m|_| |_|| .__/ \\___|\033[0m\033[1;38;5;202m |___/ |_| |_| //
+\033[1;94m| | \033[0m\033[1;38;5;202m //
 \033[1;94m| |
 \033[1;94m|_|
 

@@ -871,7 +1190,6 @@ Begin by asking a question, issuing a bash command, or typing '/help' for more i
     """
     )
 
-
 def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     check_deprecation_warnings()
     setup_npcsh_config()

@@ -892,89 +1210,87 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     project_team_path = os.path.abspath(PROJECT_NPC_TEAM_PATH)
     global_team_path = os.path.expanduser(DEFAULT_NPC_TEAM_PATH)
     team_dir = None
-
-    team_ctx = {}
+    default_forenpc_name = None
 
-    # --- Always prefer local/project team first ---
     if os.path.exists(project_team_path):
         team_dir = project_team_path
-
+        default_forenpc_name = "forenpc"
     else:
         resp = input(f"No npc_team found in {os.getcwd()}. Create a new team here? [Y/n]: ").strip().lower()
         if resp in ("", "y", "yes"):
             team_dir = project_team_path
             os.makedirs(team_dir, exist_ok=True)
-
+            default_forenpc_name = "forenpc"
             forenpc_directive = input(
-                f"Enter a primary directive for {
+                f"Enter a primary directive for {default_forenpc_name} (default: 'You are the forenpc of the team...'): "
             ).strip() or "You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests."
             forenpc_model = input("Enter a model for your forenpc (default: llama3.2): ").strip() or "llama3.2"
             forenpc_provider = input("Enter a provider for your forenpc (default: ollama): ").strip() or "ollama"
-
-            ... (old lines 913-919 removed; their content is not preserved in this diff view)
-            "provider": forenpc_provider
-            }, f)
+
+            with open(os.path.join(team_dir, f"{default_forenpc_name}.npc"), "w") as f:
+                yaml.dump({
+                    "name": default_forenpc_name, "primary_directive": forenpc_directive,
+                    "model": forenpc_model, "provider": forenpc_provider
+                }, f)
+
             ctx_path = os.path.join(team_dir, "team.ctx")
-            folder_context = input("Enter a short description
-
-                "forenpc":
-                "
-                "provider": forenpc_provider,
-                "api_key": None,
-                "api_url": None,
+            folder_context = input("Enter a short description for this project/team (optional): ").strip()
+            team_ctx_data = {
+                "forenpc": default_forenpc_name, "model": forenpc_model,
+                "provider": forenpc_provider, "api_key": None, "api_url": None,
                 "context": folder_context if folder_context else None
             }
-            use_jinxs = input("
-            global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
-            project_jinxs_dir = os.path.join(team_dir, "jinxs")
+            use_jinxs = input("Use global jinxs folder (g) or copy to this project (c)? [g/c, default: g]: ").strip().lower()
             if use_jinxs == "c":
+                global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
                 if os.path.exists(global_jinxs_dir):
-                    shutil.copytree(global_jinxs_dir,
-                    print(f"Copied jinxs from {global_jinxs_dir} to {project_jinxs_dir}")
-                else:
-                    print(f"No global jinxs found at {global_jinxs_dir}")
+                    shutil.copytree(global_jinxs_dir, os.path.join(team_dir, "jinxs"), dirs_exist_ok=True)
             else:
-
+                team_ctx_data["use_global_jinxs"] = True
 
             with open(ctx_path, "w") as f:
-                yaml.dump(
+                yaml.dump(team_ctx_data, f)
     elif os.path.exists(global_team_path):
         team_dir = global_team_path
-
+        default_forenpc_name = "sibiji"
     else:
         print("No global npc_team found. Please run 'npcpy init' or create a team first.")
         sys.exit(1)
 
-    ... (old lines 953-957 removed; their content is not preserved in this diff view)
+    team_ctx = {}
+    for filename in os.listdir(team_dir):
+        if filename.endswith(".ctx"):
+            try:
+                with open(os.path.join(team_dir, filename), "r") as f:
+                    team_ctx = yaml.safe_load(f) or {}
+                break
+            except Exception as e:
+                print(f"Warning: Could not load context file {filename}: {e}")
 
-
-    if os.path.exists(forenpc_path):
-        forenpc_obj = NPC(forenpc_path)
-    else:
-        forenpc_obj = None
+    forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
+    print(f"Using forenpc: {forenpc_name}")
 
-    # --- Decide which jinxs directory to use ---
     if team_ctx.get("use_global_jinxs", False):
         jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
     else:
         jinxs_dir = os.path.join(team_dir, "jinxs")
 
     jinxs_list = load_jinxs_from_directory(jinxs_dir)
     jinxs_dict = {jinx.jinx_name: jinx for jinx in jinxs_list}
 
+    forenpc_obj = None
+    forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
+    #print('forenpc_path', forenpc_path)
+    #print('jinx list', jinxs_list)
+    if os.path.exists(forenpc_path):
+
+        forenpc_obj = NPC(file = forenpc_path, jinxs=jinxs_list)
+    else:
+        print(f"Warning: Forenpc file '{forenpc_name}.npc' not found in {team_dir}.")
+
     team = Team(team_path=team_dir, forenpc=forenpc_obj, jinxs=jinxs_dict)
     return command_history, team, forenpc_obj
 
 def process_result(
     user_input: str,
     result_state: ShellState,

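Note: the reworked setup_shell now reads the first *.ctx file in the team directory and falls back to a default forenpc name when none is configured. A small sketch of that lookup under the same assumptions (the directory path and fallback name below are illustrative):

# Sketch of the team.ctx lookup pattern added above.
import os
import yaml

def resolve_forenpc(team_dir: str, default_forenpc_name: str = "forenpc") -> str:
    team_ctx = {}
    for filename in os.listdir(team_dir):
        if filename.endswith(".ctx"):
            try:
                with open(os.path.join(team_dir, filename), "r") as f:
                    team_ctx = yaml.safe_load(f) or {}
                break
            except Exception as e:
                print(f"Warning: Could not load context file {filename}: {e}")
    return team_ctx.get("forenpc", default_forenpc_name)

# e.g. resolve_forenpc("./npc_team") -> "forenpc" when no .ctx file overrides it
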
@@ -1002,6 +1318,7 @@ def process_result(
     if user_input =='/help':
         render_markdown(output)
     elif result_state.stream_output:
+
         try:
             final_output_str = print_and_process_stream_with_markdown(output, result_state.chat_model, result_state.chat_provider)
         except AttributeError as e:

@@ -1009,6 +1326,16 @@ def process_result(
             if len(output) > 0:
                 final_output_str = output
                 render_markdown(final_output_str)
+        except TypeError as e:
+
+            if isinstance(output, str):
+                if len(output) > 0:
+                    final_output_str = output
+                    render_markdown(final_output_str)
+            elif isinstance(output, dict):
+                if 'output' in output:
+                    final_output_str = output['output']
+                    render_markdown(final_output_str)
 
     elif output is not None:
         final_output_str = str(output)

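Note: the new except TypeError branch covers outputs that arrive as a plain string or a dict rather than a stream. A compact sketch of that fallback, with print standing in for render_markdown:

# Sketch of the fallback rendering added above: handle str and dict outputs
# when stream processing raises.
def render_fallback(output) -> str:
    final_output_str = ""
    if isinstance(output, str):
        if output:
            final_output_str = output
    elif isinstance(output, dict):
        if "output" in output:
            final_output_str = output["output"]
    if final_output_str:
        print(final_output_str)
    return final_output_str

render_fallback({"output": "hello from the agent"})
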
@@ -1042,12 +1369,12 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
 
     def exit_shell(state):
         print("\nGoodbye!")
-        print('beginning knowledge consolidation')
-        try:
-            breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
-            print(breathe_result)
-        except KeyboardInterrupt:
-            print("Knowledge consolidation interrupted. Exiting immediately.")
+        #print('beginning knowledge consolidation')
+        #try:
+        #    breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
+        #    print(breathe_result)
+        #except KeyboardInterrupt:
+        #    print("Knowledge consolidation interrupted. Exiting immediately.")
         sys.exit(0)
 
     while True:

{npcsh-1.0.2 → npcsh-1.0.4}/npcsh.egg-info/PKG-INFO

@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.2
-Summary: npcsh is a command-line toolkit for using AI agents.
+Version: 1.0.4
+Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
 Author-email: info@npcworldwi.de

@@ -402,7 +402,7 @@ Read the docs at [npcsh.readthedocs.io](https://npcsh.readthedocs.io/en/latest/)
 
 
 ## NPC Studio
-There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables
+There is a graphical user interface that makes use of the NPC Toolkit through the NPC Studio. See the open source code for NPC Studio [here](https://github.com/npc-worldwide/npc-studio). Download the executables at [our website](https://enpisi.com/npc-studio).
 
 
 ## Mailing List

{npcsh-1.0.2 → npcsh-1.0.4}/setup.py

@@ -84,7 +84,7 @@ extra_files = package_files("npcpy/npc_team/")
 
 setup(
     name="npcsh",
-    version="1.0.2",
+    version="1.0.4",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements, # Only install base requirements by default
     extras_require={

@@ -108,7 +108,7 @@ setup(
     },
     author="Christopher Agostino",
     author_email="info@npcworldwi.de",
-    description="npcsh is a command-line toolkit for using AI agents.",
+    description="npcsh is a command-line toolkit for using AI agents in novel ways.",
     long_description=open("README.md").read(),
     long_description_content_type="text/markdown",
     url="https://github.com/NPC-Worldwide/npcsh",

|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|