npcsh 1.1.10-py3-none-any.whl → 1.1.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +409 -362
- npcsh/corca.py +28 -2
- npcsh/guac.py +4 -1
- npcsh/npc_team/jinxs/code/sh.jinx +32 -13
- npcsh/npc_team/jinxs/code/sql.jinx +2 -2
- npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
- npcsh/npc_team/jinxs/utils/chat.jinx +17 -0
- npcsh/npc_team/jinxs/utils/vixynt.jinx +104 -77
- npcsh-1.1.12.data/data/npcsh/npc_team/agent.jinx +17 -0
- npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +17 -0
- npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +38 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sql.jinx +2 -2
- npcsh-1.1.12.data/data/npcsh/npc_team/vixynt.jinx +144 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/METADATA +1 -1
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/RECORD +66 -62
- npcsh-1.1.10.data/data/npcsh/npc_team/sh.jinx +0 -19
- npcsh-1.1.10.data/data/npcsh/npc_team/vixynt.jinx +0 -117
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/alicanto.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/build.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/compress.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/corca.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/guac.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/load_file.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonk.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/pti.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/search.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/serve.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sleep.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/spool.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/trigger.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/wander.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/yap.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/WHEEL +0 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/top_level.txt +0 -0
npcsh/_state.py
CHANGED

@@ -26,7 +26,7 @@ import sqlite3
 import subprocess
 import sys
 import time
-from typing import Dict, List, Any, Tuple, Union, Optional
+from typing import Dict, List, Any, Tuple, Union, Optional, Callable
 import logging
 import textwrap
 from termcolor import colored
@@ -80,14 +80,15 @@ from npcpy.memory.command_history import (
     load_kg_from_db,
     save_kg_to_db,
 )
-from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory
+from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog
 from npcpy.llm_funcs import (
     check_llm_command,
     get_llm_response,
     execute_llm_command,
     breathe,
-
+
 )
+from npcpy.tools import auto_tools

 from npcpy.memory.knowledge_graph import (
     kg_evolve_incremental,
@@ -544,59 +545,7 @@ def get_relevant_memories(
     return all_memories[-max_memories:]


-def search_kg_facts(
-    self,
-    npc: str,
-    team: str,
-    directory_path: str,
-    query: str
-) -> List[Dict]:
-
-    kg = load_kg_from_db(
-        self.engine,
-        team,
-        npc,
-        directory_path
-    )
-
-    if not kg or 'facts' not in kg:
-        return []
-
-    query_lower = query.lower()
-    matching_facts = []
-
-    for fact in kg['facts']:
-        statement = fact.get('statement', '').lower()
-        if query_lower in statement:
-            matching_facts.append(fact)
-
-    return matching_facts

-def format_memory_context(memory_examples):
-    if not memory_examples:
-        return ""
-
-    context_parts = []
-
-    approved_examples = memory_examples.get("approved", [])
-    rejected_examples = memory_examples.get("rejected", [])
-
-    if approved_examples:
-        context_parts.append("EXAMPLES OF GOOD MEMORIES:")
-        for ex in approved_examples[:5]:
-            final = ex.get("final_memory") or ex.get("initial_memory")
-            context_parts.append(f"- {final}")
-
-    if rejected_examples:
-        context_parts.append("\nEXAMPLES OF POOR MEMORIES TO AVOID:")
-        for ex in rejected_examples[:3]:
-            context_parts.append(f"- {ex.get('initial_memory')}")
-
-    if context_parts:
-        context_parts.append("\nLearn from these examples to generate similar high-quality memories.")
-        return "\n".join(context_parts)
-
-    return ""
 def add_npcshrc_to_shell_config() -> None:
     """
     Function Description:
@@ -989,6 +938,7 @@ def validate_bash_command(command_parts: list) -> bool:
     """
     Function Description:
         Validate if the command sequence is a valid bash command with proper arguments/flags.
+        Simplified to be less strict and allow bash to handle argument specifics for common commands.
     Args:
         command_parts : list : Command parts
     Keyword Args:
@@ -999,216 +949,20 @@ def validate_bash_command(command_parts: list) -> bool:
     if not command_parts:
         return False

-    COMMAND_PATTERNS = {
-        "cat": {
-            "flags": ["-n", "-b", "-E", "-T", "-s", "--number", "-A", "--show-all"],
-            "requires_arg": True,
-        },
-        "find": {
-            "flags": [
-                "-name",
-                "-type",
-                "-size",
-                "-mtime",
-                "-exec",
-                "-print",
-                "-delete",
-                "-maxdepth",
-                "-mindepth",
-                "-perm",
-                "-user",
-                "-group",
-            ],
-            "requires_arg": True,
-        },
-        "who": {
-            "flags": [
-                "-a",
-                "-b",
-                "-d",
-                "-H",
-                "-l",
-                "-p",
-                "-q",
-                "-r",
-                "-s",
-                "-t",
-                "-u",
-                "--all",
-                "--count",
-                "--heading",
-            ],
-            "requires_arg": False,
-        },
-        "open": {
-            "flags": ["-a", "-e", "-t", "-f", "-F", "-W", "-n", "-g", "-h"],
-            "requires_arg": True,
-        },
-        "ls": {
-            "flags": [
-                "-a",
-                "-l",
-                "-h",
-                "-R",
-                "-t",
-                "-S",
-                "-r",
-                "-d",
-                "-F",
-                "-i",
-                "--color",
-            ],
-            "requires_arg": False,
-        },
-        "cp": {
-            "flags": [
-                "-r",
-                "-f",
-                "-i",
-                "-u",
-                "-v",
-                "--preserve",
-                "--no-preserve=mode,ownership,timestamps",
-            ],
-            "requires_arg": True,
-        },
-        "mv": {
-            "flags": ["-f", "-i", "-u", "-v", "--backup", "--no-clobber"],
-            "requires_arg": True,
-        },
-        "rm": {
-            "flags": ["-f", "-i", "-r", "-v", "--preserve-root", "--no-preserve-root"],
-            "requires_arg": True,
-        },
-        "mkdir": {
-            "flags": ["-p", "-v", "-m", "--mode", "--parents"],
-            "requires_arg": True,
-        },
-        "rmdir": {
-            "flags": ["-p", "-v", "--ignore-fail-on-non-empty"],
-            "requires_arg": True,
-        },
-        "touch": {
-            "flags": ["-a", "-c", "-m", "-r", "-d", "--date"],
-            "requires_arg": True,
-        },
-        "grep": {
-            "flags": [
-                "-i",
-                "-v",
-                "-r",
-                "-l",
-                "-n",
-                "-c",
-                "-w",
-                "-x",
-                "--color",
-                "--exclude",
-                "--include",
-            ],
-            "requires_arg": True,
-        },
-        "sed": {
-            "flags": [
-                "-e",
-                "-f",
-                "-i",
-                "-n",
-                "--expression",
-                "--file",
-                "--in-place",
-                "--quiet",
-                "--silent",
-            ],
-            "requires_arg": True,
-        },
-        "awk": {
-            "flags": [
-                "-f",
-                "-v",
-                "--file",
-                "--source",
-                "--assign",
-                "--posix",
-                "--traditional",
-            ],
-            "requires_arg": True,
-        },
-        "sort": {
-            "flags": [
-                "-b",
-                "-d",
-                "-f",
-                "-g",
-                "-i",
-                "-n",
-                "-r",
-                "-u",
-                "--check",
-                "--ignore-case",
-                "--numeric-sort",
-            ],
-            "requires_arg": False,
-        },
-        "uniq": {
-            "flags": ["-c", "-d", "-u", "-i", "--check-chars", "--skip-chars"],
-            "requires_arg": False,
-        },
-        "wc": {
-            "flags": ["-c", "-l", "-w", "-m", "-L", "--bytes", "--lines", "--words"],
-            "requires_arg": False,
-        },
-        "pwd": {
-            "flags": ["-L", "-P"],
-            "requires_arg": False,
-        },
-        "chmod": {
-            "flags": ["-R", "-v", "-c", "--reference"],
-            "requires_arg": True,
-        },
-
-    }
-
     base_command = command_parts[0]

-
-
-
+    # Commands that are always considered valid for direct execution
+    ALWAYS_VALID_COMMANDS = BASH_COMMANDS + list(interactive_commands.keys()) + TERMINAL_EDITORS

-
-    INTERACTIVE_COMMANDS = ["ipython", "python", "sqlite3", "r"]
-    TERMINAL_EDITORS = ["vim", "nano", "emacs"]
-    if base_command in TERMINAL_EDITORS or base_command in INTERACTIVE_COMMANDS:
+    if base_command in ALWAYS_VALID_COMMANDS:
         return True
+
+    # Specific checks for commands that might be misinterpreted or need special handling
+    if base_command == 'which':
+        return True # 'which' is a valid bash command

-
-
-
-    pattern = COMMAND_PATTERNS.get(base_command)
-    if not pattern:
-        return True
-
-    args = []
-    flags = []
-
-    for i in range(1, len(command_parts)):
-        part = command_parts[i]
-        if part.startswith("-"):
-            flags.append(part)
-            if part not in pattern["flags"]:
-                return False
-        else:
-            args.append(part)
-
-
-    if base_command == "who" and args:
-        return False
-
-    if pattern.get("requires_arg", False) and not args:
-        return False
-
-    return True
-
+    # If it's not in our explicit list, it's not a bash command we want to validate strictly
+    return False # If it reaches here, it's not a recognized bash command for strict validation.

 def is_npcsh_initialized() -> bool:
     """
@@ -1506,8 +1260,9 @@ if not completion_logger.handlers:
     formatter = logging.Formatter('[%(name)s] %(message)s')
     handler.setFormatter(formatter)
     completion_logger.addHandler(handler)
-
 def make_completer(shell_state: ShellState, router: Any):
+    slash_hint_cache = {"last_key": None}
+
     def complete(text: str, state_index: int) -> Optional[str]:
         """Main completion function"""
         try:
@@ -1515,56 +1270,72 @@ def make_completer(shell_state: ShellState, router: Any):
             begidx = readline.get_begidx()
             endidx = readline.get_endidx()

-
-
+            # The word currently being completed (e.g., "lor" in "ls lor")
+            word_under_cursor = buffer[begidx:endidx]
+
+            # The very first word/token in the entire buffer (e.g., "ls" in "ls lor")
+            first_token_of_buffer = ""
+            if buffer.strip():
+                match = re.match(r'^(\S+)', buffer.strip())
+                if match:
+                    first_token_of_buffer = match.group(1)
+
             matches = []
-
-
-
-
+
+            # Determine if we are in a "slash command context"
+            # This is true if the *entire buffer starts with a slash* AND
+            # the current completion is for that initial slash command (begidx == 0).
+
+            is_slash_command_context = (begidx <=1 and first_token_of_buffer.startswith('/'))
+
+            if is_slash_command_context:
                 slash_commands = get_slash_commands(shell_state, router)
-                completion_logger.debug(f"Available slash commands: {slash_commands}")

-                if
+                if first_token_of_buffer == '/': # If just '/' is typed
                     matches = [cmd[1:] for cmd in slash_commands]
-                else:
-
-                    matching_commands = [cmd for cmd in slash_commands if cmd.startswith(full_text)]
+                else: # If '/ag' is typed
+                    matching_commands = [cmd for cmd in slash_commands if cmd.startswith(first_token_of_buffer)]
                     matches = [cmd[1:] for cmd in matching_commands]

-
-
-
-
-
+                # Only print hints if this is the first completion attempt (state_index == 0)
+                # and the hints haven't been printed for this specific input yet.
+                if matches and state_index == 0:
+                    key = (buffer, first_token_of_buffer) # Use full buffer for cache key
+                    if slash_hint_cache["last_key"] != key:
+                        print("\nAvailable slash commands: " + ", ".join(slash_commands))
+                        try:
+                            readline.redisplay()
+                        except Exception:
+                            pass
+                        slash_hint_cache["last_key"] = key
+
+            # If not a slash command context, then it's either a regular command or an argument.
+            elif begidx == 0: # Completing a regular command (e.g., "ls", "pyt")
+                bash_matches = [cmd for cmd in BASH_COMMANDS if cmd.startswith(word_under_cursor)]
                 matches.extend(bash_matches)

-                interactive_matches = [cmd for cmd in interactive_commands.keys() if cmd.startswith(
+                interactive_matches = [cmd for cmd in interactive_commands.keys() if cmd.startswith(word_under_cursor)]
                 matches.extend(interactive_matches)

-                if len(
+                if len(word_under_cursor) >= 1:
                     path_executables = get_path_executables()
-                    exec_matches = [cmd for cmd in path_executables if cmd.startswith(
+                    exec_matches = [cmd for cmd in path_executables if cmd.startswith(word_under_cursor)]
                     matches.extend(exec_matches[:20])
-
-
-                matches = get_file_completions(
+
+            else: # Completing a file or directory path (e.g., "ls doc/my_f")
+                matches = get_file_completions(word_under_cursor)

             matches = sorted(list(set(matches)))
-            completion_logger.debug(f"Final matches: {matches}")

             if state_index < len(matches):
-
-                completion_logger.debug(f"Returning: '{result}'")
-                return result
+                return matches[state_index]
             else:
-
+                return None # readline expects None when no more completions

         except Exception as e:
-            completion_logger
-            completion_logger.
-
-            return None
+            # Using completion_logger for internal debugging, not printing to stdout for user.
+            # completion_logger.error(f"Exception in completion: {e}", exc_info=True)
+            return None

     return complete

@@ -1598,39 +1369,71 @@ def get_slash_commands(state: ShellState, router: Any) -> List[str]:
     completion_logger.debug(f"Final slash commands: {result}")
     return result
 def get_file_completions(text: str) -> List[str]:
-    """Get file/directory completions"""
+    """Get file/directory completions, including for subfolders."""
     try:
-
-
-
-        elif text.startswith('./') or text.startswith('../'):
-            basedir = os.path.dirname(text) or '.'
+        # Determine the base directory and the prefix to match
+        if '/' in text:
+            basedir = os.path.dirname(text)
             prefix = os.path.basename(text)
         else:
            basedir = '.'
            prefix = text

-
-
-
+        # If basedir is empty (e.g., text is "folder/"), it should be current dir
+        if not basedir:
+            basedir = '.'
+
+        # Handle absolute paths
+        if text.startswith('/'):
+            # Ensure absolute path starts with / and handle cases like "/something"
+            if basedir.startswith('/'):
+                pass # already absolute
+            else:
+                basedir = '/' + basedir.lstrip('/')
+            if basedir == '/': # If text was just "/something", basedir is "/"
+                prefix = os.path.basename(text)
+
+        # Resolve the actual path to list
+        if basedir == '.':
+            current_path_to_list = os.getcwd()
+        else:
+            # If basedir is relative, join it with current working directory
+            if not os.path.isabs(basedir):
+                current_path_to_list = os.path.join(os.getcwd(), basedir)
+            else:
+                current_path_to_list = basedir
+
+        if not os.path.isdir(current_path_to_list): # If the base path doesn't exist yet, no completions
+            return []
+
        matches = []
        try:
-            for item in os.listdir(
+            for item in os.listdir(current_path_to_list):
                 if item.startswith(prefix):
-
+                    full_item_path = os.path.join(current_path_to_list, item)
+
+                    # Construct the completion string relative to the input 'text'
+                    # This ensures that if the input was 'folder/s', the completion is 'folder/subfolder/'
                     if basedir == '.':
                         completion = item
                     else:
-
-
-
-
+                        # Reconstruct the path fragment before the prefix
+                        path_fragment_before_prefix = text[:len(text) - len(prefix)]
+                        completion = os.path.join(path_fragment_before_prefix, item)
+
+                    if os.path.isdir(full_item_path):
+                        matches.append(completion + '/')
+                    else:
+                        matches.append(completion)
        except (PermissionError, OSError):
            pass

        return sorted(matches)
-    except Exception:
+    except Exception as e:
+        completion_logger.error(f"Error in get_file_completions for text '{text}': {e}", exc_info=True)
        return []
+
+
 def is_command_position(buffer: str, begidx: int) -> bool:
     """Determine if cursor is at a command position"""

@@ -1996,6 +1799,181 @@ def parse_generic_command_flags(parts: List[str]) -> Tuple[Dict[str, Any], List[

     return parsed_kwargs, positional_args

+def _ollama_supports_tools(model: str) -> Optional[bool]:
+    """
+    Best-effort check for tool-call support on an Ollama model by inspecting its template/metadata.
+    Mirrors the lightweight check used in the Flask serve path.
+    """
+    try:
+        import ollama # Local import to avoid hard dependency when Ollama isn't installed
+    except Exception:
+        return None
+
+    try:
+        details = ollama.show(model)
+        template = details.get("template") or ""
+        metadata = details.get("metadata") or {}
+        if any(token in template for token in ["{{- if .Tools", "{{- range .Tools", "{{- if .ToolCalls"]):
+            return True
+        if metadata.get("tools") or metadata.get("tool_calls"):
+            return True
+        return False
+    except Exception:
+        return None
+
+
+def model_supports_tool_calls(model: Optional[str], provider: Optional[str]) -> bool:
+    """
+    Decide whether to attempt tool-calling for the given model/provider.
+    Uses Ollama template inspection when possible and falls back to name heuristics.
+    """
+    if not model:
+        return False
+
+    provider = (provider or "").lower()
+    model_lower = model.lower()
+
+    if provider == "ollama":
+        ollama_support = _ollama_supports_tools(model)
+        if ollama_support is not None:
+            return ollama_support
+
+    toolish_markers = [
+        "gpt",
+        "claude",
+        "qwen",
+        "mistral",
+        "llama-3.1",
+        "llama3.1",
+        "llama-3.2",
+        "llama3.2",
+        "tool",
+    ]
+    return any(marker in model_lower for marker in toolish_markers)
+
+
+def collect_llm_tools(state: ShellState) -> Tuple[List[Dict[str, Any]], Dict[str, Callable]]:
+    """
+    Assemble tool definitions + executable map from NPC tools, Jinxs, and MCP servers.
+    This mirrors the auto-translation used in the Flask server path.
+    """
+    tools: List[Dict[str, Any]] = []
+    tool_map: Dict[str, Callable] = {}
+
+    # NPC-defined Python tools
+    npc_obj = state.npc if isinstance(state.npc, NPC) else None
+    if npc_obj and getattr(npc_obj, "tools", None):
+        if isinstance(npc_obj.tools, list) and npc_obj.tools and callable(npc_obj.tools[0]):
+            tools_schema, auto_map = auto_tools(npc_obj.tools)
+            tools.extend(tools_schema or [])
+            tool_map.update(auto_map or {})
+        else:
+            tools.extend(npc_obj.tools or [])
+            if getattr(npc_obj, "tool_map", None):
+                tool_map.update(npc_obj.tool_map)
+    elif npc_obj and getattr(npc_obj, "tool_map", None):
+        tool_map.update(npc_obj.tool_map)
+
+    # Jinx tools from NPC and Team
+    aggregated_jinxs: Dict[str, Any] = {}
+    if npc_obj and getattr(npc_obj, "jinxs_dict", None):
+        aggregated_jinxs.update(npc_obj.jinxs_dict)
+    if state.team and isinstance(state.team, Team) and getattr(state.team, "jinxs_dict", None):
+        aggregated_jinxs.update({k: v for k, v in state.team.jinxs_dict.items() if k not in aggregated_jinxs})
+
+    if aggregated_jinxs:
+        jinx_catalog: Dict[str, Dict[str, Any]] = {}
+        if npc_obj and getattr(npc_obj, "jinx_tool_catalog", None):
+            jinx_catalog.update(npc_obj.jinx_tool_catalog or {})
+        if state.team and isinstance(state.team, Team) and getattr(state.team, "jinx_tool_catalog", None):
+            jinx_catalog.update(state.team.jinx_tool_catalog or {})
+        if not jinx_catalog:
+            jinx_catalog = build_jinx_tool_catalog(aggregated_jinxs)
+
+        tools.extend(list(jinx_catalog.values()))
+
+        jinja_env_for_jinx = getattr(npc_obj, "jinja_env", None)
+        if not jinja_env_for_jinx and state.team and isinstance(state.team, Team):
+            jinja_env_for_jinx = getattr(state.team, "jinja_env", None)
+
+        for name, jinx_obj in aggregated_jinxs.items():
+            def _make_runner(jinx=jinx_obj, jinja_env=jinja_env_for_jinx, tool_name=name):
+                def runner(**kwargs):
+                    input_values = kwargs if isinstance(kwargs, dict) else {}
+                    try:
+                        ctx = jinx.execute(
+                            input_values=input_values,
+                            npc=npc_obj,
+                            messages=state.messages,
+                            extra_globals={"state": state},
+                            jinja_env=jinja_env
+                        )
+                        return ctx.get("output", ctx)
+                    except Exception as exc:
+                        return f"Jinx '{tool_name}' failed: {exc}"
+                return runner
+            tool_map[name] = _make_runner()
+
+    # MCP tools via npcsh.corca client
+    try:
+        from npcsh.corca import MCPClientNPC, _resolve_and_copy_mcp_server_path # type: ignore
+
+        team_ctx_mcp_servers = None
+        if state.team and isinstance(state.team, Team) and hasattr(state.team, "team_ctx"):
+            team_ctx_mcp_servers = state.team.team_ctx.get("mcp_servers", [])
+
+        mcp_server_path = _resolve_and_copy_mcp_server_path(
+            explicit_path=None,
+            current_path=state.current_path,
+            team_ctx_mcp_servers=team_ctx_mcp_servers,
+            interactive=False,
+            auto_copy_bypass=True
+        )
+
+        if mcp_server_path:
+            reuse_client = (
+                state.mcp_client
+                if state.mcp_client and getattr(state.mcp_client, "server_script_path", None) == mcp_server_path
+                else None
+            )
+            mcp_client = reuse_client or MCPClientNPC()
+            if reuse_client is None:
+                try:
+                    connected = mcp_client.connect_sync(mcp_server_path)
+                except Exception:
+                    connected = False
+                if connected:
+                    state.mcp_client = mcp_client
+            if mcp_client and getattr(mcp_client, "available_tools_llm", None):
+                for tool_def in mcp_client.available_tools_llm:
+                    name = tool_def.get("function", {}).get("name")
+                    if name and name not in tool_map:
+                        tools.append(tool_def)
+                tool_map.update(getattr(mcp_client, "tool_map", {}) or {})
+    except Exception:
+        pass # MCP is optional; ignore failures
+
+    # Deduplicate tools by name to avoid confusing the LLM
+    deduped = {}
+    for tool_def in tools:
+        name = tool_def.get("function", {}).get("name")
+        if name:
+            deduped[name] = tool_def
+    return list(deduped.values()), tool_map
+
+
+def normalize_llm_result(llm_result: Any, fallback_messages: List[Dict[str, Any]]) -> Tuple[Any, List[Dict[str, Any]]]:
+    """
+    Normalize varying LLM return shapes into (output, messages).
+    """
+    if isinstance(llm_result, dict):
+        messages = llm_result.get("messages", fallback_messages)
+        output = llm_result.get("output")
+        if output is None:
+            output = llm_result.get("response")
+        return output, messages
+    return llm_result, fallback_messages
+

 def should_skip_kg_processing(user_input: str, assistant_output: str) -> bool:
     """Determine if this interaction is too trivial for KG processing"""
@@ -2166,12 +2144,16 @@ def process_pipeline_command(

         if validate_bash_command(cmd_parts):
             with SpinnerContext(f"Executing {command_name}", style="line"):
-
-
-
-
-
-
+                try: # Added try-except for KeyboardInterrupt here
+                    success, result = handle_bash_command(
+                        cmd_parts,
+                        cmd_to_process,
+                        stdin_input,
+                        state
+                    )
+                except KeyboardInterrupt:
+                    print(colored("\nBash command interrupted by user.", "yellow"))
+                    return state, colored("Command interrupted.", "red")

             if success:
                 return state, result
@@ -2192,14 +2174,18 @@ def process_pipeline_command(
                 f"{exec_model} analyzing error",
                 style="brain"
             ):
-
-
-
-
-
-
-
-
+                try: # Added try-except for KeyboardInterrupt here
+                    response = execute_llm_command(
+                        fixer_prompt,
+                        model=exec_model,
+                        provider=exec_provider,
+                        npc=state.npc,
+                        stream=stream_final,
+                        messages=state.messages
+                    )
+                except KeyboardInterrupt:
+                    print(colored("\nLLM analysis interrupted by user.", "yellow"))
+                    return state, colored("LLM analysis interrupted.", "red")

             state.messages = response['messages']
             return state, response['response']
@@ -2225,7 +2211,15 @@ def process_pipeline_command(
         )
         info = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n'
         state.messages.append({'role':'user', 'content':full_llm_cmd})
-
+
+        tools_for_llm: List[Dict[str, Any]] = []
+        tool_exec_map: Dict[str, Callable] = {}
+        tool_capable = model_supports_tool_calls(exec_model, exec_provider)
+        if tool_capable:
+            tools_for_llm, tool_exec_map = collect_llm_tools(state)
+            if not tools_for_llm:
+                tool_capable = False
+
         npc_name = (
             state.npc.name
             if isinstance(state.npc, NPC)
@@ -2245,27 +2239,54 @@ def process_pipeline_command(
             "load_file_contents": load_file_contents,
             "search_web": search_web,
             "get_relevant_memories": get_relevant_memories,
-
+
             'state': state
         }
         current_module = sys.modules[__name__]
         for name, func in inspect.getmembers(current_module, inspect.isfunction):
             application_globals_for_jinx[name] = func

-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        try: # Added try-except for KeyboardInterrupt here
+            if tool_capable:
+                llm_result = get_llm_response(
+                    full_llm_cmd,
+                    model=exec_model,
+                    provider=exec_provider,
+                    npc=state.npc,
+                    team=state.team,
+                    messages=state.messages,
+                    stream=stream_final,
+                    attachments=state.attachments,
+                    context=info,
+                    auto_process_tool_calls=True,
+                    tools=tools_for_llm,
+                    tool_map=tool_exec_map,
+                    tool_choice={"type": "auto"},
+                )
+            else:
+                llm_result = check_llm_command(
+                    full_llm_cmd,
+                    model=exec_model,
+                    provider=exec_provider,
+                    api_url=state.api_url,
+                    api_key=state.api_key,
+                    npc=state.npc,
+                    team=state.team,
+                    messages=state.messages,
+                    images=state.attachments,
+                    stream=stream_final,
+                    context=info,
+                    extra_globals=application_globals_for_jinx
+                )
+        except KeyboardInterrupt:
+            print(colored("\nLLM processing interrupted by user.", "yellow"))
+            return state, colored("LLM processing interrupted.", "red")
+
+        if tool_capable:
+            output, updated_messages = normalize_llm_result(llm_result, state.messages)
+            state.messages = updated_messages
+            return state, output
+
         if not review:
             if isinstance(llm_result, dict):
                 state.messages = llm_result.get("messages", state.messages)
@@ -2475,7 +2496,9 @@ def execute_command(
                 review=review,
                 router=router
             )
-
+            if isinstance(output, dict) and 'output' in output:
+                output = output['output']
+
             if is_last_command:
                 print(colored("✅ Pipeline complete", "green"))
                 return current_state, output
@@ -2514,6 +2537,9 @@ def execute_command(
                 f" → Passing to stage {stage_num + 1}",
                 "blue"
             ))
+    except KeyboardInterrupt:
+        print(colored("\nOperation interrupted by user.", "yellow"))
+        return current_state, colored("Command interrupted.", "red")
     except RateLimitError:
         print(colored('Rate Limit Exceeded'))
         # wait 30 seconds then truncate messages/condense context with breathing mechanism
@@ -2590,14 +2616,18 @@ def execute_command(
                 f"Chatting with {active_model}",
                 style="brain"
             ):
-
-
-
-
-
-
-
-
+                try: # Added try-except for KeyboardInterrupt here
+                    response = get_llm_response(
+                        command,
+                        model=active_model,
+                        provider=active_provider,
+                        npc=state.npc,
+                        stream=state.stream_output,
+                        messages=state.messages
+                    )
+                except KeyboardInterrupt:
+                    print(colored("\nChat interrupted by user.", "yellow"))
+                    return state, colored("Chat interrupted.", "red")

             state.messages = response['messages']
             return state, response['response']
@@ -2607,18 +2637,23 @@ def execute_command(
                 f"Executing with {active_model}",
                 style="dots_pulse"
             ):
-
-
-
-
-
-
-
-
+                try: # Added try-except for KeyboardInterrupt here
+                    response = execute_llm_command(
+                        command,
+                        model=active_model,
+                        provider=active_provider,
+                        npc=state.npc,
+                        stream=state.stream_output,
+                        messages=state.messages
+                    )
+                except KeyboardInterrupt:
+                    print(colored("\nCommand execution interrupted by user.", "yellow"))
+                    return state, colored("Command interrupted.", "red")

             state.messages = response['messages']
             return state, response['response']

+
 def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     setup_npcsh_config()

@@ -2821,7 +2856,6 @@ def process_memory_approvals(command_history, memory_queue):
             approval['decision'],
             approval.get('final_memory')
         )
-
 def process_result(
     user_input: str,
     result_state: ShellState,
@@ -2854,10 +2888,17 @@ def process_result(

     final_output_str = None

+    # FIX: Handle dict output properly
     if isinstance(output, dict):
         output_content = output.get('output')
         model_for_stream = output.get('model', active_npc.model)
         provider_for_stream = output.get('provider', active_npc.provider)
+
+        # If output_content is still a dict or None, convert to string
+        if isinstance(output_content, dict):
+            output_content = str(output_content)
+        elif output_content is None:
+            output_content = "Command completed with no output"
     else:
         output_content = output
         model_for_stream = active_npc.model
@@ -2870,15 +2911,21 @@ def process_result(
         else:
             render_markdown(str(output_content))
     elif result_state.stream_output:
-
-
-
-
-
-
+        # FIX: Only stream if output_content is a generator, not a string
+        if isinstance(output_content, str):
+            final_output_str = output_content
+            render_markdown(final_output_str)
+        else:
+            final_output_str = print_and_process_stream_with_markdown(
+                output_content,
+                model_for_stream,
+                provider_for_stream,
+                show=True
+            )
     elif output_content is not None:
         final_output_str = str(output_content)
         render_markdown(final_output_str)
+

     if final_output_str:
         if result_state.messages:
|