npcsh 1.0.16__py3-none-any.whl → 1.0.17__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. The information is provided for informational purposes only.
- npcsh/_state.py +1536 -78
- npcsh/corca.py +709 -0
- npcsh/guac.py +1433 -596
- npcsh/mcp_server.py +64 -60
- npcsh/npc.py +5 -4
- npcsh/npcsh.py +27 -1334
- npcsh/pti.py +195 -215
- npcsh/routes.py +94 -18
- npcsh/spool.py +138 -144
- {npcsh-1.0.16.dist-info → npcsh-1.0.17.dist-info}/METADATA +22 -400
- npcsh-1.0.17.dist-info/RECORD +21 -0
- {npcsh-1.0.16.dist-info → npcsh-1.0.17.dist-info}/entry_points.txt +1 -1
- npcsh/mcp_npcsh.py +0 -822
- npcsh-1.0.16.dist-info/RECORD +0 -21
- {npcsh-1.0.16.dist-info → npcsh-1.0.17.dist-info}/WHEEL +0 -0
- {npcsh-1.0.16.dist-info → npcsh-1.0.17.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.16.dist-info → npcsh-1.0.17.dist-info}/top_level.txt +0 -0
npcsh/_state.py
CHANGED
@@ -3,10 +3,16 @@ from colorama import Fore, Back, Style
 from dataclasses import dataclass, field
 import filecmp
 import os
+from pathlib import Path
 import platform
 import pty
+try:
+    import readline
+except:
+    pass
 import re
 import select
+import shlex
 import shutil
 import signal
 import sqlite3
@@ -25,6 +31,247 @@ from npcpy.memory.command_history import (
|
|
|
25
31
|
)
|
|
26
32
|
from npcpy.npc_compiler import NPC, Team
|
|
27
33
|
|
|
34
|
+
|
|
35
|
+
from npcpy.memory.command_history import CommandHistory
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
import os
|
|
40
|
+
import sys
|
|
41
|
+
import atexit
|
|
42
|
+
import subprocess
|
|
43
|
+
import shlex
|
|
44
|
+
import re
|
|
45
|
+
from datetime import datetime
|
|
46
|
+
import importlib.metadata
|
|
47
|
+
import textwrap
|
|
48
|
+
from typing import Optional, List, Dict, Any, Tuple, Union
|
|
49
|
+
from dataclasses import dataclass, field
|
|
50
|
+
import platform
|
|
51
|
+
try:
|
|
52
|
+
from termcolor import colored
|
|
53
|
+
except:
|
|
54
|
+
pass
|
|
55
|
+
|
|
56
|
+
try:
|
|
57
|
+
import chromadb
|
|
58
|
+
except ImportError:
|
|
59
|
+
chromadb = None
|
|
60
|
+
import shutil
|
|
61
|
+
import sqlite3
|
|
62
|
+
import yaml
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
from npcpy.npc_sysenv import (
|
|
66
|
+
print_and_process_stream_with_markdown,
|
|
67
|
+
render_markdown,
|
|
68
|
+
get_model_and_provider,
|
|
69
|
+
get_locally_available_models,
|
|
70
|
+
lookup_provider
|
|
71
|
+
)
|
|
72
|
+
|
|
73
|
+
from npcpy.memory.command_history import (
|
|
74
|
+
CommandHistory,
|
|
75
|
+
save_conversation_message,
|
|
76
|
+
load_kg_from_db,
|
|
77
|
+
save_kg_to_db,
|
|
78
|
+
)
|
|
79
|
+
from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory
|
|
80
|
+
from npcpy.llm_funcs import (
|
|
81
|
+
check_llm_command,
|
|
82
|
+
get_llm_response,
|
|
83
|
+
execute_llm_command,
|
|
84
|
+
breathe,
|
|
85
|
+
|
|
86
|
+
)
|
|
87
|
+
from npcpy.memory.knowledge_graph import (
|
|
88
|
+
kg_evolve_incremental,
|
|
89
|
+
|
|
90
|
+
)
|
|
91
|
+
from npcpy.gen.embeddings import get_embeddings
|
|
92
|
+
|
|
93
|
+
try:
|
|
94
|
+
import readline
|
|
95
|
+
except:
|
|
96
|
+
print('no readline support, some features may not work as desired. ')
|
|
97
|
+
|
|
98
|
+
try:
|
|
99
|
+
VERSION = importlib.metadata.version("npcsh")
|
|
100
|
+
except importlib.metadata.PackageNotFoundError:
|
|
101
|
+
VERSION = "unknown"
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
NPCSH_CHAT_MODEL = os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b")
|
|
107
|
+
# print("NPCSH_CHAT_MODEL", NPCSH_CHAT_MODEL)
|
|
108
|
+
NPCSH_CHAT_PROVIDER = os.environ.get("NPCSH_CHAT_PROVIDER", "ollama")
|
|
109
|
+
# print("NPCSH_CHAT_PROVIDER", NPCSH_CHAT_PROVIDER)
|
|
110
|
+
NPCSH_DB_PATH = os.path.expanduser(
|
|
111
|
+
os.environ.get("NPCSH_DB_PATH", "~/npcsh_history.db")
|
|
112
|
+
)
|
|
113
|
+
NPCSH_VECTOR_DB_PATH = os.path.expanduser(
|
|
114
|
+
os.environ.get("NPCSH_VECTOR_DB_PATH", "~/npcsh_chroma.db")
|
|
115
|
+
)
|
|
116
|
+
#DEFAULT MODES = ['CHAT', 'AGENT', 'CODE', ]
|
|
117
|
+
|
|
118
|
+
NPCSH_DEFAULT_MODE = os.path.expanduser(os.environ.get("NPCSH_DEFAULT_MODE", "agent"))
|
|
119
|
+
NPCSH_VISION_MODEL = os.environ.get("NPCSH_VISION_MODEL", "gemma3:4b")
|
|
120
|
+
NPCSH_VISION_PROVIDER = os.environ.get("NPCSH_VISION_PROVIDER", "ollama")
|
|
121
|
+
NPCSH_IMAGE_GEN_MODEL = os.environ.get(
|
|
122
|
+
"NPCSH_IMAGE_GEN_MODEL", "runwayml/stable-diffusion-v1-5"
|
|
123
|
+
)
|
|
124
|
+
NPCSH_IMAGE_GEN_PROVIDER = os.environ.get("NPCSH_IMAGE_GEN_PROVIDER", "diffusers")
|
|
125
|
+
NPCSH_VIDEO_GEN_MODEL = os.environ.get(
|
|
126
|
+
"NPCSH_VIDEO_GEN_MODEL", "damo-vilab/text-to-video-ms-1.7b"
|
|
127
|
+
)
|
|
128
|
+
NPCSH_VIDEO_GEN_PROVIDER = os.environ.get("NPCSH_VIDEO_GEN_PROVIDER", "diffusers")
|
|
129
|
+
|
|
130
|
+
NPCSH_EMBEDDING_MODEL = os.environ.get("NPCSH_EMBEDDING_MODEL", "nomic-embed-text")
|
|
131
|
+
NPCSH_EMBEDDING_PROVIDER = os.environ.get("NPCSH_EMBEDDING_PROVIDER", "ollama")
|
|
132
|
+
NPCSH_REASONING_MODEL = os.environ.get("NPCSH_REASONING_MODEL", "deepseek-r1")
|
|
133
|
+
NPCSH_REASONING_PROVIDER = os.environ.get("NPCSH_REASONING_PROVIDER", "ollama")
|
|
134
|
+
NPCSH_STREAM_OUTPUT = eval(os.environ.get("NPCSH_STREAM_OUTPUT", "0")) == 1
|
|
135
|
+
NPCSH_API_URL = os.environ.get("NPCSH_API_URL", None)
|
|
136
|
+
NPCSH_SEARCH_PROVIDER = os.environ.get("NPCSH_SEARCH_PROVIDER", "duckduckgo")
|
|
137
|
+
NPCSH_BUILD_KG = os.environ.get("NPCSH_BUILD_KG") == "1"
|
|
138
|
+
READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_history")
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
@dataclass
|
|
143
|
+
class ShellState:
|
|
144
|
+
npc: Optional[Union[NPC, str]] = None
|
|
145
|
+
team: Optional[Team] = None
|
|
146
|
+
messages: List[Dict[str, Any]] = field(default_factory=list)
|
|
147
|
+
mcp_client: Optional[Any] = None
|
|
148
|
+
conversation_id: Optional[int] = None
|
|
149
|
+
chat_model: str = NPCSH_CHAT_MODEL
|
|
150
|
+
chat_provider: str = NPCSH_CHAT_PROVIDER
|
|
151
|
+
vision_model: str = NPCSH_VISION_MODEL
|
|
152
|
+
vision_provider: str = NPCSH_VISION_PROVIDER
|
|
153
|
+
embedding_model: str = NPCSH_EMBEDDING_MODEL
|
|
154
|
+
embedding_provider: str = NPCSH_EMBEDDING_PROVIDER
|
|
155
|
+
reasoning_model: str = NPCSH_REASONING_MODEL
|
|
156
|
+
reasoning_provider: str = NPCSH_REASONING_PROVIDER
|
|
157
|
+
search_provider: str = NPCSH_SEARCH_PROVIDER
|
|
158
|
+
image_gen_model: str = NPCSH_IMAGE_GEN_MODEL
|
|
159
|
+
image_gen_provider: str = NPCSH_IMAGE_GEN_PROVIDER
|
|
160
|
+
video_gen_model: str = NPCSH_VIDEO_GEN_MODEL
|
|
161
|
+
video_gen_provider: str = NPCSH_VIDEO_GEN_PROVIDER
|
|
162
|
+
current_mode: str = NPCSH_DEFAULT_MODE
|
|
163
|
+
build_kg: bool = NPCSH_BUILD_KG
|
|
164
|
+
api_key: Optional[str] = None
|
|
165
|
+
api_url: Optional[str] = NPCSH_API_URL
|
|
166
|
+
current_path: str = field(default_factory=os.getcwd)
|
|
167
|
+
stream_output: bool = NPCSH_STREAM_OUTPUT
|
|
168
|
+
attachments: Optional[List[Any]] = None
|
|
169
|
+
turn_count: int =0
|
|
170
|
+
def get_model_for_command(self, model_type: str = "chat"):
|
|
171
|
+
if model_type == "chat":
|
|
172
|
+
return self.chat_model, self.chat_provider
|
|
173
|
+
elif model_type == "vision":
|
|
174
|
+
return self.vision_model, self.vision_provider
|
|
175
|
+
elif model_type == "embedding":
|
|
176
|
+
return self.embedding_model, self.embedding_provider
|
|
177
|
+
elif model_type == "reasoning":
|
|
178
|
+
return self.reasoning_model, self.reasoning_provider
|
|
179
|
+
elif model_type == "image_gen":
|
|
180
|
+
return self.image_gen_model, self.image_gen_provider
|
|
181
|
+
elif model_type == "video_gen":
|
|
182
|
+
return self.video_gen_model, self.video_gen_provider
|
|
183
|
+
else:
|
|
184
|
+
return self.chat_model, self.chat_provider # Default fallback
|
|
185
|
+
CONFIG_KEY_MAP = {
|
|
186
|
+
# Chat
|
|
187
|
+
"model": "NPCSH_CHAT_MODEL",
|
|
188
|
+
"chatmodel": "NPCSH_CHAT_MODEL",
|
|
189
|
+
"provider": "NPCSH_CHAT_PROVIDER",
|
|
190
|
+
"chatprovider": "NPCSH_CHAT_PROVIDER",
|
|
191
|
+
|
|
192
|
+
# Vision
|
|
193
|
+
"vmodel": "NPCSH_VISION_MODEL",
|
|
194
|
+
"visionmodel": "NPCSH_VISION_MODEL",
|
|
195
|
+
"vprovider": "NPCSH_VISION_PROVIDER",
|
|
196
|
+
"visionprovider": "NPCSH_VISION_PROVIDER",
|
|
197
|
+
|
|
198
|
+
# Embedding
|
|
199
|
+
"emodel": "NPCSH_EMBEDDING_MODEL",
|
|
200
|
+
"embeddingmodel": "NPCSH_EMBEDDING_MODEL",
|
|
201
|
+
"eprovider": "NPCSH_EMBEDDING_PROVIDER",
|
|
202
|
+
"embeddingprovider": "NPCSH_EMBEDDING_PROVIDER",
|
|
203
|
+
|
|
204
|
+
# Reasoning
|
|
205
|
+
"rmodel": "NPCSH_REASONING_MODEL",
|
|
206
|
+
"reasoningmodel": "NPCSH_REASONING_MODEL",
|
|
207
|
+
"rprovider": "NPCSH_REASONING_PROVIDER",
|
|
208
|
+
"reasoningprovider": "NPCSH_REASONING_PROVIDER",
|
|
209
|
+
|
|
210
|
+
# Image generation
|
|
211
|
+
"igmodel": "NPCSH_IMAGE_GEN_MODEL",
|
|
212
|
+
"imagegenmodel": "NPCSH_IMAGE_GEN_MODEL",
|
|
213
|
+
"igprovider": "NPCSH_IMAGE_GEN_PROVIDER",
|
|
214
|
+
"imagegenprovider": "NPCSH_IMAGE_GEN_PROVIDER",
|
|
215
|
+
|
|
216
|
+
# Video generation
|
|
217
|
+
"vgmodel": "NPCSH_VIDEO_GEN_MODEL",
|
|
218
|
+
"videogenmodel": "NPCSH_VIDEO_GEN_MODEL",
|
|
219
|
+
"vgprovider": "NPCSH_VIDEO_GEN_PROVIDER",
|
|
220
|
+
"videogenprovider": "NPCSH_VIDEO_GEN_PROVIDER",
|
|
221
|
+
|
|
222
|
+
# Other
|
|
223
|
+
"sprovider": "NPCSH_SEARCH_PROVIDER",
|
|
224
|
+
"mode": "NPCSH_DEFAULT_MODE",
|
|
225
|
+
"stream": "NPCSH_STREAM_OUTPUT",
|
|
226
|
+
"apiurl": "NPCSH_API_URL",
|
|
227
|
+
"buildkg": "NPCSH_BUILD_KG",
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
def set_npcsh_config_value(key: str, value: str):
|
|
232
|
+
"""
|
|
233
|
+
Set NPCSH config values at runtime using shorthand (case-insensitive) or full keys.
|
|
234
|
+
Updates os.environ, globals, and ShellState defaults.
|
|
235
|
+
"""
|
|
236
|
+
# case-insensitive lookup for shorthand
|
|
237
|
+
env_key = CONFIG_KEY_MAP.get(key.lower(), key)
|
|
238
|
+
|
|
239
|
+
# update env
|
|
240
|
+
os.environ[env_key] = value
|
|
241
|
+
|
|
242
|
+
# normalize types
|
|
243
|
+
if env_key in ["NPCSH_STREAM_OUTPUT", "NPCSH_BUILD_KG"]:
|
|
244
|
+
parsed_val = value.strip().lower() in ["1", "true", "yes"]
|
|
245
|
+
elif env_key.endswith("_PATH"):
|
|
246
|
+
parsed_val = os.path.expanduser(value)
|
|
247
|
+
else:
|
|
248
|
+
parsed_val = value
|
|
249
|
+
|
|
250
|
+
# update global
|
|
251
|
+
globals()[env_key] = parsed_val
|
|
252
|
+
|
|
253
|
+
# update ShellState defaults
|
|
254
|
+
field_map = {
|
|
255
|
+
"NPCSH_CHAT_MODEL": "chat_model",
|
|
256
|
+
"NPCSH_CHAT_PROVIDER": "chat_provider",
|
|
257
|
+
"NPCSH_VISION_MODEL": "vision_model",
|
|
258
|
+
"NPCSH_VISION_PROVIDER": "vision_provider",
|
|
259
|
+
"NPCSH_EMBEDDING_MODEL": "embedding_model",
|
|
260
|
+
"NPCSH_EMBEDDING_PROVIDER": "embedding_provider",
|
|
261
|
+
"NPCSH_REASONING_MODEL": "reasoning_model",
|
|
262
|
+
"NPCSH_REASONING_PROVIDER": "reasoning_provider",
|
|
263
|
+
"NPCSH_SEARCH_PROVIDER": "search_provider",
|
|
264
|
+
"NPCSH_IMAGE_GEN_MODEL": "image_gen_model",
|
|
265
|
+
"NPCSH_IMAGE_GEN_PROVIDER": "image_gen_provider",
|
|
266
|
+
"NPCSH_VIDEO_GEN_MODEL": "video_gen_model",
|
|
267
|
+
"NPCSH_VIDEO_GEN_PROVIDER": "video_gen_provider",
|
|
268
|
+
"NPCSH_DEFAULT_MODE": "current_mode",
|
|
269
|
+
"NPCSH_BUILD_KG": "build_kg",
|
|
270
|
+
"NPCSH_API_URL": "api_url",
|
|
271
|
+
"NPCSH_STREAM_OUTPUT": "stream_output",
|
|
272
|
+
}
|
|
273
|
+
if env_key in field_map:
|
|
274
|
+
setattr(ShellState, field_map[env_key], parsed_val)
|
|
28
275
|
def get_npc_path(npc_name: str, db_path: str) -> str:
|
|
29
276
|
project_npc_team_dir = os.path.abspath("./npc_team")
|
|
30
277
|
project_npc_path = os.path.join(project_npc_team_dir, f"{npc_name}.npc")
|
|
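Note: a short sketch of how the new config helpers compose; the import path follows the module shown here, while the values are illustrative assumptions:

from npcsh._state import ShellState, set_npcsh_config_value

# Shorthand keys are case-insensitive and resolve through CONFIG_KEY_MAP,
# so "rmodel" maps to the NPCSH_REASONING_MODEL environment variable.
set_npcsh_config_value("rmodel", "deepseek-r1")

# get_model_for_command picks the (model, provider) pair for a task type
# and falls back to the chat pair for unrecognized types.
state = ShellState()
print(state.get_model_for_command("reasoning"))
print(state.get_model_for_command("not-a-type"))  # chat model/provider fallback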
@@ -513,7 +760,7 @@ interactive_commands = {
 }
 
 
-def start_interactive_session(command:
+def start_interactive_session(command: str) -> int:
     """
     Starts an interactive session. Only works on Unix. On Windows, print a message and return 1.
     """
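Note: the re-typed signature makes the contract explicit: one shell-ready command string in, a child exit status out. A hypothetical call (Unix only, per the docstring; import path assumed):

from npcsh._state import start_interactive_session

code = start_interactive_session("htop")  # returns the child's exit status
print(f"session ended with return code {code}")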
@@ -1015,41 +1262,6 @@ def get_setting_windows(key, default=None):
     config = read_rc_file_windows(get_npcshrc_path_windows())
     return config.get(key, default)
 
-NPCSH_CHAT_MODEL = os.environ.get("NPCSH_CHAT_MODEL", "llama3.2")
-# print("NPCSH_CHAT_MODEL", NPCSH_CHAT_MODEL)
-NPCSH_CHAT_PROVIDER = os.environ.get("NPCSH_CHAT_PROVIDER", "ollama")
-# print("NPCSH_CHAT_PROVIDER", NPCSH_CHAT_PROVIDER)
-NPCSH_DB_PATH = os.path.expanduser(
-    os.environ.get("NPCSH_DB_PATH", "~/npcsh_history.db")
-)
-NPCSH_VECTOR_DB_PATH = os.path.expanduser(
-    os.environ.get("NPCSH_VECTOR_DB_PATH", "~/npcsh_chroma.db")
-)
-#DEFAULT MODES = ['CHAT', 'AGENT', 'CODE', ]
-
-NPCSH_DEFAULT_MODE = os.path.expanduser(os.environ.get("NPCSH_DEFAULT_MODE", "agent"))
-NPCSH_VISION_MODEL = os.environ.get("NPCSH_VISION_MODEL", "llava:7b")
-NPCSH_VISION_PROVIDER = os.environ.get("NPCSH_VISION_PROVIDER", "ollama")
-NPCSH_IMAGE_GEN_MODEL = os.environ.get(
-    "NPCSH_IMAGE_GEN_MODEL", "runwayml/stable-diffusion-v1-5"
-)
-NPCSH_IMAGE_GEN_PROVIDER = os.environ.get("NPCSH_IMAGE_GEN_PROVIDER", "diffusers")
-NPCSH_VIDEO_GEN_MODEL = os.environ.get(
-    "NPCSH_VIDEO_GEN_MODEL", "damo-vilab/text-to-video-ms-1.7b"
-)
-NPCSH_VIDEO_GEN_PROVIDER = os.environ.get("NPCSH_VIDEO_GEN_PROVIDER", "diffusers")
-
-NPCSH_EMBEDDING_MODEL = os.environ.get("NPCSH_EMBEDDING_MODEL", "nomic-embed-text")
-NPCSH_EMBEDDING_PROVIDER = os.environ.get("NPCSH_EMBEDDING_PROVIDER", "ollama")
-NPCSH_REASONING_MODEL = os.environ.get("NPCSH_REASONING_MODEL", "deepseek-r1")
-NPCSH_REASONING_PROVIDER = os.environ.get("NPCSH_REASONING_PROVIDER", "ollama")
-NPCSH_STREAM_OUTPUT = eval(os.environ.get("NPCSH_STREAM_OUTPUT", "0")) == 1
-NPCSH_API_URL = os.environ.get("NPCSH_API_URL", None)
-NPCSH_SEARCH_PROVIDER = os.environ.get("NPCSH_SEARCH_PROVIDER", "duckduckgo")
-NPCSH_BUILD_KG = os.environ.get("NPCSH_BUILD_KG") == "1"
-READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_history")
-
-
 
 def setup_readline() -> str:
     import readline
@@ -1084,50 +1296,1296 @@ def save_readline_history():
 
 
 
+TERMINAL_EDITORS = ["vim", "emacs", "nano"]
+EMBEDDINGS_DB_PATH = os.path.expanduser("~/npcsh_chroma.db")
+HISTORY_DB_DEFAULT_PATH = os.path.expanduser("~/npcsh_history.db")
+READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_readline_history")
+DEFAULT_NPC_TEAM_PATH = os.path.expanduser("~/.npcsh/npc_team/")
+PROJECT_NPC_TEAM_PATH = "./npc_team/"
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+# --- Global Clients ---
+try:
+    chroma_client = chromadb.PersistentClient(path=EMBEDDINGS_DB_PATH) if chromadb else None
+except Exception as e:
+    print(f"Warning: Failed to initialize ChromaDB client at {EMBEDDINGS_DB_PATH}: {e}")
+    chroma_client = None
+
+
+
+
+def get_path_executables() -> List[str]:
+    """Get executables from PATH (cached for performance)"""
+    if not hasattr(get_path_executables, '_cache'):
+        executables = set()
+        path_dirs = os.environ.get('PATH', '').split(os.pathsep)
+        for path_dir in path_dirs:
+            if os.path.isdir(path_dir):
+                try:
+                    for item in os.listdir(path_dir):
+                        item_path = os.path.join(path_dir, item)
+                        if os.path.isfile(item_path) and os.access(item_path, os.X_OK):
+                            executables.add(item)
+                except (PermissionError, OSError):
+                    continue
+        get_path_executables._cache = sorted(list(executables))
+    return get_path_executables._cache
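Note: the _cache attribute hung on the function object above is a process-lifetime memo, so repeated tab-completions avoid re-scanning PATH. A standalone illustration of the same pattern (hypothetical, not from the package):

import os
from typing import List

def list_home_entries() -> List[str]:
    # Stash the result on the function object, as get_path_executables does.
    if not hasattr(list_home_entries, "_cache"):
        list_home_entries._cache = sorted(os.listdir(os.path.expanduser("~")))
    return list_home_entries._cache

first = list_home_entries()
second = list_home_entries()  # served from the cache, no second os.listdir
assert first is second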
+
+
+import logging
+
+# Set up completion logger
+completion_logger = logging.getLogger('npcsh.completion')
+completion_logger.setLevel(logging.WARNING)  # Default to WARNING (quiet)
+
+# Add handler if not already present
+if not completion_logger.handlers:
+    handler = logging.StreamHandler(sys.stderr)
+    formatter = logging.Formatter('[%(name)s] %(message)s')
+    handler.setFormatter(formatter)
+    completion_logger.addHandler(handler)
+
+def make_completer(shell_state: ShellState, router: Any):
+    def complete(text: str, state_index: int) -> Optional[str]:
+        """Main completion function"""
+        try:
+            buffer = readline.get_line_buffer()
+            begidx = readline.get_begidx()
+            endidx = readline.get_endidx()
+
+            completion_logger.debug(f"text='{text}', buffer='{buffer}', begidx={begidx}, endidx={endidx}, state_index={state_index}")
+
+            matches = []
+
+            # Check if we're completing a slash command
+            if begidx > 0 and buffer[begidx-1] == '/':
+                completion_logger.debug(f"Slash command completion - text='{text}'")
+                slash_commands = get_slash_commands(shell_state, router)
+                completion_logger.debug(f"Available slash commands: {slash_commands}")
+
+                if text == '':
+                    matches = [cmd[1:] for cmd in slash_commands]
+                else:
+                    full_text = '/' + text
+                    matching_commands = [cmd for cmd in slash_commands if cmd.startswith(full_text)]
+                    matches = [cmd[1:] for cmd in matching_commands]
+
+                completion_logger.debug(f"Slash command matches: {matches}")
+
+            elif is_command_position(buffer, begidx):
+                completion_logger.debug("Command position detected")
+                bash_matches = [cmd for cmd in BASH_COMMANDS if cmd.startswith(text)]
+                matches.extend(bash_matches)
+
+                interactive_matches = [cmd for cmd in interactive_commands.keys() if cmd.startswith(text)]
+                matches.extend(interactive_matches)
+
+                if len(text) >= 1:
+                    path_executables = get_path_executables()
+                    exec_matches = [cmd for cmd in path_executables if cmd.startswith(text)]
+                    matches.extend(exec_matches[:20])
+            else:
+                completion_logger.debug("File completion")
+                matches = get_file_completions(text)
+
+            matches = sorted(list(set(matches)))
+            completion_logger.debug(f"Final matches: {matches}")
+
+            if state_index < len(matches):
+                result = matches[state_index]
+                completion_logger.debug(f"Returning: '{result}'")
+                return result
+            else:
+                completion_logger.debug(f"No match for state_index {state_index}")
+
+        except Exception as e:
+            completion_logger.error(f"Exception in completion: {e}")
+            completion_logger.debug("Exception details:", exc_info=True)
+
+        return None
+
+    return complete
+
+def get_slash_commands(state: ShellState, router: Any) -> List[str]:
+    """Get available slash commands from the provided router and team"""
+    commands = []
+
+    if router and hasattr(router, 'routes'):
+        router_cmds = [f"/{cmd}" for cmd in router.routes.keys()]
+        commands.extend(router_cmds)
+        completion_logger.debug(f"Router commands: {router_cmds}")
+
+    # Team jinxs
+    if state.team and hasattr(state.team, 'jinxs_dict'):
+        jinx_cmds = [f"/{jinx}" for jinx in state.team.jinxs_dict.keys()]
+        commands.extend(jinx_cmds)
+        completion_logger.debug(f"Jinx commands: {jinx_cmds}")
+
+    # NPC names for switching
+    if state.team and hasattr(state.team, 'npcs'):
+        npc_cmds = [f"/{npc}" for npc in state.team.npcs.keys()]
+        commands.extend(npc_cmds)
+        completion_logger.debug(f"NPC commands: {npc_cmds}")
+
+    # Mode switching commands
+    mode_cmds = ['/cmd', '/agent', '/chat']
+    commands.extend(mode_cmds)
+    completion_logger.debug(f"Mode commands: {mode_cmds}")
+
+    result = sorted(commands)
+    completion_logger.debug(f"Final slash commands: {result}")
+    return result
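Note: a sketch of how the completer factory would be wired into readline; shell_state and router stand in for objects built elsewhere in the package (assumptions, not shown in this diff):

import readline
from npcsh._state import make_completer

# make_completer closes over the live shell state and router, so the completer
# sees NPC/jinx changes without being re-registered.
readline.set_completer(make_completer(shell_state, router))
readline.parse_and_bind("tab: complete")
# readline then calls the completer as completer("gi", 0), ("gi", 1), ... until None.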
+def get_file_completions(text: str) -> List[str]:
+    """Get file/directory completions"""
+    try:
+        if text.startswith('/'):
+            basedir = os.path.dirname(text) or '/'
+            prefix = os.path.basename(text)
+        elif text.startswith('./') or text.startswith('../'):
+            basedir = os.path.dirname(text) or '.'
+            prefix = os.path.basename(text)
         else:
-
+            basedir = '.'
+            prefix = text
+
+        if not os.path.exists(basedir):
+            return []
+
+        matches = []
+        try:
+            for item in os.listdir(basedir):
+                if item.startswith(prefix):
+                    full_path = os.path.join(basedir, item)
+                    if basedir == '.':
+                        completion = item
+                    else:
+                        completion = os.path.join(basedir, item)
+
+                    # Just return the name, let readline handle spacing/slashes
+                    matches.append(completion)
+        except (PermissionError, OSError):
+            pass
+
+        return sorted(matches)
+    except Exception:
+        return []
+def is_command_position(buffer: str, begidx: int) -> bool:
+    """Determine if cursor is at a command position"""
+    # Get the part of buffer before the current word
+    before_word = buffer[:begidx]
+
+    # Split by command separators
+    parts = re.split(r'[|;&]', before_word)
+    current_command_part = parts[-1].strip()
+
+    # If there's nothing before the current word in this command part,
+    # or only whitespace, we're at command position
+    return len(current_command_part) == 0
+
+
+def readline_safe_prompt(prompt: str) -> str:
+    ansi_escape = re.compile(r"(\033\[[0-9;]*[a-zA-Z])")
+    return ansi_escape.sub(r"\001\1\002", prompt)
+
+def print_jinxs(jinxs):
+    output = "Available jinxs:\n"
+    for jinx in jinxs:
+        output += f"  {jinx.jinx_name}\n"
+        output += f"    Description: {jinx.description}\n"
+        output += f"    Inputs: {jinx.inputs}\n"
+    return output
+
+def open_terminal_editor(command: str) -> str:
+    try:
+        os.system(command)
+        return 'Terminal editor closed.'
+    except Exception as e:
+        return f"Error opening terminal editor: {e}"
+
+def get_multiline_input(prompt: str) -> str:
+    lines = []
+    current_prompt = prompt
+    while True:
+        try:
+            line = input(current_prompt)
+            if line.endswith("\\"):
+                lines.append(line[:-1])
+                current_prompt = readline_safe_prompt("> ")
+            else:
+                lines.append(line)
+                break
+        except EOFError:
+            print("Goodbye!")
+            sys.exit(0)
+    return "\n".join(lines)
+
+def split_by_pipes(command: str) -> List[str]:
+    parts = []
+    current = ""
+    in_single_quote = False
+    in_double_quote = False
+    escape = False
+
+    for char in command:
+        if escape:
+            current += char
+            escape = False
+        elif char == '\\':
+            escape = True
+            current += char
+        elif char == "'" and not in_double_quote:
+            in_single_quote = not in_single_quote
+            current += char
+        elif char == '"' and not in_single_quote:
+            in_double_quote = not in_double_quote
+            current += char
+        elif char == '|' and not in_single_quote and not in_double_quote:
+            parts.append(current.strip())
+            current = ""
+        else:
+            current += char
+
+    if current:
+        parts.append(current.strip())
+    return parts
+
+def parse_command_safely(cmd: str) -> List[str]:
+    try:
+        return shlex.split(cmd)
+    except ValueError as e:
+        if "No closing quotation" in str(e):
+            if cmd.count('"') % 2 == 1:
+                cmd += '"'
+            elif cmd.count("'") % 2 == 1:
+                cmd += "'"
+            try:
+                return shlex.split(cmd)
+            except ValueError:
+                return cmd.split()
+        else:
+            return cmd.split()
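Note: the two parsers are complementary: split_by_pipes cuts a pipeline only at unquoted '|', while parse_command_safely tolerates an unclosed quote by closing it before retrying shlex. An illustrative run (expected values worked out from the code above):

from npcsh._state import split_by_pipes, parse_command_safely

segments = split_by_pipes('grep "a|b" notes.txt | wc -l')
# -> ['grep "a|b" notes.txt', 'wc -l']   (the quoted pipe is preserved)

parts = parse_command_safely('echo "unterminated')
# -> ['echo', 'unterminated']   (a closing quote is appended, then shlex.split)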
+
+def get_file_color(filepath: str) -> tuple:
+    if not os.path.exists(filepath):
+        return "grey", []
+    if os.path.isdir(filepath):
+        return "blue", ["bold"]
+    elif os.access(filepath, os.X_OK) and not os.path.isdir(filepath):
+        return "green", ["bold"]
+    elif filepath.endswith((".zip", ".tar", ".gz", ".bz2", ".xz", ".7z")):
+        return "red", []
+    elif filepath.endswith((".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff")):
+        return "magenta", []
+    elif filepath.endswith((".py", ".pyw")):
+        return "yellow", []
+    elif filepath.endswith((".sh", ".bash", ".zsh")):
+        return "green", []
+    elif filepath.endswith((".c", ".cpp", ".h", ".hpp")):
+        return "cyan", []
+    elif filepath.endswith((".js", ".ts", ".jsx", ".tsx")):
+        return "yellow", []
+    elif filepath.endswith((".html", ".css", ".scss", ".sass")):
+        return "magenta", []
+    elif filepath.endswith((".md", ".txt", ".log")):
+        return "white", []
+    elif os.path.basename(filepath).startswith("."):
+        return "cyan", []
+    else:
+        return "white", []
+
+def format_file_listing(output: str) -> str:
+    colored_lines = []
+    current_dir = os.getcwd()
+    for line in output.strip().split("\n"):
+        parts = line.split()
+        if not parts:
+            colored_lines.append(line)
+            continue
+
+        filepath_guess = parts[-1]
+        potential_path = os.path.join(current_dir, filepath_guess)
+
+        color, attrs = get_file_color(potential_path)
+        colored_filepath = colored(filepath_guess, color, attrs=attrs)
+
+        if len(parts) > 1:
+            # Handle cases like 'ls -l' where filename is last
+            colored_line = " ".join(parts[:-1] + [colored_filepath])
+        else:
+            # Handle cases where line is just the filename
+            colored_line = colored_filepath
+
+        colored_lines.append(colored_line)
+
+    return "\n".join(colored_lines)
+
+def wrap_text(text: str, width: int = 80) -> str:
+    lines = []
+    for paragraph in text.split("\n"):
+        if len(paragraph) > width:
+            lines.extend(textwrap.wrap(paragraph, width=width, replace_whitespace=False, drop_whitespace=False))
+        else:
+            lines.append(paragraph)
+    return "\n".join(lines)
+
+# --- Readline Setup and Completion ---
+
+def setup_readline() -> str:
+    """Setup readline with history and completion"""
+    try:
+        readline.read_history_file(READLINE_HISTORY_FILE)
+        readline.set_history_length(1000)
+
+        # Don't set completer here - it will be set in run_repl with state
+        readline.parse_and_bind("tab: complete")
+
+        readline.parse_and_bind("set enable-bracketed-paste on")
+        readline.parse_and_bind(r'"\C-r": reverse-search-history')
+        readline.parse_and_bind(r'"\C-e": end-of-line')
+        readline.parse_and_bind(r'"\C-a": beginning-of-line')
+
+        return READLINE_HISTORY_FILE
+
+    except FileNotFoundError:
+        pass
+    except OSError as e:
+        print(f"Warning: Could not read readline history file {READLINE_HISTORY_FILE}: {e}")
+
+
+def save_readline_history():
+    try:
+        readline.write_history_file(READLINE_HISTORY_FILE)
+    except OSError as e:
+        print(f"Warning: Could not write readline history file {READLINE_HISTORY_FILE}: {e}")
+
+def store_command_embeddings(command: str, output: Any, state: ShellState):
+    if not chroma_client or not state.embedding_model or not state.embedding_provider:
+        if not chroma_client: print("Warning: ChromaDB client not available for embeddings.", file=sys.stderr)
+        return
+    if not command and not output:
+        return
+
+    try:
+        output_str = str(output) if output else ""
+        if not command and not output_str: return  # Avoid empty embeddings
+
+        texts_to_embed = [command, output_str]
+
+        embeddings = get_embeddings(
+            texts_to_embed,
+            state.embedding_model,
+            state.embedding_provider,
+        )
+
+        if not embeddings or len(embeddings) != 2:
+            print(f"Warning: Failed to generate embeddings for command: {command[:50]}...", file=sys.stderr)
+            return
+
+        timestamp = datetime.now().isoformat()
+        npc_name = state.npc.name if isinstance(state.npc, NPC) else state.npc
+
+        metadata = [
+            {
+                "type": "command", "timestamp": timestamp, "path": state.current_path,
+                "npc": npc_name, "conversation_id": state.conversation_id,
+            },
+            {
+                "type": "response", "timestamp": timestamp, "path": state.current_path,
+                "npc": npc_name, "conversation_id": state.conversation_id,
+            },
+        ]
+
+        collection_name = f"{state.embedding_provider}_{state.embedding_model}_embeddings"
+        try:
+            collection = chroma_client.get_or_create_collection(collection_name)
+            ids = [f"cmd_{timestamp}_{hash(command)}", f"resp_{timestamp}_{hash(output_str)}"]
+
+            collection.add(
+                embeddings=embeddings,
+                documents=texts_to_embed,
+                metadatas=metadata,
+                ids=ids,
+            )
+        except Exception as e:
+            print(f"Warning: Failed to add embeddings to collection '{collection_name}': {e}", file=sys.stderr)
+
+    except Exception as e:
+        print(f"Warning: Failed to store embeddings: {e}", file=sys.stderr)
+
+
+def handle_interactive_command(cmd_parts: List[str], state: ShellState) -> Tuple[ShellState, str]:
+    command_name = cmd_parts[0]
+    print(f"Starting interactive {command_name} session...")
+    try:
+        # CORRECTED: Join all parts into one string to pass to the function.
+        full_command_str = " ".join(cmd_parts)
+        return_code = start_interactive_session(full_command_str)
+        output = f"Interactive {command_name} session ended with return code {return_code}"
+    except Exception as e:
+        output = f"Error starting interactive session {command_name}: {e}"
+    return state, output
+
+def handle_cd_command(cmd_parts: List[str], state: ShellState) -> Tuple[ShellState, str]:
+    original_path = os.getcwd()
+    target_path = cmd_parts[1] if len(cmd_parts) > 1 else os.path.expanduser("~")
+    try:
+        os.chdir(target_path)
+        state.current_path = os.getcwd()
+        output = f"Changed directory to {state.current_path}"
+    except FileNotFoundError:
+        output = colored(f"cd: no such file or directory: {target_path}", "red")
+    except Exception as e:
+        output = colored(f"cd: error changing directory: {e}", "red")
+        os.chdir(original_path)  # Revert if error
+
+    return state, output
+
+
+def handle_bash_command(
+    cmd_parts: List[str],
+    cmd_str: str,
+    stdin_input: Optional[str],
+    state: ShellState,
+) -> Tuple[bool, str]:
+    try:
+        process = subprocess.Popen(
+            cmd_parts,
+            stdin=subprocess.PIPE if stdin_input is not None else None,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True,
+            cwd=state.current_path
+        )
+        stdout, stderr = process.communicate(input=stdin_input)
+
+        if process.returncode != 0:
+            return False, stderr.strip() if stderr else f"Command '{cmd_str}' failed with return code {process.returncode}."
+
+        if stderr.strip():
+            print(colored(f"stderr: {stderr.strip()}", "yellow"), file=sys.stderr)
+
+        if cmd_parts[0] in ["ls", "find", "dir"]:
+            return True, format_file_listing(stdout.strip())
+
+        return True, stdout.strip()
+
+    except FileNotFoundError:
+        return False, f"Command not found: {cmd_parts[0]}"
+    except PermissionError:
+        return False, f"Permission denied: {cmd_str}"
+
+def _try_convert_type(value: str) -> Union[str, int, float, bool]:
+    """Helper to convert string values to appropriate types."""
+    if value.lower() in ['true', 'yes']:
+        return True
+    if value.lower() in ['false', 'no']:
+        return False
+    try:
+        return int(value)
+    except (ValueError, TypeError):
+        pass
+    try:
+        return float(value)
+    except (ValueError, TypeError):
+        pass
+    return value
+
+def parse_generic_command_flags(parts: List[str]) -> Tuple[Dict[str, Any], List[str]]:
+    """
+    Parses a list of command parts into a dictionary of keyword arguments and a list of positional arguments.
+    Handles: -f val, --flag val, --flag=val, flag=val, --boolean-flag
+    """
+    parsed_kwargs = {}
+    positional_args = []
+    i = 0
+    while i < len(parts):
+        part = parts[i]
+
+        if part.startswith('--'):
+            key_part = part[2:]
+            if '=' in key_part:
+                key, value = key_part.split('=', 1)
+                parsed_kwargs[key] = _try_convert_type(value)
+            else:
+                # Look ahead for a value
+                if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                    parsed_kwargs[key_part] = _try_convert_type(parts[i + 1])
+                    i += 1  # Consume the value
+                else:
+                    parsed_kwargs[key_part] = True  # Boolean flag
+
+        elif part.startswith('-'):
+            key = part[1:]
+            # Look ahead for a value
+            if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                parsed_kwargs[key] = _try_convert_type(parts[i + 1])
+                i += 1  # Consume the value
+            else:
+                parsed_kwargs[key] = True  # Boolean flag
+
+        elif '=' in part and not part.startswith('-'):
+            key, value = part.split('=', 1)
+            parsed_kwargs[key] = _try_convert_type(value)
+
+        else:
+            positional_args.append(part)
+
+        i += 1
+
+    return parsed_kwargs, positional_args
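Note: expected behavior of the flag parser on a representative argument list; the flag names are made up, the results traced from the code above:

from npcsh._state import parse_generic_command_flags

kwargs, positional = parse_generic_command_flags(
    ["--model=gpt-x", "-t", "0.7", "depth=2", "query text", "--verbose"]
)
# kwargs     -> {'model': 'gpt-x', 't': 0.7, 'depth': 2, 'verbose': True}
# positional -> ['query text']
# Caveat: a bare --flag consumes the next token as its value unless that token
# starts with '-', so boolean flags are safest at the end or before another flag.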
+
+
+def should_skip_kg_processing(user_input: str, assistant_output: str) -> bool:
+    """Determine if this interaction is too trivial for KG processing"""
+
+    # Skip if user input is very short (less than 10 chars)
+    if len(user_input.strip()) < 10:
+        return True
+
+    simple_bash = {'ls', 'pwd', 'cd', 'mkdir', 'touch', 'rm', 'mv', 'cp'}
+    first_word = user_input.strip().split()[0] if user_input.strip() else ""
+    if first_word in simple_bash:
+        return True
+
+    if len(assistant_output.strip()) < 20:
+        return True
+
+    if "exiting" in assistant_output.lower() or "exited" in assistant_output.lower():
+        return True
+
+    return False
+
+
+
+
+def execute_slash_command(command: str,
+                          stdin_input: Optional[str],
+                          state: ShellState,
+                          stream: bool, router) -> Tuple[ShellState, Any]:
+    """Executes slash commands using the router or checking NPC/Team jinxs."""
+    all_command_parts = shlex.split(command)
+    command_name = all_command_parts[0].lstrip('/')
+
+    # Handle NPC switching commands
+    if command_name in ['n', 'npc']:
+        npc_to_switch_to = all_command_parts[1] if len(all_command_parts) > 1 else None
+        if npc_to_switch_to and state.team and npc_to_switch_to in state.team.npcs:
+            state.npc = state.team.npcs[npc_to_switch_to]
+            return state, f"Switched to NPC: {npc_to_switch_to}"
+        else:
+            available_npcs = list(state.team.npcs.keys()) if state.team else []
+            return state, colored(f"NPC '{npc_to_switch_to}' not found. Available NPCs: {', '.join(available_npcs)}", "red")
+
+    # Check router commands first
+    handler = router.get_route(command_name)
+    if handler:
+        parsed_flags, positional_args = parse_generic_command_flags(all_command_parts[1:])
+        normalized_flags = normalize_and_expand_flags(parsed_flags)
+
+        handler_kwargs = {
+            'stream': stream,
+            'team': state.team,
+            'messages': state.messages,
+            'api_url': state.api_url,
+            'api_key': state.api_key,
+            'stdin_input': stdin_input,
+            'positional_args': positional_args,
+            'plonk_context': state.team.shared_context.get('PLONK_CONTEXT') if state.team and hasattr(state.team, 'shared_context') else None,
+
+            # Default chat model/provider
+            'model': state.npc.model if isinstance(state.npc, NPC) and state.npc.model else state.chat_model,
+            'provider': state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else state.chat_provider,
+            'npc': state.npc,
+
+            # All other specific defaults
+            'sprovider': state.search_provider,
+            'emodel': state.embedding_model,
+            'eprovider': state.embedding_provider,
+            'igmodel': state.image_gen_model,
+            'igprovider': state.image_gen_provider,
+            'vgmodel': state.video_gen_model,
+            'vgprovider': state.video_gen_provider,
+            'vmodel': state.vision_model,
+            'vprovider': state.vision_provider,
+            'rmodel': state.reasoning_model,
+            'rprovider': state.reasoning_provider,
+        }
+
+        if len(normalized_flags) > 0:
+            kwarg_part = 'with kwargs: \n -' + '\n -'.join(f'{key}={item}' for key, item in normalized_flags.items())
+        else:
+            kwarg_part = ''
+
+        render_markdown(f'- Calling {command_name} handler {kwarg_part} ')
+
+        # Handle model/provider inference
+        if 'model' in normalized_flags and 'provider' not in normalized_flags:
+            inferred_provider = lookup_provider(normalized_flags['model'])
+            if inferred_provider:
+                handler_kwargs['provider'] = inferred_provider
+                print(colored(f"Info: Inferred provider '{inferred_provider}' for model '{normalized_flags['model']}'.", "cyan"))
+
+        if 'provider' in normalized_flags and 'model' not in normalized_flags:
+            current_provider = lookup_provider(handler_kwargs['model'])
+            if current_provider != normalized_flags['provider']:
+                prov = normalized_flags['provider']
+                print(f'Please specify a model for the provider: {prov}')
+
+        handler_kwargs.update(normalized_flags)
+
+        try:
+            result_dict = handler(command=command,
+                                  **handler_kwargs)
+            if isinstance(result_dict, dict):
+                state.messages = result_dict.get("messages", state.messages)
+                return state, result_dict
+            else:
+                return state, result_dict
+        except Exception as e:
+            import traceback
+            print(f"Error executing slash command '{command_name}':", file=sys.stderr)
+            traceback.print_exc()
+            return state, colored(f"Error executing slash command '{command_name}': {e}", "red")
+
+    # Check for jinxs in active NPC
+    active_npc = state.npc if isinstance(state.npc, NPC) else None
+    jinx_to_execute = None
+    executor = None
+
+    if active_npc and hasattr(active_npc, 'jinxs_dict') and command_name in active_npc.jinxs_dict:
+        jinx_to_execute = active_npc.jinxs_dict[command_name]
+        executor = active_npc
+    elif state.team and hasattr(state.team, 'jinxs_dict') and command_name in state.team.jinxs_dict:
+        jinx_to_execute = state.team.jinxs_dict[command_name]
+        executor = state.team
+    if jinx_to_execute:
+        args = all_command_parts[1:]  # Fix: use all_command_parts instead of command_parts
+        try:
+            # Create input dictionary from args based on jinx inputs
+            input_values = {}
+            if hasattr(jinx_to_execute, 'inputs') and jinx_to_execute.inputs:
+                for i, input_name in enumerate(jinx_to_execute.inputs):
+                    if i < len(args):
+                        input_values[input_name] = args[i]
+
+            # Execute the jinx with proper parameters
+            if isinstance(executor, NPC):
+                jinx_output = jinx_to_execute.execute(
+                    input_values=input_values,
+                    jinxs_dict=executor.jinxs_dict if hasattr(executor, 'jinxs_dict') else {},
+                    npc=executor,
+                    messages=state.messages
+                )
+            else:  # Team executor
+                jinx_output = jinx_to_execute.execute(
+                    input_values=input_values,
+                    jinxs_dict=executor.jinxs_dict if hasattr(executor, 'jinxs_dict') else {},
+                    npc=active_npc or state.npc,
+                    messages=state.messages
+                )
+            if isinstance(jinx_output, dict) and 'messages' in jinx_output:
+                state.messages = jinx_output['messages']
+                return state, str(jinx_output.get('output', jinx_output))
+            elif isinstance(jinx_output, dict):
+                return state, str(jinx_output.get('output', jinx_output))
+            else:
+                return state, jinx_output
+
+        except Exception as e:
+            import traceback
+            print(f"Error executing jinx '{command_name}':", file=sys.stderr)
+            traceback.print_exc()
+            return state, colored(f"Error executing jinx '{command_name}': {e}", "red")
+    if state.team and command_name in state.team.npcs:
+        new_npc = state.team.npcs[command_name]
+        state.npc = new_npc
+        return state, f"Switched to NPC: {new_npc.name}"
+
+    return state, colored(f"Unknown slash command, jinx, or NPC: {command_name}", "red")
+
+def process_pipeline_command(
+    cmd_segment: str,
+    stdin_input: Optional[str],
+    state: ShellState,
+    stream_final: bool,
+    review=True,
+    router=None,
+) -> Tuple[ShellState, Any]:
+    '''
+    Processing command
+    '''
+
+    if not cmd_segment:
+        return state, stdin_input
+
+    available_models_all = get_locally_available_models(state.current_path)
+    available_models_all_list = [item for key, item in available_models_all.items()]
+
+    model_override, provider_override, cmd_cleaned = get_model_and_provider(
+        cmd_segment, available_models_all_list
+    )
+    cmd_to_process = cmd_cleaned.strip()
+    if not cmd_to_process:
+        return state, stdin_input
+
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+
+    exec_model = model_override or npc_model or state.chat_model
+    exec_provider = provider_override or npc_provider or state.chat_provider
+
+    if cmd_to_process.startswith("/"):
+        return execute_slash_command(cmd_to_process, stdin_input, state, stream_final, router)
+
+    cmd_parts = parse_command_safely(cmd_to_process)
+    if not cmd_parts:
+        return state, stdin_input
+
+    command_name = cmd_parts[0]
+
+    if command_name == "cd":
+        return handle_cd_command(cmd_parts, state)
+
+    if command_name in interactive_commands:
+        return handle_interactive_command(cmd_parts, state)
+    if command_name in TERMINAL_EDITORS:
+        print(f"Starting interactive editor: {command_name}...")
+        full_command_str = " ".join(cmd_parts)
+        output = open_terminal_editor(full_command_str)
+        return state, output
+
+    if validate_bash_command(cmd_parts):
+        success, result = handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
+        if success:
+            return state, result
+        else:
+            print(colored(f"Bash command failed: {result}. Asking LLM for a fix...", "yellow"), file=sys.stderr)
+            fixer_prompt = f"The command '{cmd_to_process}' failed with the error: '{result}'. Provide the correct command."
+            response = execute_llm_command(
+                fixer_prompt,
+                model=exec_model,
+                provider=exec_provider,
+                npc=state.npc,
+                stream=stream_final,
+                messages=state.messages
+            )
+            state.messages = response['messages']
+            return state, response['response']
+    else:
+        full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+        path_cmd = 'The current working directory is: ' + state.current_path
+        ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+        platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+        info = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n'
+        state.messages.append({'role':'user', 'content':full_llm_cmd})
+
+
+        llm_result = check_llm_command(
+            full_llm_cmd,
+            model=exec_model,
+            provider=exec_provider,
+            api_url=state.api_url,
+            api_key=state.api_key,
+            npc=state.npc,
+            team=state.team,
+            messages=state.messages,
+            images=state.attachments,
+            stream=stream_final,
+            context=info,
+        )
+        #
+
+        if not review:
+            if isinstance(llm_result, dict):
+                state.messages = llm_result.get("messages", state.messages)
+                output = llm_result.get("output")
+                return state, output
+            else:
+                return state, llm_result
+
+        else:
+            return review_and_iterate_command(
+                original_command=full_llm_cmd,
+                initial_result=llm_result,
+                state=state,
+                exec_model=exec_model,
+                exec_provider=exec_provider,
+                stream_final=stream_final,
+                info=info
+            )
+def review_and_iterate_command(
+    original_command: str,
+    initial_result: Any,
+    state: ShellState,
+    exec_model: str,
+    exec_provider: str,
+    stream_final: bool,
+    info: str,
+    max_iterations: int = 2
+) -> Tuple[ShellState, Any]:
+    """
+    Simple iteration on LLM command result to improve quality.
+    """
+
+    # Extract current state
+    if isinstance(initial_result, dict):
+        current_output = initial_result.get("output")
+        current_messages = initial_result.get("messages", state.messages)
+    else:
+        current_output = initial_result
+        current_messages = state.messages
+
+    # Simple refinement prompt
+    refinement_prompt = f"""
+The previous response to "{original_command}" was:
+{current_output}
+
+Please review and improve this response if needed. Provide a better, more complete answer.
+"""
+
+    # Iterate with check_llm_command
+    refined_result = check_llm_command(
+        refinement_prompt,
+        model=exec_model,
+        provider=exec_provider,
+        api_url=state.api_url,
+        api_key=state.api_key,
+        npc=state.npc,
+        team=state.team,
+        messages=current_messages,
+        images=state.attachments,
+        stream=stream_final,
+        context=info,
+    )
+
+    # Update state and return
+    if isinstance(refined_result, dict):
+        state.messages = refined_result.get("messages", current_messages)
+        return state, refined_result.get("output", current_output)
+    else:
+        state.messages = current_messages
+        return state, refined_result
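Note: process_pipeline_command defaults to review=True, so every LLM-handled segment gets a second check_llm_command pass over its own output (review_and_iterate_command accepts max_iterations, but the code shown performs a single refinement). A sketch of opting out, with state and router assumed from the surrounding module:

state, output = process_pipeline_command(
    "summarize the README", None, state,
    stream_final=False, review=False, router=router,
)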
+def check_mode_switch(command: str, state: ShellState):
+    if command in ['/cmd', '/agent', '/chat',]:
+        state.current_mode = command[1:]
+        return True, state
+    return False, state
+
+def execute_command(
+    command: str,
+    state: ShellState,
+    review=True,
+    router=None,
+) -> Tuple[ShellState, Any]:
+
+    if not command.strip():
+        return state, ""
+    mode_change, state = check_mode_switch(command, state)
+    if mode_change:
+        return state, 'Mode changed.'
+
+    original_command_for_embedding = command
+    commands = split_by_pipes(command)
+    stdin_for_next = None
+    final_output = None
+    current_state = state
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+    active_model = npc_model or state.chat_model
+    active_provider = npc_provider or state.chat_provider
+    if state.current_mode == 'agent':
+        print('# of parsed commands: ', len(commands))
+        print('Commands:', '\n'.join(commands))
+        for i, cmd_segment in enumerate(commands):
+            render_markdown(f'- executing command {i+1}/{len(commands)}')
+            is_last_command = (i == len(commands) - 1)
+            stream_this_segment = state.stream_output and not is_last_command
+            try:
+                current_state, output = process_pipeline_command(
+                    cmd_segment.strip(),
+                    stdin_for_next,
+                    current_state,
+                    stream_final=stream_this_segment,
+                    review=review,
+                    router=router
+                )
+                if is_last_command:
+                    return current_state, output
+                if isinstance(output, str):
+                    stdin_for_next = output
+                elif not isinstance(output, str):
+                    try:
+                        if stream_this_segment:
+                            full_stream_output = print_and_process_stream_with_markdown(output,
+                                                                                        state.npc.model,
+                                                                                        state.npc.provider, show=True)
+                            stdin_for_next = full_stream_output
+                            if is_last_command:
+                                final_output = full_stream_output
+                    except:
+                        if output is not None:  # Try converting other types to string
+                            try:
+                                stdin_for_next = str(output)
+                            except Exception:
+                                print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
+                                stdin_for_next = None
+                        else:  # Output was None
+                            stdin_for_next = None
+            except Exception as pipeline_error:
+                import traceback
+                traceback.print_exc()
+                error_msg = colored(f"Error in pipeline stage {i+1} ('{cmd_segment[:50]}...'): {pipeline_error}", "red")
+                return current_state, error_msg
+
+        if final_output is not None and isinstance(final_output, str):
+            store_command_embeddings(original_command_for_embedding, final_output, current_state)
+
+        return current_state, final_output
+
+
+    elif state.current_mode == 'chat':
+        # Only treat as bash if it looks like a shell command (starts with known command or is a slash command)
+        cmd_parts = parse_command_safely(command)
+        is_probably_bash = (
+            cmd_parts
+            and (
+                cmd_parts[0] in interactive_commands
+                or cmd_parts[0] in BASH_COMMANDS
+                or command.strip().startswith("./")
+                or command.strip().startswith("/")
+            )
+        )
+        if is_probably_bash:
+            try:
+                command_name = cmd_parts[0]
+                if command_name in interactive_commands:
+                    return handle_interactive_command(cmd_parts, state)
+                elif command_name == "cd":
+                    return handle_cd_command(cmd_parts, state)
+                else:
+                    try:
+                        bash_state, bash_output = handle_bash_command(cmd_parts, command, None, state)
+                        return state, bash_output
+                    except Exception as bash_err:
+                        return state, colored(f"Bash execution failed: {bash_err}", "red")
+            except Exception:
+                pass  # Fall through to LLM
+
+        # Otherwise, treat as chat (LLM)
+        response = get_llm_response(
+            command,
+            model=active_model,
+            provider=active_provider,
+            npc=state.npc,
+            stream=state.stream_output,
+            messages=state.messages
+        )
+        state.messages = response['messages']
+        return state, response['response']
+
+    elif state.current_mode == 'cmd':
+
+        response = execute_llm_command(command,
+                                       model=active_model,
+                                       provider=active_provider,
+                                       npc=state.npc,
+                                       stream=state.stream_output,
+                                       messages=state.messages)
+        state.messages = response['messages']
+        return state, response['response']
|
|
2295
|
+
|
|
2296
|
+
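The dispatcher above is the heart of this refactor: in agent mode, execute_command splits the input on pipes and threads each segment's output into the next segment's stdin; chat mode first applies a bash heuristic and only falls back to the LLM when the input does not look like a shell command. A minimal sketch of how a caller might drive it follows; the mode assignment and the piped command are illustrative, not part of this diff:

    # Hypothetical driver; assumes setup_shell() below has already
    # produced a team and that module-level `initial_state` is populated.
    state = initial_state
    state.current_mode = 'agent'
    # Both pipe segments go through process_pipeline_command; the ls
    # output becomes stdin_for_next for the summarization segment.
    state, output = execute_command("ls -la | summarize the largest files", state)
    print(output)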
+def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
+
+    setup_npcsh_config()
+
+    db_path = os.getenv("NPCSH_DB_PATH", HISTORY_DB_DEFAULT_PATH)
+    db_path = os.path.expanduser(db_path)
+    os.makedirs(os.path.dirname(db_path), exist_ok=True)
+    command_history = CommandHistory(db_path)
+
+    if not is_npcsh_initialized():
+        print("Initializing NPCSH...")
+        initialize_base_npcs_if_needed(db_path)
+        print("NPCSH initialization complete. Restart or source ~/.npcshrc.")
+
+    try:
+        history_file = setup_readline()
+        atexit.register(save_readline_history)
+        atexit.register(command_history.close)
+    except:
+        pass
+
+    project_team_path = os.path.abspath(PROJECT_NPC_TEAM_PATH)
+    global_team_path = os.path.expanduser(DEFAULT_NPC_TEAM_PATH)
+    team_dir = None
+    default_forenpc_name = None
+
+    if os.path.exists(project_team_path):
+        team_dir = project_team_path
+        default_forenpc_name = "forenpc"
+    else:
+        if not os.path.exists('.npcsh_global'):
+            resp = input(f"No npc_team found in {os.getcwd()}. Create a new team here? [Y/n]: ").strip().lower()
+            if resp in ("", "y", "yes"):
+                team_dir = project_team_path
+                os.makedirs(team_dir, exist_ok=True)
+                default_forenpc_name = "forenpc"
+                forenpc_directive = input(
+                    f"Enter a primary directive for {default_forenpc_name} (default: 'You are the forenpc of the team...'): "
+                ).strip() or "You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests."
+                forenpc_model = input("Enter a model for your forenpc (default: llama3.2): ").strip() or "llama3.2"
+                forenpc_provider = input("Enter a provider for your forenpc (default: ollama): ").strip() or "ollama"
+
+                with open(os.path.join(team_dir, f"{default_forenpc_name}.npc"), "w") as f:
+                    yaml.dump({
+                        "name": default_forenpc_name, "primary_directive": forenpc_directive,
+                        "model": forenpc_model, "provider": forenpc_provider
+                    }, f)
+
+                ctx_path = os.path.join(team_dir, "team.ctx")
+                folder_context = input("Enter a short description for this project/team (optional): ").strip()
+                team_ctx_data = {
+                    "forenpc": default_forenpc_name, "model": forenpc_model,
+                    "provider": forenpc_provider, "api_key": None, "api_url": None,
+                    "context": folder_context if folder_context else None
+                }
+                use_jinxs = input("Use global jinxs folder (g) or copy to this project (c)? [g/c, default: g]: ").strip().lower()
+                if use_jinxs == "c":
+                    global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
+                    if os.path.exists(global_jinxs_dir):
+                        shutil.copytree(global_jinxs_dir, team_dir, dirs_exist_ok=True)
+                else:
+                    team_ctx_data["use_global_jinxs"] = True
+
+                with open(ctx_path, "w") as f:
+                    yaml.dump(team_ctx_data, f)
+            else:
+                render_markdown('From now on, npcsh will assume you will use the global team when activating from this folder. \n If you change your mind and want to initialize a team, use /init from within npcsh, `npc init` or `rm .npcsh_global` from the current working directory.')
+                with open(".npcsh_global", "w") as f:
+                    pass
+                team_dir = global_team_path
+                default_forenpc_name = "sibiji"
+        elif os.path.exists(global_team_path):
+            team_dir = global_team_path
+            default_forenpc_name = "sibiji"
+
+    team_ctx = {}
+    for filename in os.listdir(team_dir):
+        if filename.endswith(".ctx"):
+            try:
+                with open(os.path.join(team_dir, filename), "r") as f:
+                    team_ctx = yaml.safe_load(f) or {}
+                break
+            except Exception as e:
+                print(f"Warning: Could not load context file {filename}: {e}")
+
+    forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
+    #render_markdown(f"- Using forenpc: {forenpc_name}")
+
+    if team_ctx.get("use_global_jinxs", False):
+        jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
+    else:
+        jinxs_dir = os.path.join(team_dir, "jinxs")
+
+    jinxs_list = load_jinxs_from_directory(jinxs_dir)
+    jinxs_dict = {jinx.jinx_name: jinx for jinx in jinxs_list}
+
+    forenpc_obj = None
+    forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
+
+    #render_markdown('- Loaded team context'+ json.dumps(team_ctx, indent=2))
+
+    if os.path.exists(forenpc_path):
+        forenpc_obj = NPC(file=forenpc_path,
+                          jinxs=jinxs_list,
+                          db_conn=command_history.engine)
+        if forenpc_obj.model is None:
+            forenpc_obj.model = team_ctx.get("model", initial_state.chat_model)
+        if forenpc_obj.provider is None:
+            forenpc_obj.provider = team_ctx.get('provider', initial_state.chat_provider)
+
+    else:
+        print(f"Warning: Forenpc file '{forenpc_name}.npc' not found in {team_dir}.")
+
+    team = Team(team_path=team_dir,
+                forenpc=forenpc_obj,
+                jinxs=jinxs_dict)
+
+    for npc_name, npc_obj in team.npcs.items():
+        if not npc_obj.model:
+            npc_obj.model = initial_state.chat_model
+        if not npc_obj.provider:
+            npc_obj.provider = initial_state.chat_provider
+
+    # Also apply to the forenpc specifically
+    if team.forenpc and isinstance(team.forenpc, NPC):
+        if not team.forenpc.model:
+            team.forenpc.model = initial_state.chat_model
+        if not team.forenpc.provider:
+            team.forenpc.provider = initial_state.chat_provider
+    team_name_from_ctx = team_ctx.get("name")
+    if team_name_from_ctx:
+        team.name = team_name_from_ctx
+    elif team_dir and os.path.basename(team_dir) != 'npc_team':
+        team.name = os.path.basename(team_dir)
+    else:
+        team.name = "global_team"  # fallback for ~/.npcsh/npc_team
+
+    return command_history, team, forenpc_obj
+
+
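setup_shell resolves the team directory (a project-local npc_team, else the global ~/.npcsh/npc_team, with an interactive first-run wizard in between), loads the .ctx file and jinxs, and backfills models and providers from initial_state. A sketch of the intended bootstrap sequence, assuming the imports at the top of this module; the REPL loop itself is illustrative, not the actual npcsh entry point:

    command_history, team, forenpc = setup_shell()
    state = initial_state
    state.team = team
    state.npc = forenpc

    while True:
        user_input = input('npcsh> ')  # hypothetical prompt string
        state, output = execute_command(user_input, state)
        # process_result (defined below) handles rendering and persistence.
        process_result(user_input, state, output, command_history)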
+def process_result(
+    user_input: str,
+    result_state: ShellState,
+    output: Any,
+    command_history: CommandHistory,
+):
+
+    team_name = result_state.team.name if result_state.team else "__none__"
+    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
+
+    # Determine the actual NPC object to use for this turn's operations
+    active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
+        name="default",
+        model=result_state.chat_model,
+        provider=result_state.chat_provider,
+        db_conn=command_history.engine)
+    save_conversation_message(
+        command_history,
+        result_state.conversation_id,
+        "user",
+        user_input,
+        wd=result_state.current_path,
+        model=active_npc.model,
+        provider=active_npc.provider,
+        npc=npc_name,
+        team=team_name,
+        attachments=result_state.attachments,
+    )
+    result_state.attachments = None
+
+    final_output_str = None
+    output_content = output.get('output') if isinstance(output, dict) else output
+    model_for_stream = output.get('model', active_npc.model) if isinstance(output, dict) else active_npc.model
+    provider_for_stream = output.get('provider', active_npc.provider) if isinstance(output, dict) else active_npc.provider
+
+    print('\n')
+    if user_input == '/help':
+        render_markdown(output.get('output'))
+    elif result_state.stream_output:
+
+        final_output_str = print_and_process_stream_with_markdown(output_content,
+                                                                  model_for_stream,
+                                                                  provider_for_stream,
+                                                                  show=True)
+
+    elif output_content is not None:
+        final_output_str = str(output_content)
+        render_markdown(final_output_str)
+
+    if final_output_str:
+        if result_state.messages:
+            if result_state.messages[-1].get("role") != "assistant":
+                result_state.messages.append({"role": "assistant",
+                                              "content": final_output_str})
+        save_conversation_message(
+            command_history,
+            result_state.conversation_id,
+            "assistant",
+            final_output_str,
+            wd=result_state.current_path,
+            model=active_npc.model,
+            provider=active_npc.provider,
+            npc=npc_name,
+            team=team_name,
+        )
+
+        conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
+        engine = command_history.engine
+
+
+        if result_state.build_kg:
+            try:
+                if not should_skip_kg_processing(user_input, final_output_str):
+
+                    npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
+                    evolved_npc_kg, _ = kg_evolve_incremental(
+                        existing_kg=npc_kg,
+                        new_content_text=conversation_turn_text,
+                        model=active_npc.model,
+                        provider=active_npc.provider,
+                        get_concepts=True,
+                        link_concepts_facts=False,
+                        link_concepts_concepts=False,
+                        link_facts_facts=False,
+                    )
+                    save_kg_to_db(engine,
+                                  evolved_npc_kg,
+                                  team_name,
+                                  npc_name,
+                                  result_state.current_path)
+            except Exception as e:
+                print(colored(f"Error during real-time KG evolution: {e}", "red"))
+
+    # --- Part 3: Periodic Team Context Suggestions ---
+    result_state.turn_count += 1
+
+    if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
+        print(colored("\nChecking for potential team improvements...", "cyan"))
+        try:
+            summary = breathe(messages=result_state.messages[-20:],
+                              npc=active_npc)
+            characterization = summary.get('output')
+
+            if characterization and result_state.team:
+                team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+                ctx_data = {}
+                if os.path.exists(team_ctx_path):
+                    with open(team_ctx_path, 'r') as f:
+                        ctx_data = yaml.safe_load(f) or {}
+                current_context = ctx_data.get('context', '')
+
+                prompt = f"""Based on this characterization: {characterization},
+
+suggest changes (additions, deletions, edits) to the team's context.
+Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.
+
+Current Context: "{current_context}".
+
+Respond with JSON: {{"suggestion": "Your sentence."}}"""
+                response = get_llm_response(prompt, npc=active_npc, format="json")
+                suggestion = response.get("response", {}).get("suggestion")
+
+                if suggestion:
+                    new_context = (current_context + " " + suggestion).strip()
+                    print(colored(f"{result_state.npc.name} suggests updating team context:", "yellow"))
+                    print(f"  - OLD: {current_context}\n  + NEW: {new_context}")
+                    if input("Apply? [y/N]: ").strip().lower() == 'y':
+                        ctx_data['context'] = new_context
+                        with open(team_ctx_path, 'w') as f:
+                            yaml.dump(ctx_data, f)
+                        print(colored("Team context updated.", "green"))
+                    else:
+                        print("Suggestion declined.")
+        except Exception as e:
+            import traceback
+            print(colored(f"Could not generate team suggestions: {e}", "yellow"))
+            traceback.print_exc()
+
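process_result persists both sides of the turn, optionally evolves the per-NPC knowledge graph when build_kg is set, and every tenth turn asks the active NPC to propose a team.ctx amendment. It accepts either a plain value or a dict carrying its own output/model/provider keys, as this sketch shows; the literal values are illustrative only:

    # String-shaped output: rendered (or streamed), then saved as the
    # assistant message for this conversation turn.
    process_result("explain this error", state, "The traceback means...", command_history)

    # Dict-shaped output, e.g. from a handler that overrides the model
    # and provider used for streaming.
    process_result(
        "summarize",
        state,
        {"output": "Summary...", "model": "llama3.2", "provider": "ollama"},
        command_history,
    )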
 initial_state = ShellState(
     conversation_id=start_new_conversation(),
     stream_output=NPCSH_STREAM_OUTPUT,