npcsh 1.0.16__py3-none-any.whl → 1.0.18__py3-none-any.whl

This diff shows the published contents of two package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
npcsh/_state.py CHANGED
@@ -3,10 +3,16 @@ from colorama import Fore, Back, Style
 from dataclasses import dataclass, field
 import filecmp
 import os
+from pathlib import Path
 import platform
 import pty
+try:
+    import readline
+except:
+    pass
 import re
 import select
+import shlex
 import shutil
 import signal
 import sqlite3
@@ -25,6 +31,247 @@ from npcpy.memory.command_history import (
 )
 from npcpy.npc_compiler import NPC, Team
 
+
+from npcpy.memory.command_history import CommandHistory
+
+
+
+import os
+import sys
+import atexit
+import subprocess
+import shlex
+import re
+from datetime import datetime
+import importlib.metadata
+import textwrap
+from typing import Optional, List, Dict, Any, Tuple, Union
+from dataclasses import dataclass, field
+import platform
+try:
+    from termcolor import colored
+except:
+    pass
+
+try:
+    import chromadb
+except ImportError:
+    chromadb = None
+import shutil
+import sqlite3
+import yaml
+
+
+from npcpy.npc_sysenv import (
+    print_and_process_stream_with_markdown,
+    render_markdown,
+    get_model_and_provider,
+    get_locally_available_models,
+    lookup_provider
+)
+
+from npcpy.memory.command_history import (
+    CommandHistory,
+    save_conversation_message,
+    load_kg_from_db,
+    save_kg_to_db,
+)
+from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory
+from npcpy.llm_funcs import (
+    check_llm_command,
+    get_llm_response,
+    execute_llm_command,
+    breathe,
+
+)
+from npcpy.memory.knowledge_graph import (
+    kg_evolve_incremental,
+
+)
+from npcpy.gen.embeddings import get_embeddings
+
+try:
+    import readline
+except:
+    print('no readline support, some features may not work as desired. ')
+
+try:
+    VERSION = importlib.metadata.version("npcsh")
+except importlib.metadata.PackageNotFoundError:
+    VERSION = "unknown"
+
+
+
+
+NPCSH_CHAT_MODEL = os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b")
+# print("NPCSH_CHAT_MODEL", NPCSH_CHAT_MODEL)
+NPCSH_CHAT_PROVIDER = os.environ.get("NPCSH_CHAT_PROVIDER", "ollama")
+# print("NPCSH_CHAT_PROVIDER", NPCSH_CHAT_PROVIDER)
+NPCSH_DB_PATH = os.path.expanduser(
+    os.environ.get("NPCSH_DB_PATH", "~/npcsh_history.db")
+)
+NPCSH_VECTOR_DB_PATH = os.path.expanduser(
+    os.environ.get("NPCSH_VECTOR_DB_PATH", "~/npcsh_chroma.db")
+)
+#DEFAULT MODES = ['CHAT', 'AGENT', 'CODE', ]
+
+NPCSH_DEFAULT_MODE = os.path.expanduser(os.environ.get("NPCSH_DEFAULT_MODE", "agent"))
+NPCSH_VISION_MODEL = os.environ.get("NPCSH_VISION_MODEL", "gemma3:4b")
+NPCSH_VISION_PROVIDER = os.environ.get("NPCSH_VISION_PROVIDER", "ollama")
+NPCSH_IMAGE_GEN_MODEL = os.environ.get(
+    "NPCSH_IMAGE_GEN_MODEL", "runwayml/stable-diffusion-v1-5"
+)
+NPCSH_IMAGE_GEN_PROVIDER = os.environ.get("NPCSH_IMAGE_GEN_PROVIDER", "diffusers")
+NPCSH_VIDEO_GEN_MODEL = os.environ.get(
+    "NPCSH_VIDEO_GEN_MODEL", "damo-vilab/text-to-video-ms-1.7b"
+)
+NPCSH_VIDEO_GEN_PROVIDER = os.environ.get("NPCSH_VIDEO_GEN_PROVIDER", "diffusers")
+
+NPCSH_EMBEDDING_MODEL = os.environ.get("NPCSH_EMBEDDING_MODEL", "nomic-embed-text")
+NPCSH_EMBEDDING_PROVIDER = os.environ.get("NPCSH_EMBEDDING_PROVIDER", "ollama")
+NPCSH_REASONING_MODEL = os.environ.get("NPCSH_REASONING_MODEL", "deepseek-r1")
+NPCSH_REASONING_PROVIDER = os.environ.get("NPCSH_REASONING_PROVIDER", "ollama")
+NPCSH_STREAM_OUTPUT = eval(os.environ.get("NPCSH_STREAM_OUTPUT", "0")) == 1
+NPCSH_API_URL = os.environ.get("NPCSH_API_URL", None)
+NPCSH_SEARCH_PROVIDER = os.environ.get("NPCSH_SEARCH_PROVIDER", "duckduckgo")
+NPCSH_BUILD_KG = os.environ.get("NPCSH_BUILD_KG") == "1"
+READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_history")
+
+
+
+@dataclass
+class ShellState:
+    npc: Optional[Union[NPC, str]] = None
+    team: Optional[Team] = None
+    messages: List[Dict[str, Any]] = field(default_factory=list)
+    mcp_client: Optional[Any] = None
+    conversation_id: Optional[int] = None
+    chat_model: str = NPCSH_CHAT_MODEL
+    chat_provider: str = NPCSH_CHAT_PROVIDER
+    vision_model: str = NPCSH_VISION_MODEL
+    vision_provider: str = NPCSH_VISION_PROVIDER
+    embedding_model: str = NPCSH_EMBEDDING_MODEL
+    embedding_provider: str = NPCSH_EMBEDDING_PROVIDER
+    reasoning_model: str = NPCSH_REASONING_MODEL
+    reasoning_provider: str = NPCSH_REASONING_PROVIDER
+    search_provider: str = NPCSH_SEARCH_PROVIDER
+    image_gen_model: str = NPCSH_IMAGE_GEN_MODEL
+    image_gen_provider: str = NPCSH_IMAGE_GEN_PROVIDER
+    video_gen_model: str = NPCSH_VIDEO_GEN_MODEL
+    video_gen_provider: str = NPCSH_VIDEO_GEN_PROVIDER
+    current_mode: str = NPCSH_DEFAULT_MODE
+    build_kg: bool = NPCSH_BUILD_KG
+    api_key: Optional[str] = None
+    api_url: Optional[str] = NPCSH_API_URL
+    current_path: str = field(default_factory=os.getcwd)
+    stream_output: bool = NPCSH_STREAM_OUTPUT
+    attachments: Optional[List[Any]] = None
+    turn_count: int = 0
+    def get_model_for_command(self, model_type: str = "chat"):
+        if model_type == "chat":
+            return self.chat_model, self.chat_provider
+        elif model_type == "vision":
+            return self.vision_model, self.vision_provider
+        elif model_type == "embedding":
+            return self.embedding_model, self.embedding_provider
+        elif model_type == "reasoning":
+            return self.reasoning_model, self.reasoning_provider
+        elif model_type == "image_gen":
+            return self.image_gen_model, self.image_gen_provider
+        elif model_type == "video_gen":
+            return self.video_gen_model, self.video_gen_provider
+        else:
+            return self.chat_model, self.chat_provider # Default fallback
+CONFIG_KEY_MAP = {
+    # Chat
+    "model": "NPCSH_CHAT_MODEL",
+    "chatmodel": "NPCSH_CHAT_MODEL",
+    "provider": "NPCSH_CHAT_PROVIDER",
+    "chatprovider": "NPCSH_CHAT_PROVIDER",
+
+    # Vision
+    "vmodel": "NPCSH_VISION_MODEL",
+    "visionmodel": "NPCSH_VISION_MODEL",
+    "vprovider": "NPCSH_VISION_PROVIDER",
+    "visionprovider": "NPCSH_VISION_PROVIDER",
+
+    # Embedding
+    "emodel": "NPCSH_EMBEDDING_MODEL",
+    "embeddingmodel": "NPCSH_EMBEDDING_MODEL",
+    "eprovider": "NPCSH_EMBEDDING_PROVIDER",
+    "embeddingprovider": "NPCSH_EMBEDDING_PROVIDER",
+
+    # Reasoning
+    "rmodel": "NPCSH_REASONING_MODEL",
+    "reasoningmodel": "NPCSH_REASONING_MODEL",
+    "rprovider": "NPCSH_REASONING_PROVIDER",
+    "reasoningprovider": "NPCSH_REASONING_PROVIDER",
+
+    # Image generation
+    "igmodel": "NPCSH_IMAGE_GEN_MODEL",
+    "imagegenmodel": "NPCSH_IMAGE_GEN_MODEL",
+    "igprovider": "NPCSH_IMAGE_GEN_PROVIDER",
+    "imagegenprovider": "NPCSH_IMAGE_GEN_PROVIDER",
+
+    # Video generation
+    "vgmodel": "NPCSH_VIDEO_GEN_MODEL",
+    "videogenmodel": "NPCSH_VIDEO_GEN_MODEL",
+    "vgprovider": "NPCSH_VIDEO_GEN_PROVIDER",
+    "videogenprovider": "NPCSH_VIDEO_GEN_PROVIDER",
+
+    # Other
+    "sprovider": "NPCSH_SEARCH_PROVIDER",
+    "mode": "NPCSH_DEFAULT_MODE",
+    "stream": "NPCSH_STREAM_OUTPUT",
+    "apiurl": "NPCSH_API_URL",
+    "buildkg": "NPCSH_BUILD_KG",
+}
+
+
+def set_npcsh_config_value(key: str, value: str):
+    """
+    Set NPCSH config values at runtime using shorthand (case-insensitive) or full keys.
+    Updates os.environ, globals, and ShellState defaults.
+    """
+    # case-insensitive lookup for shorthand
+    env_key = CONFIG_KEY_MAP.get(key.lower(), key)
+
+    # update env
+    os.environ[env_key] = value
+
+    # normalize types
+    if env_key in ["NPCSH_STREAM_OUTPUT", "NPCSH_BUILD_KG"]:
+        parsed_val = value.strip().lower() in ["1", "true", "yes"]
+    elif env_key.endswith("_PATH"):
+        parsed_val = os.path.expanduser(value)
+    else:
+        parsed_val = value
+
+    # update global
+    globals()[env_key] = parsed_val
+
+    # update ShellState defaults
+    field_map = {
+        "NPCSH_CHAT_MODEL": "chat_model",
+        "NPCSH_CHAT_PROVIDER": "chat_provider",
+        "NPCSH_VISION_MODEL": "vision_model",
+        "NPCSH_VISION_PROVIDER": "vision_provider",
+        "NPCSH_EMBEDDING_MODEL": "embedding_model",
+        "NPCSH_EMBEDDING_PROVIDER": "embedding_provider",
+        "NPCSH_REASONING_MODEL": "reasoning_model",
+        "NPCSH_REASONING_PROVIDER": "reasoning_provider",
+        "NPCSH_SEARCH_PROVIDER": "search_provider",
+        "NPCSH_IMAGE_GEN_MODEL": "image_gen_model",
+        "NPCSH_IMAGE_GEN_PROVIDER": "image_gen_provider",
+        "NPCSH_VIDEO_GEN_MODEL": "video_gen_model",
+        "NPCSH_VIDEO_GEN_PROVIDER": "video_gen_provider",
+        "NPCSH_DEFAULT_MODE": "current_mode",
+        "NPCSH_BUILD_KG": "build_kg",
+        "NPCSH_API_URL": "api_url",
+        "NPCSH_STREAM_OUTPUT": "stream_output",
+    }
+    if env_key in field_map:
+        setattr(ShellState, field_map[env_key], parsed_val)
 def get_npc_path(npc_name: str, db_path: str) -> str:
     project_npc_team_dir = os.path.abspath("./npc_team")
     project_npc_path = os.path.join(project_npc_team_dir, f"{npc_name}.npc")
@@ -513,7 +760,7 @@ interactive_commands = {
 }
 
 
-def start_interactive_session(command: list) -> int:
+def start_interactive_session(command: str) -> int:
     """
     Starts an interactive session. Only works on Unix. On Windows, print a message and return 1.
     """
@@ -1015,41 +1262,6 @@ def get_setting_windows(key, default=None):
     config = read_rc_file_windows(get_npcshrc_path_windows())
     return config.get(key, default)
 
-NPCSH_CHAT_MODEL = os.environ.get("NPCSH_CHAT_MODEL", "llama3.2")
-# print("NPCSH_CHAT_MODEL", NPCSH_CHAT_MODEL)
-NPCSH_CHAT_PROVIDER = os.environ.get("NPCSH_CHAT_PROVIDER", "ollama")
-# print("NPCSH_CHAT_PROVIDER", NPCSH_CHAT_PROVIDER)
-NPCSH_DB_PATH = os.path.expanduser(
-    os.environ.get("NPCSH_DB_PATH", "~/npcsh_history.db")
-)
-NPCSH_VECTOR_DB_PATH = os.path.expanduser(
-    os.environ.get("NPCSH_VECTOR_DB_PATH", "~/npcsh_chroma.db")
-)
-#DEFAULT MODES = ['CHAT', 'AGENT', 'CODE', ]
-
-NPCSH_DEFAULT_MODE = os.path.expanduser(os.environ.get("NPCSH_DEFAULT_MODE", "agent"))
-NPCSH_VISION_MODEL = os.environ.get("NPCSH_VISION_MODEL", "llava:7b")
-NPCSH_VISION_PROVIDER = os.environ.get("NPCSH_VISION_PROVIDER", "ollama")
-NPCSH_IMAGE_GEN_MODEL = os.environ.get(
-    "NPCSH_IMAGE_GEN_MODEL", "runwayml/stable-diffusion-v1-5"
-)
-NPCSH_IMAGE_GEN_PROVIDER = os.environ.get("NPCSH_IMAGE_GEN_PROVIDER", "diffusers")
-NPCSH_VIDEO_GEN_MODEL = os.environ.get(
-    "NPCSH_VIDEO_GEN_MODEL", "damo-vilab/text-to-video-ms-1.7b"
-)
-NPCSH_VIDEO_GEN_PROVIDER = os.environ.get("NPCSH_VIDEO_GEN_PROVIDER", "diffusers")
-
-NPCSH_EMBEDDING_MODEL = os.environ.get("NPCSH_EMBEDDING_MODEL", "nomic-embed-text")
-NPCSH_EMBEDDING_PROVIDER = os.environ.get("NPCSH_EMBEDDING_PROVIDER", "ollama")
-NPCSH_REASONING_MODEL = os.environ.get("NPCSH_REASONING_MODEL", "deepseek-r1")
-NPCSH_REASONING_PROVIDER = os.environ.get("NPCSH_REASONING_PROVIDER", "ollama")
-NPCSH_STREAM_OUTPUT = eval(os.environ.get("NPCSH_STREAM_OUTPUT", "0")) == 1
-NPCSH_API_URL = os.environ.get("NPCSH_API_URL", None)
-NPCSH_SEARCH_PROVIDER = os.environ.get("NPCSH_SEARCH_PROVIDER", "duckduckgo")
-NPCSH_BUILD_KG = os.environ.get("NPCSH_BUILD_KG") == "1"
-READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_history")
-
-
 
 def setup_readline() -> str:
     import readline
@@ -1084,50 +1296,1301 @@ def save_readline_history():
 
 
 
+TERMINAL_EDITORS = ["vim", "emacs", "nano"]
+EMBEDDINGS_DB_PATH = os.path.expanduser("~/npcsh_chroma.db")
+HISTORY_DB_DEFAULT_PATH = os.path.expanduser("~/npcsh_history.db")
+READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_readline_history")
+DEFAULT_NPC_TEAM_PATH = os.path.expanduser("~/.npcsh/npc_team/")
+PROJECT_NPC_TEAM_PATH = "./npc_team/"
 
-@dataclass
-class ShellState:
-    npc: Optional[Union[NPC, str]] = None
-    team: Optional[Team] = None
-    messages: List[Dict[str, Any]] = field(default_factory=list)
-    mcp_client: Optional[Any] = None
-    conversation_id: Optional[int] = None
-    chat_model: str = NPCSH_CHAT_MODEL
-    chat_provider: str = NPCSH_CHAT_PROVIDER
-    vision_model: str = NPCSH_VISION_MODEL
-    vision_provider: str = NPCSH_VISION_PROVIDER
-    embedding_model: str = NPCSH_EMBEDDING_MODEL
-    embedding_provider: str = NPCSH_EMBEDDING_PROVIDER
-    reasoning_model: str = NPCSH_REASONING_MODEL
-    reasoning_provider: str = NPCSH_REASONING_PROVIDER
-    search_provider: str = NPCSH_SEARCH_PROVIDER
-    image_gen_model: str = NPCSH_IMAGE_GEN_MODEL
-    image_gen_provider: str = NPCSH_IMAGE_GEN_PROVIDER
-    video_gen_model: str = NPCSH_VIDEO_GEN_MODEL
-    video_gen_provider: str = NPCSH_VIDEO_GEN_PROVIDER
-    current_mode: str = NPCSH_DEFAULT_MODE
-    build_kg: bool = NPCSH_BUILD_KG,
-    api_key: Optional[str] = None
-    api_url: Optional[str] = NPCSH_API_URL
-    current_path: str = field(default_factory=os.getcwd)
-    stream_output: bool = NPCSH_STREAM_OUTPUT
-    attachments: Optional[List[Any]] = None
-    turn_count: int =0
-    def get_model_for_command(self, model_type: str = "chat"):
-        if model_type == "chat":
-            return self.chat_model, self.chat_provider
-        elif model_type == "vision":
-            return self.vision_model, self.vision_provider
-        elif model_type == "embedding":
-            return self.embedding_model, self.embedding_provider
-        elif model_type == "reasoning":
-            return self.reasoning_model, self.reasoning_provider
-        elif model_type == "image_gen":
-            return self.image_gen_model, self.image_gen_provider
-        elif model_type == "video_gen":
-            return self.video_gen_model, self.video_gen_provider
+# --- Global Clients ---
+try:
+    chroma_client = chromadb.PersistentClient(path=EMBEDDINGS_DB_PATH) if chromadb else None
+except Exception as e:
+    print(f"Warning: Failed to initialize ChromaDB client at {EMBEDDINGS_DB_PATH}: {e}")
+    chroma_client = None
+
+
+
+
+def get_path_executables() -> List[str]:
+    """Get executables from PATH (cached for performance)"""
+    if not hasattr(get_path_executables, '_cache'):
+        executables = set()
+        path_dirs = os.environ.get('PATH', '').split(os.pathsep)
+        for path_dir in path_dirs:
+            if os.path.isdir(path_dir):
+                try:
+                    for item in os.listdir(path_dir):
+                        item_path = os.path.join(path_dir, item)
+                        if os.path.isfile(item_path) and os.access(item_path, os.X_OK):
+                            executables.add(item)
+                except (PermissionError, OSError):
+                    continue
+        get_path_executables._cache = sorted(list(executables))
+    return get_path_executables._cache
+
+
+import logging
+
+# Set up completion logger
+completion_logger = logging.getLogger('npcsh.completion')
+completion_logger.setLevel(logging.WARNING) # Default to WARNING (quiet)
+
+# Add handler if not already present
+if not completion_logger.handlers:
+    handler = logging.StreamHandler(sys.stderr)
+    formatter = logging.Formatter('[%(name)s] %(message)s')
+    handler.setFormatter(formatter)
+    completion_logger.addHandler(handler)
+
+def make_completer(shell_state: ShellState, router: Any):
+    def complete(text: str, state_index: int) -> Optional[str]:
+        """Main completion function"""
+        try:
+            buffer = readline.get_line_buffer()
+            begidx = readline.get_begidx()
+            endidx = readline.get_endidx()
+
+            completion_logger.debug(f"text='{text}', buffer='{buffer}', begidx={begidx}, endidx={endidx}, state_index={state_index}")
+
+            matches = []
+
+            # Check if we're completing a slash command
+            if begidx > 0 and buffer[begidx-1] == '/':
+                completion_logger.debug(f"Slash command completion - text='{text}'")
+                slash_commands = get_slash_commands(shell_state, router)
+                completion_logger.debug(f"Available slash commands: {slash_commands}")
+
+                if text == '':
+                    matches = [cmd[1:] for cmd in slash_commands]
+                else:
+                    full_text = '/' + text
+                    matching_commands = [cmd for cmd in slash_commands if cmd.startswith(full_text)]
+                    matches = [cmd[1:] for cmd in matching_commands]
+
+                completion_logger.debug(f"Slash command matches: {matches}")
+
+            elif is_command_position(buffer, begidx):
+                completion_logger.debug("Command position detected")
+                bash_matches = [cmd for cmd in BASH_COMMANDS if cmd.startswith(text)]
+                matches.extend(bash_matches)
+
+                interactive_matches = [cmd for cmd in interactive_commands.keys() if cmd.startswith(text)]
+                matches.extend(interactive_matches)
+
+                if len(text) >= 1:
+                    path_executables = get_path_executables()
+                    exec_matches = [cmd for cmd in path_executables if cmd.startswith(text)]
+                    matches.extend(exec_matches[:20])
+            else:
+                completion_logger.debug("File completion")
+                matches = get_file_completions(text)
+
+            matches = sorted(list(set(matches)))
+            completion_logger.debug(f"Final matches: {matches}")
+
+            if state_index < len(matches):
+                result = matches[state_index]
+                completion_logger.debug(f"Returning: '{result}'")
+                return result
+            else:
+                completion_logger.debug(f"No match for state_index {state_index}")
+
+        except Exception as e:
+            completion_logger.error(f"Exception in completion: {e}")
+            completion_logger.debug("Exception details:", exc_info=True)
+
+        return None
+
+    return complete
+
+def get_slash_commands(state: ShellState, router: Any) -> List[str]:
+    """Get available slash commands from the provided router and team"""
+    commands = []
+
+    if router and hasattr(router, 'routes'):
+        router_cmds = [f"/{cmd}" for cmd in router.routes.keys()]
+        commands.extend(router_cmds)
+        completion_logger.debug(f"Router commands: {router_cmds}")
+
+    # Team jinxs
+    if state.team and hasattr(state.team, 'jinxs_dict'):
+        jinx_cmds = [f"/{jinx}" for jinx in state.team.jinxs_dict.keys()]
+        commands.extend(jinx_cmds)
+        completion_logger.debug(f"Jinx commands: {jinx_cmds}")
+
+    # NPC names for switching
+    if state.team and hasattr(state.team, 'npcs'):
+        npc_cmds = [f"/{npc}" for npc in state.team.npcs.keys()]
+        commands.extend(npc_cmds)
+        completion_logger.debug(f"NPC commands: {npc_cmds}")
+
+    # Mode switching commands
+    mode_cmds = ['/cmd', '/agent', '/chat']
+    commands.extend(mode_cmds)
+    completion_logger.debug(f"Mode commands: {mode_cmds}")
+
+    result = sorted(commands)
+    completion_logger.debug(f"Final slash commands: {result}")
+    return result
+def get_file_completions(text: str) -> List[str]:
+    """Get file/directory completions"""
+    try:
+        if text.startswith('/'):
+            basedir = os.path.dirname(text) or '/'
+            prefix = os.path.basename(text)
+        elif text.startswith('./') or text.startswith('../'):
+            basedir = os.path.dirname(text) or '.'
+            prefix = os.path.basename(text)
         else:
-            return self.chat_model, self.chat_provider # Default fallback
+            basedir = '.'
+            prefix = text
+
+        if not os.path.exists(basedir):
+            return []
+
+        matches = []
+        try:
+            for item in os.listdir(basedir):
+                if item.startswith(prefix):
+                    full_path = os.path.join(basedir, item)
+                    if basedir == '.':
+                        completion = item
+                    else:
+                        completion = os.path.join(basedir, item)
+
+                    # Just return the name, let readline handle spacing/slashes
+                    matches.append(completion)
+        except (PermissionError, OSError):
+            pass
+
+        return sorted(matches)
+    except Exception:
+        return []
+def is_command_position(buffer: str, begidx: int) -> bool:
+    """Determine if cursor is at a command position"""
+    # Get the part of buffer before the current word
+    before_word = buffer[:begidx]
+
+    # Split by command separators
+    parts = re.split(r'[|;&]', before_word)
+    current_command_part = parts[-1].strip()
+
+    # If there's nothing before the current word in this command part,
+    # or only whitespace, we're at command position
+    return len(current_command_part) == 0
+
+
+def readline_safe_prompt(prompt: str) -> str:
+    ansi_escape = re.compile(r"(\033\[[0-9;]*[a-zA-Z])")
+    return ansi_escape.sub(r"\001\1\002", prompt)
+
+def print_jinxs(jinxs):
+    output = "Available jinxs:\n"
+    for jinx in jinxs:
+        output += f" {jinx.jinx_name}\n"
+        output += f" Description: {jinx.description}\n"
+        output += f" Inputs: {jinx.inputs}\n"
+    return output
+
+def open_terminal_editor(command: str) -> str:
+    try:
+        os.system(command)
+        return 'Terminal editor closed.'
+    except Exception as e:
+        return f"Error opening terminal editor: {e}"
+
+def get_multiline_input(prompt: str) -> str:
+    lines = []
+    current_prompt = prompt
+    while True:
+        try:
+            line = input(current_prompt)
+            if line.endswith("\\"):
+                lines.append(line[:-1])
+                current_prompt = readline_safe_prompt("> ")
+            else:
+                lines.append(line)
+                break
+        except EOFError:
+            print("Goodbye!")
+            sys.exit(0)
+    return "\n".join(lines)
+
+def split_by_pipes(command: str) -> List[str]:
+    parts = []
+    current = ""
+    in_single_quote = False
+    in_double_quote = False
+    escape = False
+
+    for char in command:
+        if escape:
+            current += char
+            escape = False
+        elif char == '\\':
+            escape = True
+            current += char
+        elif char == "'" and not in_double_quote:
+            in_single_quote = not in_single_quote
+            current += char
+        elif char == '"' and not in_single_quote:
+            in_double_quote = not in_double_quote
+            current += char
+        elif char == '|' and not in_single_quote and not in_double_quote:
+            parts.append(current.strip())
+            current = ""
+        else:
+            current += char
+
+    if current:
+        parts.append(current.strip())
+    return parts
+
+def parse_command_safely(cmd: str) -> List[str]:
+    try:
+        return shlex.split(cmd)
+    except ValueError as e:
+        if "No closing quotation" in str(e):
+            if cmd.count('"') % 2 == 1:
+                cmd += '"'
+            elif cmd.count("'") % 2 == 1:
+                cmd += "'"
+            try:
+                return shlex.split(cmd)
+            except ValueError:
+                return cmd.split()
+        else:
+            return cmd.split()
+
+def get_file_color(filepath: str) -> tuple:
+    if not os.path.exists(filepath):
+        return "grey", []
+    if os.path.isdir(filepath):
+        return "blue", ["bold"]
+    elif os.access(filepath, os.X_OK) and not os.path.isdir(filepath):
+        return "green", ["bold"]
+    elif filepath.endswith((".zip", ".tar", ".gz", ".bz2", ".xz", ".7z")):
+        return "red", []
+    elif filepath.endswith((".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff")):
+        return "magenta", []
+    elif filepath.endswith((".py", ".pyw")):
+        return "yellow", []
+    elif filepath.endswith((".sh", ".bash", ".zsh")):
+        return "green", []
+    elif filepath.endswith((".c", ".cpp", ".h", ".hpp")):
+        return "cyan", []
+    elif filepath.endswith((".js", ".ts", ".jsx", ".tsx")):
+        return "yellow", []
+    elif filepath.endswith((".html", ".css", ".scss", ".sass")):
+        return "magenta", []
+    elif filepath.endswith((".md", ".txt", ".log")):
+        return "white", []
+    elif os.path.basename(filepath).startswith("."):
+        return "cyan", []
+    else:
+        return "white", []
+
+def format_file_listing(output: str) -> str:
+    colored_lines = []
+    current_dir = os.getcwd()
+    for line in output.strip().split("\n"):
+        parts = line.split()
+        if not parts:
+            colored_lines.append(line)
+            continue
+
+        filepath_guess = parts[-1]
+        potential_path = os.path.join(current_dir, filepath_guess)
+
+        color, attrs = get_file_color(potential_path)
+        colored_filepath = colored(filepath_guess, color, attrs=attrs)
+
+        if len(parts) > 1 :
+            # Handle cases like 'ls -l' where filename is last
+            colored_line = " ".join(parts[:-1] + [colored_filepath])
+        else:
+            # Handle cases where line is just the filename
+            colored_line = colored_filepath
+
+        colored_lines.append(colored_line)
+
+    return "\n".join(colored_lines)
+
+def wrap_text(text: str, width: int = 80) -> str:
+    lines = []
+    for paragraph in text.split("\n"):
+        if len(paragraph) > width:
+            lines.extend(textwrap.wrap(paragraph, width=width, replace_whitespace=False, drop_whitespace=False))
+        else:
+            lines.append(paragraph)
+    return "\n".join(lines)
+
+# --- Readline Setup and Completion ---
+
+def setup_readline() -> str:
+    """Setup readline with history and completion"""
+    try:
+        readline.read_history_file(READLINE_HISTORY_FILE)
+        readline.set_history_length(1000)
+
+        # Don't set completer here - it will be set in run_repl with state
+        readline.parse_and_bind("tab: complete")
+
+        readline.parse_and_bind("set enable-bracketed-paste on")
+        readline.parse_and_bind(r'"\C-r": reverse-search-history')
+        readline.parse_and_bind(r'"\C-e": end-of-line')
+        readline.parse_and_bind(r'"\C-a": beginning-of-line')
+
+        return READLINE_HISTORY_FILE
+
+    except FileNotFoundError:
+        pass
+    except OSError as e:
+        print(f"Warning: Could not read readline history file {READLINE_HISTORY_FILE}: {e}")
+
+
+def save_readline_history():
+    try:
+        readline.write_history_file(READLINE_HISTORY_FILE)
+    except OSError as e:
+        print(f"Warning: Could not write readline history file {READLINE_HISTORY_FILE}: {e}")
+
+def store_command_embeddings(command: str, output: Any, state: ShellState):
+    if not chroma_client or not state.embedding_model or not state.embedding_provider:
+        if not chroma_client: print("Warning: ChromaDB client not available for embeddings.", file=sys.stderr)
+        return
+    if not command and not output:
+        return
+
+    try:
+        output_str = str(output) if output else ""
+        if not command and not output_str: return # Avoid empty embeddings
+
+        texts_to_embed = [command, output_str]
+
+        embeddings = get_embeddings(
+            texts_to_embed,
+            state.embedding_model,
+            state.embedding_provider,
+        )
+
+        if not embeddings or len(embeddings) != 2:
+            print(f"Warning: Failed to generate embeddings for command: {command[:50]}...", file=sys.stderr)
+            return
+
+        timestamp = datetime.now().isoformat()
+        npc_name = state.npc.name if isinstance(state.npc, NPC) else state.npc
+
+        metadata = [
+            {
+                "type": "command", "timestamp": timestamp, "path": state.current_path,
+                "npc": npc_name, "conversation_id": state.conversation_id,
+            },
+            {
+                "type": "response", "timestamp": timestamp, "path": state.current_path,
+                "npc": npc_name, "conversation_id": state.conversation_id,
+            },
+        ]
+
+        collection_name = f"{state.embedding_provider}_{state.embedding_model}_embeddings"
+        try:
+            collection = chroma_client.get_or_create_collection(collection_name)
+            ids = [f"cmd_{timestamp}_{hash(command)}", f"resp_{timestamp}_{hash(output_str)}"]
+
+            collection.add(
+                embeddings=embeddings,
+                documents=texts_to_embed,
+                metadatas=metadata,
+                ids=ids,
+            )
+        except Exception as e:
+            print(f"Warning: Failed to add embeddings to collection '{collection_name}': {e}", file=sys.stderr)
+
+    except Exception as e:
+        print(f"Warning: Failed to store embeddings: {e}", file=sys.stderr)
+
+
+def handle_interactive_command(cmd_parts: List[str], state: ShellState) -> Tuple[ShellState, str]:
+    command_name = cmd_parts[0]
+    print(f"Starting interactive {command_name} session...")
+    try:
+        # CORRECTED: Join all parts into one string to pass to the function.
+        full_command_str = " ".join(cmd_parts)
+        return_code = start_interactive_session(full_command_str)
+        output = f"Interactive {command_name} session ended with return code {return_code}"
+    except Exception as e:
+        output = f"Error starting interactive session {command_name}: {e}"
+    return state, output
+
+def handle_cd_command(cmd_parts: List[str], state: ShellState) -> Tuple[ShellState, str]:
+    original_path = os.getcwd()
+    target_path = cmd_parts[1] if len(cmd_parts) > 1 else os.path.expanduser("~")
+    try:
+        os.chdir(target_path)
+        state.current_path = os.getcwd()
+        output = f"Changed directory to {state.current_path}"
+    except FileNotFoundError:
+        output = colored(f"cd: no such file or directory: {target_path}", "red")
+    except Exception as e:
+        output = colored(f"cd: error changing directory: {e}", "red")
+        os.chdir(original_path) # Revert if error
+
+    return state, output
+
+
+def handle_bash_command(
+    cmd_parts: List[str],
+    cmd_str: str,
+    stdin_input: Optional[str],
+    state: ShellState,
+) -> Tuple[bool, str]:
+    try:
+        process = subprocess.Popen(
+            cmd_parts,
+            stdin=subprocess.PIPE if stdin_input is not None else None,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True,
+            cwd=state.current_path
+        )
+        stdout, stderr = process.communicate(input=stdin_input)
+
+        if process.returncode != 0:
+            return False, stderr.strip() if stderr else f"Command '{cmd_str}' failed with return code {process.returncode}."
+
+        if stderr.strip():
+            print(colored(f"stderr: {stderr.strip()}", "yellow"), file=sys.stderr)
+
+        if cmd_parts[0] in ["ls", "find", "dir"]:
+            return True, format_file_listing(stdout.strip())
+
+        return True, stdout.strip()
+
+    except FileNotFoundError:
+        return False, f"Command not found: {cmd_parts[0]}"
+    except PermissionError:
+        return False, f"Permission denied: {cmd_str}"
+
+def _try_convert_type(value: str) -> Union[str, int, float, bool]:
+    """Helper to convert string values to appropriate types."""
+    if value.lower() in ['true', 'yes']:
+        return True
+    if value.lower() in ['false', 'no']:
+        return False
+    try:
+        return int(value)
+    except (ValueError, TypeError):
+        pass
+    try:
+        return float(value)
+    except (ValueError, TypeError):
+        pass
+    return value
+
+def parse_generic_command_flags(parts: List[str]) -> Tuple[Dict[str, Any], List[str]]:
+    """
+    Parses a list of command parts into a dictionary of keyword arguments and a list of positional arguments.
+    Handles: -f val, --flag val, --flag=val, flag=val, --boolean-flag
+    """
+    parsed_kwargs = {}
+    positional_args = []
+    i = 0
+    while i < len(parts):
+        part = parts[i]
+
+        if part.startswith('--'):
+            key_part = part[2:]
+            if '=' in key_part:
+                key, value = key_part.split('=', 1)
+                parsed_kwargs[key] = _try_convert_type(value)
+            else:
+                # Look ahead for a value
+                if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                    parsed_kwargs[key_part] = _try_convert_type(parts[i + 1])
+                    i += 1 # Consume the value
+                else:
+                    parsed_kwargs[key_part] = True # Boolean flag
+
+        elif part.startswith('-'):
+            key = part[1:]
+            # Look ahead for a value
+            if i + 1 < len(parts) and not parts[i + 1].startswith('-'):
+                parsed_kwargs[key] = _try_convert_type(parts[i + 1])
+                i += 1 # Consume the value
+            else:
+                parsed_kwargs[key] = True # Boolean flag
+
+        elif '=' in part and not part.startswith('-'):
+            key, value = part.split('=', 1)
+            parsed_kwargs[key] = _try_convert_type(value)
+
+        else:
+            positional_args.append(part)
+
+        i += 1
+
+    return parsed_kwargs, positional_args
+
+
+def should_skip_kg_processing(user_input: str, assistant_output: str) -> bool:
+    """Determine if this interaction is too trivial for KG processing"""
+
+    # Skip if user input is very short (less than 10 chars)
+    if len(user_input.strip()) < 10:
+        return True
+
+    simple_bash = {'ls', 'pwd', 'cd', 'mkdir', 'touch', 'rm', 'mv', 'cp'}
+    first_word = user_input.strip().split()[0] if user_input.strip() else ""
+    if first_word in simple_bash:
+        return True
+
+    if len(assistant_output.strip()) < 20:
+        return True
+
+    if "exiting" in assistant_output.lower() or "exited" in assistant_output.lower():
+        return True
+
+    return False
+
+
+
+
+def execute_slash_command(command: str,
+                          stdin_input: Optional[str],
+                          state: ShellState,
+                          stream: bool,
+                          router) -> Tuple[ShellState, Any]:
+    """Executes slash commands using the router or checking NPC/Team jinxs."""
+    all_command_parts = shlex.split(command)
+    command_name = all_command_parts[0].lstrip('/')
+
+    # Handle NPC switching commands
+    if command_name in ['n', 'npc']:
+        npc_to_switch_to = all_command_parts[1] if len(all_command_parts) > 1 else None
+        if npc_to_switch_to and state.team and npc_to_switch_to in state.team.npcs:
+            state.npc = state.team.npcs[npc_to_switch_to]
+            return state, f"Switched to NPC: {npc_to_switch_to}"
+        else:
+            available_npcs = list(state.team.npcs.keys()) if state.team else []
+            return state, colored(f"NPC '{npc_to_switch_to}' not found. Available NPCs: {', '.join(available_npcs)}", "red")
+
+    # Check router commands first
+    handler = router.get_route(command_name)
+    if handler:
+        parsed_flags, positional_args = parse_generic_command_flags(all_command_parts[1:])
+        normalized_flags = normalize_and_expand_flags(parsed_flags)
+
+        handler_kwargs = {
+            'stream': stream,
+            'team': state.team,
+            'messages': state.messages,
+            'api_url': state.api_url,
+            'api_key': state.api_key,
+            'stdin_input': stdin_input,
+            'positional_args': positional_args,
+            'plonk_context': state.team.shared_context.get('PLONK_CONTEXT') if state.team and hasattr(state.team, 'shared_context') else None,
+
+            # Default chat model/provider
+            'model': state.npc.model if isinstance(state.npc, NPC) and state.npc.model else state.chat_model,
+            'provider': state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else state.chat_provider,
+            'npc': state.npc,
+
+            # All other specific defaults
+            'sprovider': state.search_provider,
+            'emodel': state.embedding_model,
+            'eprovider': state.embedding_provider,
+            'igmodel': state.image_gen_model,
+            'igprovider': state.image_gen_provider,
+            'vgmodel': state.video_gen_model,
+            'vgprovider': state.video_gen_provider,
+            'vmodel': state.vision_model,
+            'vprovider': state.vision_provider,
+            'rmodel': state.reasoning_model,
+            'rprovider': state.reasoning_provider,
+        }
+
+        if len(normalized_flags) > 0:
+            kwarg_part = 'with kwargs: \n -' + '\n -'.join(f'{key}={item}' for key, item in normalized_flags.items())
+        else:
+            kwarg_part = ''
+
+        render_markdown(f'- Calling {command_name} handler {kwarg_part} ')
+
+        # Handle model/provider inference
+        if 'model' in normalized_flags and 'provider' not in normalized_flags:
+            inferred_provider = lookup_provider(normalized_flags['model'])
+            if inferred_provider:
+                handler_kwargs['provider'] = inferred_provider
+                print(colored(f"Info: Inferred provider '{inferred_provider}' for model '{normalized_flags['model']}'.", "cyan"))
+
+        if 'provider' in normalized_flags and 'model' not in normalized_flags:
+            current_provider = lookup_provider(handler_kwargs['model'])
+            if current_provider != normalized_flags['provider']:
+                prov = normalized_flags['provider']
+                print(f'Please specify a model for the provider: {prov}')
+
+        handler_kwargs.update(normalized_flags)
+
+        try:
+            result_dict = handler(command=command,
+                                  **handler_kwargs)
+            if isinstance(result_dict, dict):
+                state.messages = result_dict.get("messages", state.messages)
+                return state, result_dict
+            else:
+                return state, result_dict
+        except Exception as e:
+            import traceback
+            print(f"Error executing slash command '{command_name}':", file=sys.stderr)
+            traceback.print_exc()
+            return state, colored(f"Error executing slash command '{command_name}': {e}", "red")
+
+    # Check for jinxs in active NPC
+    active_npc = state.npc if isinstance(state.npc, NPC) else None
+    jinx_to_execute = None
+    executor = None
+
+    if active_npc and hasattr(active_npc, 'jinxs_dict') and command_name in active_npc.jinxs_dict:
+        jinx_to_execute = active_npc.jinxs_dict[command_name]
+        executor = active_npc
+    elif state.team and hasattr(state.team, 'jinxs_dict') and command_name in state.team.jinxs_dict:
+        jinx_to_execute = state.team.jinxs_dict[command_name]
+        executor = state.team
+    if jinx_to_execute:
+        args = all_command_parts[1:] # Fix: use all_command_parts instead of command_parts
+        try:
+            # Create input dictionary from args based on jinx inputs
+            input_values = {}
+            if hasattr(jinx_to_execute, 'inputs') and jinx_to_execute.inputs:
+                for i, input_name in enumerate(jinx_to_execute.inputs):
+                    if i < len(args):
+                        input_values[input_name] = args[i]
+
+            # Execute the jinx with proper parameters
+            if isinstance(executor, NPC):
+                jinx_output = jinx_to_execute.execute(
+                    input_values=input_values,
+                    jinxs_dict=executor.jinxs_dict if hasattr(executor, 'jinxs_dict') else {},
+                    npc=executor,
+                    messages=state.messages
+                )
+            else: # Team executor
+                jinx_output = jinx_to_execute.execute(
+                    input_values=input_values,
+                    jinxs_dict=executor.jinxs_dict if hasattr(executor, 'jinxs_dict') else {},
+                    npc=active_npc or state.npc,
+                    messages=state.messages
+                )
+            if isinstance(jinx_output, dict) and 'messages' in jinx_output:
+                state.messages = jinx_output['messages']
+                return state, str(jinx_output.get('output', jinx_output))
+            elif isinstance(jinx_output, dict):
+                return state, str(jinx_output.get('output', jinx_output))
+            else:
+                return state, jinx_output
+
+        except Exception as e:
+            import traceback
+            print(f"Error executing jinx '{command_name}':", file=sys.stderr)
+            traceback.print_exc()
+            return state, colored(f"Error executing jinx '{command_name}': {e}", "red")
+    if state.team and command_name in state.team.npcs:
+        new_npc = state.team.npcs[command_name]
+        state.npc = new_npc
+        return state, f"Switched to NPC: {new_npc.name}"
+
+    return state, colored(f"Unknown slash command, jinx, or NPC: {command_name}", "red")
+
+def process_pipeline_command(
+    cmd_segment: str,
+    stdin_input: Optional[str],
+    state: ShellState,
+    stream_final: bool,
+    review = True,
+    router = None,
+) -> Tuple[ShellState, Any]:
+    '''
+    Processing command
+    '''
+
+    if not cmd_segment:
+        return state, stdin_input
+
+    available_models_all = get_locally_available_models(state.current_path)
+    available_models_all_list = [item for key, item in available_models_all.items()]
+
+    model_override, provider_override, cmd_cleaned = get_model_and_provider(
+        cmd_segment, available_models_all_list
+    )
+    cmd_to_process = cmd_cleaned.strip()
+    if not cmd_to_process:
+        return state, stdin_input
+
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+
+    exec_model = model_override or npc_model or state.chat_model
+    exec_provider = provider_override or npc_provider or state.chat_provider
+
+    if cmd_to_process.startswith("/"):
+        return execute_slash_command(cmd_to_process,
+                                     stdin_input,
+                                     state,
+                                     stream_final,
+                                     router)
+
+    cmd_parts = parse_command_safely(cmd_to_process)
+    if not cmd_parts:
+        return state, stdin_input
+
+    command_name = cmd_parts[0]
+
+    if command_name == "cd":
+        return handle_cd_command(cmd_parts, state)
+
+    if command_name in interactive_commands:
+        return handle_interactive_command(cmd_parts, state)
+    if command_name in TERMINAL_EDITORS:
+        print(f"Starting interactive editor: {command_name}...")
+        full_command_str = " ".join(cmd_parts)
+        output = open_terminal_editor(full_command_str)
+        return state, output
+
+    if validate_bash_command(cmd_parts):
+        success, result = handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
+        if success:
+            return state, result
+        else:
+            print(colored(f"Bash command failed: {result}. Asking LLM for a fix...", "yellow"), file=sys.stderr)
+            fixer_prompt = f"The command '{cmd_to_process}' failed with the error: '{result}'. Provide the correct command."
+            response = execute_llm_command(
+                fixer_prompt,
+                model=exec_model,
+                provider=exec_provider,
+                npc=state.npc,
+                stream=stream_final,
+                messages=state.messages
+            )
+            state.messages = response['messages']
+            return state, response['response']
+    else:
+        full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
+        path_cmd = 'The current working directory is: ' + state.current_path
+        ls_files = 'Files in the current directory (full paths):\n' + "\n".join([os.path.join(state.current_path, f) for f in os.listdir(state.current_path)]) if os.path.exists(state.current_path) else 'No files found in the current directory.'
+        platform_info = f"Platform: {platform.system()} {platform.release()} ({platform.machine()})"
+        info = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n'
+        state.messages.append({'role':'user', 'content':full_llm_cmd})
+
+
+        llm_result = check_llm_command(
+            full_llm_cmd,
+            model=exec_model,
+            provider=exec_provider,
+            api_url=state.api_url,
+            api_key=state.api_key,
+            npc=state.npc,
+            team=state.team,
+            messages=state.messages,
+            images=state.attachments,
+            stream=stream_final,
+            context=info,
+        )
+        #
+
+        if not review:
+            if isinstance(llm_result, dict):
+                state.messages = llm_result.get("messages", state.messages)
+                output = llm_result.get("output")
+                return state, output
+            else:
+                return state, llm_result
+
+        else:
+            return review_and_iterate_command(
+                original_command=full_llm_cmd,
+                initial_result=llm_result,
+                state=state,
+                exec_model=exec_model,
+                exec_provider=exec_provider,
+                stream_final=stream_final,
+                info=info
+            )
+def review_and_iterate_command(
+    original_command: str,
+    initial_result: Any,
+    state: ShellState,
+    exec_model: str,
+    exec_provider: str,
+    stream_final: bool,
+    info: str,
+    max_iterations: int = 2
+) -> Tuple[ShellState, Any]:
+    """
+    Simple iteration on LLM command result to improve quality.
+    """
+
+    # Extract current state
+    if isinstance(initial_result, dict):
+        current_output = initial_result.get("output")
+        current_messages = initial_result.get("messages", state.messages)
+    else:
+        current_output = initial_result
+        current_messages = state.messages
+
+    # Simple refinement prompt
+    refinement_prompt = f"""
+    The previous response to "{original_command}" was:
+    {current_output}
+
+    Please review and improve this response if needed. Provide a better, more complete answer.
+    """
+
+    # Iterate with check_llm_command
+    refined_result = check_llm_command(
+        refinement_prompt,
+        model=exec_model,
+        provider=exec_provider,
+        api_url=state.api_url,
+        api_key=state.api_key,
+        npc=state.npc,
+        team=state.team,
+        messages=current_messages,
+        images=state.attachments,
+        stream=stream_final,
+        context=info,
+    )
+
+    # Update state and return
+    if isinstance(refined_result, dict):
+        state.messages = refined_result.get("messages", current_messages)
+        return state, refined_result.get("output", current_output)
+    else:
+        state.messages = current_messages
+        return state, refined_result
+def check_mode_switch(command:str , state: ShellState):
+    if command in ['/cmd', '/agent', '/chat',]:
+        state.current_mode = command[1:]
+        return True, state
+    return False, state
+
+def execute_command(
+    command: str,
+    state: ShellState,
+    review = True,
+    router = None,
+) -> Tuple[ShellState, Any]:
+
+    if not command.strip():
+        return state, ""
+    mode_change, state = check_mode_switch(command, state)
+    if mode_change:
+        return state, 'Mode changed.'
+
+    original_command_for_embedding = command
+    commands = split_by_pipes(command)
+    stdin_for_next = None
+    final_output = None
+    current_state = state
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+    active_model = npc_model or state.chat_model
+    active_provider = npc_provider or state.chat_provider
+    if state.current_mode == 'agent':
+        print('# of parsed commands: ', len(commands))
+        print('Commands:', '\n'.join(commands))
+        for i, cmd_segment in enumerate(commands):
+            render_markdown(f'- executing command {i+1}/{len(commands)}')
+            is_last_command = (i == len(commands) - 1)
+            stream_this_segment = state.stream_output and is_last_command
+            try:
+                current_state, output = process_pipeline_command(
+                    cmd_segment.strip(),
+                    stdin_for_next,
+                    current_state,
+                    stream_final=stream_this_segment,
+                    review=review,
+                    router= router
+                )
+                if is_last_command:
+                    return current_state, output
+                if isinstance(output, str):
+                    stdin_for_next = output
+                elif not isinstance(output, str):
+                    try:
+                        if stream_this_segment:
+                            full_stream_output = print_and_process_stream_with_markdown(output,
+                                                                                        state.npc.model,
+                                                                                        state.npc.provider, show=True)
+                            stdin_for_next = full_stream_output
+                            if is_last_command:
+                                final_output = full_stream_output
+                    except:
+                        if output is not None: # Try converting other types to string
+                            try:
+                                stdin_for_next = str(output)
+                            except Exception:
+                                print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
+                                stdin_for_next = None
+                        else: # Output was None
+                            stdin_for_next = None
+            except Exception as pipeline_error:
+                import traceback
+                traceback.print_exc()
+                error_msg = colored(f"Error in pipeline stage {i+1} ('{cmd_segment[:50]}...'): {pipeline_error}", "red")
+                return current_state, error_msg
+
+        if final_output is not None and isinstance(final_output,str):
+            store_command_embeddings(original_command_for_embedding, final_output, current_state)
+
+        return current_state, final_output
+
+
+    elif state.current_mode == 'chat':
+        # Only treat as bash if it looks like a shell command (starts with known command or is a slash command)
+        cmd_parts = parse_command_safely(command)
+        is_probably_bash = (
+            cmd_parts
+            and (
+                cmd_parts[0] in interactive_commands
+                or cmd_parts[0] in BASH_COMMANDS
+                or command.strip().startswith("./")
+                or command.strip().startswith("/")
+            )
+        )
+        if is_probably_bash:
+            try:
+                command_name = cmd_parts[0]
+                if command_name in interactive_commands:
+                    return handle_interactive_command(cmd_parts, state)
+                elif command_name == "cd":
+                    return handle_cd_command(cmd_parts, state)
+                else:
+                    try:
+                        bash_state, bash_output = handle_bash_command(cmd_parts, command, None, state)
+                        return state, bash_output
+                    except Exception as bash_err:
+                        return state, colored(f"Bash execution failed: {bash_err}", "red")
+            except Exception:
+                pass # Fall through to LLM
+
+        # Otherwise, treat as chat (LLM)
+        response = get_llm_response(
+            command,
+            model=active_model,
+            provider=active_provider,
+            npc=state.npc,
+            stream=state.stream_output,
+            messages=state.messages
+        )
+        state.messages = response['messages']
+        return state, response['response']
+
+    elif state.current_mode == 'cmd':
+
+        response = execute_llm_command(command,
+                                       model=active_model,
+                                       provider=active_provider,
+                                       npc = state.npc,
+                                       stream = state.stream_output,
+                                       messages = state.messages)
+        state.messages = response['messages']
+        return state, response['response']
+
+def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
+
+    setup_npcsh_config()
+
+    db_path = os.getenv("NPCSH_DB_PATH", HISTORY_DB_DEFAULT_PATH)
+    db_path = os.path.expanduser(db_path)
+    os.makedirs(os.path.dirname(db_path), exist_ok=True)
+    command_history = CommandHistory(db_path)
+
+
+    if not is_npcsh_initialized():
+        print("Initializing NPCSH...")
+        initialize_base_npcs_if_needed(db_path)
+        print("NPCSH initialization complete. Restart or source ~/.npcshrc.")
+
+
+
+    try:
+        history_file = setup_readline()
+        atexit.register(save_readline_history)
+        atexit.register(command_history.close)
+    except:
+        pass
+
+    project_team_path = os.path.abspath(PROJECT_NPC_TEAM_PATH)
+    global_team_path = os.path.expanduser(DEFAULT_NPC_TEAM_PATH)
+    team_dir = None
+    default_forenpc_name = None
+
+    if os.path.exists(project_team_path):
+        team_dir = project_team_path
+        default_forenpc_name = "forenpc"
+    else:
+        if not os.path.exists('.npcsh_global'):
+            resp = input(f"No npc_team found in {os.getcwd()}. Create a new team here? [Y/n]: ").strip().lower()
+            if resp in ("", "y", "yes"):
+                team_dir = project_npc_team_path = project_team_path
+                os.makedirs(team_dir, exist_ok=True)
+                default_forenpc_name = "forenpc"
+                forenpc_directive = input(
+                    f"Enter a primary directive for {default_forenpc_name} (default: 'You are the forenpc of the team...'): "
+                ).strip() or "You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests."
+                forenpc_model = input("Enter a model for your forenpc (default: llama3.2): ").strip() or "llama3.2"
+                forenpc_provider = input("Enter a provider for your forenpc (default: ollama): ").strip() or "ollama"
+
+                with open(os.path.join(team_dir, f"{default_forenpc_name}.npc"), "w") as f:
+                    yaml.dump({
+                        "name": default_forenpc_name, "primary_directive": forenpc_directive,
+                        "model": forenpc_model, "provider": forenpc_provider
+                    }, f)
+
+                ctx_path = os.path.join(team_dir, "team.ctx")
+                folder_context = input("Enter a short description for this project/team (optional): ").strip()
+                team_ctx_data = {
+                    "forenpc": default_forenpc_name, "model": forenpc_model,
+                    "provider": forenpc_provider, "api_key": None, "api_url": None,
+                    "context": folder_context if folder_context else None
+                }
+                use_jinxs = input("Use global jinxs folder (g) or copy to this project (c)? [g/c, default: g]: ").strip().lower()
+                if use_jinxs == "c":
+                    global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
+                    if os.path.exists(global_jinxs_dir):
+                        shutil.copytree(global_jinxs_dir, team_dir, dirs_exist_ok=True)
+                else:
+                    team_ctx_data["use_global_jinxs"] = True
+
+                with open(ctx_path, "w") as f:
+                    yaml.dump(team_ctx_data, f)
+            else:
+                render_markdown('From now on, npcsh will assume you will use the global team when activating from this folder. \n If you change your mind and want to initialize a team, use /init from within npcsh, `npc init` or `rm .npcsh_global` from the current working directory.')
+                with open(".npcsh_global", "w") as f:
+                    pass
+                team_dir = global_team_path
+                default_forenpc_name = "sibiji"
+        elif os.path.exists(global_team_path):
+            team_dir = global_team_path
+            default_forenpc_name = "sibiji"
+
+
+    team_ctx = {}
+    for filename in os.listdir(team_dir):
+        if filename.endswith(".ctx"):
+            try:
+                with open(os.path.join(team_dir, filename), "r") as f:
+                    team_ctx = yaml.safe_load(f) or {}
+                break
+            except Exception as e:
+                print(f"Warning: Could not load context file {filename}: {e}")
+
+    forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
+    #render_markdown(f"- Using forenpc: {forenpc_name}")
+
+    if team_ctx.get("use_global_jinxs", False):
+        jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
+    else:
+        jinxs_dir = os.path.join(team_dir, "jinxs")
+
+    jinxs_list = load_jinxs_from_directory(jinxs_dir)
+    jinxs_dict = {jinx.jinx_name: jinx for jinx in jinxs_list}
+
+    forenpc_obj = None
+    forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
+
+
+    #render_markdown('- Loaded team context'+ json.dumps(team_ctx, indent=2))
+
+
+
+    if os.path.exists(forenpc_path):
+        forenpc_obj = NPC(file = forenpc_path,
+                          jinxs=jinxs_list,
+                          db_conn=command_history.engine)
+        if forenpc_obj.model is None:
+            forenpc_obj.model = team_ctx.get("model", initial_state.chat_model)
+        if forenpc_obj.provider is None:
+            forenpc_obj.provider = team_ctx.get('provider', initial_state.chat_provider)
+
+    else:
+        print(f"Warning: Forenpc file '{forenpc_name}.npc' not found in {team_dir}.")
+
+    team = Team(team_path=team_dir,
+                forenpc=forenpc_obj,
+                jinxs=jinxs_dict)
+
+    for npc_name, npc_obj in team.npcs.items():
+        if not npc_obj.model:
+            npc_obj.model = initial_state.chat_model
+        if not npc_obj.provider:
+            npc_obj.provider = initial_state.chat_provider
+
+    # Also apply to the forenpc specifically
+    if team.forenpc and isinstance(team.forenpc, NPC):
+        if not team.forenpc.model:
+            team.forenpc.model = initial_state.chat_model
+        if not team.forenpc.provider:
+            team.forenpc.provider = initial_state.chat_provider
+    team_name_from_ctx = team_ctx.get("name")
+    if team_name_from_ctx:
+        team.name = team_name_from_ctx
+    elif team_dir and os.path.basename(team_dir) != 'npc_team':
+        team.name = os.path.basename(team_dir)
+    else:
+        team.name = "global_team" # fallback for ~/.npcsh/npc_team
+
+    return command_history, team, forenpc_obj
+
+
+
+def process_result(
+    user_input: str,
+    result_state: ShellState,
+    output: Any,
+    command_history: CommandHistory,
+
+):
+
+    team_name = result_state.team.name if result_state.team else "__none__"
+    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
+
+    # Determine the actual NPC object to use for this turn's operations
+    active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
+        name="default",
+        model=result_state.chat_model,
+        provider=result_state.chat_provider,
+        db_conn=command_history.engine)
+    save_conversation_message(
+        command_history,
+        result_state.conversation_id,
+        "user",
+        user_input,
+        wd=result_state.current_path,
+        model=active_npc.model,
+        provider=active_npc.provider,
+        npc=npc_name,
+        team=team_name,
+        attachments=result_state.attachments,
+    )
+    result_state.attachments = None
+
+    final_output_str = None
+    output_content = output.get('output') if isinstance(output, dict) else output
+    model_for_stream = output.get('model', active_npc.model) if isinstance(output, dict) else active_npc.model
+    provider_for_stream = output.get('provider', active_npc.provider) if isinstance(output, dict) else active_npc.provider
+
+    print('\n')
+    if user_input == '/help':
+        render_markdown(output.get('output'))
+    elif result_state.stream_output:
+
+
+        final_output_str = print_and_process_stream_with_markdown(output_content,
+                                                                  model_for_stream,
+                                                                  provider_for_stream,
+                                                                  show=True)
+
+    elif output_content is not None:
+        final_output_str = str(output_content)
+        render_markdown(final_output_str)
+
+    if final_output_str:
+        if result_state.messages:
+            if result_state.messages[-1].get("role") != "assistant":
+                result_state.messages.append({"role": "assistant",
+                                              "content": final_output_str})
+        save_conversation_message(
+            command_history,
+            result_state.conversation_id,
+            "assistant",
+            final_output_str,
+            wd=result_state.current_path,
+            model=active_npc.model,
+            provider=active_npc.provider,
+            npc=npc_name,
+            team=team_name,
+        )
+
+        conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
+        engine = command_history.engine
+
+
+        if result_state.build_kg:
+            try:
+                if not should_skip_kg_processing(user_input, final_output_str):
+
+                    npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
+                    evolved_npc_kg, _ = kg_evolve_incremental(
+                        existing_kg=npc_kg,
+                        new_content_text=conversation_turn_text,
+                        model=active_npc.model,
+                        provider=active_npc.provider,
+                        get_concepts=True,
+                        link_concepts_facts = False,
+                        link_concepts_concepts = False,
+                        link_facts_facts = False,
+
+
+                    )
+                    save_kg_to_db(engine,
+                                  evolved_npc_kg,
+                                  team_name,
+                                  npc_name,
+                                  result_state.current_path)
+            except Exception as e:
+                print(colored(f"Error during real-time KG evolution: {e}", "red"))
+
+        # --- Part 3: Periodic Team Context Suggestions ---
+        result_state.turn_count += 1
+
+        if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
+            print(colored("\nChecking for potential team improvements...", "cyan"))
+            try:
+                summary = breathe(messages=result_state.messages[-20:],
+                                  npc=active_npc)
+                characterization = summary.get('output')
+
+                if characterization and result_state.team:
+                    team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+                    ctx_data = {}
+                    if os.path.exists(team_ctx_path):
+                        with open(team_ctx_path, 'r') as f:
+                            ctx_data = yaml.safe_load(f) or {}
+                    current_context = ctx_data.get('context', '')
+
+                    prompt = f"""Based on this characterization: {characterization},
+
+                    suggest changes (additions, deletions, edits) to the team's context.
+                    Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.
+
+                    Current Context: "{current_context}".
+
+                    Respond with JSON: {{"suggestion": "Your sentence."
+                    }}"""
+                    response = get_llm_response(prompt, npc=active_npc, format="json")
+                    suggestion = response.get("response", {}).get("suggestion")
+
+                    if suggestion:
+                        new_context = (current_context + " " + suggestion).strip()
+                        print(colored(f"{result_state.npc.name} suggests updating team context:", "yellow"))
+                        print(f" - OLD: {current_context}\n + NEW: {new_context}")
+                        if input("Apply? [y/N]: ").strip().lower() == 'y':
+                            ctx_data['context'] = new_context
+                            with open(team_ctx_path, 'w') as f:
+                                yaml.dump(ctx_data, f)
+                            print(colored("Team context updated.", "green"))
+                        else:
+                            print("Suggestion declined.")
+            except Exception as e:
+                import traceback
+                print(colored(f"Could not generate team suggestions: {e}", "yellow"))
+                traceback.print_exc()
+
 initial_state = ShellState(
     conversation_id=start_new_conversation(),
     stream_output=NPCSH_STREAM_OUTPUT,