npcsh 0.1.2__py3-none-any.whl → 1.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (143) hide show
  1. npcsh/_state.py +3508 -0
  2. npcsh/alicanto.py +65 -0
  3. npcsh/build.py +291 -0
  4. npcsh/completion.py +206 -0
  5. npcsh/config.py +163 -0
  6. npcsh/corca.py +50 -0
  7. npcsh/execution.py +185 -0
  8. npcsh/guac.py +46 -0
  9. npcsh/mcp_helpers.py +357 -0
  10. npcsh/mcp_server.py +299 -0
  11. npcsh/npc.py +323 -0
  12. npcsh/npc_team/alicanto.npc +2 -0
  13. npcsh/npc_team/alicanto.png +0 -0
  14. npcsh/npc_team/corca.npc +12 -0
  15. npcsh/npc_team/corca.png +0 -0
  16. npcsh/npc_team/corca_example.png +0 -0
  17. npcsh/npc_team/foreman.npc +7 -0
  18. npcsh/npc_team/frederic.npc +6 -0
  19. npcsh/npc_team/frederic4.png +0 -0
  20. npcsh/npc_team/guac.png +0 -0
  21. npcsh/npc_team/jinxs/code/python.jinx +11 -0
  22. npcsh/npc_team/jinxs/code/sh.jinx +34 -0
  23. npcsh/npc_team/jinxs/code/sql.jinx +16 -0
  24. npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
  25. npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
  26. npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
  27. npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
  28. npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
  29. npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
  30. npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
  31. npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
  32. npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
  33. npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
  34. npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
  35. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  36. npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
  37. npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
  38. npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
  39. npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
  40. npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
  41. npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
  42. npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
  43. npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
  44. npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
  45. npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
  46. npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
  47. npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
  48. npcsh/npc_team/jinxs/utils/search.jinx +130 -0
  49. npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
  50. npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
  51. npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
  52. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  53. npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
  54. npcsh/npc_team/kadiefa.npc +3 -0
  55. npcsh/npc_team/kadiefa.png +0 -0
  56. npcsh/npc_team/npcsh.ctx +18 -0
  57. npcsh/npc_team/npcsh_sibiji.png +0 -0
  58. npcsh/npc_team/plonk.npc +2 -0
  59. npcsh/npc_team/plonk.png +0 -0
  60. npcsh/npc_team/plonkjr.npc +2 -0
  61. npcsh/npc_team/plonkjr.png +0 -0
  62. npcsh/npc_team/sibiji.npc +3 -0
  63. npcsh/npc_team/sibiji.png +0 -0
  64. npcsh/npc_team/spool.png +0 -0
  65. npcsh/npc_team/yap.png +0 -0
  66. npcsh/npcsh.py +296 -112
  67. npcsh/parsing.py +118 -0
  68. npcsh/plonk.py +54 -0
  69. npcsh/pti.py +54 -0
  70. npcsh/routes.py +139 -0
  71. npcsh/spool.py +48 -0
  72. npcsh/ui.py +199 -0
  73. npcsh/wander.py +62 -0
  74. npcsh/yap.py +50 -0
  75. npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
  76. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  77. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
  78. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
  79. npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
  80. npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
  81. npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
  82. npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
  83. npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
  84. npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
  85. npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
  86. npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
  87. npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
  88. npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
  89. npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
  90. npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
  91. npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
  92. npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
  93. npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
  94. npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
  95. npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
  96. npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
  97. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
  98. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
  99. npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
  100. npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
  101. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
  102. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  103. npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
  104. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
  105. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
  106. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
  107. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
  108. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
  109. npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
  110. npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
  111. npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
  112. npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
  113. npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
  114. npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
  115. npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
  116. npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
  117. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
  118. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
  119. npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
  120. npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
  121. npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
  122. npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
  123. npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
  124. npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
  125. npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
  126. npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
  127. npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
  128. npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
  129. npcsh-1.1.13.dist-info/METADATA +522 -0
  130. npcsh-1.1.13.dist-info/RECORD +135 -0
  131. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
  132. npcsh-1.1.13.dist-info/entry_points.txt +9 -0
  133. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
  134. npcsh/command_history.py +0 -81
  135. npcsh/helpers.py +0 -36
  136. npcsh/llm_funcs.py +0 -295
  137. npcsh/main.py +0 -5
  138. npcsh/modes.py +0 -343
  139. npcsh/npc_compiler.py +0 -124
  140. npcsh-0.1.2.dist-info/METADATA +0 -99
  141. npcsh-0.1.2.dist-info/RECORD +0 -14
  142. npcsh-0.1.2.dist-info/entry_points.txt +0 -2
  143. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
npcsh/_state.py ADDED
@@ -0,0 +1,3508 @@
1
+ # Standard library imports
2
+ import atexit
3
+ from dataclasses import dataclass, field
4
+ from datetime import datetime
5
+ import filecmp
6
+ import inspect
7
+ import logging
8
+ import os
9
+ from pathlib import Path
10
+ import platform
11
+ import re
12
+ import select
13
+ import shlex
14
+ import shutil
15
+ import signal
16
+ import sqlite3
17
+ import subprocess
18
+ import sys
19
+ import time
20
+ import textwrap
21
+ from typing import Dict, List, Any, Tuple, Union, Optional, Callable
22
+ import yaml
23
+
24
+ # Setup debug logging if NPCSH_DEBUG is set
25
+ def _setup_debug_logging():
26
+ if os.environ.get("NPCSH_DEBUG", "0") == "1":
27
+ logging.basicConfig(
28
+ level=logging.DEBUG,
29
+ format='%(asctime)s [%(name)s] %(levelname)s: %(message)s',
30
+ datefmt='%H:%M:%S'
31
+ )
32
+ # Set specific loggers to DEBUG
33
+ logging.getLogger("npcsh.state").setLevel(logging.DEBUG)
34
+ logging.getLogger("npcpy.llm_funcs").setLevel(logging.DEBUG)
35
+ logging.getLogger("npcsh.state").debug("Debug logging enabled via NPCSH_DEBUG=1")
36
+
37
+ _setup_debug_logging()
38
+
39
+ # Platform-specific imports
40
+ try:
41
+ import pty
42
+ import tty
43
+ import termios
44
+ import readline
45
+ except ImportError:
46
+ readline = None
47
+ pty = None
48
+ tty = None
49
+ termios = None
50
+
51
+ # Optional dependencies
52
+ try:
53
+ import chromadb
54
+ except ImportError:
55
+ chromadb = None
56
+
57
+ # Third-party imports
58
+ from colorama import Fore, Back, Style
59
+ from litellm import RateLimitError
60
+ from termcolor import colored
61
+
62
+ # npcpy imports
63
+ from npcpy.data.load import load_file_contents
64
+ from npcpy.data.web import search_web
65
+ from npcpy.gen.embeddings import get_embeddings
66
+ from npcpy.llm_funcs import (
67
+ check_llm_command,
68
+ get_llm_response,
69
+ execute_llm_command,
70
+ breathe,
71
+ )
72
+ from npcpy.memory.command_history import (
73
+ CommandHistory,
74
+ start_new_conversation,
75
+ save_conversation_message,
76
+ load_kg_from_db,
77
+ save_kg_to_db,
78
+ )
79
+ from npcpy.memory.knowledge_graph import kg_evolve_incremental
80
+ from npcpy.memory.search import execute_rag_command, execute_brainblast_command
81
+ from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog
82
+ from npcpy.npc_sysenv import (
83
+ print_and_process_stream_with_markdown,
84
+ render_markdown,
85
+ get_model_and_provider,
86
+ get_locally_available_models,
87
+ lookup_provider
88
+ )
89
+ from npcpy.tools import auto_tools
90
+
91
+ # Local module imports
92
+ from .config import (
93
+ VERSION,
94
+ DEFAULT_NPC_TEAM_PATH,
95
+ PROJECT_NPC_TEAM_PATH,
96
+ HISTORY_DB_DEFAULT_PATH,
97
+ READLINE_HISTORY_FILE,
98
+ NPCSH_CHAT_MODEL,
99
+ NPCSH_CHAT_PROVIDER,
100
+ NPCSH_DB_PATH,
101
+ NPCSH_VECTOR_DB_PATH,
102
+ NPCSH_DEFAULT_MODE,
103
+ NPCSH_VISION_MODEL,
104
+ NPCSH_VISION_PROVIDER,
105
+ NPCSH_IMAGE_GEN_MODEL,
106
+ NPCSH_IMAGE_GEN_PROVIDER,
107
+ NPCSH_VIDEO_GEN_MODEL,
108
+ NPCSH_VIDEO_GEN_PROVIDER,
109
+ NPCSH_EMBEDDING_MODEL,
110
+ NPCSH_EMBEDDING_PROVIDER,
111
+ NPCSH_REASONING_MODEL,
112
+ NPCSH_REASONING_PROVIDER,
113
+ NPCSH_STREAM_OUTPUT,
114
+ NPCSH_API_URL,
115
+ NPCSH_SEARCH_PROVIDER,
116
+ NPCSH_BUILD_KG,
117
+ setup_npcsh_config,
118
+ is_npcsh_initialized,
119
+ set_npcsh_initialized,
120
+ set_npcsh_config_value,
121
+ )
122
+ from .ui import SpinnerContext, orange, get_file_color, format_file_listing, wrap_text
123
+ from .parsing import split_by_pipes, parse_command_safely, parse_generic_command_flags
124
+ from .execution import (
125
+ TERMINAL_EDITORS,
126
+ INTERACTIVE_COMMANDS as interactive_commands,
127
+ validate_bash_command,
128
+ handle_bash_command,
129
+ handle_cd_command,
130
+ handle_interactive_command,
131
+ open_terminal_editor,
132
+ list_directory,
133
+ )
134
+ from .completion import setup_readline, save_readline_history, make_completer, get_slash_commands
135
+
136
+
137
+
138
@dataclass
class ShellState:
    """Per-session state for the interactive npcsh shell.

    Bundles the active NPC/team, conversation messages, and the
    model/provider selection for every modality, plus session token
    accounting. Defaults come from the NPCSH_* config constants
    imported at module top.
    """
    npc: Optional[Union[NPC, str]] = None
    team: Optional[Team] = None
    messages: List[Dict[str, Any]] = field(default_factory=list)
    mcp_client: Optional[Any] = None
    conversation_id: Optional[int] = None
    chat_model: str = NPCSH_CHAT_MODEL
    chat_provider: str = NPCSH_CHAT_PROVIDER
    vision_model: str = NPCSH_VISION_MODEL
    vision_provider: str = NPCSH_VISION_PROVIDER
    embedding_model: str = NPCSH_EMBEDDING_MODEL
    embedding_provider: str = NPCSH_EMBEDDING_PROVIDER
    reasoning_model: str = NPCSH_REASONING_MODEL
    reasoning_provider: str = NPCSH_REASONING_PROVIDER
    search_provider: str = NPCSH_SEARCH_PROVIDER
    image_gen_model: str = NPCSH_IMAGE_GEN_MODEL
    image_gen_provider: str = NPCSH_IMAGE_GEN_PROVIDER
    video_gen_model: str = NPCSH_VIDEO_GEN_MODEL
    video_gen_provider: str = NPCSH_VIDEO_GEN_PROVIDER
    current_mode: str = NPCSH_DEFAULT_MODE
    build_kg: bool = NPCSH_BUILD_KG
    api_key: Optional[str] = None
    api_url: Optional[str] = NPCSH_API_URL
    current_path: str = field(default_factory=os.getcwd)
    stream_output: bool = NPCSH_STREAM_OUTPUT
    attachments: Optional[List[Any]] = None
    turn_count: int = 0
    # Token usage tracking
    session_input_tokens: int = 0
    session_output_tokens: int = 0
    session_cost_usd: float = 0.0

    def get_model_for_command(self, model_type: str = "chat"):
        """Return the (model, provider) pair for *model_type*.

        Unknown types fall back to the chat pair.
        """
        pairs = {
            "chat": (self.chat_model, self.chat_provider),
            "vision": (self.vision_model, self.vision_provider),
            "embedding": (self.embedding_model, self.embedding_provider),
            "reasoning": (self.reasoning_model, self.reasoning_provider),
            "image_gen": (self.image_gen_model, self.image_gen_provider),
            "video_gen": (self.video_gen_model, self.video_gen_provider),
        }
        return pairs.get(model_type, (self.chat_model, self.chat_provider))
186
# Maps user-facing shorthand config keys (lowercased before lookup in
# set_npcsh_config_value) to the canonical NPCSH_* environment variable
# names. Keys not present here pass through unchanged.
CONFIG_KEY_MAP = {
    # chat model / provider
    "model": "NPCSH_CHAT_MODEL",
    "chatmodel": "NPCSH_CHAT_MODEL",
    "provider": "NPCSH_CHAT_PROVIDER",
    "chatprovider": "NPCSH_CHAT_PROVIDER",

    # vision
    "vmodel": "NPCSH_VISION_MODEL",
    "visionmodel": "NPCSH_VISION_MODEL",
    "vprovider": "NPCSH_VISION_PROVIDER",
    "visionprovider": "NPCSH_VISION_PROVIDER",

    # embeddings
    "emodel": "NPCSH_EMBEDDING_MODEL",
    "embeddingmodel": "NPCSH_EMBEDDING_MODEL",
    "eprovider": "NPCSH_EMBEDDING_PROVIDER",
    "embeddingprovider": "NPCSH_EMBEDDING_PROVIDER",

    # reasoning
    "rmodel": "NPCSH_REASONING_MODEL",
    "reasoningmodel": "NPCSH_REASONING_MODEL",
    "rprovider": "NPCSH_REASONING_PROVIDER",
    "reasoningprovider": "NPCSH_REASONING_PROVIDER",

    # image generation
    "igmodel": "NPCSH_IMAGE_GEN_MODEL",
    "imagegenmodel": "NPCSH_IMAGE_GEN_MODEL",
    "igprovider": "NPCSH_IMAGE_GEN_PROVIDER",
    "imagegenprovider": "NPCSH_IMAGE_GEN_PROVIDER",

    # video generation
    "vgmodel": "NPCSH_VIDEO_GEN_MODEL",
    "videogenmodel": "NPCSH_VIDEO_GEN_MODEL",
    "vgprovider": "NPCSH_VIDEO_GEN_PROVIDER",
    "videogenprovider": "NPCSH_VIDEO_GEN_PROVIDER",

    # misc settings
    "sprovider": "NPCSH_SEARCH_PROVIDER",
    "mode": "NPCSH_DEFAULT_MODE",
    "stream": "NPCSH_STREAM_OUTPUT",
    "apiurl": "NPCSH_API_URL",
    "buildkg": "NPCSH_BUILD_KG",
}
230
+
231
+
232
def set_npcsh_config_value(key: str, value: str):
    """
    Set NPCSH config values at runtime using shorthand (case-insensitive) or full keys.

    Propagates the value to os.environ, this module's globals, and the
    ShellState class attributes.

    NOTE(review): setattr on the dataclass changes the class attribute,
    but simple defaults are baked into the generated __init__ at class
    creation time — confirm downstream code actually sees the update.
    """
    env_key = CONFIG_KEY_MAP.get(key.lower(), key)
    os.environ[env_key] = value

    # Coerce the raw string into the type the rest of the code expects.
    if env_key in ("NPCSH_STREAM_OUTPUT", "NPCSH_BUILD_KG"):
        parsed_val = value.strip().lower() in ["1", "true", "yes"]
    elif env_key.endswith("_PATH"):
        parsed_val = os.path.expanduser(value)
    else:
        parsed_val = value

    # Keep the module-level constant in sync for code reading globals.
    globals()[env_key] = parsed_val

    field_map = {
        "NPCSH_CHAT_MODEL": "chat_model",
        "NPCSH_CHAT_PROVIDER": "chat_provider",
        "NPCSH_VISION_MODEL": "vision_model",
        "NPCSH_VISION_PROVIDER": "vision_provider",
        "NPCSH_EMBEDDING_MODEL": "embedding_model",
        "NPCSH_EMBEDDING_PROVIDER": "embedding_provider",
        "NPCSH_REASONING_MODEL": "reasoning_model",
        "NPCSH_REASONING_PROVIDER": "reasoning_provider",
        "NPCSH_SEARCH_PROVIDER": "search_provider",
        "NPCSH_IMAGE_GEN_MODEL": "image_gen_model",
        "NPCSH_IMAGE_GEN_PROVIDER": "image_gen_provider",
        "NPCSH_VIDEO_GEN_MODEL": "video_gen_model",
        "NPCSH_VIDEO_GEN_PROVIDER": "video_gen_provider",
        "NPCSH_DEFAULT_MODE": "current_mode",
        "NPCSH_BUILD_KG": "build_kg",
        "NPCSH_API_URL": "api_url",
        "NPCSH_STREAM_OUTPUT": "stream_output",
    }
    target_field = field_map.get(env_key)
    if target_field is not None:
        setattr(ShellState, target_field, parsed_val)
276
def get_npc_path(npc_name: str, db_path: str) -> str:
    """
    Resolve the filesystem path of a compiled NPC.

    Lookup order:
      1. the ``compiled_npcs`` table in the SQLite database at *db_path*
      2. ``./npc_team/<npc_name>.npc`` in the current project
      3. ``~/.npcsh/npc_team/<npc_name>.npc`` globally

    Args:
        npc_name: Name of the NPC to locate.
        db_path: Path to the SQLite history database.

    Returns:
        The source path of the NPC.

    Raises:
        ValueError: If the NPC cannot be found in any location.
    """
    project_npc_team_dir = os.path.abspath("./npc_team")
    project_npc_path = os.path.join(project_npc_team_dir, f"{npc_name}.npc")
    user_npc_team_dir = os.path.expanduser("~/.npcsh/npc_team")
    global_npc_path = os.path.join(user_npc_team_dir, f"{npc_name}.npc")

    try:
        with sqlite3.connect(db_path) as conn:
            cursor = conn.cursor()
            # BUG FIX: the original interpolated npc_name directly into the
            # SQL (injection risk) and, on failure, retried an unquoted —
            # always-invalid — variant. Use a parameterized query instead.
            cursor.execute(
                "SELECT source_path FROM compiled_npcs WHERE name = ?",
                (npc_name,),
            )
            result = cursor.fetchone()
            if result:
                return result[0]
    except Exception as e:
        print(f"Database query error: {e}")

    # Fall back to the filesystem: project-local first, then global.
    if os.path.exists(project_npc_path):
        return project_npc_path
    if os.path.exists(global_npc_path):
        return global_npc_path

    raise ValueError(f"NPC file not found: {npc_name}")
310
+
311
def initialize_base_npcs_if_needed(db_path: str) -> None:
    """
    Seed the user's ~/.npcsh/npc_team directory from the packaged npc_team.

    Ensures the ``compiled_npcs`` table exists, copies bundled .npc/.ctx
    files, jinxs (recursively) and templates into the user's global team
    directory, then marks npcsh as initialized and hooks ~/.npcshrc into
    the shell config. No-op if npcsh is already initialized.

    Args:
        db_path: The path to the SQLite history database file.

    Returns:
        None
    """

    if is_npcsh_initialized():
        return

    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    # Create table used by get_npc_path for name -> source_path lookups.
    cursor.execute(
        """
        CREATE TABLE IF NOT EXISTS compiled_npcs (
            name TEXT PRIMARY KEY,
            source_path TEXT NOT NULL,
            compiled_content TEXT
        )
        """
    )

    # Package directories (bundled assets shipped inside the wheel)
    package_dir = os.path.dirname(__file__)
    package_npc_team_dir = os.path.join(package_dir, "npc_team")

    user_npc_team_dir = os.path.expanduser("~/.npcsh/npc_team")

    user_jinxs_dir = os.path.join(user_npc_team_dir, "jinxs")
    user_templates_dir = os.path.join(user_npc_team_dir, "templates")
    os.makedirs(user_npc_team_dir, exist_ok=True)
    os.makedirs(user_jinxs_dir, exist_ok=True)
    os.makedirs(user_templates_dir, exist_ok=True)

    # Copy .npc and .ctx files. file_has_changed is defined elsewhere in
    # the package — presumably compares source vs destination content.
    for filename in os.listdir(package_npc_team_dir):
        if filename.endswith(".npc"):
            source_path = os.path.join(package_npc_team_dir, filename)
            destination_path = os.path.join(user_npc_team_dir, filename)
            if not os.path.exists(destination_path) or file_has_changed(
                source_path, destination_path
            ):
                shutil.copy2(source_path, destination_path)
                print(f"Copied NPC (unknown) to {destination_path}")
        if filename.endswith(".ctx"):
            source_path = os.path.join(package_npc_team_dir, filename)
            destination_path = os.path.join(user_npc_team_dir, filename)
            if not os.path.exists(destination_path) or file_has_changed(
                source_path, destination_path
            ):
                shutil.copy2(source_path, destination_path)
                print(f"Copied ctx (unknown) to {destination_path}")

    # Copy jinxs directory RECURSIVELY, mirroring the package's subtree.
    package_jinxs_dir = os.path.join(package_npc_team_dir, "jinxs")
    if os.path.exists(package_jinxs_dir):
        for root, dirs, files in os.walk(package_jinxs_dir):
            # Calculate relative path from package_jinxs_dir
            rel_path = os.path.relpath(root, package_jinxs_dir)

            # Create corresponding directory in user jinxs
            if rel_path == '.':
                dest_dir = user_jinxs_dir
            else:
                dest_dir = os.path.join(user_jinxs_dir, rel_path)
                os.makedirs(dest_dir, exist_ok=True)

            # Copy all .jinx files in this directory
            for filename in files:
                if filename.endswith(".jinx"):
                    source_jinx_path = os.path.join(root, filename)
                    destination_jinx_path = os.path.join(dest_dir, filename)

                    if not os.path.exists(destination_jinx_path) or file_has_changed(
                        source_jinx_path, destination_jinx_path
                    ):
                        shutil.copy2(source_jinx_path, destination_jinx_path)
                        print(f"Copied jinx {os.path.join(rel_path, filename)} to {destination_jinx_path}")

    # Copy templates directory (only .npc files, one folder level deep)
    templates = os.path.join(package_npc_team_dir, "templates")
    if os.path.exists(templates):
        for folder in os.listdir(templates):
            os.makedirs(os.path.join(user_templates_dir, folder), exist_ok=True)
            for file in os.listdir(os.path.join(templates, folder)):
                if file.endswith(".npc"):
                    source_template_path = os.path.join(templates, folder, file)

                    destination_template_path = os.path.join(
                        user_templates_dir, folder, file
                    )
                    if not os.path.exists(
                        destination_template_path
                    ) or file_has_changed(
                        source_template_path, destination_template_path
                    ):
                        shutil.copy2(source_template_path, destination_template_path)
                        print(f"Copied template {file} to {destination_template_path}")
    conn.commit()
    conn.close()
    # Record the one-time init and wire ~/.npcshrc into the user's shell rc.
    set_npcsh_initialized()
    add_npcshrc_to_shell_config()
421
+
422
+
423
def get_shell_config_file() -> str:
    """Return the path to the user's shell configuration file.

    Chosen from $SHELL: zsh -> ~/.zshrc; bash -> ~/.bash_profile on
    macOS, ~/.bashrc elsewhere; anything else falls back to ~/.bashrc.
    """
    shell = os.environ.get("SHELL", "")

    if "zsh" in shell:
        rc = "~/.zshrc"
    elif "bash" in shell and platform.system() == "Darwin":
        # macOS login shells read ~/.bash_profile rather than ~/.bashrc.
        rc = "~/.bash_profile"
    else:
        rc = "~/.bashrc"

    return os.path.expanduser(rc)
449
+
450
+
451
def get_team_ctx_path(team_path: str) -> Optional[str]:
    """Return the path of the first .ctx file in *team_path*, or None."""
    for ctx_file in Path(team_path).glob("*.ctx"):
        return str(ctx_file)
    return None
456
+
457
+
458
+ from npcpy.memory.memory_processor import memory_approval_ui
459
+ from npcpy.ft.memory_trainer import MemoryTrainer
460
+ from npcpy.llm_funcs import get_facts
461
+
462
def get_relevant_memories(
    command_history: CommandHistory,
    npc_name: str,
    team_name: str,
    path: str,
    query: Optional[str] = None,
    max_memories: int = 10,
    state: Optional[ShellState] = None
) -> List[Dict]:
    """
    Fetch memories scoped to (npc, team, directory), ranked for relevance.

    Strategy, in order:
      1. return everything when there are at most *max_memories* entries
         and no query was given
      2. case-insensitive substring match against *query*
      3. embedding cosine-similarity ranking, when *state* carries an
         embedding model/provider
      4. fall back to the *max_memories* most recent entries

    Returns:
        A list of memory dicts (possibly empty).
    """
    all_memories = command_history.get_memories_for_scope(
        npc=npc_name,
        team=team_name,
        directory_path=path,
    )

    if not all_memories:
        return []

    if len(all_memories) <= max_memories and not query:
        return all_memories

    def _memory_text(m: Dict) -> str:
        # Prefer the curated final_memory, fall back to the raw initial one.
        return m.get('final_memory') or m.get('initial_memory') or ''

    if query:
        query_lower = query.lower()
        keyword_matches = [
            m for m in all_memories
            if query_lower in _memory_text(m).lower()
        ]
        if keyword_matches:
            return keyword_matches[:max_memories]

    if state and state.embedding_model and state.embedding_provider:
        try:
            # get_embeddings is already imported at module top; the
            # original re-imported it locally for no reason.
            search_text = query if query else "recent context"
            query_embedding = get_embeddings(
                [search_text],
                state.embedding_model,
                state.embedding_provider
            )[0]

            # CONSISTENCY FIX: use the same text-extraction rule as the
            # keyword path (final_memory falling back to initial_memory),
            # so both strategies rank the same content.
            memory_texts = [_memory_text(m) for m in all_memories]
            memory_embeddings = get_embeddings(
                memory_texts,
                state.embedding_model,
                state.embedding_provider
            )

            import numpy as np
            query_norm = np.linalg.norm(query_embedding)
            similarities = [
                np.dot(query_embedding, mem_emb) /
                (query_norm * np.linalg.norm(mem_emb))
                for mem_emb in memory_embeddings
            ]

            sorted_indices = np.argsort(similarities)[::-1]
            return [all_memories[i] for i in sorted_indices[:max_memories]]

        except Exception as e:
            # Best-effort: a broken embedding backend degrades to recency.
            print(colored(
                f"RAG search failed, using recent: {e}",
                "yellow"
            ))

    return all_memories[-max_memories:]
535
+
536
+
537
+
538
def add_npcshrc_to_shell_config() -> None:
    """
    Ensure the user's shell rc file sources ~/.npcshrc.

    Does nothing when NPCSH_INITIALIZED is already exported. Otherwise
    appends a sourcing snippet unless the rc file already references
    ~/.npcshrc via `source` or `.`.
    """
    if os.getenv("NPCSH_INITIALIZED") is not None:
        return

    config_file = get_shell_config_file()
    npcshrc_line = "\n# Source NPCSH configuration\nif [ -f ~/.npcshrc ]; then\n . ~/.npcshrc\nfi\n"

    # a+ lets us read the existing content and append in one handle.
    with open(config_file, "a+") as shell_config:
        shell_config.seek(0)
        existing = shell_config.read()
        already_sourced = (
            "source ~/.npcshrc" in existing or ". ~/.npcshrc" in existing
        )
        if already_sourced:
            print(f".npcshrc already sourced in {config_file}")
        else:
            shell_config.write(npcshrc_line)
            print(f"Added .npcshrc sourcing to {config_file}")
563
+
564
def ensure_npcshrc_exists() -> str:
    """
    Create ~/.npcshrc with default settings if it does not already exist.

    Writes one `export VAR=value` line per setting; an existing file is
    left untouched.

    Returns:
        The path to the .npcshrc file.
    """
    npcshrc_path = os.path.expanduser("~/.npcshrc")
    if not os.path.exists(npcshrc_path):
        with open(npcshrc_path, "w") as npcshrc:
            npcshrc.write("# NPCSH Configuration File\n")
            npcshrc.write("export NPCSH_INITIALIZED=0\n")
            npcshrc.write("export NPCSH_DEFAULT_MODE='agent'\n")
            # BUG FIX: this write was missing its trailing "\n", which
            # glued the next export onto the same line and broke both.
            npcshrc.write("export NPCSH_BUILD_KG=1\n")
            npcshrc.write("export NPCSH_CHAT_PROVIDER='ollama'\n")
            npcshrc.write("export NPCSH_CHAT_MODEL='gemma3:4b'\n")
            npcshrc.write("export NPCSH_REASONING_PROVIDER='ollama'\n")
            npcshrc.write("export NPCSH_REASONING_MODEL='deepseek-r1'\n")
            npcshrc.write("export NPCSH_EMBEDDING_PROVIDER='ollama'\n")
            npcshrc.write("export NPCSH_EMBEDDING_MODEL='nomic-embed-text'\n")
            npcshrc.write("export NPCSH_VISION_PROVIDER='ollama'\n")
            npcshrc.write("export NPCSH_VISION_MODEL='llava7b'\n")
            npcshrc.write(
                "export NPCSH_IMAGE_GEN_MODEL='runwayml/stable-diffusion-v1-5'\n"
            )
            npcshrc.write("export NPCSH_IMAGE_GEN_PROVIDER='diffusers'\n")
            npcshrc.write(
                "export NPCSH_VIDEO_GEN_MODEL='runwayml/stable-diffusion-v1-5'\n"
            )
            npcshrc.write("export NPCSH_VIDEO_GEN_PROVIDER='diffusers'\n")
            npcshrc.write("export NPCSH_API_URL=''\n")
            npcshrc.write("export NPCSH_DB_PATH='~/npcsh_history.db'\n")
            npcshrc.write("export NPCSH_VECTOR_DB_PATH='~/npcsh_chroma.db'\n")
            # BUG FIX: final line also ends with a newline now.
            npcshrc.write("export NPCSH_STREAM_OUTPUT=0\n")
    return npcshrc_path
607
+
608
+
609
+
610
def setup_npcsh_config() -> None:
    """
    Initialize the NPCSH configuration.

    Creates ~/.npcshrc with defaults if it is missing, then makes sure
    the user's shell rc file sources it.

    Returns:
        None
    """

    ensure_npcshrc_exists()
    add_npcshrc_to_shell_config()
624
+
625
+
626
+
627
# Canonical long-form CLI argument names, in PRIORITY ORDER: when a
# shorthand prefix matches several entries, the first match in this list
# wins (see get_argument_help and normalize_and_expand_flags).
CANONICAL_ARGS = [
    'model',
    'provider',
    'output_file',
    'attachments',
    'format',
    'temperature',
    'top_k',
    'top_p',
    'max_tokens',
    'messages',
    'npc',
    'team',
    'height',
    'width',
    'num_frames',
    # modality-specific model/provider shorthands
    'sprovider',
    'emodel',
    'eprovider',
    'igmodel',
    'igprovider',
    'vmodel',
    'vprovider',
    'rmodel',
    'rprovider',
    # alicanto / wander knobs
    'num_npcs',
    'depth',
    'exploration',
    'creativity',
    # serve options
    'port',
    'cors',
    'config_dir',
    'plots_dir',
    'refresh_period',
    'lang',
]
663
+
664
def get_argument_help() -> Dict[str, List[str]]:
    """
    Map each canonical argument to every unambiguous shorthand prefix.

    A prefix qualifies only when exactly one canonical argument starts
    with it, e.g. 'provider' -> ['pr', 'pro', ...] ('p' is excluded if
    another entry also begins with 'p').
    """
    arg_map: Dict[str, List[str]] = {arg: [] for arg in CANONICAL_ARGS}

    for arg in CANONICAL_ARGS:
        for end in range(1, len(arg)):
            prefix = arg[:end]
            owners = [c for c in CANONICAL_ARGS if c.startswith(prefix)]
            # Unambiguous: the prefix resolves to this argument alone.
            if owners == [arg]:
                arg_map[arg].append(prefix)

    return arg_map
686
+
687
+
688
+
689
+
690
def normalize_and_expand_flags(parsed_flags: Dict[str, Any]) -> Dict[str, Any]:
    """
    Expand shorthand flag names into their canonical forms.

    Exact canonical names pass through unchanged. Otherwise the first
    canonical argument that starts with the key wins (priority order of
    CANONICAL_ARGS). Keys matching nothing are kept verbatim. A warning
    is printed when two inputs collapse onto the same canonical name;
    the last value wins.
    """
    normalized: Dict[str, Any] = {}
    for key, value in parsed_flags.items():
        if key in CANONICAL_ARGS:
            if key in normalized:
                print(colored(f"Warning: Argument '{key}' specified multiple times. Using last value.", "yellow"))
            normalized[key] = value
            continue
        first_match = next((arg for arg in CANONICAL_ARGS if arg.startswith(key)), None)
        if first_match is None:
            # Unknown flag: pass through untouched.
            normalized[key] = value
        else:
            if first_match in normalized:
                print(colored(f"Warning: Argument '{first_match}' specified multiple times (via alias '{key}'). Using last value.", "yellow"))
            normalized[first_match] = value
    return normalized
710
+
711
+
712
# Commands treated as plain bash invocations by the shell dispatcher.
# FIX: the original listed "kill" twice (once with the builtins, once with
# the external utilities); the duplicate is removed.
BASH_COMMANDS = [
    "npc",
    "npm",
    "npx",
    "open",
    "alias",
    "bg",
    "bind",
    "break",
    "builtin",
    "case",
    "command",
    "compgen",
    "complete",
    "continue",
    "declare",
    "dirs",
    "disown",
    "echo",
    "enable",
    "eval",
    "exec",
    "exit",
    "export",
    "fc",
    "fg",
    "getopts",
    "hash",
    "history",
    "if",
    "jobs",
    "kill",
    "let",
    "local",
    "logout",
    "ollama",
    "popd",
    "printf",
    "pushd",
    "pwd",
    "read",
    "readonly",
    "return",
    "set",
    "shift",
    "shopt",
    "source",
    "suspend",
    "test",
    "times",
    "trap",
    "type",
    "typeset",
    "ulimit",
    "umask",
    "unalias",
    "unset",
    "until",
    "wait",
    "while",

    # external utilities
    "ls",
    "cp",
    "mv",
    "rm",
    "mkdir",
    "rmdir",
    "touch",
    "cat",
    "less",
    "more",
    "head",
    "tail",
    "grep",
    "find",
    "sed",
    "awk",
    "sort",
    "uniq",
    "wc",
    "diff",
    "chmod",
    "chown",
    "chgrp",
    "ln",
    "tar",
    "gzip",
    "gunzip",
    "zip",
    "unzip",
    "ssh",
    "scp",
    "rsync",
    "wget",
    "curl",
    "ping",
    "netstat",
    "ifconfig",
    "route",
    "traceroute",
    "ps",
    "top",
    "htop",
    "killall",
    "su",
    "sudo",
    "whoami",
    "who",
    "last",
    "finger",
    "uptime",
    "free",
    "df",
    "du",
    "mount",
    "umount",
    "fdisk",
    "mkfs",
    "fsck",
    "dd",
    "cron",
    "at",
    "systemctl",
    "service",
    "journalctl",
    "man",
    "info",
    "whatis",
    "whereis",
    "date",
    "cal",
    "bc",
    "expr",
    "screen",
    "tmux",
    "git",
    "vim",
    "emacs",
    "nano",
    "pip",
]
854
+
855
+
856
+ # interactive_commands imported from .execution
857
+
858
+
859
def start_interactive_session(command: str) -> int:
    """
    Run `command` in an interactive PTY session, proxying stdin/stdout.

    Only works on Unix (requires termios/tty/pty/select/signal). On Windows,
    or when any of those modules is unavailable, prints a message and
    returns 1.

    Args:
        command: Shell command line to execute interactively.

    Returns:
        The child process's exit code, or 1 when sessions are unsupported.
    """
    on_windows = platform.system().lower().startswith("win")
    # Any of these modules may be None when imported conditionally at module
    # load time. FIX: the old guard tested `tty is None` twice; deduplicated.
    if on_windows or termios is None or tty is None or pty is None or select is None or signal is None:
        print("Interactive terminal sessions are not supported on Windows.")
        return 1

    old_tty = termios.tcgetattr(sys.stdin)
    try:
        master_fd, slave_fd = pty.openpty()

        # Run the child in its own session so the whole process group can be
        # signalled if it refuses to exit.
        p = subprocess.Popen(
            command,
            stdin=slave_fd,
            stdout=slave_fd,
            stderr=slave_fd,
            shell=True,
            preexec_fn=os.setsid,
        )

        # Raw mode: forward keystrokes (including control chars) unmodified.
        tty.setraw(sys.stdin.fileno())

        def handle_timeout(signum, frame):
            raise TimeoutError("Process did not terminate in time")

        # Proxy loop: shuttle bytes between our terminal and the child's PTY.
        while p.poll() is None:
            r, w, e = select.select([sys.stdin, master_fd], [], [], 0.1)
            if sys.stdin in r:
                d = os.read(sys.stdin.fileno(), 10240)
                os.write(master_fd, d)
            elif master_fd in r:
                o = os.read(master_fd, 10240)
                if o:
                    os.write(sys.stdout.fileno(), o)
                else:
                    break  # EOF from the child's PTY

        # Give the child 5 seconds to finish cleanly, then escalate
        # SIGTERM -> SIGKILL against its whole process group.
        signal.signal(signal.SIGALRM, handle_timeout)
        signal.alarm(5)
        try:
            p.wait()
        except TimeoutError:
            print("\nProcess did not terminate. Force killing...")
            os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            time.sleep(1)
            if p.poll() is None:
                os.killpg(os.getpgid(p.pid), signal.SIGKILL)
        finally:
            signal.alarm(0)

    finally:
        # Always restore the terminal settings, even if the proxy loop failed.
        termios.tcsetattr(sys.stdin, termios.TCSAFLUSH, old_tty)

    return p.returncode
920
+
921
def validate_bash_command(command_parts: list) -> bool:
    """
    Decide whether a tokenized command should be run directly as bash.

    The check is deliberately permissive: a command is accepted when its
    first token is a known bash builtin/common utility, a registered
    interactive command, a terminal editor, or the special-cased 'which'.
    Argument/flag validation is left to bash itself.

    Args:
        command_parts: Tokenized command (first token is the program name).

    Returns:
        True when the command is recognized for direct execution, else False.
    """
    if not command_parts:
        return False

    base = command_parts[0]
    return (
        base in BASH_COMMANDS
        or base in interactive_commands
        or base in TERMINAL_EDITORS
        or base == 'which'
    )
950
+
951
def is_npcsh_initialized() -> bool:
    """Return True when the NPCSH_INITIALIZED environment flag is set to "1"."""
    flag = os.environ.get("NPCSH_INITIALIZED")
    return flag == "1"
964
+
965
+
966
def execute_set_command(command: str, value: str) -> str:
    """
    Persist a configuration value as an `export` line in ~/.npcshrc.

    Recognized settings are 'model', 'provider' and 'db_path'; each maps to
    an NPCSH_* environment variable. An existing export line for that
    variable is rewritten in place, otherwise a new line is appended.

    Args:
        command: Setting name to update.
        value: Value to store.

    Returns:
        A confirmation message, or an error string for unknown settings.
    """
    var_map = {
        "model": "NPCSH_CHAT_MODEL",
        "provider": "NPCSH_CHAT_PROVIDER",
        "db_path": "NPCSH_DB_PATH",
    }
    if command not in var_map:
        return f"Unknown setting: {command}"
    env_var = var_map[command]

    config_path = os.path.expanduser("~/.npcshrc")
    lines = []
    if os.path.exists(config_path):
        with open(config_path, "r") as f:
            lines = f.readlines()

    new_line = f"export {env_var}='{value}'\n"
    prefix = f"export {env_var}="
    for i, existing in enumerate(lines):
        if existing.startswith(prefix):
            lines[i] = new_line
            break
    else:
        # No existing export line for this variable: append one.
        lines.append(new_line)

    with open(config_path, "w") as f:
        f.writelines(lines)

    return f"{command.capitalize()} has been set to: {value}"
1016
+
1017
+
1018
def set_npcsh_initialized() -> None:
    """
    Flip the NPCSH_INITIALIZED flag to 1, both in ~/.npcshrc and in
    the current process environment.
    """
    npcshrc_path = ensure_npcshrc_exists()

    flag_off = "export NPCSH_INITIALIZED=0"
    flag_on = "export NPCSH_INITIALIZED=1"
    with open(npcshrc_path, "r+") as rc:
        text = rc.read()
        if flag_off in text:
            rc.seek(0)
            rc.write(text.replace(flag_off, flag_on))
            rc.truncate()

    os.environ["NPCSH_INITIALIZED"] = "1"
    print("NPCSH initialization flag set in .npcshrc")
1046
+
1047
+
1048
+
1049
def file_has_changed(source_path: str, destination_path: str) -> bool:
    """
    Report whether two files differ, using a full byte-level comparison
    (shallow=False forces content comparison rather than stat metadata).

    Args:
        source_path: Path to the source file.
        destination_path: Path to the destination file.

    Returns:
        True when the files' contents differ, False when identical.
    """
    identical = filecmp.cmp(source_path, destination_path, shallow=False)
    return not identical
1064
+
1065
+
1066
def list_directory(args: List[str]) -> None:
    """
    Print each entry of a directory.

    Args:
        args: Command arguments; args[0] is the directory (defaults to ".").
    """
    target = args[0] if args else "."
    try:
        for entry in os.listdir(target):
            print(entry)
    except Exception as e:
        print(f"Error listing directory: {e}")
1084
+
1085
+
1086
+
1087
def change_directory(command_parts: list, messages: list) -> dict:
    """
    Change the process working directory (the `cd` builtin).

    Args:
        command_parts: Tokenized command; an optional second token is the
            target directory (defaults to the user's home).
        messages: Conversation messages, passed through unchanged.

    Returns:
        Dict with the passed-through 'messages' and an 'output' status string.
    """
    target = command_parts[1] if len(command_parts) > 1 else "~"
    new_dir = os.path.expanduser(target)
    try:
        os.chdir(new_dir)
    except FileNotFoundError:
        return {
            "messages": messages,
            "output": f"Directory not found: {new_dir}",
        }
    except PermissionError:
        return {"messages": messages, "output": f"Permission denied: {new_dir}"}
    return {
        "messages": messages,
        "output": f"Changed directory to {os.getcwd()}",
    }
1118
+
1119
+
1120
def orange(text: str) -> str:
    """
    Wrap *text* in a 24-bit ANSI orange (255,165,0) foreground color,
    resetting styling afterwards.

    Args:
        text: Text to colorize.

    Returns:
        The colorized string.
    """
    orange_fg = "\033[38;2;255;165;0m"
    return f"{orange_fg}{text}{Style.RESET_ALL}"
1133
+
1134
+
1135
def get_npcshrc_path_windows():
    """Return the path to the user's .npcshrc file (home-directory based)."""
    return Path.home().joinpath(".npcshrc")
1137
+
1138
+
1139
def read_rc_file_windows(path):
    """
    Parse a shell-style rc file into a {KEY: value} dict.

    Only lines of the form KEY='value' or KEY="value" (uppercase keys and
    underscores) are recognized; blank lines, comments and anything else
    are skipped. A missing file yields an empty dict.
    """
    if not path.exists():
        return {}

    pattern = re.compile(r'^([A-Z_]+)\s*=\s*[\'"](.*?)[\'"]$')
    config = {}
    with open(path) as f:
        for raw in f:
            stripped = raw.strip()
            if not stripped or stripped.startswith("#"):
                continue
            m = pattern.match(stripped)
            if m:
                config[m.group(1)] = m.group(2)
    return config
1155
+
1156
+
1157
def get_setting_windows(key, default=None):
    """
    Look up a setting: the process environment wins; otherwise fall back
    to the value parsed from ~/.npcshrc, then to `default`.
    """
    from_env = os.getenv(key)
    if from_env:
        return from_env

    rc_config = read_rc_file_windows(get_npcshrc_path_windows())
    return rc_config.get(key, default)
1165
+
1166
+
1167
def setup_readline() -> str:
    """
    Configure readline history and key bindings for the shell prompt.

    Loads persistent history from READLINE_HISTORY_FILE, caps it at 1000
    entries, and binds history-search / navigation keys. On macOS (libedit)
    tab completion uses the `bind ^I rl_complete` syntax.

    Returns:
        READLINE_HISTORY_FILE on success; None when readline is unavailable
        or (implicitly) when the history file cannot be read.
    """
    # FIX: `import readline` raises ImportError when unavailable — it never
    # binds None — so the previous `if readline is None` guard was dead code
    # and an unavailable readline would have crashed the caller.
    try:
        import readline
    except ImportError:
        return None
    try:
        readline.read_history_file(READLINE_HISTORY_FILE)
        readline.set_history_length(1000)
        readline.parse_and_bind("set enable-bracketed-paste on")
        readline.parse_and_bind(r'"\e[A": history-search-backward')
        readline.parse_and_bind(r'"\e[B": history-search-forward')
        readline.parse_and_bind(r'"\C-r": reverse-search-history')
        readline.parse_and_bind(r'\C-e: end-of-line')
        readline.parse_and_bind(r'\C-a: beginning-of-line')
        if sys.platform == "darwin":
            # libedit (macOS) uses a different binding syntax for completion.
            readline.parse_and_bind("bind ^I rl_complete")
        else:
            readline.parse_and_bind("tab: complete")
        return READLINE_HISTORY_FILE
    except FileNotFoundError:
        # First run: no history file yet.
        pass
    except OSError as e:
        print(f"Warning: Could not read readline history file {READLINE_HISTORY_FILE}: {e}")
1189
+
1190
def save_readline_history():
    """Persist in-memory readline history to READLINE_HISTORY_FILE, best-effort."""
    if readline is not None:  # readline may be unavailable on this platform
        try:
            readline.write_history_file(READLINE_HISTORY_FILE)
        except OSError as e:
            print(f"Warning: Could not write readline history file {READLINE_HISTORY_FILE}: {e}")
1197
+
1198
+
1199
+
1200
# ChromaDB client (lazy init)
EMBEDDINGS_DB_PATH = NPCSH_VECTOR_DB_PATH

# When chromadb is unavailable (module is falsy) or initialization fails,
# embedding features are disabled by leaving the client as None.
chroma_client = None
if chromadb:
    try:
        chroma_client = chromadb.PersistentClient(path=EMBEDDINGS_DB_PATH)
    except Exception as e:
        print(f"Warning: Failed to initialize ChromaDB client at {EMBEDDINGS_DB_PATH}: {e}")
1208
+
1209
+
1210
+
1211
+
1212
def get_path_executables() -> List[str]:
    """
    Return a sorted list of executable names found on PATH.

    The scan is performed once and memoized on the function object, so
    repeated calls during tab completion stay cheap.
    """
    cache = getattr(get_path_executables, '_cache', None)
    if cache is None:
        found = set()
        for directory in os.environ.get('PATH', '').split(os.pathsep):
            if not os.path.isdir(directory):
                continue
            try:
                for name in os.listdir(directory):
                    candidate = os.path.join(directory, name)
                    if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                        found.add(name)
            except (PermissionError, OSError):
                continue
        cache = sorted(found)
        get_path_executables._cache = cache
    return cache
1228
+
1229
+
1230
import logging

# Dedicated logger for completion internals; WARNING by default so debug
# chatter stays out of the interactive prompt.
completion_logger = logging.getLogger('npcsh.completion')
completion_logger.setLevel(logging.WARNING)

# Attach a stderr handler exactly once (guard against re-import/reload).
if not completion_logger.handlers:
    _completion_handler = logging.StreamHandler(sys.stderr)
    _completion_handler.setFormatter(logging.Formatter('[%(name)s] %(message)s'))
    completion_logger.addHandler(_completion_handler)
1242
def make_completer(shell_state: ShellState, router: Any):
    """Build a readline completer bound to the current shell state and router.

    The returned callable follows readline's completer protocol: it is called
    repeatedly with the same text and an increasing state_index, and must
    return the state_index-th candidate, or None when candidates are
    exhausted.
    """
    # Remembers the last (buffer, token) for which the slash-command hint
    # banner was printed, so it appears at most once per distinct input.
    slash_hint_cache = {"last_key": None}

    def complete(text: str, state_index: int) -> Optional[str]:
        """Main completion function"""
        try:
            buffer = readline.get_line_buffer()
            begidx = readline.get_begidx()
            endidx = readline.get_endidx()

            # The word currently being completed (e.g., "lor" in "ls lor")
            word_under_cursor = buffer[begidx:endidx]

            # The very first word/token in the entire buffer (e.g., "ls" in "ls lor")
            first_token_of_buffer = ""
            if buffer.strip():
                match = re.match(r'^(\S+)', buffer.strip())
                if match:
                    first_token_of_buffer = match.group(1)

            matches = []

            # Determine if we are in a "slash command context"
            # This is true if the *entire buffer starts with a slash* AND
            # the current completion is for that initial slash command (begidx == 0).

            is_slash_command_context = (begidx <=1 and first_token_of_buffer.startswith('/'))

            if is_slash_command_context:
                slash_commands = get_slash_commands(shell_state, router)

                if first_token_of_buffer == '/': # If just '/' is typed
                    # Candidates are returned without the leading '/' because
                    # readline replaces only the text after the '/' separator.
                    matches = [cmd[1:] for cmd in slash_commands]
                else: # If '/ag' is typed
                    matching_commands = [cmd for cmd in slash_commands if cmd.startswith(first_token_of_buffer)]
                    matches = [cmd[1:] for cmd in matching_commands]

                # Only print hints if this is the first completion attempt (state_index == 0)
                # and the hints haven't been printed for this specific input yet.
                if matches and state_index == 0:
                    key = (buffer, first_token_of_buffer) # Use full buffer for cache key
                    if slash_hint_cache["last_key"] != key:
                        print("\nAvailable slash commands: " + ", ".join(slash_commands))
                        try:
                            readline.redisplay()
                        except Exception:
                            pass
                        slash_hint_cache["last_key"] = key

            # If not a slash command context, then it's either a regular command or an argument.
            elif begidx == 0: # Completing a regular command (e.g., "ls", "pyt")
                bash_matches = [cmd for cmd in BASH_COMMANDS if cmd.startswith(word_under_cursor)]
                matches.extend(bash_matches)

                interactive_matches = [cmd for cmd in interactive_commands.keys() if cmd.startswith(word_under_cursor)]
                matches.extend(interactive_matches)

                if len(word_under_cursor) >= 1:
                    path_executables = get_path_executables()
                    exec_matches = [cmd for cmd in path_executables if cmd.startswith(word_under_cursor)]
                    # Cap PATH-derived candidates to keep the list manageable.
                    matches.extend(exec_matches[:20])

            else: # Completing a file or directory path (e.g., "ls doc/my_f")
                matches = get_file_completions(word_under_cursor)

            # De-duplicate and order candidates deterministically.
            matches = sorted(list(set(matches)))

            if state_index < len(matches):
                return matches[state_index]
            else:
                return None # readline expects None when no more completions

        except Exception as e:
            # Using completion_logger for internal debugging, not printing to stdout for user.
            # completion_logger.error(f"Exception in completion: {e}", exc_info=True)
            return None

    return complete
1320
+
1321
def get_slash_commands(state: ShellState, router: Any) -> List[str]:
    """
    Collect every available slash command: router routes, team jinxs,
    team NPCs, plus the built-in mode switches. Returned sorted.
    """
    commands: List[str] = []

    if router and hasattr(router, 'routes'):
        router_cmds = [f"/{cmd}" for cmd in router.routes.keys()]
        commands.extend(router_cmds)
        completion_logger.debug(f"Router commands: {router_cmds}")

    team = state.team
    if team and hasattr(team, 'jinxs_dict'):
        jinx_cmds = [f"/{jinx}" for jinx in team.jinxs_dict.keys()]
        commands.extend(jinx_cmds)
        completion_logger.debug(f"Jinx commands: {jinx_cmds}")

    if team and hasattr(team, 'npcs'):
        npc_cmds = [f"/{npc}" for npc in team.npcs.keys()]
        commands.extend(npc_cmds)
        completion_logger.debug(f"NPC commands: {npc_cmds}")

    mode_cmds = ['/cmd', '/agent', '/chat']
    commands.extend(mode_cmds)
    completion_logger.debug(f"Mode commands: {mode_cmds}")

    result = sorted(commands)
    completion_logger.debug(f"Final slash commands: {result}")
    return result
1350
def get_file_completions(text: str) -> List[str]:
    """Get file/directory completions, including for subfolders.

    `text` is the partial path under the cursor (relative or absolute).
    Candidates are rebuilt relative to the typed text (so "folder/s"
    completes to "folder/subfolder/"), with a trailing '/' appended to
    directories. A nonexistent or unreadable base directory yields [].
    """
    try:
        # Determine the base directory and the prefix to match
        if '/' in text:
            basedir = os.path.dirname(text)
            prefix = os.path.basename(text)
        else:
            basedir = '.'
            prefix = text

        # If basedir is empty (e.g., text is "folder/"), it should be current dir
        if not basedir:
            basedir = '.'

        # Handle absolute paths
        if text.startswith('/'):
            # Ensure absolute path starts with / and handle cases like "/something"
            if basedir.startswith('/'):
                pass # already absolute
            else:
                basedir = '/' + basedir.lstrip('/')
            if basedir == '/': # If text was just "/something", basedir is "/"
                prefix = os.path.basename(text)

        # Resolve the actual path to list
        if basedir == '.':
            current_path_to_list = os.getcwd()
        else:
            # If basedir is relative, join it with current working directory
            if not os.path.isabs(basedir):
                current_path_to_list = os.path.join(os.getcwd(), basedir)
            else:
                current_path_to_list = basedir

        if not os.path.isdir(current_path_to_list): # If the base path doesn't exist yet, no completions
            return []

        matches = []
        try:
            for item in os.listdir(current_path_to_list):
                if item.startswith(prefix):
                    full_item_path = os.path.join(current_path_to_list, item)

                    # Construct the completion string relative to the input 'text'
                    # This ensures that if the input was 'folder/s', the completion is 'folder/subfolder/'
                    if basedir == '.':
                        completion = item
                    else:
                        # Reconstruct the path fragment before the prefix
                        path_fragment_before_prefix = text[:len(text) - len(prefix)]
                        completion = os.path.join(path_fragment_before_prefix, item)

                    if os.path.isdir(full_item_path):
                        matches.append(completion + '/')
                    else:
                        matches.append(completion)
        except (PermissionError, OSError):
            # Unreadable directory: return whatever was gathered so far.
            pass

        return sorted(matches)
    except Exception as e:
        completion_logger.error(f"Error in get_file_completions for text '{text}': {e}", exc_info=True)
        return []
1414
+
1415
+
1416
def is_command_position(buffer: str, begidx: int) -> bool:
    """
    Return True when the word starting at `begidx` is in command position,
    i.e. only whitespace precedes it within the current pipeline segment
    (segments are delimited by '|', ';' or '&').
    """
    preceding = buffer[:begidx]
    segments = re.split(r'[|;&]', preceding)
    return not segments[-1].strip()
1428
+
1429
+
1430
def readline_safe_prompt(prompt: str) -> str:
    """Wrap ANSI escape sequences in \\001/\\002 markers so readline can
    measure the prompt's visible width correctly."""
    return re.sub(r"(\033\[[0-9;]*[a-zA-Z])", r"\001\1\002", prompt)
1433
+
1434
def print_jinxs(jinxs):
    """Render the available jinxs (name, description, inputs) as one string."""
    sections = ["Available jinxs:\n"]
    for jinx in jinxs:
        sections.append(f"  {jinx.jinx_name}\n")
        sections.append(f"   Description: {jinx.description}\n")
        sections.append(f"   Inputs: {jinx.inputs}\n")
    return "".join(sections)
1441
+
1442
def open_terminal_editor(command: str) -> str:
    """Launch a terminal editor command via the shell and report completion."""
    try:
        os.system(command)
    except Exception as e:
        return f"Error opening terminal editor: {e}"
    return 'Terminal editor closed.'
1448
+
1449
def get_multiline_input(prompt: str, state=None, router=None, token_hint: str = "") -> str:
    """
    Read one logical input line, honoring backslash line continuations.

    Lines ending in a backslash are joined (newline-separated) with the
    following line; continuation lines use a bare '> ' prompt with no hint.
    Exits the process with "Goodbye!" on EOF (Ctrl-D).
    """
    collected = []
    next_prompt = prompt
    next_hint = token_hint
    while True:
        try:
            entry = _input_with_hint_below(next_prompt, state, router, next_hint)
        except EOFError:
            print("Goodbye!")
            sys.exit(0)
        if entry.endswith("\\"):
            collected.append(entry[:-1])
            next_prompt = "> "
            next_hint = ""
        else:
            collected.append(entry)
            return "\n".join(collected)
1467
+
1468
+
1469
def _input_with_hint_below(prompt: str, state=None, router=None, token_hint: str = "") -> str:
    """Custom input with hint displayed below. Arrow keys work for history.

    Puts the terminal in cbreak mode and implements a minimal line editor:
    printable insertion, cursor movement, history recall (Up/Down), and the
    usual emacs-style control keys (Ctrl-A/E/U/K/W, Backspace, Del). A
    context-sensitive hint line ('/' slash commands, '@' NPC names, or the
    caller-provided token_hint) is kept one row below the input line.

    Falls back to plain input() when termios/tty/readline are unavailable or
    stdin is not a TTY. Returns the entered line (non-blank lines are added
    to readline history); raises KeyboardInterrupt on Ctrl-C, EOFError on
    Ctrl-D with an empty buffer. A double-ESC returns the literal '\\x1b'.
    """
    try:
        import termios
        import tty
        import readline
    except ImportError:
        # No raw-terminal support on this platform: plain input.
        return input(prompt)

    if not sys.stdin.isatty():
        # Piped/redirected stdin: escape-sequence editing would be garbage.
        return input(prompt)

    # Get history from readline
    hist_len = readline.get_current_history_length()
    history = [readline.get_history_item(i) for i in range(1, hist_len + 1)]
    history_idx = len(history)  # one past the end == "editing a fresh line"
    saved_line = ""  # stashes the in-progress line while browsing history

    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)

    buf = ""
    pos = 0 # cursor position in buf

    # Calculate visible prompt length (strip ANSI codes)
    import re
    prompt_visible_len = len(re.sub(r'\x1b\[[0-9;]*m|\x01|\x02', '', prompt))

    def current_hint():
        # Choose the hint for the current buffer: slash-command hints for '/',
        # NPC hints for '@', otherwise the caller-provided token_hint.
        if buf.startswith('/') and len(buf) >= 1:
            h = _get_slash_hints(state, router, buf)
            return h if h else token_hint
        elif buf.startswith('@') and len(buf) >= 1:
            h = _get_npc_hints(state, buf)
            return h if h else token_hint
        return token_hint

    # Get terminal width
    try:
        import shutil
        term_width = shutil.get_terminal_size().columns
    except:
        term_width = 80

    def draw():
        # Repaint prompt + buffer + hint, accounting for soft line wrapping.
        # Calculate how many lines the input takes
        total_len = prompt_visible_len + len(buf)
        num_lines = (total_len // term_width) + 1

        # Move to start of input (may need to go up multiple lines)
        # First go to column 0
        sys.stdout.write('\r')
        # Move up for each wrapped line we're on
        cursor_total = prompt_visible_len + pos
        cursor_line = cursor_total // term_width
        # Go up to the first line of input
        for _ in range(num_lines - 1):
            sys.stdout.write('\033[A')

        # Clear from cursor to end of screen (clears all wrapped lines + hint)
        sys.stdout.write('\033[J')

        # Print prompt and buffer
        sys.stdout.write(prompt + buf)

        # Print hint on next line
        sys.stdout.write('\n\033[K' + current_hint())

        # Now position cursor back to correct spot
        # Go back up to the line where cursor should be
        lines_after_cursor = (total_len // term_width) - (cursor_total // term_width) + 1 # +1 for hint line
        for _ in range(lines_after_cursor):
            sys.stdout.write('\033[A')

        # Position cursor in correct column
        cursor_col = cursor_total % term_width
        sys.stdout.write('\r')
        if cursor_col > 0:
            sys.stdout.write('\033[' + str(cursor_col) + 'C')

        sys.stdout.flush()

    # Print prompt and reserve hint line
    sys.stdout.write(prompt + '\n' + (token_hint or '') + '\033[A\r')
    if prompt_visible_len > 0:
        sys.stdout.write('\033[' + str(prompt_visible_len) + 'C')
    sys.stdout.flush()

    try:
        tty.setcbreak(fd)
        while True:
            c = sys.stdin.read(1)

            if c in ('\n', '\r'):
                # Clear hint and newline
                sys.stdout.write('\n\033[K')
                sys.stdout.flush()
                if buf.strip():
                    readline.add_history(buf)
                return buf

            elif c == '\x1b': # ESC - could be arrow key
                c2 = sys.stdin.read(1)
                if c2 == '[':
                    c3 = sys.stdin.read(1)
                    if c3 == 'A': # Up
                        if history_idx > 0:
                            if history_idx == len(history):
                                saved_line = buf
                            history_idx -= 1
                            buf = history[history_idx] or ''
                            pos = len(buf)
                            draw()
                    elif c3 == 'B': # Down
                        if history_idx < len(history):
                            history_idx += 1
                            buf = saved_line if history_idx == len(history) else (history[history_idx] or '')
                            pos = len(buf)
                            draw()
                    elif c3 == 'C': # Right
                        if pos < len(buf):
                            pos += 1
                            sys.stdout.write('\033[C')
                            sys.stdout.flush()
                    elif c3 == 'D': # Left
                        if pos > 0:
                            pos -= 1
                            sys.stdout.write('\033[D')
                            sys.stdout.flush()
                    elif c3 == '3': # Del
                        sys.stdin.read(1) # ~
                        if pos < len(buf):
                            buf = buf[:pos] + buf[pos+1:]
                            draw()
                    elif c3 == 'H': # Home
                        pos = 0
                        draw()
                    elif c3 == 'F': # End
                        pos = len(buf)
                        draw()
                elif c2 == '\x1b': # Double ESC
                    sys.stdout.write('\n\033[K')
                    sys.stdout.flush()
                    return '\x1b'

            elif c == '\x7f' or c == '\x08': # Backspace
                if pos > 0:
                    buf = buf[:pos-1] + buf[pos:]
                    pos -= 1
                    draw()

            elif c == '\x03': # Ctrl-C
                sys.stdout.write('\n\033[K')
                sys.stdout.flush()
                raise KeyboardInterrupt

            elif c == '\x04': # Ctrl-D
                if not buf:
                    sys.stdout.write('\n\033[K')
                    sys.stdout.flush()
                    raise EOFError

            elif c == '\x01': # Ctrl-A
                pos = 0
                draw()

            elif c == '\x05': # Ctrl-E
                pos = len(buf)
                draw()

            elif c == '\x15': # Ctrl-U
                buf = buf[pos:]
                pos = 0
                draw()

            elif c == '\x0b': # Ctrl-K
                buf = buf[:pos]
                draw()

            elif c == '\x17': # Ctrl-W - delete word back
                while pos > 0 and buf[pos-1] == ' ':
                    buf = buf[:pos-1] + buf[pos:]
                    pos -= 1
                while pos > 0 and buf[pos-1] != ' ':
                    buf = buf[:pos-1] + buf[pos:]
                    pos -= 1
                draw()

            elif c == '\t': # Tab - do nothing for now
                pass

            elif c == '\x0f': # Ctrl-O - show last tool call args
                try:
                    import builtins
                    # _npcsh_last_tool_call is stashed on builtins elsewhere;
                    # presumably by the tool-execution path — TODO confirm.
                    last_call = getattr(builtins, '_npcsh_last_tool_call', None)
                    if last_call:
                        from termcolor import colored
                        # Save cursor, move down past hint, show args, restore
                        sys.stdout.write('\n\033[K') # New line, clear
                        sys.stdout.write(colored(f"─── {last_call['name']} ───\n", "cyan"))
                        args = last_call.get('arguments', {})
                        for k, v in args.items():
                            v_str = str(v)
                            # Show with syntax highlighting for code
                            if '\n' in v_str:
                                sys.stdout.write(colored(f"{k}:\n", "yellow"))
                                for line in v_str.split('\n')[:30]: # Limit lines
                                    sys.stdout.write(f"  {line}\n")
                                if v_str.count('\n') > 30:
                                    sys.stdout.write(colored(f"  ... ({v_str.count(chr(10)) - 30} more lines)\n", "white", attrs=["dark"]))
                            else:
                                sys.stdout.write(colored(f"{k}: ", "yellow") + f"{v_str}\n")
                        sys.stdout.write(colored("─" * 40 + "\n", "cyan"))
                        # Redraw prompt
                        sys.stdout.write(prompt)
                        sys.stdout.write(buf)
                        sys.stdout.write('\n' + (token_hint or ''))
                        sys.stdout.write('\033[A\r')
                        if prompt_visible_len > 0:
                            sys.stdout.write('\033[' + str(prompt_visible_len + pos) + 'C')
                        sys.stdout.flush()
                    else:
                        pass # No tool call to show
                except:
                    pass

            elif ord(c) >= 32: # Printable
                buf = buf[:pos] + c + buf[pos:]
                pos += 1
                draw()

    finally:
        # Always restore the original terminal mode.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
1702
+
1703
+
1704
def _get_slash_hints(state, router, prefix='/') -> str:
    """
    Build a dimmed hint line of slash commands matching `prefix`,
    truncated to fit the terminal width.
    """
    cmds = {'help', 'set', 'agent', 'chat', 'cmd', 'sq', 'quit', 'exit', 'clear', 'npc'}
    if state and state.team and hasattr(state.team, 'jinxs_dict'):
        cmds.update(state.team.jinxs_dict.keys())
    if router and hasattr(router, 'jinx_routes'):
        cmds.update(router.jinx_routes.keys())

    if len(prefix) > 1:
        typed = prefix[1:].lower()
        cmds = {c for c in cmds if c.lower().startswith(typed)}
    if not cmds:
        return ""

    # Get terminal width, default 80
    try:
        import shutil
        term_width = shutil.get_terminal_size().columns
    except:
        term_width = 80

    # Greedily add commands until the hint would overflow the line.
    shown = []
    used = 2  # leading spaces
    for name in sorted(cmds):
        candidate = '/' + name
        if used + len(candidate) + 2 > term_width - 5:  # leave margin
            break
        shown.append(candidate)
        used += len(candidate) + 2

    if shown:
        return colored(' ' + ' '.join(shown), 'white', attrs=['dark'])
    return ""
1736
+
1737
+
1738
def _get_npc_hints(state, prefix='@') -> str:
    """Build a cyan hint line of @npc names matching the typed prefix."""
    npcs = set()
    team = state.team if state else None
    if team:
        if hasattr(team, 'npcs') and team.npcs:
            npcs.update(team.npcs.keys())
        if hasattr(team, 'forenpc') and team.forenpc:
            npcs.add(team.forenpc.name)
    if not npcs:
        # No team loaded: fall back to the default NPC roster.
        npcs = {'sibiji', 'guac', 'corca', 'kadiefa', 'plonk'}

    if len(prefix) > 1:
        typed = prefix[1:].lower()
        npcs = {n for n in npcs if n.lower().startswith(typed)}
    if not npcs:
        return ""
    return colored(' ' + ' '.join('@' + n for n in sorted(npcs)), 'cyan')
1754
+
1755
+
1756
+
1757
def split_by_pipes(command: str) -> List[str]:
    """
    Split a command line on unquoted, unescaped '|' characters.

    Pipes inside single or double quotes, or escaped with a backslash, are
    preserved. Quote and escape characters themselves are kept in the output
    segments; each segment is stripped of surrounding whitespace.

    Args:
        command: The raw command line.

    Returns:
        The pipeline segments, in order.
    """
    parts = []
    current = ""
    in_single_quote = False
    in_double_quote = False
    escape = False

    for char in command:
        if escape:
            current += char
            escape = False
        elif char == '\\':
            escape = True
            current += char
        elif char == "'" and not in_double_quote:
            in_single_quote = not in_single_quote
            current += char
        elif char == '"' and not in_single_quote:
            # BUG FIX: previously this was `not in_single_quote`, which (being
            # always True in this branch) left in_double_quote stuck on after
            # a closing double quote, so pipes following any double-quoted
            # span were never split on.
            in_double_quote = not in_double_quote
            current += char
        elif char == '|' and not in_single_quote and not in_double_quote:
            parts.append(current.strip())
            current = ""
        else:
            current += char

    if current:
        parts.append(current.strip())
    return parts
1786
+
1787
def parse_command_safely(cmd: str) -> List[str]:
    """
    Tokenize like shlex.split, repairing a single unbalanced quote by
    appending its closer, and falling back to whitespace splitting when
    parsing still fails.
    """
    try:
        return shlex.split(cmd)
    except ValueError as err:
        if "No closing quotation" not in str(err):
            return cmd.split()
        repaired = cmd
        if repaired.count('"') % 2 == 1:
            repaired += '"'
        elif repaired.count("'") % 2 == 1:
            repaired += "'"
        try:
            return shlex.split(repaired)
        except ValueError:
            return repaired.split()
1802
+
1803
def get_file_color(filepath: str) -> tuple:
    """
    Pick a (color, attrs) pair for displaying a path, ls-style.

    Directories are bold blue, executables bold green, archives red, images
    magenta, code files colored by language, dotfiles cyan; everything else
    white. Nonexistent paths are grey.
    """
    if not os.path.exists(filepath):
        return "grey", []
    if os.path.isdir(filepath):
        return "blue", ["bold"]
    # Not a directory at this point, so X_OK alone identifies executables.
    if os.access(filepath, os.X_OK):
        return "green", ["bold"]

    suffix_colors = (
        ((".zip", ".tar", ".gz", ".bz2", ".xz", ".7z"), "red"),
        ((".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff"), "magenta"),
        ((".py", ".pyw"), "yellow"),
        ((".sh", ".bash", ".zsh"), "green"),
        ((".c", ".cpp", ".h", ".hpp"), "cyan"),
        ((".js", ".ts", ".jsx", ".tsx"), "yellow"),
        ((".html", ".css", ".scss", ".sass"), "magenta"),
        ((".md", ".txt", ".log"), "white"),
    )
    for suffixes, color in suffix_colors:
        if filepath.endswith(suffixes):
            return color, []

    if os.path.basename(filepath).startswith("."):
        return "cyan", []
    return "white", []
1830
+
1831
def format_file_listing(output: str) -> str:
    """
    Colorize an `ls`-style listing: the last whitespace-separated token of
    each line is treated as a filename (resolved against the current
    directory) and colored per get_file_color; other tokens pass through.
    """
    cwd = os.getcwd()
    rendered = []
    for line in output.strip().split("\n"):
        tokens = line.split()
        if not tokens:
            rendered.append(line)
            continue

        name = tokens[-1]
        color, attrs = get_file_color(os.path.join(cwd, name))
        painted = colored(name, color, attrs=attrs)

        if len(tokens) > 1:
            rendered.append(" ".join(tokens[:-1] + [painted]))
        else:
            rendered.append(painted)

    return "\n".join(rendered)
1856
+
1857
def wrap_text(text: str, width: int = 80) -> str:
    """Hard-wrap each line of *text* that exceeds *width* columns.

    Lines already within the limit pass through unchanged, so existing
    newlines and intra-line whitespace are preserved.
    """
    wrapped = []
    for line in text.split("\n"):
        if len(line) <= width:
            wrapped.append(line)
        else:
            wrapped.extend(
                textwrap.wrap(
                    line,
                    width=width,
                    replace_whitespace=False,
                    drop_whitespace=False,
                )
            )
    return "\n".join(wrapped)
+
1866
+
1867
+
1868
def setup_readline() -> str:
    """Setup readline with history and completion.

    Loads persisted history from READLINE_HISTORY_FILE when it exists and
    ALWAYS installs tab completion plus the key bindings.  Previously a
    single try wrapped everything, so a missing history file (e.g. on
    first run) raised FileNotFoundError and silently skipped every
    parse_and_bind call, leaving completion unconfigured.

    Returns:
        READLINE_HISTORY_FILE when history was loaded, else None.
    """
    history_loaded = True
    try:
        # Only the history read can legitimately fail; keep it isolated so
        # the bindings below are installed regardless.
        readline.read_history_file(READLINE_HISTORY_FILE)
    except FileNotFoundError:
        history_loaded = False  # first run: no history yet, not an error
    except OSError as e:
        history_loaded = False
        print(f"Warning: Could not read readline history file {READLINE_HISTORY_FILE}: {e}")

    readline.set_history_length(1000)

    readline.parse_and_bind("tab: complete")

    readline.parse_and_bind("set enable-bracketed-paste on")
    readline.parse_and_bind(r'"\C-r": reverse-search-history')
    readline.parse_and_bind(r'"\C-e": end-of-line')
    readline.parse_and_bind(r'"\C-a": beginning-of-line')

    return READLINE_HISTORY_FILE if history_loaded else None
+
1889
+
1890
def save_readline_history():
    """Persist the in-memory readline history, warning instead of raising."""
    try:
        readline.write_history_file(READLINE_HISTORY_FILE)
    except OSError as exc:
        print(f"Warning: Could not write readline history file {READLINE_HISTORY_FILE}: {exc}")
+
1896
def store_command_embeddings(command: str, output: Any, state: ShellState):
    """Embed a command/response pair and persist both vectors to ChromaDB.

    Best-effort: every failure is reported as a warning on stderr and the
    function returns; nothing is ever raised to the caller.
    """
    # Embeddings require a chroma client plus an embedding model/provider
    # configured on the shell state; bail out quietly otherwise.
    if not chroma_client or not state.embedding_model or not state.embedding_provider:
        if not chroma_client: print("Warning: ChromaDB client not available for embeddings.", file=sys.stderr)
        return
    if not command and not output:
        return

    try:
        output_str = str(output) if output else ""
        if not command and not output_str: return

        # Embed the command and the response together in one batch call.
        texts_to_embed = [command, output_str]

        embeddings = get_embeddings(
            texts_to_embed,
            state.embedding_model,
            state.embedding_provider,
        )

        # Expect exactly one vector back per input text.
        if not embeddings or len(embeddings) != 2:
            print(f"Warning: Failed to generate embeddings for command: {command[:50]}...", file=sys.stderr)
            return

        timestamp = datetime.now().isoformat()
        # state.npc may be an NPC object or a plain string name.
        npc_name = state.npc.name if isinstance(state.npc, NPC) else state.npc

        # Parallel metadata entries: index 0 is the command, 1 the response.
        metadata = [
            {
                "type": "command", "timestamp": timestamp, "path": state.current_path,
                "npc": npc_name, "conversation_id": state.conversation_id,
            },
            {
                "type": "response", "timestamp": timestamp, "path": state.current_path,
                "npc": npc_name, "conversation_id": state.conversation_id,
            },
        ]

        # One collection per embedding model/provider pair so vectors from
        # different models never mix in the same index.
        collection_name = f"{state.embedding_provider}_{state.embedding_model}_embeddings"
        try:
            collection = chroma_client.get_or_create_collection(collection_name)
            ids = [f"cmd_{timestamp}_{hash(command)}", f"resp_{timestamp}_{hash(output_str)}"]

            collection.add(
                embeddings=embeddings,
                documents=texts_to_embed,
                metadatas=metadata,
                ids=ids,
            )
        except Exception as e:
            print(f"Warning: Failed to add embeddings to collection '{collection_name}': {e}", file=sys.stderr)

    except Exception as e:
        print(f"Warning: Failed to store embeddings: {e}", file=sys.stderr)
+
1950
+
1951
def handle_interactive_command(cmd_parts: List[str], state: ShellState) -> Tuple[ShellState, str]:
    """Run a fully interactive terminal program (ssh, ipython, ...) to completion.

    Hands the terminal over to the child via start_interactive_session and
    reports its return code; any failure to launch is reported as text
    rather than raised.
    """
    name = cmd_parts[0]
    print(f"Starting interactive {name} session...")
    try:
        code = start_interactive_session(" ".join(cmd_parts))
        output = f"Interactive {name} session ended with return code {code}"
    except Exception as exc:
        output = f"Error starting interactive session {name}: {exc}"
    return state, output
+
1963
def handle_cd_command(cmd_parts: List[str], state: ShellState) -> Tuple[ShellState, str]:
    """Change the working directory (defaulting to $HOME) and sync state.

    On an unexpected failure the process is returned to the directory it
    started in; a plain missing path just yields an error message.
    """
    starting_dir = os.getcwd()
    destination = cmd_parts[1] if len(cmd_parts) > 1 else os.path.expanduser("~")
    try:
        os.chdir(destination)
        state.current_path = os.getcwd()
        output = f"Changed directory to {state.current_path}"
    except FileNotFoundError:
        output = colored(f"cd: no such file or directory: {destination}", "red")
    except Exception as exc:
        output = colored(f"cd: error changing directory: {exc}", "red")
        # Roll back in case chdir partially changed directory before failing.
        os.chdir(starting_dir)

    return state, output
+
1978
+
1979
def handle_bash_command(
    cmd_parts: List[str],
    cmd_str: str,
    stdin_input: Optional[str],
    state: ShellState,
) -> Tuple[bool, str]:
    """Run an external command and return (success, output).

    *stdin_input*, when given, is piped to the child.  Failures report the
    child's stderr (or a generic message), and stderr noise from a
    successful run is echoed to our own stderr in yellow.  File-listing
    commands get their output colorized.
    """
    wants_stdin = stdin_input is not None
    try:
        proc = subprocess.Popen(
            cmd_parts,
            stdin=subprocess.PIPE if wants_stdin else None,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            cwd=state.current_path,
        )
        out_text, err_text = proc.communicate(input=stdin_input)
    except FileNotFoundError:
        return False, f"Command not found: {cmd_parts[0]}"
    except PermissionError:
        return False, f"Permission denied: {cmd_str}"

    if proc.returncode != 0:
        failure = err_text.strip() if err_text else f"Command '{cmd_str}' failed with return code {proc.returncode}."
        return False, failure

    if err_text.strip():
        print(colored(f"stderr: {err_text.strip()}", "yellow"), file=sys.stderr)

    # File listings get extra colorization for readability.
    if cmd_parts[0] in ["ls", "find", "dir"]:
        return True, format_file_listing(out_text.strip())

    return True, out_text.strip()
+
2012
+ def _try_convert_type(value: str) -> Union[str, int, float, bool]:
2013
+ """Helper to convert string values to appropriate types."""
2014
+ if value.lower() in ['true', 'yes']:
2015
+ return True
2016
+ if value.lower() in ['false', 'no']:
2017
+ return False
2018
+ try:
2019
+ return int(value)
2020
+ except (ValueError, TypeError):
2021
+ pass
2022
+ try:
2023
+ return float(value)
2024
+ except (ValueError, TypeError):
2025
+ pass
2026
+ return value
2027
+
2028
def parse_generic_command_flags(parts: List[str]) -> Tuple[Dict[str, Any], List[str]]:
    """
    Split command tokens into (keyword_args, positional_args).

    Supported forms: ``-f val``, ``--flag val``, ``--flag=val``,
    ``flag=val`` and bare boolean flags (``--verbose`` / ``-v``).
    Values are type-coerced via _try_convert_type.
    """
    kwargs: Dict[str, Any] = {}
    positionals: List[str] = []
    idx = 0
    total = len(parts)
    while idx < total:
        token = parts[idx]

        if token.startswith('--'):
            body = token[2:]
            if '=' in body:
                name, raw = body.split('=', 1)
                kwargs[name] = _try_convert_type(raw)
            elif idx + 1 < total and not parts[idx + 1].startswith('-'):
                # Next token is this flag's value; consume it too.
                kwargs[body] = _try_convert_type(parts[idx + 1])
                idx += 1
            else:
                kwargs[body] = True

        elif token.startswith('-'):
            name = token[1:]
            if idx + 1 < total and not parts[idx + 1].startswith('-'):
                kwargs[name] = _try_convert_type(parts[idx + 1])
                idx += 1
            else:
                kwargs[name] = True

        elif '=' in token:
            name, raw = token.split('=', 1)
            kwargs[name] = _try_convert_type(raw)

        else:
            positionals.append(token)

        idx += 1

    return kwargs, positionals
+
2072
+ def _ollama_supports_tools(model: str) -> Optional[bool]:
2073
+ """
2074
+ Best-effort check for tool-call support on an Ollama model by inspecting its template/metadata.
2075
+ Mirrors the lightweight check used in the Flask serve path.
2076
+ """
2077
+ try:
2078
+ import ollama # Local import to avoid hard dependency when Ollama isn't installed
2079
+ except Exception:
2080
+ return None
2081
+
2082
+ try:
2083
+ details = ollama.show(model)
2084
+ template = details.get("template") or ""
2085
+ metadata = details.get("metadata") or {}
2086
+ if any(token in template for token in ["{{- if .Tools", "{{- range .Tools", "{{- if .ToolCalls"]):
2087
+ return True
2088
+ if metadata.get("tools") or metadata.get("tool_calls"):
2089
+ return True
2090
+ return False
2091
+ except Exception:
2092
+ return None
2093
+
2094
+
2095
def model_supports_tool_calls(model: Optional[str], provider: Optional[str]) -> bool:
    """
    Heuristically decide whether tool-calling should be attempted.

    For Ollama the template-inspection result wins when available;
    otherwise a substring match against known tool-capable model families
    is used.
    """
    if not model:
        return False

    provider = (provider or "").lower()
    model_lower = model.lower()

    if provider == "ollama":
        verdict = _ollama_supports_tools(model)
        if verdict is not None:
            return verdict

    # Families known (or likely) to emit structured tool calls.
    known_families = (
        "gpt",
        "claude",
        "qwen",
        "mistral",
        "llama-3.1",
        "llama3.1",
        "llama-3.2",
        "llama3.2",
        "gemini",
        "tool",
    )
    return any(family in model_lower for family in known_families)
+
2125
+
2126
def collect_llm_tools(state: ShellState) -> Tuple[List[Dict[str, Any]], Dict[str, Callable]]:
    """
    Assemble tool definitions + executable map from NPC tools, Jinxs, and MCP servers.
    This mirrors the auto-translation used in the Flask server path.

    Returns:
        (tools, tool_map): a list of LLM tool schemas deduplicated by
        function name, and a mapping of tool name -> callable that
        actually executes the tool.
    """
    tools: List[Dict[str, Any]] = []
    tool_map: Dict[str, Callable] = {}

    # NPC-defined Python tools.  A list of bare callables gets translated
    # to schemas via auto_tools; otherwise the tools are assumed to be
    # ready-made schema dicts with a matching tool_map on the NPC.
    npc_obj = state.npc if isinstance(state.npc, NPC) else None
    if npc_obj and getattr(npc_obj, "tools", None):
        if isinstance(npc_obj.tools, list) and npc_obj.tools and callable(npc_obj.tools[0]):
            tools_schema, auto_map = auto_tools(npc_obj.tools)
            tools.extend(tools_schema or [])
            tool_map.update(auto_map or {})
        else:
            tools.extend(npc_obj.tools or [])
            if getattr(npc_obj, "tool_map", None):
                tool_map.update(npc_obj.tool_map)
    elif npc_obj and getattr(npc_obj, "tool_map", None):
        tool_map.update(npc_obj.tool_map)

    # Jinx tools from NPC and Team; the NPC's jinxs take priority over the
    # team's on name collisions.
    aggregated_jinxs: Dict[str, Any] = {}
    if npc_obj and getattr(npc_obj, "jinxs_dict", None):
        aggregated_jinxs.update(npc_obj.jinxs_dict)
    if state.team and isinstance(state.team, Team) and getattr(state.team, "jinxs_dict", None):
        aggregated_jinxs.update({k: v for k, v in state.team.jinxs_dict.items() if k not in aggregated_jinxs})

    if aggregated_jinxs:
        # Prefer pre-built schema catalogs; only build one from scratch
        # when neither the NPC nor the team carries a catalog.
        jinx_catalog: Dict[str, Dict[str, Any]] = {}
        if npc_obj and getattr(npc_obj, "jinx_tool_catalog", None):
            jinx_catalog.update(npc_obj.jinx_tool_catalog or {})
        if state.team and isinstance(state.team, Team) and getattr(state.team, "jinx_tool_catalog", None):
            jinx_catalog.update(state.team.jinx_tool_catalog or {})
        if not jinx_catalog:
            jinx_catalog = build_jinx_tool_catalog(aggregated_jinxs)

        tools.extend(list(jinx_catalog.values()))

        jinja_env_for_jinx = getattr(npc_obj, "jinja_env", None)
        if not jinja_env_for_jinx and state.team and isinstance(state.team, Team):
            jinja_env_for_jinx = getattr(state.team, "jinja_env", None)

        for name, jinx_obj in aggregated_jinxs.items():
            # Factory with default-arg binding so each runner closes over
            # its own jinx (avoids the late-binding closure pitfall).
            def _make_runner(jinx=jinx_obj, jinja_env=jinja_env_for_jinx, tool_name=name):
                def runner(**kwargs):
                    input_values = kwargs if isinstance(kwargs, dict) else {}
                    try:
                        ctx = jinx.execute(
                            input_values=input_values,
                            npc=npc_obj,
                            messages=state.messages,
                            extra_globals={"state": state},
                            jinja_env=jinja_env
                        )
                        # Jinx execution returns a context dict; surface its
                        # "output" entry when present.
                        return ctx.get("output", ctx)
                    except Exception as exc:
                        return f"Jinx '{tool_name}' failed: {exc}"
                return runner
            tool_map[name] = _make_runner()

    # MCP tools via npcsh.corca client
    try:
        from npcsh.corca import MCPClientNPC, _resolve_and_copy_mcp_server_path  # type: ignore

        team_ctx_mcp_servers = None
        if state.team and isinstance(state.team, Team) and hasattr(state.team, "team_ctx"):
            team_ctx_mcp_servers = state.team.team_ctx.get("mcp_servers", [])

        mcp_server_path = _resolve_and_copy_mcp_server_path(
            explicit_path=None,
            current_path=state.current_path,
            team_ctx_mcp_servers=team_ctx_mcp_servers,
            interactive=False,
            auto_copy_bypass=True
        )

        if mcp_server_path:
            # Reuse an already-connected client only when it points at the
            # same server script; otherwise connect a fresh one.
            reuse_client = (
                state.mcp_client
                if state.mcp_client and getattr(state.mcp_client, "server_script_path", None) == mcp_server_path
                else None
            )
            mcp_client = reuse_client or MCPClientNPC()
            if reuse_client is None:
                try:
                    connected = mcp_client.connect_sync(mcp_server_path)
                except Exception:
                    connected = False
                if connected:
                    state.mcp_client = mcp_client
            if mcp_client and getattr(mcp_client, "available_tools_llm", None):
                # Locally-defined tools win over MCP tools of the same name.
                for tool_def in mcp_client.available_tools_llm:
                    name = tool_def.get("function", {}).get("name")
                    if name and name not in tool_map:
                        tools.append(tool_def)
                tool_map.update(getattr(mcp_client, "tool_map", {}) or {})
    except Exception:
        pass  # MCP is optional; ignore failures

    # Deduplicate tools by name to avoid confusing the LLM
    deduped = {}
    for tool_def in tools:
        name = tool_def.get("function", {}).get("name")
        if name:
            deduped[name] = tool_def
    return list(deduped.values()), tool_map
+
2235
+
2236
def should_skip_kg_processing(user_input: str, assistant_output: str) -> bool:
    """Return True when an interaction is too trivial to feed the knowledge graph.

    Skips very short prompts/replies, plain file-management shell commands,
    and session-exit chatter.
    """
    prompt = user_input.strip()
    if len(prompt) < 10:
        return True

    trivial_commands = {'ls', 'pwd', 'cd', 'mkdir', 'touch', 'rm', 'mv', 'cp'}
    leading_word = prompt.split()[0] if prompt else ""
    if leading_word in trivial_commands:
        return True

    reply = assistant_output.strip()
    if len(reply) < 20:
        return True

    lowered_reply = assistant_output.lower()
    if "exiting" in lowered_reply or "exited" in lowered_reply:
        return True

    return False
+
2256
def execute_slash_command(command: str,
                          stdin_input: Optional[str],
                          state: ShellState,
                          stream: bool,
                          router) -> Tuple[ShellState, Any]:
    """Executes slash commands using the router.

    Resolution order:
      1. ``/n`` / ``/npc <name>`` switches the active NPC explicitly.
      2. A handler registered on *router* for the command name.
      3. A bare NPC name (``/<npc>``) switches to that NPC.
      4. Otherwise an "unknown command" error payload is returned.

    Returns the (possibly mutated) state and the handler's result
    (normally a dict with "output" and "messages" keys).
    """
    try:
        all_command_parts = shlex.split(command)
    except ValueError:
        # Unbalanced quotes etc. - fall back to naive whitespace split.
        all_command_parts = command.split()
    command_name = all_command_parts[0].lstrip('/')

    # --- NPC SWITCHING LOGIC ---
    if command_name in ['n', 'npc']:
        npc_to_switch_to = all_command_parts[1] if len(all_command_parts) > 1 else None
        if npc_to_switch_to and state.team and npc_to_switch_to in state.team.npcs:
            state.npc = state.team.npcs[npc_to_switch_to]
            return state, {"output": f"Switched to NPC: {npc_to_switch_to}", "messages": state.messages}
        else:
            available_npcs = list(state.team.npcs.keys()) if state.team else []
            return state, {"output": colored(f"NPC '{npc_to_switch_to}' not found. Available NPCs: {', '.join(available_npcs)}", "red"), "messages": state.messages}

    # --- ROUTER LOGIC ---
    handler = router.get_route(command_name)
    if handler:
        # Handlers receive the full model/provider configuration from the
        # shell state; the active NPC's model/provider override the chat
        # defaults when set.
        handler_kwargs = {
            'stream': stream, 'team': state.team, 'messages': state.messages, 'api_url': state.api_url,
            'api_key': state.api_key, 'stdin_input': stdin_input,
            'model': state.npc.model if isinstance(state.npc, NPC) and state.npc.model else state.chat_model,
            'provider': state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else state.chat_provider,
            'npc': state.npc, 'sprovider': state.search_provider, 'emodel': state.embedding_model,
            'eprovider': state.embedding_provider, 'igmodel': state.image_gen_model, 'igprovider': state.image_gen_provider,
            'vmodel': state.vision_model, 'vprovider': state.vision_provider, 'rmodel': state.reasoning_model,
            'rprovider': state.reasoning_provider, 'state': state
        }
        try:
            result = handler(command=command, **handler_kwargs)
            # Sync conversation history back from the handler when provided.
            if isinstance(result, dict):
                state.messages = result.get("messages", state.messages)
            return state, result
        except Exception as e:
            import traceback
            traceback.print_exc()
            return state, {"output": colored(f"Error executing slash command '{command_name}': {e}", "red"), "messages": state.messages}

    # Fallback for switching NPC by name
    if state.team and command_name in state.team.npcs:
        state.npc = state.team.npcs[command_name]
        return state, {"output": f"Switched to NPC: {state.npc.name}", "messages": state.messages}

    return state, {"output": colored(f"Unknown slash command or NPC: {command_name}", "red"), "messages": state.messages}
+
2308
+
2309
def process_pipeline_command(
    cmd_segment: str,
    stdin_input: Optional[str],
    state: ShellState,
    stream_final: bool,
    review = False,
    router = None,
) -> Tuple[ShellState, Any]:
    """Execute one segment of a piped npcsh command line.

    Dispatch order: slash commands -> cd -> interactive programs ->
    terminal editors -> whitelisted bash commands -> LLM fallback.
    Returns the updated state and the segment's output (which becomes
    the next segment's stdin in a pipeline).

    Args:
        cmd_segment: raw text of this pipeline segment.
        stdin_input: output of the previous segment, if any.
        state: mutable shell state (npc, team, messages, paths, models).
        stream_final: whether LLM responses should be streamed.
        review: when True (and no tools were used), run one extra
            review/refine pass over the LLM answer.
        router: slash-command router used for /command dispatch.
    """

    if not cmd_segment:
        return state, stdin_input

    # Strip any "@model" style override out of the segment before dispatch.
    available_models_all = get_locally_available_models(state.current_path)
    available_models_all_list = [
        item for key, item in available_models_all.items()
    ]

    model_override, provider_override, cmd_cleaned = get_model_and_provider(
        cmd_segment, available_models_all_list
    )
    cmd_to_process = cmd_cleaned.strip()
    if not cmd_to_process:
        return state, stdin_input

    npc_model = (
        state.npc.model
        if isinstance(state.npc, NPC) and state.npc.model
        else None
    )
    npc_provider = (
        state.npc.provider
        if isinstance(state.npc, NPC) and state.npc.provider
        else None
    )

    # Precedence: inline override > active NPC's own model > chat default.
    exec_model = model_override or npc_model or state.chat_model
    exec_provider = provider_override or npc_provider or state.chat_provider

    if cmd_to_process.startswith("/"):
        command_name = cmd_to_process.split()[0].lstrip('/')

        # Check if this is an interactive mode by looking for the jinx file in modes/
        is_interactive_mode = False

        # Check global modes
        global_modes_jinx = os.path.expanduser(f'~/.npcsh/npc_team/jinxs/modes/{command_name}.jinx')
        if os.path.exists(global_modes_jinx):
            is_interactive_mode = True

        # Check team modes
        if not is_interactive_mode and state.team and state.team.team_path:
            team_modes_jinx = os.path.join(state.team.team_path, 'jinxs', 'modes', f'{command_name}.jinx')
            if os.path.exists(team_modes_jinx):
                is_interactive_mode = True

        # Interactive modes own the terminal, so no spinner is shown for them.
        if is_interactive_mode:
            result = execute_slash_command(
                cmd_to_process,
                stdin_input,
                state,
                stream_final,
                router
            )
        else:
            with SpinnerContext(
                f"Routing to {cmd_to_process.split()[0]}",
                style="arrow"
            ):
                result = execute_slash_command(
                    cmd_to_process,
                    stdin_input,
                    state,
                    stream_final,
                    router
                )
        return result
    cmd_parts = parse_command_safely(cmd_to_process)
    if not cmd_parts:
        return state, stdin_input

    command_name = cmd_parts[0]

    if command_name == "cd":
        return handle_cd_command(cmd_parts, state)

    if command_name in interactive_commands:
        return handle_interactive_command(cmd_parts, state)

    if command_name in TERMINAL_EDITORS:
        print(f"Starting interactive editor: {command_name}...")
        full_command_str = " ".join(cmd_parts)
        output = open_terminal_editor(full_command_str)
        return state, output

    if validate_bash_command(cmd_parts):
        with SpinnerContext(f"Executing {command_name}", style="line"):
            try:  # Added try-except for KeyboardInterrupt here
                success, result = handle_bash_command(
                    cmd_parts,
                    cmd_to_process,
                    stdin_input,
                    state
                )
            except KeyboardInterrupt:
                print(colored("\nBash command interrupted by user.", "yellow"))
                return state, colored("Command interrupted.", "red")

        if success:
            return state, result
        else:
            # The bash command failed - ask the LLM to propose a fix.
            print(
                colored(
                    f"Command failed. Consulting {exec_model}...",
                    "yellow"
                ),
                file=sys.stderr
            )
            fixer_prompt = (
                f"The command '{cmd_to_process}' failed with error: "
                f"'{result}'. Provide the correct command."
            )

            with SpinnerContext(
                f"{exec_model} analyzing error",
                style="brain"
            ):
                try:  # Added try-except for KeyboardInterrupt here
                    response = execute_llm_command(
                        fixer_prompt,
                        model=exec_model,
                        provider=exec_provider,
                        npc=state.npc,
                        stream=stream_final,
                        messages=state.messages
                    )
                except KeyboardInterrupt:
                    print(colored("\nLLM analysis interrupted by user.", "yellow"))
                    return state, colored("LLM analysis interrupted.", "red")

            state.messages = response['messages']
            return state, response['response']
    else:
        # Not a recognized shell command: treat the segment as a natural
        # language request and hand it to the LLM with filesystem context.
        full_llm_cmd = (
            f"{cmd_to_process} {stdin_input}"
            if stdin_input
            else cmd_to_process
        )
        path_cmd = 'The current working directory is: ' + state.current_path
        ls_files = (
            'Files in the current directory (full paths):\n' +
            "\n".join([
                os.path.join(state.current_path, f)
                for f in os.listdir(state.current_path)
            ])
            if os.path.exists(state.current_path)
            else 'No files found in the current directory.'
        )
        platform_info = (
            f"Platform: {platform.system()} {platform.release()} "
            f"({platform.machine()})"
        )
        info = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n'
        # Note: Don't append user message here - get_llm_response/check_llm_command handle it

        # Only attempt structured tool calls when the model supports them
        # AND there is at least one tool to expose.
        tools_for_llm: List[Dict[str, Any]] = []
        tool_exec_map: Dict[str, Callable] = {}
        tool_capable = model_supports_tool_calls(exec_model, exec_provider)
        if tool_capable:
            tools_for_llm, tool_exec_map = collect_llm_tools(state)
            if not tools_for_llm:
                tool_capable = False

        npc_name = (
            state.npc.name
            if isinstance(state.npc, NPC)
            else "Assistant"
        )

        with SpinnerContext(
            f"{npc_name} processing with {exec_model}",
            style="dots_pulse"
        ):
            # Build extra_globals for jinx execution
            application_globals_for_jinx = {
                "CommandHistory": CommandHistory,
                "load_kg_from_db": load_kg_from_db,
                "execute_rag_command": execute_rag_command,
                "execute_brainblast_command": execute_brainblast_command,
                "load_file_contents": load_file_contents,
                "search_web": search_web,
                "get_relevant_memories": get_relevant_memories,

                'state': state
            }
            # Expose every function in this module to jinx code as well.
            current_module = sys.modules[__name__]
            for name, func in inspect.getmembers(current_module, inspect.isfunction):
                application_globals_for_jinx[name] = func

            # Log messages before LLM call
            logger = logging.getLogger("npcsh.state")
            logger.debug(f"[process_pipeline_command] Before LLM call: {len(state.messages)} messages, tool_capable={tool_capable}")
            for i, msg in enumerate(state.messages[-3:]):
                role = msg.get('role', 'unknown')
                content = msg.get('content', '')
                preview = content[:80] if isinstance(content, str) else str(type(content))
                logger.debug(f"  msg[{len(state.messages)-3+i}] role={role}: {preview}...")

            try:  # Added try-except for KeyboardInterrupt here
                if tool_capable:
                    # Build kwargs - don't pass tool_choice for gemini as it doesn't support it
                    llm_kwargs = {
                        "auto_process_tool_calls": True,
                        "tools": tools_for_llm,
                        "tool_map": tool_exec_map,
                    }
                    # Only add tool_choice for providers that support it (not gemini)
                    is_gemini = (exec_provider and "gemini" in exec_provider.lower()) or \
                                (exec_model and "gemini" in exec_model.lower())
                    if not is_gemini:
                        llm_kwargs["tool_choice"] = {"type": "auto"}

                    llm_result = get_llm_response(
                        full_llm_cmd,
                        model=exec_model,
                        provider=exec_provider,
                        npc=state.npc,
                        team=state.team,
                        messages=state.messages,
                        stream=stream_final,
                        attachments=state.attachments,
                        context=info,
                        **llm_kwargs,
                    )
                else:
                    llm_result = check_llm_command(
                        full_llm_cmd,
                        model=exec_model,
                        provider=exec_provider,
                        api_url=state.api_url,
                        api_key=state.api_key,
                        npc=state.npc,
                        team=state.team,
                        messages=state.messages,
                        images=state.attachments,
                        stream=stream_final,
                        context=info,
                        extra_globals=application_globals_for_jinx
                    )
            except KeyboardInterrupt:
                print(colored("\nLLM processing interrupted by user.", "yellow"))
                return state, colored("LLM processing interrupted.", "red")

        # Extract output and messages from llm_result
        # get_llm_response uses 'response', check_llm_command uses 'output'
        if isinstance(llm_result, dict):
            new_messages = llm_result.get("messages", state.messages)
            logger.debug(f"[process_pipeline_command] After LLM call: received {len(new_messages)} messages (was {len(state.messages)})")
            state.messages = new_messages
            output_text = llm_result.get("output") or llm_result.get("response")

            # Preserve usage info for process_result to accumulate
            output = {
                'output': output_text,
                'usage': llm_result.get('usage'),
                'model': exec_model,
                'provider': exec_provider,
            }
        else:
            output = llm_result

        # Tool-driven answers skip the review pass; otherwise honor `review`.
        if tool_capable or not review:
            return state, output
        else:
            return review_and_iterate_command(
                original_command=full_llm_cmd,
                initial_result=llm_result,
                state=state,
                exec_model=exec_model,
                exec_provider=exec_provider,
                stream_final=stream_final,
                info=info
            )
+
2592
+
2593
def review_and_iterate_command(
    original_command: str,
    initial_result: Any,
    state: ShellState,
    exec_model: str,
    exec_provider: str,
    stream_final: bool,
    info: str,
    max_iterations: int = 2
) -> Tuple[ShellState, Any]:
    """
    Simple iteration on LLM command result to improve quality.

    Feeds the first answer back to the model with a "review and improve"
    prompt and returns the refined output.

    NOTE(review): max_iterations is currently unused - exactly one
    refinement pass is performed regardless of its value.
    """

    # initial_result may be a dict ({"output", "messages"}) or a bare value.
    if isinstance(initial_result, dict):
        current_output = initial_result.get("output")
        current_messages = initial_result.get("messages", state.messages)
    else:
        current_output = initial_result
        current_messages = state.messages


    refinement_prompt = f"""
    The previous response to "{original_command}" was:
    {current_output}

    Please review and improve this response if needed. Provide a better, more complete answer.
    """

    # Ask the model to critique/improve its own previous answer.
    refined_result = check_llm_command(
        refinement_prompt,
        model=exec_model,
        provider=exec_provider,
        api_url=state.api_url,
        api_key=state.api_key,
        npc=state.npc,
        team=state.team,
        messages=current_messages,
        images=state.attachments,
        stream=stream_final,
        context=info,
    )

    # Fall back to the original output when the refined result is malformed.
    if isinstance(refined_result, dict):
        state.messages = refined_result.get("messages", current_messages)
        return state, refined_result.get("output", current_output)
    else:
        state.messages = current_messages
        return state, refined_result
def check_mode_switch(command: str, state: ShellState):
    """Switch the shell mode when *command* is /cmd, /agent or /chat.

    Returns (switched, state); on a switch the leading slash is stripped
    and stored as state.current_mode.
    """
    mode_commands = ('/cmd', '/agent', '/chat')
    if command in mode_commands:
        state.current_mode = command[1:]
        return True, state
    return False, state
+
2651
+
2652
def _delegate_to_npc(state: ShellState, npc_name: str, command: str, delegation_depth: int = 0) -> Tuple[ShellState, Any]:
    """
    Delegate a command to a specific NPC.

    Specialists just receive the task directly - no mention of delegation.
    Only forenpc can delegate (depth 0), and we catch @mentions in forenpc responses.

    Args:
        state: current shell state (used for team lookup and message sync).
        npc_name: team NPC to hand the task to.
        command: the task text, sent verbatim with no delegation framing.
        delegation_depth: recursion guard; only depth 0 may re-delegate.

    Returns:
        (state, {"output": <text>}) - the output may include appended
        "--- Response from <npc> ---" sections from sub-delegations.
    """
    import re

    MAX_DELEGATION_DEPTH = 1  # Only allow one level of delegation

    if delegation_depth > MAX_DELEGATION_DEPTH:
        return state, {'output': f"⚠ Maximum delegation depth reached."}

    if not state.team or not hasattr(state.team, 'npcs') or npc_name not in state.team.npcs:
        return state, {'output': f"⚠ NPC '{npc_name}' not found in team"}

    target_npc = state.team.npcs[npc_name]
    model_name = target_npc.model if hasattr(target_npc, 'model') else 'unknown'

    try:
        # Build tools from the NPC's jinx catalog
        tools_for_npc = None
        tool_map_for_npc = None
        if hasattr(target_npc, 'jinx_tool_catalog') and target_npc.jinx_tool_catalog:
            tools_for_npc = list(target_npc.jinx_tool_catalog.values())
            # Build tool_map that executes jinxs
            tool_map_for_npc = {}
            for jinx_name, jinx_obj in target_npc.jinxs_dict.items():
                # Factory binds jname/jobj/npc per iteration, avoiding the
                # late-binding closure pitfall inside the loop.
                def make_executor(jname, jobj, npc):
                    # Get expected input names from jinx
                    expected_inputs = []
                    for inp in (jobj.inputs or []):
                        if isinstance(inp, str):
                            expected_inputs.append(inp)
                        elif isinstance(inp, dict):
                            expected_inputs.append(list(inp.keys())[0])

                    def executor(**received):
                        # Map received args to expected jinx inputs
                        mapped = {}
                        if expected_inputs:
                            # If we got unexpected keys, map first value to first expected input
                            received_keys = list(received.keys())
                            for i, expected in enumerate(expected_inputs):
                                if expected in received:
                                    mapped[expected] = received[expected]
                                elif i < len(received_keys):
                                    # Map positionally
                                    mapped[expected] = received[received_keys[i]]
                        else:
                            mapped = received

                        result = npc.execute_jinx(jname, mapped)
                        return result.get('output', str(result))
                    executor.__name__ = jname
                    return executor
                tool_map_for_npc[jinx_name] = make_executor(jinx_name, jinx_obj, target_npc)

        with SpinnerContext(
            f"{npc_name} processing with {model_name}",
            style="dots_pulse"
        ):
            # Just send the command directly - don't pass team context so they don't know about other NPCs
            result = target_npc.get_llm_response(
                command,
                messages=[],  # Fresh messages - don't leak conversation history
                context={},  # No team context - they shouldn't know about teammates
                tools=tools_for_npc,
                tool_map=tool_map_for_npc,
                auto_process_tool_calls=True
            )

        output = result.get("response") or result.get("output", "")
        if result.get("messages"):
            state.messages = result["messages"]

        # Only forenpc/sibiji (depth 0) can have @mentions processed
        if delegation_depth == 0 and output and isinstance(output, str):
            # Look for @npc_name patterns in the response
            at_mention_pattern = r'@(\w+)\s*,?\s*(?:could you|can you|please|would you)?[^.!?\n]*[.!?\n]?'
            matches = re.findall(at_mention_pattern, output, re.IGNORECASE)

            for mentioned_npc in matches:
                mentioned_npc = mentioned_npc.lower()
                if mentioned_npc in state.team.npcs and mentioned_npc != npc_name:
                    # Extract what they're asking the other NPC to do
                    delegation_match = re.search(
                        rf'@{mentioned_npc}\s*,?\s*(.*?)(?:\n|$)',
                        output,
                        re.IGNORECASE
                    )
                    if delegation_match:
                        sub_request = delegation_match.group(1).strip()
                        if sub_request:
                            # Recursive delegation will show its own spinner
                            state, sub_output = _delegate_to_npc(
                                state, mentioned_npc, sub_request, delegation_depth + 1
                            )
                            # Append the sub-NPC's response
                            if isinstance(sub_output, dict):
                                sub_text = sub_output.get('output', '')
                            else:
                                sub_text = str(sub_output)
                            if sub_text:
                                output += f"\n\n--- Response from {mentioned_npc} ---\n{sub_text}"

        return state, {'output': output}

    except KeyboardInterrupt:
        print(colored(f"\n{npc_name} interrupted.", "yellow"))
        return state, {'output': colored("Interrupted.", "red")}
+
2765
+
2766
def execute_command(
    command: str,
    state: ShellState,
    review = False,
    router = None,
    command_history = None,
) -> Tuple[ShellState, Any]:
    """
    Execute a command in npcsh.

    Routes commands based on:
    1. Mode switch commands (/agent, /chat, /cmd, etc.)
    2. @npc delegation syntax ("@sibiji do something")
    3. Agent mode -> pipe-separated pipeline processing
    4. Other modes -> the mode's jinx (team, router, or plain LLM fallback)

    Args:
        command: Raw user input line.
        state: Current shell state; may be mutated (messages, mode).
        review: Passed through to pipeline processing.
        router: Optional jinx router for slash/mode command dispatch.
        command_history: Unused here; kept for call-site compatibility.

    Returns:
        Tuple of (possibly updated ShellState, output). Output may be a
        dict (with 'output' and usage metadata), a string, or a stream.
    """
    if not command.strip():
        return state, ""

    # Mode switch commands (/agent, /chat, ...) short-circuit everything else.
    mode_change, state = check_mode_switch(command, state)
    if mode_change:
        print(colored(f"⚡ Switched to {state.current_mode} mode", "green"))
        return state, 'Mode changed.'

    # Check for @npc delegation syntax: @sibiji do something
    if command.startswith('@') and ' ' in command:
        npc_name = command.split()[0][1:]  # Remove @ prefix
        delegated_command = command[len(npc_name) + 2:]  # Text after "@name "

        # Check if NPC exists in team
        if state.team and hasattr(state.team, 'npcs') and npc_name in state.team.npcs:
            state, output = _delegate_to_npc(state, npc_name, delegated_command)
            return state, output
        else:
            print(colored(f"⚠ NPC '{npc_name}' not found in team", "yellow"))
            # Fall through to normal processing

    original_command_for_embedding = command
    commands = split_by_pipes(command)

    stdin_for_next = None
    current_state = state

    # Agent mode uses pipeline processing (the original behavior);
    # other modes route to their respective jinxs below.
    if state.current_mode == 'agent':
        total_stages = len(commands)

        for i, cmd_segment in enumerate(commands):
            stage_num = i + 1
            stage_emoji = ["🎯", "⚙️", "🔧", "✨", "🚀"][i % 5]

            if total_stages > 1:
                print(colored(
                    f"\n{stage_emoji} Pipeline Stage {stage_num}/{total_stages}",
                    "cyan",
                    attrs=["bold"]
                ))

            is_last_command = (i == len(commands) - 1)
            # The final stage's output is rendered by the caller
            # (process_result), so only stream intermediate stages here.
            stream_this_segment = state.stream_output and not is_last_command

            try:
                current_state, output = process_pipeline_command(
                    cmd_segment.strip(),
                    stdin_for_next,
                    current_state,
                    stream_final=stream_this_segment,
                    review=review,
                    router=router
                )

                # For the last command, preserve the full dict with usage info.
                if is_last_command:
                    if total_stages > 1:
                        print(colored("✅ Pipeline complete", "green"))
                    # FIX: embeddings were previously never stored because the
                    # early return skipped the post-loop bookkeeping (the old
                    # `final_output` variable was never assigned).
                    final_text = output.get('output') if isinstance(output, dict) else output
                    if isinstance(final_text, str):
                        store_command_embeddings(
                            original_command_for_embedding, final_text, current_state
                        )
                    return current_state, output

                # For intermediate stages, extract output text for piping.
                if isinstance(output, dict) and 'output' in output:
                    output = output['output']

                if isinstance(output, str):
                    stdin_for_next = output
                elif stream_this_segment:
                    # Drain the stream so the next stage receives plain text.
                    try:
                        stdin_for_next = print_and_process_stream_with_markdown(
                            output,
                            state.npc.model if isinstance(state.npc, NPC) else state.chat_model,
                            state.npc.provider if isinstance(state.npc, NPC) else state.chat_provider,
                            show=True
                        )
                    except Exception:
                        # FIX: was a bare `except:` that also swallowed
                        # KeyboardInterrupt/SystemExit; now only Exception is
                        # caught and we fall back to str() conversion.
                        stdin_for_next = str(output) if output is not None else None
                elif output is not None:
                    # FIX: previously a non-string, non-streamed output left
                    # stdin_for_next stale from the prior stage; convert it.
                    try:
                        stdin_for_next = str(output)
                    except Exception:
                        stdin_for_next = None
                else:
                    stdin_for_next = None

                if total_stages > 1:
                    print(colored(f" → Passing to stage {stage_num + 1}", "blue"))

            except KeyboardInterrupt:
                print(colored("\nOperation interrupted by user.", "yellow"))
                return current_state, colored("Command interrupted.", "red")
            except RateLimitError:
                # Trim history to the first message plus the last exchange,
                # wait, then retry the whole command from scratch.
                print(colored('Rate Limit Exceeded', 'yellow'))
                messages = current_state.messages[0:1] + current_state.messages[-2:]
                current_state.messages = messages
                import time
                print('Waiting 30s before retry...')
                time.sleep(30)
                return execute_command(command, current_state, review=review, router=router)
            except Exception as pipeline_error:
                import traceback
                traceback.print_exc()
                error_msg = colored(
                    f"❌ Error in stage {stage_num} ('{cmd_segment[:50]}...'): {pipeline_error}",
                    "red"
                )
                return current_state, error_msg

        # Unreachable when `commands` is non-empty (the last stage returns
        # inside the loop); kept as a defensive fallback.
        return current_state, None

    else:
        # For non-agent modes (chat, cmd, or any custom mode), route through
        # the jinx named after the mode.
        mode_jinx_name = state.current_mode

        # Check if the mode jinx exists in the team or the router.
        mode_jinx = None
        if state.team and hasattr(state.team, 'jinxs_dict') and mode_jinx_name in state.team.jinxs_dict:
            mode_jinx = state.team.jinxs_dict[mode_jinx_name]
        elif router and mode_jinx_name in router.jinx_routes:
            # Execute via router
            try:
                result = router.execute(f"/{mode_jinx_name} {command}",
                                        state=state, npc=state.npc, messages=state.messages)
                if isinstance(result, dict):
                    state.messages = result.get('messages', state.messages)
                    return state, result.get('output', '')
                return state, str(result) if result else ''
            except KeyboardInterrupt:
                print(colored(f"\n{mode_jinx_name} interrupted.", "yellow"))
                return state, colored("Interrupted.", "red")

        if mode_jinx:
            # Execute the mode jinx directly.
            try:
                result = mode_jinx.execute(
                    input_values={'query': command, 'stream': state.stream_output},
                    npc=state.npc,
                    messages=state.messages,
                    extra_globals={'state': state}
                )
                if isinstance(result, dict):
                    state.messages = result.get('messages', state.messages)
                    return state, result.get('output', '')
                return state, str(result) if result else ''
            except KeyboardInterrupt:
                print(colored(f"\n{mode_jinx_name} interrupted.", "yellow"))
                return state, colored("Interrupted.", "red")

        # Fallback: no jinx found for this mode, use a basic LLM response.
        npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
        npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
        active_model = npc_model or state.chat_model
        active_provider = npc_provider or state.chat_provider

        with SpinnerContext(f"Processing with {active_model}", style="brain"):
            try:
                response = get_llm_response(
                    command,
                    model=active_model,
                    provider=active_provider,
                    npc=state.npc,
                    stream=state.stream_output,
                    messages=state.messages
                )
            except KeyboardInterrupt:
                print(colored("\nInterrupted.", "yellow"))
                return state, colored("Interrupted.", "red")

        state.messages = response.get('messages', state.messages)
        return state, response.get('response', '')
2960
+
2961
+
2962
def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
    """Initialize the npcsh environment and return its core objects.

    Sets up config, the history database, readline persistence, and
    resolves which NPC team directory to use (project-local vs. global),
    interactively scaffolding a new project team when the user opts in.

    Returns:
        (command_history, team, forenpc_obj) where forenpc_obj may be
        None if the team has no forenpc configured.
    """
    setup_npcsh_config()

    # The history DB path is overridable via env; ensure its folder exists.
    db_path = os.getenv("NPCSH_DB_PATH", HISTORY_DB_DEFAULT_PATH)
    db_path = os.path.expanduser(db_path)
    os.makedirs(os.path.dirname(db_path), exist_ok=True)
    command_history = CommandHistory(db_path)

    if not is_npcsh_initialized():
        print("Initializing NPCSH...")
        initialize_base_npcs_if_needed(db_path)
        print("NPCSH initialization complete. Restart or source ~/.npcshrc.")

    # Best-effort readline/history setup; failures (e.g. readline missing
    # on this platform) are deliberately ignored.
    try:
        history_file = setup_readline()  # NOTE(review): return value unused
        atexit.register(save_readline_history)
        atexit.register(command_history.close)
    except:
        pass

    project_team_path = os.path.abspath(PROJECT_NPC_TEAM_PATH)
    global_team_path = os.path.expanduser(DEFAULT_NPC_TEAM_PATH)

    team_dir = None
    default_forenpc_name = None
    # NOTE(review): duplicate assignment — global_team_path was set above.
    global_team_path = os.path.expanduser(DEFAULT_NPC_TEAM_PATH)
    if not os.path.exists(global_team_path):
        print(f"Global NPC team directory doesn't exist. Initializing...")
        initialize_base_npcs_if_needed(db_path)
    if os.path.exists(project_team_path):
        # A project-local npc_team folder takes precedence over the global one.
        team_dir = project_team_path
        default_forenpc_name = "forenpc"
    else:
        if not os.path.exists('.npcsh_global'):
            # No local team and no opt-out marker: offer to scaffold one here.
            resp = input(f"No npc_team found in {os.getcwd()}. Create a new team here? [Y/n]: ").strip().lower()
            if resp in ("", "y", "yes"):
                team_dir = project_team_path
                os.makedirs(team_dir, exist_ok=True)
                default_forenpc_name = "forenpc"
                forenpc_directive = input(
                    f"Enter a primary directive for {default_forenpc_name} (default: 'You are the forenpc of the team...'): "
                ).strip() or "You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests."
                forenpc_model = input("Enter a model for your forenpc (default: llama3.2): ").strip() or "llama3.2"
                forenpc_provider = input("Enter a provider for your forenpc (default: ollama): ").strip() or "ollama"

                # Write the forenpc definition file for the new team.
                with open(os.path.join(team_dir, f"{default_forenpc_name}.npc"), "w") as f:
                    yaml.dump({
                        "name": default_forenpc_name, "primary_directive": forenpc_directive,
                        "model": forenpc_model, "provider": forenpc_provider
                    }, f)

                ctx_path = os.path.join(team_dir, "team.ctx")
                folder_context = input("Enter a short description for this project/team (optional): ").strip()
                team_ctx_data = {
                    "forenpc": default_forenpc_name,
                    "model": forenpc_model,
                    "provider": forenpc_provider,
                    "context": folder_context if folder_context else None
                }
                # Jinxs are either shared from the global folder or copied
                # into the project for local editing.
                use_jinxs = input("Use global jinxs folder (g) or copy to this project (c)? [g/c, default: g]: ").strip().lower()
                if use_jinxs == "c":
                    global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
                    if os.path.exists(global_jinxs_dir):
                        # Create the 'jinxs' subfolder within the new team's directory
                        destination_jinxs_dir = os.path.join(team_dir, "jinxs")
                        os.makedirs(destination_jinxs_dir, exist_ok=True)
                        shutil.copytree(global_jinxs_dir, destination_jinxs_dir, dirs_exist_ok=True)
                else:
                    team_ctx_data["use_global_jinxs"] = True
                with open(ctx_path, "w") as f:
                    yaml.dump(team_ctx_data, f)
            else:
                # User declined: remember the choice via an empty marker file
                # and fall back to the global team.
                render_markdown('From now on, npcsh will assume you will use the global team when activating from this folder. \n If you change your mind and want to initialize a team, use /init from within npcsh, `npc init` or `rm .npcsh_global` from the current working directory.')
                with open(".npcsh_global", "w") as f:
                    pass
                team_dir = global_team_path
                default_forenpc_name = "sibiji"
        else:
            # Marker file present: use the global team without prompting.
            team_dir = global_team_path
            default_forenpc_name = "sibiji"

    # Defensive fallback: always end up with some team directory.
    if team_dir is None:
        team_dir = global_team_path
        default_forenpc_name = "sibiji"

    if not os.path.exists(team_dir):
        print(f"Creating team directory: {team_dir}")
        os.makedirs(team_dir, exist_ok=True)

    # Load the team context file (team.ctx) if one exists.
    team_ctx = {}
    team_ctx_path = get_team_ctx_path(team_dir)
    if team_ctx_path:
        try:
            with open(team_ctx_path, "r") as f:
                team_ctx = yaml.safe_load(f) or {}
        except Exception as e:
            print(f"Warning: Could not load context file {os.path.basename(team_ctx_path)}: {e}")

    forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
    if forenpc_name is None:
        forenpc_name = "sibiji"

    print('forenpc_name:', forenpc_name)

    forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
    print('forenpc_path:', forenpc_path)

    team = Team(team_path=team_dir, db_conn=command_history.engine)

    forenpc_obj = team.forenpc if hasattr(team, 'forenpc') and team.forenpc else None

    # Backfill model/provider defaults from the module-level initial_state
    # for any NPC that does not specify its own.
    for npc_name, npc_obj in team.npcs.items():
        if not npc_obj.model:
            npc_obj.model = initial_state.chat_model
        if not npc_obj.provider:
            npc_obj.provider = initial_state.chat_provider

    if team.forenpc and isinstance(team.forenpc, NPC):
        if not team.forenpc.model:
            team.forenpc.model = initial_state.chat_model
        if not team.forenpc.provider:
            team.forenpc.provider = initial_state.chat_provider

    # Team display name precedence: explicit ctx name > directory basename
    # (unless it's the generic 'npc_team') > "npcsh".
    team_name_from_ctx = team_ctx.get("name")
    if team_name_from_ctx:
        team.name = team_name_from_ctx
    elif team_dir:
        normalized_dir = os.path.normpath(team_dir)
        basename = os.path.basename(normalized_dir)
        if basename and basename != 'npc_team':
            team.name = basename
        else:
            team.name = "npcsh"
    else:
        team.name = "npcsh"

    return command_history, team, forenpc_obj
3099
def initialize_router_with_jinxs(team, router):
    """Register jinx routes on the router.

    Loads the shared global jinxs directory first, then layers on the
    team's own jinxs folder when the team has one on disk.

    Returns the same router instance for chaining.
    """
    shared_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
    router.load_jinx_routes(shared_dir)

    if team and team.team_path:
        local_dir = os.path.join(team.team_path, "jinxs")
        if os.path.exists(local_dir):
            router.load_jinx_routes(local_dir)

    return router
3110
+
3111
+
3112
def process_memory_approvals(command_history, memory_queue):
    """Drain a small batch of pending memories.

    High-confidence items are auto-approved by the trainer; the rest go
    to the human review UI. If the trainer fails for any reason, the
    entire batch falls back to human review.
    """
    batch = memory_queue.get_approval_batch(max_items=5)

    if not batch:
        return

    print(f"\n🧠 Processing {len(batch)} memories...")

    try:
        trainer = MemoryTrainer()

        # Partition the batch by whether the trainer is confident enough.
        auto_decided = []
        for_humans = []
        for item in batch:
            verdict = trainer.auto_approve_memory(
                item['content'],
                item['context'],
                confidence_threshold=0.85
            )
            if verdict['auto_processed']:
                auto_decided.append((item, verdict))
            else:
                for_humans.append(item)

        # Persist the automatic decisions immediately.
        for item, verdict in auto_decided:
            command_history.update_memory_status(
                item['memory_id'],
                verdict['action']
            )
            print(f" Auto-{verdict['action']}: {item['content'][:50]}... (confidence: {verdict['confidence']:.2f})")

        # Anything the trainer punted on goes to a human.
        if for_humans:
            for decision in memory_approval_ui(for_humans):
                command_history.update_memory_status(
                    decision['memory_id'],
                    decision['decision'],
                    decision.get('final_memory')
                )

    except Exception as e:
        print(f"Auto-approval failed: {e}")
        # Trainer unavailable or broken: route the whole batch to a human.
        for decision in memory_approval_ui(batch):
            command_history.update_memory_status(
                decision['memory_id'],
                decision['decision'],
                decision.get('final_memory')
            )
3164
def process_result(
    user_input: str,
    result_state: ShellState,
    output: Any,
    command_history: CommandHistory,
):
    """Render and persist the result of one shell turn.

    Saves the user message, renders the command output (string, dict, or
    stream), saves the assistant reply, and every 10th turn runs memory
    extraction, optional knowledge-graph evolution, and team-context
    suggestion prompts.

    Side effects: writes to the conversation DB, prints to stdout, mutates
    result_state (messages, attachments, token counters, turn_count), and
    may prompt the user interactively.
    """
    team_name = result_state.team.name if result_state.team else "npcsh"
    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "npcsh"

    # Use the active NPC if one is set; otherwise a throwaway default NPC
    # carrying the state's chat model/provider.
    active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
        name="default",
        model=result_state.chat_model,
        provider=result_state.chat_provider,
        db_conn=command_history.engine
    )

    # Persist the user's side of the turn (with any attachments), then
    # clear attachments so they are not re-sent on the next turn.
    save_conversation_message(
        command_history,
        result_state.conversation_id,
        "user",
        user_input,
        wd=result_state.current_path,
        model=active_npc.model,
        provider=active_npc.provider,
        npc=npc_name,
        team=team_name,
        attachments=result_state.attachments,
    )
    result_state.attachments = None

    final_output_str = None

    # FIX: Handle dict output properly — dicts may carry the payload plus
    # model/provider/usage metadata.
    if isinstance(output, dict):
        output_content = output.get('output')
        model_for_stream = output.get('model', active_npc.model)
        provider_for_stream = output.get('provider', active_npc.provider)

        # Accumulate session token usage and dollar cost when reported.
        if 'usage' in output:
            usage = output['usage']
            result_state.session_input_tokens += usage.get('input_tokens', 0)
            result_state.session_output_tokens += usage.get('output_tokens', 0)
            # Calculate cost
            from npcpy.gen.response import calculate_cost
            result_state.session_cost_usd += calculate_cost(
                model_for_stream,
                usage.get('input_tokens', 0),
                usage.get('output_tokens', 0)
            )

        # If output_content is still a dict or None, normalize to a string.
        if isinstance(output_content, dict):
            output_content = str(output_content)
        elif output_content is None:
            output_content = "Command completed with no output"
    else:
        output_content = output
        model_for_stream = active_npc.model
        provider_for_stream = active_npc.provider

    print('\n')
    if user_input == '/help':
        # Help text is always rendered as markdown, never streamed.
        if isinstance(output_content, str):
            render_markdown(output_content)
        else:
            render_markdown(str(output_content))
    elif result_state.stream_output:
        # FIX: Only stream if output_content is a generator, not a string.
        if isinstance(output_content, str):
            final_output_str = output_content
            render_markdown(final_output_str)
        else:
            final_output_str = print_and_process_stream_with_markdown(
                output_content,
                model_for_stream,
                provider_for_stream,
                show=True
            )
    elif output_content is not None:
        final_output_str = str(output_content)
        render_markdown(final_output_str)

    # Log message state after processing.
    logger = logging.getLogger("npcsh.state")
    logger.debug(f"[process_result] Before final append: {len(result_state.messages)} messages, final_output_str={'set' if final_output_str else 'None'}")

    if final_output_str:
        # Append the assistant reply to in-memory history only when the
        # last message is not already an assistant message (avoids
        # duplicates when the pipeline already appended one).
        if result_state.messages:
            if not result_state.messages or result_state.messages[-1].get("role") != "assistant":
                result_state.messages.append({
                    "role": "assistant",
                    "content": final_output_str
                })
        logger.debug(f"[process_result] Appended assistant message, now {len(result_state.messages)} messages")

        save_conversation_message(
            command_history,
            result_state.conversation_id,
            "assistant",
            final_output_str,
            wd=result_state.current_path,
            model=active_npc.model,
            provider=active_npc.provider,
            npc=npc_name,
            team=team_name,
        )

    result_state.turn_count += 1

    # Periodic maintenance, every 10th turn: memory extraction/review,
    # optional KG evolution, and a team-context improvement suggestion.
    # NOTE(review): nesting of the KG and suggestion sections under this
    # gate is inferred (approved_facts/engine are defined here) — confirm
    # against the original indentation.
    if result_state.turn_count % 10 == 0:
        approved_facts = []

        conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
        engine = command_history.engine

        # Prior approval examples steer the fact extractor.
        memory_examples = command_history.get_memory_examples_for_context(
            npc=npc_name,
            team=team_name,
            directory_path=result_state.current_path
        )

        memory_context = format_memory_context(memory_examples)

        try:
            facts = get_facts(
                conversation_turn_text,
                model=active_npc.model,
                provider=active_npc.provider,
                npc=active_npc,
                context=memory_context + 'Memories should be fully self contained. They should not use vague pronouns or words like that or this or it. Do not generate more than 1-2 memories at a time.'
            )

            if facts:
                num_memories = len(facts)
                print(colored(
                    f"\nThere are {num_memories} potential memories. Do you want to review them now?",
                    "cyan"
                ))
                review_choice = input("[y/N]: ").strip().lower()

                if review_choice == 'y':
                    # Wrap the extracted facts for the approval UI, keeping
                    # the raw fact alongside for later DB persistence.
                    memories_for_approval = []
                    for i, fact in enumerate(facts):
                        memories_for_approval.append({
                            "memory_id": f"temp_{i}",
                            "content": fact['statement'],
                            "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
                            "npc": npc_name,
                            "fact_data": fact
                        })

                    approvals = memory_approval_ui(memories_for_approval)

                    for approval in approvals:
                        fact_data = next(
                            m['fact_data'] for m in memories_for_approval
                            if m['memory_id'] == approval['memory_id']
                        )

                        command_history.add_memory_to_database(
                            message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
                            conversation_id=result_state.conversation_id,
                            npc=npc_name,
                            team=team_name,
                            directory_path=result_state.current_path,
                            initial_memory=fact_data['statement'],
                            status=approval['decision'],
                            model=active_npc.model,
                            provider=active_npc.provider,
                            final_memory=approval.get('final_memory')
                        )

                        # Only human-blessed memories feed the KG below.
                        if approval['decision'] in ['human-approved', 'human-edited']:
                            approved_fact = {
                                'statement': approval.get('final_memory') or fact_data['statement'],
                                'source_text': fact_data.get('source_text', ''),
                                'type': fact_data.get('type', 'explicit'),
                                'generation': 0
                            }
                            approved_facts.append(approved_fact)
                else:
                    # User declined review: record all facts as skipped.
                    for i, fact in enumerate(facts):
                        command_history.add_memory_to_database(
                            message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
                            conversation_id=result_state.conversation_id,
                            npc=npc_name,
                            team=team_name,
                            directory_path=result_state.current_path,
                            initial_memory=fact['statement'],
                            status='skipped',
                            model=active_npc.model,
                            provider=active_npc.provider,
                            final_memory=None
                        )

                    print(colored(
                        f"Marked {num_memories} memories as skipped.",
                        "yellow"
                    ))

        except Exception as e:
            print(colored(f"Memory generation error: {e}", "yellow"))

        # Fold newly approved facts into this NPC's knowledge graph.
        if result_state.build_kg and approved_facts:
            try:
                if not should_skip_kg_processing(user_input, final_output_str):
                    npc_kg = load_kg_from_db(
                        engine,
                        team_name,
                        npc_name,
                        result_state.current_path
                    )
                    evolved_npc_kg, _ = kg_evolve_incremental(
                        existing_kg=npc_kg,
                        new_facts=approved_facts,
                        model=active_npc.model,
                        provider=active_npc.provider,
                        npc=active_npc,
                        get_concepts=True,
                        link_concepts_facts=False,
                        link_concepts_concepts=False,
                        link_facts_facts=False,
                    )
                    save_kg_to_db(
                        engine,
                        evolved_npc_kg,
                        team_name,
                        npc_name,
                        result_state.current_path
                    )
            except Exception as e:
                print(colored(
                    f"Error during real-time KG evolution: {e}",
                    "red"
                ))

        print(colored(
            "\nChecking for potential team improvements...",
            "cyan"
        ))
        try:
            # Characterize the recent conversation, then ask the LLM for a
            # one-sentence team-context amendment the user can accept/edit.
            summary = breathe(
                messages=result_state.messages[-20:],
                npc=active_npc
            )
            characterization = summary.get('output')

            if characterization and result_state.team:
                team_ctx_path = get_team_ctx_path(
                    result_state.team.team_path
                )
                if not team_ctx_path:
                    team_ctx_path = os.path.join(
                        result_state.team.team_path,
                        "team.ctx"
                    )

                ctx_data = {}
                if os.path.exists(team_ctx_path):
                    with open(team_ctx_path, 'r') as f:
                        ctx_data = yaml.safe_load(f) or {}

                current_context = ctx_data.get('context', '')

                prompt = f"""Based on this characterization: {characterization},
                suggest changes (additions, deletions, edits) to the team's context.
                Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.

                Current Context: "{current_context}".

                Respond with JSON: {{"suggestion": "Your sentence."}}"""

                response = get_llm_response(
                    prompt,
                    npc=active_npc,
                    format="json"
                )
                suggestion = response.get("response", {}).get("suggestion")

                if suggestion:
                    new_context = (
                        current_context + " " + suggestion
                    ).strip()
                    print(colored(
                        f"{npc_name} suggests updating team context:",
                        "yellow"
                    ))
                    print(
                        f" - OLD: {current_context}\n + NEW: {new_context}"
                    )

                    choice = input(
                        "Apply? [y/N/e(dit)]: "
                    ).strip().lower()

                    if choice == 'y':
                        ctx_data['context'] = new_context
                        with open(team_ctx_path, 'w') as f:
                            yaml.dump(ctx_data, f)
                        print(colored("Team context updated.", "green"))
                    elif choice == 'e':
                        edited_context = input(
                            f"Edit context [{new_context}]: "
                        ).strip()
                        if edited_context:
                            ctx_data['context'] = edited_context
                        else:
                            ctx_data['context'] = new_context
                        with open(team_ctx_path, 'w') as f:
                            yaml.dump(ctx_data, f)
                        print(colored(
                            "Team context updated with edits.",
                            "green"
                        ))
                    else:
                        print("Suggestion declined.")
        except Exception as e:
            import traceback
            print(colored(
                f"Could not generate team suggestions: {e}",
                "yellow"
            ))
            traceback.print_exc()
3489
+
3490
# Module-level default ShellState, seeded entirely from the NPCSH_* config
# constants. setup_shell() reads this at call time (chat_model/provider
# fallbacks for team NPCs), so it must exist at import.
initial_state = ShellState(
    conversation_id=start_new_conversation(),  # fresh conversation per process
    stream_output=NPCSH_STREAM_OUTPUT,
    current_mode=NPCSH_DEFAULT_MODE,
    chat_model=NPCSH_CHAT_MODEL,
    chat_provider=NPCSH_CHAT_PROVIDER,
    vision_model=NPCSH_VISION_MODEL,
    vision_provider=NPCSH_VISION_PROVIDER,
    embedding_model=NPCSH_EMBEDDING_MODEL,
    embedding_provider=NPCSH_EMBEDDING_PROVIDER,
    reasoning_model=NPCSH_REASONING_MODEL,
    reasoning_provider=NPCSH_REASONING_PROVIDER,
    image_gen_model=NPCSH_IMAGE_GEN_MODEL,
    image_gen_provider=NPCSH_IMAGE_GEN_PROVIDER,
    video_gen_model=NPCSH_VIDEO_GEN_MODEL,
    video_gen_provider=NPCSH_VIDEO_GEN_PROVIDER,
    build_kg=NPCSH_BUILD_KG,
    api_url=NPCSH_API_URL,
)