npcsh-1.1.12-py3-none-any.whl → npcsh-1.1.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. npcsh/_state.py +700 -377
  2. npcsh/alicanto.py +54 -1153
  3. npcsh/completion.py +206 -0
  4. npcsh/config.py +163 -0
  5. npcsh/corca.py +35 -1462
  6. npcsh/execution.py +185 -0
  7. npcsh/guac.py +31 -1986
  8. npcsh/npc_team/jinxs/code/sh.jinx +11 -15
  9. npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
  10. npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
  11. npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
  12. npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
  13. npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
  14. npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
  15. npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
  16. npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
  17. npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
  18. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  19. npcsh/npc_team/jinxs/utils/search.jinx +3 -3
  20. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  21. npcsh/npcsh.py +76 -20
  22. npcsh/parsing.py +118 -0
  23. npcsh/plonk.py +41 -329
  24. npcsh/pti.py +41 -201
  25. npcsh/spool.py +34 -239
  26. npcsh/ui.py +199 -0
  27. npcsh/wander.py +54 -542
  28. npcsh/yap.py +38 -570
  29. npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  30. npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
  31. npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
  32. npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
  33. npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
  34. npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
  35. npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
  36. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
  37. npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
  38. npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
  39. npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
  40. npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
  41. npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
  42. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
  43. npcsh-1.1.14.dist-info/RECORD +135 -0
  44. npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
  45. npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
  46. npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
  47. npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
  48. npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
  49. npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
  50. npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
  51. npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
  52. npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
  53. npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
  54. npcsh-1.1.12.dist-info/RECORD +0 -126
  55. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
  56. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  57. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
  58. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
  59. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
  60. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
  61. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
  62. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
  63. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
  64. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
  65. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
  66. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
  67. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
  68. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
  69. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
  70. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
  71. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
  72. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  73. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  74. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
  75. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
  76. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  77. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  78. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
  79. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
  80. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
  81. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  82. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  83. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
  84. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
  85. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
  86. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
  87. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
  88. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  89. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
  90. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
  91. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
  92. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
  93. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
  94. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
  95. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
  96. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
  97. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
  98. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
  99. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
npcsh/guac.py CHANGED
@@ -1,2001 +1,46 @@
1
- from datetime import datetime
2
- import json
3
- import numpy as np
4
- import os
5
- import pandas as pd
6
- import sys
7
- import argparse
8
- import importlib.metadata
9
- import queue
10
- plot_queue = queue.Queue()
11
-
12
-
13
- import matplotlib.pyplot as plt
14
- from chroptiks.plotting_utils import *
15
-
16
- import logging
17
- import shlex
18
- import yaml
19
- import re
20
- from pathlib import Path
21
- from typing import Optional, List, Dict, Any, Tuple
22
- import traceback
23
-
24
- try:
25
- from termcolor import colored
26
- except ImportError:
27
- pass
28
-
29
- import sys
30
-
31
- from npcpy.memory.command_history import CommandHistory, start_new_conversation
32
- from npcpy.npc_compiler import Team, NPC
33
- from npcpy.llm_funcs import get_llm_response
34
- from npcpy.npc_sysenv import render_markdown,print_and_process_stream
35
- from npcpy.data.load import load_file_contents
36
-
37
- from npcsh._state import (
38
- ShellState,
39
- execute_command,
40
- make_completer,
41
- process_result,
42
- readline_safe_prompt,
43
- setup_shell,
44
- get_multiline_input,
45
- orange,
46
- get_team_ctx_path,
47
- )
48
- import threading
49
- import time
50
- import ctypes
51
- import ctypes.util
52
-
53
- try:
54
- import readline
55
- except ImportError:
56
- print('no readline support, some features may not work as desired.')
57
-
58
- try:
59
- VERSION = importlib.metadata.version("npcsh")
60
- except importlib.metadata.PackageNotFoundError:
61
- VERSION = "unknown"
62
-
63
- GUAC_REFRESH_PERIOD = os.environ.get('GUAC_REFRESH_PERIOD', 100)
64
- READLINE_HISTORY_FILE = os.path.expanduser("~/.guac_readline_history")
65
-
66
- EXTENSION_MAP = {
67
- "PNG": "images", "JPG": "images", "JPEG": "images", "GIF": "images", "SVG": "images",
68
- "MP4": "videos", "AVI": "videos", "MOV": "videos", "WMV": "videos", "MPG": "videos", "MPEG": "videos",
69
- "DOC": "documents", "DOCX": "documents", "PDF": "documents", "PPT": "documents", "PPTX": "documents",
70
- "XLS": "documents", "XLSX": "documents", "TXT": "documents", "CSV": "documents",
71
- "ZIP": "archives", "RAR": "archives", "7Z": "archives", "TAR": "archives", "GZ": "archives", "BZ2": "archives",
72
- "ISO": "archives", "NPY": "data", "NPZ": "data", "H5": "data", "HDF5": "data", "PKL": "data", "JOBLIB": "data"
73
- }
74
-
75
- _guac_monitor_thread = None
76
- _guac_monitor_stop_event = None
77
-
78
- def _clear_readline_buffer():
79
- """Clear the current readline input buffer and redisplay prompt."""
80
- try:
81
-
82
- if hasattr(readline, "replace_line") and hasattr(readline, "redisplay"):
83
- readline.replace_line("", 0)
84
- readline.redisplay()
85
- return True
86
- except Exception:
87
- pass
88
-
89
-
90
- try:
91
- libname = ctypes.util.find_library("readline") or ctypes.util.find_library("edit") or "readline"
92
- rl = ctypes.CDLL(libname)
93
-
94
- rl.rl_replace_line.argtypes = [ctypes.c_char_p, ctypes.c_int]
95
- rl.rl_redisplay.argtypes = []
96
- rl.rl_replace_line(b"", 0)
97
- rl.rl_redisplay()
98
- return True
99
- except Exception:
100
- return False
101
-
102
- def _file_drop_monitor(npc_team_dir: Path, state: ShellState, locals_dict: Dict[str, Any], poll_interval: float = 0.2):
103
- processed_bufs = set()
104
- stop_event = _guac_monitor_stop_event
105
- while stop_event is None or not stop_event.is_set():
106
- try:
107
- buf = ""
108
- try:
109
- buf = readline.get_line_buffer()
110
- except Exception:
111
- buf = ""
112
- if not buf:
113
- time.sleep(poll_interval)
114
- continue
115
-
116
- candidate = buf.strip()
117
- if (candidate.startswith("'") and candidate.endswith("'")) or (candidate.startswith('"') and candidate.endswith('"')):
118
- inner = candidate[1:-1]
119
- else:
120
- inner = candidate
121
-
122
- if " " not in inner and Path(inner.replace('~', str(Path.home()))).expanduser().exists() and Path(inner.replace('~', str(Path.home()))).expanduser().is_file():
123
- if buf in processed_bufs:
124
- time.sleep(poll_interval)
125
- continue
126
- processed_bufs.add(buf)
127
-
128
- try:
129
- modified_input, processed_files = _handle_file_drop(buf, npc_team_dir)
130
- if processed_files:
131
- target_path = processed_files[0]
132
- loading_code = _generate_file_analysis_code(inner, target_path)
133
-
134
- plot_queue.put(('execute_code', loading_code, state, locals_dict))
135
- print("\n[guac] Detected file drop — queued for processing...")
136
- _clear_readline_buffer()
137
- except Exception as e:
138
- print(f"[guac][ERROR] file drop processing failed: {e}")
139
- except Exception:
140
- pass
141
- time.sleep(poll_interval)
142
-
143
-
144
-
145
- def is_python_code(text: str) -> bool:
146
- text = text.strip()
147
- if not text:
148
- return False
149
-
150
- try:
151
- compile(text, "<input>", "exec")
152
- return True
153
- except SyntaxError:
154
- return False
155
- except (OverflowError, ValueError):
156
- return False
157
- except IndentationError:
158
- return True
159
- def execute_python_code(code_str: str, state: ShellState, locals_dict: Dict[str, Any]) -> Tuple[ShellState, Any]:
160
- import io
161
- output_capture = io.StringIO()
162
- original_stdout = sys.stdout
163
- original_stderr = sys.stderr
164
- final_output_str = None
165
- is_expression = False
166
-
167
- try:
168
- sys.stdout = output_capture
169
- sys.stderr = output_capture
170
-
171
- if '\n' not in code_str.strip() and not re.match(r"^\s*(def|class|for|while|if|try|with|import|from|@)", code_str.strip()):
172
- try:
173
- compiled_expr = compile(code_str, "<input>", "eval")
174
- exec_result = eval(compiled_expr, locals_dict)
175
- if exec_result is not None and not output_capture.getvalue().strip():
176
- print(repr(exec_result), file=sys.stdout)
177
- is_expression = True
178
- except SyntaxError:
179
- is_expression = False
180
- except Exception:
181
- is_expression = False
182
- raise
183
-
184
- if not is_expression:
185
- compiled_code = compile(code_str, "<input>", "exec")
186
- exec(compiled_code, locals_dict)
187
-
188
- except SyntaxError:
189
- exc_type, exc_value, _ = sys.exc_info()
190
- error_lines = traceback.format_exception_only(exc_type, exc_value)
191
- adjusted_error_lines = [line.replace('File "<input>"', 'Syntax error in input') for line in error_lines]
192
- print("".join(adjusted_error_lines), file=output_capture, end="")
193
- except Exception:
194
- exc_type, exc_value, exc_tb = sys.exc_info()
195
- traceback.print_exception(exc_type, exc_value, exc_tb, file=output_capture)
196
- finally:
197
- sys.stdout = original_stdout
198
- sys.stderr = original_stderr
199
- final_output_str = output_capture.getvalue().strip()
200
- output_capture.close()
201
-
202
-
203
- _capture_plot_state(state.conversation_id, state.command_history.db_path, Path.cwd() / "npc_team")
204
-
205
- if state.command_history:
206
- state.command_history.add_command(code_str, [final_output_str if final_output_str else ""], "", state.current_path)
207
- return state, final_output_str
208
-
209
-
210
- def _generate_file_analysis_code(file_path: str, target_path: str) -> str:
211
- """Generate Python code to load and analyze the dropped file"""
212
- ext = Path(file_path).suffix.lower()
213
- file_var_name = f"file_{datetime.now().strftime('%H%M%S')}"
214
-
215
- capture_code = f"""
216
-
217
- _capture_file_state('{state.conversation_id}', '{state.command_history.db_path}', r'{target_path}', '''AUTO_GENERATED_CODE''', locals())
218
- """
219
-
220
- if ext == '.pdf':
221
- return f"""
222
-
223
- import PyPDF2
224
- import pandas as pd
225
- try:
226
- with open(r'{target_path}', 'rb') as file:
227
- pdf_reader = PyPDF2.PdfReader(file)
228
- {file_var_name}_text = ""
229
- for page_num in range(len(pdf_reader.pages)):
230
- {file_var_name}_text += pdf_reader.pages[page_num].extract_text()
231
-
232
- print(f"📄 Loaded PDF: {{len(pdf_reader.pages)}} pages, {{len({file_var_name}_text)}} characters")
233
- print("First 500 characters:")
234
- print({file_var_name}_text[:500])
235
- print("\\n--- PDF loaded as '{file_var_name}_text' variable ---")
236
- {capture_code}
237
- except Exception as e:
238
- print(f"Error loading PDF: {{e}}")
239
- {file_var_name}_text = None
240
- """
241
-
242
- elif ext in ['.csv']:
243
- return f"""
244
-
245
- import pandas as pd
246
- try:
247
- {file_var_name}_df = pd.read_csv(r'{target_path}')
248
- print(f"📊 Loaded CSV: {{len({file_var_name}_df)}} rows, {{len({file_var_name}_df.columns)}} columns")
249
- print("Columns:", list({file_var_name}_df.columns))
250
- print("\\nFirst 5 rows:")
251
- print({file_var_name}_df.head())
252
- print(f"\\n--- CSV loaded as '{file_var_name}_df' variable ---")
253
- {capture_code}
254
- except Exception as e:
255
- print(f"Error loading CSV: {{e}}")
256
- {file_var_name}_df = None
257
- """
258
-
259
- elif ext in ['.xlsx', '.xls']:
260
- return f"""
261
-
262
- import pandas as pd
263
- try:
264
- {file_var_name}_df = pd.read_excel(r'{target_path}')
265
- print(f"📊 Loaded Excel: {{len({file_var_name}_df)}} rows, {{len({file_var_name}_df.columns)}} columns")
266
- print("Columns:", list({file_var_name}_df.columns))
267
- print("\\nFirst 5 rows:")
268
- print({file_var_name}_df.head())
269
- print(f"\\n--- Excel loaded as '{file_var_name}_df' variable ---")
270
- {capture_code}
271
- except Exception as e:
272
- print(f"Error loading Excel: {{e}}")
273
- {file_var_name}_df = None
274
- """
275
-
276
- elif ext in ['.json']:
277
- return f"""
278
-
279
- import json
280
- try:
281
- with open(r'{target_path}', 'r') as file:
282
- {file_var_name}_data = json.load(file)
283
- print(f"📄 Loaded JSON: {{type({file_var_name}_data)}}")
284
- if isinstance({file_var_name}_data, dict):
285
- print("Keys:", list({file_var_name}_data.keys()))
286
- elif isinstance({file_var_name}_data, list):
287
- print(f"List with {{len({file_var_name}_data)}} items")
288
- print(f"\\n--- JSON loaded as '{file_var_name}_data' variable ---")
289
- {capture_code}
290
- except Exception as e:
291
- print(f"Error loading JSON: {{e}}")
292
- {file_var_name}_data = None
293
- """
294
-
295
- elif ext in ['.txt', '.md']:
296
- return f"""
297
-
298
- try:
299
- with open(r'{target_path}', 'r', encoding='utf-8') as file:
300
- {file_var_name}_text = file.read()
301
- print(f"📄 Loaded text file: {{len({file_var_name}_text)}} characters")
302
- print("First 500 characters:")
303
- print({file_var_name}_text[:500])
304
- print(f"\\n--- Text loaded as '{file_var_name}_text' variable ---")
305
- {capture_code}
306
- except Exception as e:
307
- print(f"Error loading text file: {{e}}")
308
- {file_var_name}_text = None
309
- """
310
-
311
- elif ext in ['.png', '.jpg', '.jpeg', '.gif']:
312
- return f"""
313
-
314
- import matplotlib.pyplot as plt
315
- from PIL import Image
316
- import numpy as np
317
- try:
318
- {file_var_name}_img = Image.open(r'{target_path}')
319
- {file_var_name}_array = np.array({file_var_name}_img)
320
- print(f"🖼️ Loaded image: {{({file_var_name}_img.size)}} pixels, mode: {{{file_var_name}_img.mode}}")
321
- print(f"Array shape: {{{file_var_name}_array.shape}}")
322
-
323
- plt.figure(figsize=(8, 6))
324
- plt.imshow({file_var_name}_img)
325
- plt.axis('off')
326
- plt.title('Loaded Image: {Path(file_path).name}')
327
- plt.show()
328
- print(f"\\n--- Image loaded as '{file_var_name}_img' and '{file_var_name}_array' variables ---")
329
- {capture_code}
330
- except Exception as e:
331
- print(f"Error loading image: {{e}}")
332
- {file_var_name}_img = None
333
- {file_var_name}_array = None
334
- """
335
-
336
- else:
337
- return f"""
338
-
339
- try:
340
- with open(r'{target_path}', 'rb') as file:
341
- {file_var_name}_data = file.read()
342
- print(f"📄 Loaded binary file: {{len({file_var_name}_data)}} bytes")
343
- print(f"File extension: {ext}")
344
- print(f"\\n--- Binary data loaded as '{file_var_name}_data' variable ---")
345
- {capture_code}
346
- except Exception as e:
347
- print(f"Error loading file: {{e}}")
348
- {file_var_name}_data = None
349
- """
350
-
351
-
352
-
353
-
354
- def _handle_guac_refresh(state: ShellState, project_name: str, src_dir: Path):
355
- if not state.command_history or not state.npc:
356
- print("Cannot refresh: command history or NPC not available.")
357
- return
358
-
359
- history_entries = state.command_history.get_all()
360
- if not history_entries:
361
- print("No command history to analyze for refresh.")
362
- return
363
-
364
- py_commands = []
365
- for entry in history_entries:
366
- if len(entry) > 2 and isinstance(entry[2], str) and entry[2].strip() and not entry[2].startswith('/'):
367
- py_commands.append(entry[2])
368
-
369
- if not py_commands:
370
- print("No relevant commands in history to analyze for refresh.")
371
- return
372
-
373
- prompt_parts = [
374
- "Analyze the following Python commands or natural language queries that led to Python code execution by a user:",
375
- "\n```python",
376
- "\n".join(py_commands[-20:]),
377
- "```\n",
378
- "Based on these, suggest 1-3 useful Python helper functions that the user might find valuable.",
379
- "Provide only the Python code for these functions, wrapped in ```python ... ``` blocks.",
380
- "Do not include any other text or explanation outside the code blocks."
381
- ]
382
- prompt = "\n".join(prompt_parts)
383
-
384
- try:
385
-
386
- npc_model = state.npc.model if state.npc and state.npc.model else state.chat_model
387
- npc_provider = state.npc.provider if state.npc and state.npc.provider else state.chat_provider
388
-
389
- response = get_llm_response(prompt,
390
- model=npc_model,
391
- provider=npc_provider,
392
- npc=state.npc,
393
- stream=False)
394
- suggested_code_raw = response.get("response", "").strip()
395
- code_blocks = re.findall(r'```python\s*(.*?)\s*```', suggested_code_raw, re.DOTALL)
396
-
397
- if not code_blocks:
398
- if "def " in suggested_code_raw:
399
- code_blocks = [suggested_code_raw]
400
- else:
401
- print("\nNo functions suggested by LLM or format not recognized.")
402
- return
403
-
404
- suggested_functions_code = "\n\n".join(block.strip() for block in code_blocks)
405
- if not suggested_functions_code.strip():
406
- print("\nLLM did not suggest any functions.")
407
- return
408
-
409
- print("\n=== Suggested Helper Functions ===\n")
410
- render_markdown(f"```python\n{suggested_functions_code}\n```")
411
- print("\n===============================\n")
412
-
413
- user_choice = input("Add these functions to your main.py? (y/n): ").strip().lower()
414
- if user_choice == 'y':
415
- main_py_path = src_dir / "main.py"
416
- with open(main_py_path, "a") as f:
417
- f.write("\n\n# --- Functions suggested by /refresh ---\n")
418
- f.write(suggested_functions_code)
419
- f.write("\n# --- End of suggested functions ---\n")
420
- print(f"Functions appended to {main_py_path}.")
421
- print(f"To use them in the current session: import importlib; importlib.reload({project_name}.src.main); from {project_name}.src.main import *")
422
- else:
423
- print("Suggested functions not added.")
424
- except Exception as e:
425
- print(f"Error during /refresh: {e}")
426
- traceback.print_exc()
427
-
428
-
429
-
430
- def ensure_global_guac_team():
431
- """Ensure a global guac team exists at ~/.npcsh/guac/npc_team/."""
432
- base_dir = Path.home() / ".npcsh" / "guac"
433
- team_dir = base_dir / "npc_team"
434
- team_dir.mkdir(parents=True, exist_ok=True)
435
-
436
- guac_npc_path = team_dir / "guac.npc"
437
- if not guac_npc_path.exists():
438
- guac = {
439
- "name": "guac",
440
- "primary_directive": (
441
- "You are guac, the global coordinator NPC for Guac Mode. "
442
- "Always prioritize Python code, concise answers, and coordination."
443
- ),
444
- "model": os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b"),
445
- "provider": os.environ.get("NPCSH_CHAT_PROVIDER", "ollama")
446
- }
447
- with open(guac_npc_path, "w") as f:
448
- yaml.dump(guac, f, default_flow_style=False)
449
- print(f"✅ Created global guac NPC at {guac_npc_path}")
450
-
451
- ctx_path = team_dir / "team.ctx"
452
- if not ctx_path.exists():
453
- ctx = {
454
- "team_name": "guac_global_team",
455
- "forenpc": "guac",
456
- "model": os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b"),
457
- "provider": os.environ.get("NPCSH_CHAT_PROVIDER", "ollama"),
458
- "description": "Global guac team context"
459
- }
460
- with open(ctx_path, "w") as f:
461
- yaml.dump(ctx, f, default_flow_style=False)
462
- print(f"✅ Created global guac team.ctx at {ctx_path}")
463
-
464
- return team_dir
465
-
466
-
467
- def setup_guac_mode(config_dir=None,
468
- plots_dir=None,
469
- npc_team_dir=None,
470
- lang='python',
471
- default_mode_choice=None):
472
- base_dir = Path.cwd()
473
-
474
- local_npc_team = base_dir / "npc_team"
475
- if local_npc_team.exists():
476
- npc_team_dir = local_npc_team
477
- workspace_dirs = _get_workspace_dirs(npc_team_dir)
478
- _ensure_workspace_dirs(workspace_dirs)
479
-
480
- team_ctx_path = npc_team_dir / "team.ctx"
481
- existing_ctx = {}
482
- if team_ctx_path.exists():
483
- try:
484
- with open(team_ctx_path, "r") as f:
485
- existing_ctx = yaml.safe_load(f) or {}
486
- except Exception as e:
487
- print(f"Warning: Could not read team.ctx: {e}")
488
-
489
- package_root = existing_ctx.get("GUAC_PACKAGE_ROOT", str(base_dir))
490
- package_name = existing_ctx.get("GUAC_PACKAGE_NAME", "project")
491
- project_description = existing_ctx.get("GUAC_PROJECT_DESCRIPTION", "Local guac team")
492
-
493
- return {
494
- "language": lang, "package_root": Path(package_root), "plots_dir": plots_dir,
495
- "npc_team_dir": npc_team_dir, "config_dir": config_dir, "default_mode": default_mode_choice or "agent",
496
- "project_description": project_description, "package_name": package_name
497
- }
498
-
499
- global_flag_file = base_dir / ".npcsh_global"
500
- if global_flag_file.exists() or os.environ.get("GUAC_USE_GLOBAL") == "1":
501
- print("Using global Guac team")
502
- team_dir = ensure_global_guac_team()
503
- return {
504
- "language": lang, "package_root": team_dir, "plots_dir": plots_dir,
505
- "npc_team_dir": team_dir, "config_dir": config_dir, "default_mode": default_mode_choice or "agent",
506
- "project_description": "Global guac team for analysis.", "package_name": "guac"
507
- }
508
-
509
- if npc_team_dir is None:
510
- npc_team_dir = base_dir / "npc_team"
511
- else:
512
- npc_team_dir = Path(npc_team_dir)
513
- npc_team_dir.mkdir(parents=True, exist_ok=True)
514
- workspace_dirs = _get_workspace_dirs(npc_team_dir)
515
- _ensure_workspace_dirs(workspace_dirs)
516
-
517
- team_ctx_path = npc_team_dir / "team.ctx"
518
- existing_ctx = {}
519
- if team_ctx_path.exists():
520
- try:
521
- with open(team_ctx_path, "r") as f:
522
- existing_ctx = yaml.safe_load(f) or {}
523
- except Exception as e:
524
- print(f"Warning: Could not read team.ctx: {e}")
525
-
526
- package_root = existing_ctx.get("GUAC_PACKAGE_ROOT")
527
- package_name = existing_ctx.get("GUAC_PACKAGE_NAME")
528
-
529
- if package_root is None or package_name is None:
530
- try:
531
- response = input("Enter package root (Enter for current dir): ").strip()
532
- package_root = response if response else str(base_dir)
533
- response = input("Enter package name (Enter for 'project'): ").strip()
534
- package_name = response if response else "project"
535
- except (KeyboardInterrupt, EOFError):
536
- print("Project setup interrupted. Falling back to global guac team...")
537
- global_flag_file.touch()
538
- os.environ["GUAC_USE_GLOBAL"] = "1"
539
- team_dir = ensure_global_guac_team()
540
- return {
541
- "language": lang, "package_root": team_dir, "plots_dir": plots_dir,
542
- "npc_team_dir": team_dir, "config_dir": config_dir, "default_mode": default_mode_choice or "agent",
543
- "project_description": "Global guac team for analysis.", "package_name": "guac"
544
- }
545
-
546
- project_description = existing_ctx.get("GUAC_PROJECT_DESCRIPTION", "")
547
- if not project_description:
548
- try:
549
- project_description = input("Enter a project description: ").strip() or "No description."
550
- except (KeyboardInterrupt, EOFError):
551
- project_description = "No description provided."
552
-
553
- updated_ctx = {**existing_ctx}
554
- updated_ctx.update({
555
- "GUAC_TEAM_NAME": "guac_team",
556
- "GUAC_DESCRIPTION": f"A team for {lang} analysis for project {package_name}",
557
- "GUAC_FORENPC": "guac", "GUAC_PROJECT_DESCRIPTION": project_description,
558
- "GUAC_LANG": lang, "GUAC_PACKAGE_ROOT": package_root, "GUAC_PACKAGE_NAME": package_name,
559
- "GUAC_WORKSPACE_PATHS": {k: str(v) for k, v in workspace_dirs.items()},
560
- })
561
-
562
- pkg_root_path = Path(package_root)
563
- try:
564
- pkg_root_path.mkdir(parents=True, exist_ok=True)
565
- (pkg_root_path / package_name / "__init__.py").touch()
566
- except Exception as e:
567
- logging.warning("Could not ensure package root/dir: %s", e)
568
-
569
- with open(team_ctx_path, "w") as f:
570
- yaml.dump(updated_ctx, f, default_flow_style=False)
571
- print("Updated team.ctx with GUAC-specific information.")
572
-
573
- setup_py_path = pkg_root_path / "setup.py"
574
- desc = project_description.replace('"', '\\"')
575
- if not setup_py_path.exists():
576
- setup_content = f'''from setuptools import setup, find_packages
577
- setup(name="{package_name}", version="0.0.1", description="{desc}", packages=find_packages())
578
- '''
579
- setup_py_path.write_text(setup_content)
580
- logging.info("Created minimal setup.py at %s", setup_py_path)
581
-
582
- default_mode_val = default_mode_choice or "agent"
583
- setup_npc_team(npc_team_dir, lang)
584
-
585
- print(f"\nGuac mode configured for package: {package_name} at {package_root}")
586
- print(f"Workspace created at: {workspace_dirs['workspace']}")
587
- return {
588
- "language": lang, "package_root": Path(package_root), "plots_dir": plots_dir,
589
- "npc_team_dir": npc_team_dir, "config_dir": config_dir, "default_mode": default_mode_val,
590
- "project_description": project_description, "package_name": package_name
591
- }
592
-
593
- def setup_npc_team(npc_team_dir, lang, is_subteam=False):
594
-
595
- guac_npc = {
596
- "name": "guac",
597
- "primary_directive": (
598
- f"You are guac, an AI assistant operating in a Python environment. "
599
- f"When asked to perform actions or generate code, prioritize Python. "
600
- f"For general queries, provide concise answers. "
601
- f"When routing tasks (agent mode), consider Python-based tools or direct Python code generation if appropriate. "
602
- f"If generating code directly (cmd mode), ensure it's Python."
603
- )
604
- }
605
- caug_npc = {
606
- "name": "caug",
607
- "primary_directive": f"You are caug, a specialist in big data statistical methods in {lang}. You never make scatter plots with discrete values unless asked. "
608
- }
609
-
610
- parsely_npc = {
611
- "name": "parsely",
612
- "primary_directive": f"You are parsely, a specialist in mathematical methods in {lang}."
613
- }
614
-
615
- toon_npc = {
616
- "name": "toon",
617
- "primary_directive": f"You are toon, a specialist in brute force methods in {lang}."
618
- }
619
-
620
- for npc_data in [guac_npc, caug_npc, parsely_npc, toon_npc]:
621
- npc_file = npc_team_dir / f"{npc_data['name']}.npc"
622
- if not npc_file.exists():
623
- with open(npc_file, "w") as f:
624
- yaml.dump(npc_data, f, default_flow_style=False)
625
- print(f"Created NPC: {npc_data['name']}")
626
- else:
627
- print(f"NPC already exists: {npc_data['name']}")
628
-
629
-
630
- if is_subteam:
631
- team_ctx_model = os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b")
632
- team_ctx_provider = os.environ.get("NPCSH_CHAT_PROVIDER", "ollama")
633
- team_ctx = {
634
- "team_name": "guac_team",
635
- "description": f"A subteam for {lang} analysis",
636
- "forenpc": "guac",
637
- "model": team_ctx_model,
638
- "provider": team_ctx_provider
639
- }
640
- with open(npc_team_dir / "team.ctx", "w") as f:
641
- yaml.dump(team_ctx, f, default_flow_style=False)
642
-
643
- def _get_workspace_dirs(npc_team_dir: Path) -> Dict[str, Path]:
644
- """Get workspace directories from the npc_team directory"""
645
- workspace_dir = npc_team_dir / "guac_workspace"
646
- return {
647
- "workspace": workspace_dir,
648
- "plots": workspace_dir / "plots",
649
- "data_inputs": workspace_dir / "data_inputs",
650
- "data_outputs": workspace_dir / "data_outputs"
651
- }
652
-
653
- def _ensure_workspace_dirs(workspace_dirs: Dict[str, Path]):
654
- """Ensure all workspace directories exist"""
655
- for directory in workspace_dirs.values():
656
- directory.mkdir(parents=True, exist_ok=True)
657
- import shutil
658
-
659
- def _detect_file_drop(input_text: str) -> bool:
660
- """Detect if input is just a file path (drag and drop)"""
661
-
662
- stripped = input_text.strip()
663
-
664
-
665
- if stripped.startswith("'") and stripped.endswith("'"):
666
- stripped = stripped[1:-1]
667
- elif stripped.startswith('"') and stripped.endswith('"'):
668
- stripped = stripped[1:-1]
669
-
670
-
671
- if len(stripped.split()) != 1:
672
- return False
673
-
674
-
675
- python_indicators = ['(', ')', '[', ']', '{', '}', '=', '+', '-', '*', '/', '%', '&', '|', '^', '<', '>', '!', '?', ':', ';', ',']
676
- if any(indicator in stripped for indicator in python_indicators):
677
- return False
678
-
679
-
680
- python_keywords = ['import', 'from', 'def', 'class', 'if', 'for', 'while', 'try', 'with', 'lambda', 'print', 'len', 'str', 'int', 'float', 'list', 'dict', 'set', 'tuple']
681
- if any(stripped.startswith(keyword) for keyword in python_keywords):
682
- return False
683
-
684
-
685
- import hashlib
686
- from sqlalchemy import create_engine, Column, Integer, String, Text, Float, DateTime, func
687
- from sqlalchemy.ext.declarative import declarative_base
688
- from sqlalchemy.orm import sessionmaker
689
-
690
-
691
- Base = declarative_base()
692
-
693
- class PlotState(Base):
694
- __tablename__ = 'plot_states'
695
- id = Column(Integer, primary_key=True)
696
- session_id = Column(String(255))
697
- plot_hash = Column(String(32))
698
- plot_description = Column(Text)
699
- figure_path = Column(String(500))
700
- data_summary = Column(String(500))
701
- change_significance = Column(Float)
702
- timestamp = Column(DateTime, default=func.now())
703
-
704
- class FileAnalysisState(Base):
705
- __tablename__ = 'file_analysis_states'
706
- id = Column(Integer, primary_key=True)
707
- session_id = Column(String(255))
708
- file_path = Column(String(1000))
709
- file_hash = Column(String(32))
710
- analysis_summary = Column(Text)
711
- variable_names = Column(Text)
712
- timestamp = Column(DateTime, default=func.now())
713
-
714
- def _capture_file_state(session_id: str, db_path: str, file_path: str, analysis_code: str, locals_dict: Dict):
715
- """Capture file analysis state"""
716
- engine = create_engine(f'sqlite:///{db_path}')
717
- Base.metadata.create_all(engine)
718
- Session = sessionmaker(bind=engine)
719
- session = Session()
720
-
721
-
722
- try:
723
- with open(file_path, 'rb') as f:
724
- file_hash = hashlib.md5(f.read()).hexdigest()
725
- except:
726
- file_hash = "unknown"
727
-
728
-
729
- file_stem = Path(file_path).stem.lower()
730
- vars_created = [k for k in locals_dict.keys() if not k.startswith('_') and file_stem in k.lower()]
731
-
732
- file_state = FileAnalysisState(
733
- session_id=session_id,
734
- file_path=file_path,
735
- file_hash=file_hash,
736
- analysis_summary=f"Loaded {Path(file_path).name} -> {len(vars_created)} variables",
737
- variable_names=json.dumps(vars_created)
738
- )
739
-
740
- session.add(file_state)
741
- session.commit()
742
- session.close()
743
- print(f"📁 File state captured: {Path(file_path).name}")
744
-
745
- def _get_plot_context(session_id: str, db_path: str) -> str:
746
- """Get plot context for LLM"""
747
- engine = create_engine(f'sqlite:///{db_path}')
748
- Session = sessionmaker(bind=engine)
749
- session = Session()
750
-
751
- plots = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).limit(3).all()
752
- session.close()
753
-
754
- if not plots:
755
- return "No plots in session."
756
-
757
- context = "Recent plots:\n"
758
- for i, plot in enumerate(plots):
759
- if i == 0:
760
- context += f"📊 CURRENT: {plot.plot_description}\n"
761
- else:
762
- context += f"📊 Previous: {plot.plot_description}\n"
763
- return context
764
-
765
- def _get_file_context(session_id: str, db_path: str) -> str:
766
- """Get file context for LLM"""
767
- engine = create_engine(f'sqlite:///{db_path}')
768
- Session = sessionmaker(bind=engine)
769
- session = Session()
770
-
771
- files = session.query(FileAnalysisState).filter(FileAnalysisState.session_id == session_id).order_by(FileAnalysisState.timestamp.desc()).all()
772
- session.close()
773
-
774
- if not files:
775
- return "No files analyzed."
776
-
777
- context = "Analyzed files:\n"
778
- for file in files:
779
- context += f"📁 {Path(file.file_path).name}: {file.analysis_summary}\n"
780
- return context
781
- def _generate_file_analysis_code(file_path: str, target_path: str) -> str:
782
- """Generate Python code to load and analyze the dropped file"""
783
- ext = Path(file_path).suffix.lower()
784
- file_var_name = f"file_{datetime.now().strftime('%H%M%S')}"
785
-
786
- if ext == '.pdf':
787
- return f"""
788
-
789
- import PyPDF2
790
- import pandas as pd
791
- try:
792
- with open(r'{target_path}', 'rb') as file:
793
- pdf_reader = PyPDF2.PdfReader(file)
794
- {file_var_name}_text = ""
795
- for page_num in range(len(pdf_reader.pages)):
796
- {file_var_name}_text += pdf_reader.pages[page_num].extract_text()
797
-
798
- print(f"📄 Loaded PDF: {{len(pdf_reader.pages)}} pages, {{len({file_var_name}_text)}} characters")
799
- print("First 500 characters:")
800
- print({file_var_name}_text[:500])
801
- print("\\n--- PDF loaded as '{file_var_name}_text' variable ---")
802
- except Exception as e:
803
- print(f"Error loading PDF: {{e}}")
804
- {file_var_name}_text = None
805
1
  """
806
-
807
- elif ext in ['.csv']:
808
- return f"""
2
+ guac - Python data analysis mode CLI entry point
809
3
 
810
- import pandas as pd
811
- try:
812
- {file_var_name}_df = pd.read_csv(r'{target_path}')
813
- print(f"📊 Loaded CSV: {{len({file_var_name}_df)}} rows, {{len({file_var_name}_df.columns)}} columns")
814
- print("Columns:", list({file_var_name}_df.columns))
815
- print("\\nFirst 5 rows:")
816
- print({file_var_name}_df.head())
817
- print(f"\\n--- CSV loaded as '{file_var_name}_df' variable ---")
818
- except Exception as e:
819
- print(f"Error loading CSV: {{e}}")
820
- {file_var_name}_df = None
4
+ This is a thin wrapper that executes the guac.jinx through the jinx mechanism.
821
5
  """
822
-
823
- elif ext in ['.xlsx', '.xls']:
824
- return f"""
825
-
826
- import pandas as pd
827
- try:
828
- {file_var_name}_df = pd.read_excel(r'{target_path}')
829
- print(f"📊 Loaded Excel: {{len({file_var_name}_df)}} rows, {{len({file_var_name}_df.columns)}} columns")
830
- print("Columns:", list({file_var_name}_df.columns))
831
- print("\\nFirst 5 rows:")
832
- print({file_var_name}_df.head())
833
- print(f"\\n--- Excel loaded as '{file_var_name}_df' variable ---")
834
- except Exception as e:
835
- print(f"Error loading Excel: {{e}}")
836
- {file_var_name}_df = None
837
- """
838
-
839
- elif ext in ['.json']:
840
- return f"""
841
-
842
- import json
843
- try:
844
- with open(r'{target_path}', 'r') as file:
845
- {file_var_name}_data = json.load(file)
846
- print(f"📄 Loaded JSON: {{type({file_var_name}_data)}}")
847
- if isinstance({file_var_name}_data, dict):
848
- print("Keys:", list({file_var_name}_data.keys()))
849
- elif isinstance({file_var_name}_data, list):
850
- print(f"List with {{len({file_var_name}_data)}} items")
851
- print(f"\\n--- JSON loaded as '{file_var_name}_data' variable ---")
852
- except Exception as e:
853
- print(f"Error loading JSON: {{e}}")
854
- {file_var_name}_data = None
855
- """
856
-
857
- elif ext in ['.txt', '.md']:
858
- return f"""
859
-
860
- try:
861
- with open(r'{target_path}', 'r', encoding='utf-8') as file:
862
- {file_var_name}_text = file.read()
863
- print(f"📄 Loaded text file: {{len({file_var_name}_text)}} characters")
864
- print("First 500 characters:")
865
- print({file_var_name}_text[:500])
866
- print(f"\\n--- Text loaded as '{file_var_name}_text' variable ---")
867
- except Exception as e:
868
- print(f"Error loading text file: {{e}}")
869
- {file_var_name}_text = None
870
- """
871
-
872
- elif ext in ['.png', '.jpg', '.jpeg', '.gif']:
873
- return f"""
874
-
875
- import matplotlib.pyplot as plt
876
- from PIL import Image
877
- import numpy as np
878
- try:
879
- {file_var_name}_img = Image.open(r'{target_path}')
880
- {file_var_name}_array = np.array({file_var_name}_img)
881
- print(f"🖼️ Loaded image: {{({file_var_name}_img.size)}} pixels, mode: {{{file_var_name}_img.mode}}")
882
- print(f"Array shape: {{{file_var_name}_array.shape}}")
883
-
884
- plt.figure(figsize=(8, 6))
885
- plt.imshow({file_var_name}_img)
886
- plt.axis('off')
887
- plt.title('Loaded Image: {Path(file_path).name}')
888
- plt.show()
889
- print(f"\\n--- Image loaded as '{file_var_name}_img' and '{file_var_name}_array' variables ---")
890
- except Exception as e:
891
- print(f"Error loading image: {{e}}")
892
- {file_var_name}_img = None
893
- {file_var_name}_array = None
894
- """
895
-
896
- else:
897
- return f"""
898
-
899
- try:
900
- with open(r'{target_path}', 'rb') as file:
901
- {file_var_name}_data = file.read()
902
- print(f"📄 Loaded binary file: {{len({file_var_name}_data)}} bytes")
903
- print(f"File extension: {ext}")
904
- print(f"\\n--- Binary data loaded as '{file_var_name}_data' variable ---")
905
- except Exception as e:
906
- print(f"Error loading file: {{e}}")
907
- {file_var_name}_data = None
908
- """
909
- def _handle_file_drop(input_text: str, npc_team_dir: Path) -> Tuple[str, List[str]]:
910
- """Handle file drops by copying files to appropriate workspace directories"""
911
-
912
-
913
-
914
- stripped = input_text.strip("'\"")
915
- if os.path.exists(stripped) and os.path.isfile(stripped):
916
- print(f"[DEBUG] Direct file drop detected: {stripped}")
917
-
918
- workspace_dirs = _get_workspace_dirs(npc_team_dir)
919
- _ensure_workspace_dirs(workspace_dirs)
920
-
921
- expanded_path = Path(stripped).resolve()
922
-
923
- ext = expanded_path.suffix[1:].upper() if expanded_path.suffix else "OTHERS"
924
- category = EXTENSION_MAP.get(ext, "data_inputs")
925
- target_dir = workspace_dirs.get(category, workspace_dirs["data_inputs"])
926
-
927
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
928
- new_filename = f"{timestamp}_{expanded_path.name}"
929
- target_path = target_dir / new_filename
930
-
931
- try:
932
- shutil.copy2(expanded_path, target_path)
933
- print(f"📁 Copied {expanded_path.name} to workspace: {target_path}")
934
-
935
-
936
- loading_code = _generate_file_analysis_code(str(expanded_path), str(target_path))
937
- print(f"\n# Auto-generated file loading code:\n---\n{loading_code}\n---\n")
938
-
939
-
940
- exec(loading_code)
941
-
942
- return "", [str(target_path)]
943
- except Exception as e:
944
- print(f"[ERROR] Failed to process file drop: {e}")
945
- return input_text, []
946
-
947
-
948
- processed_files = []
949
- file_paths = re.findall(r"'([^']+)'|\"([^\"]+)\"|(\S+)", input_text)
950
- file_paths = [path for group in file_paths for path in group if path]
951
-
952
-
953
-
954
- if not file_paths:
955
-
956
- return input_text, processed_files, file_paths
957
-
958
- modified_input = input_text
959
-
960
- return modified_input, processed_files, file_paths
961
-
962
- def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
963
- if not plt.get_fignums():
964
- return
965
-
966
- try:
967
- workspace_dirs = _get_workspace_dirs(npc_team_dir)
968
- workspace_dirs["plots"].mkdir(parents=True, exist_ok=True)
969
-
970
- engine = create_engine(f'sqlite:///{db_path}')
971
- Base.metadata.create_all(engine)
972
- Session = sessionmaker(bind=engine)
973
- session = Session()
974
-
975
- fig = plt.gcf()
976
- axes = fig.get_axes()
977
- data_points = sum(len(line.get_xdata()) for ax in axes for line in ax.get_lines())
978
-
979
- plot_hash = hashlib.md5(f"{len(axes)}{data_points}".encode()).hexdigest()
980
-
981
- last = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).first()
982
- if last and last.plot_hash == plot_hash:
983
- session.close()
984
- return
985
-
986
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
987
- plot_path = workspace_dirs["plots"] / f"state_{timestamp}.png"
988
- plt.savefig(plot_path, dpi=150, bbox_inches='tight')
989
-
990
- plot_state = PlotState(
991
- session_id=session_id,
992
- plot_hash=plot_hash,
993
- plot_description=f"Plot with {len(axes)} axes, {data_points} points",
994
- figure_path=str(plot_path),
995
- data_summary=f"{data_points} data points",
996
- change_significance=1.0 if not last else 0.5
997
- )
998
-
999
- session.add(plot_state)
1000
- session.commit()
1001
- session.close()
1002
- print(f"Plot state captured -> {plot_path.name}")
1003
-
1004
- except Exception as e:
1005
- print(f"Error capturing plot state: {e}")
1006
- def _capture_file_state(session_id: str, db_path: str, file_path: str, analysis_code: str, locals_dict: Dict):
1007
- """Capture file analysis state"""
1008
- try:
1009
- engine = create_engine(f'sqlite:///{db_path}')
1010
- Base.metadata.create_all(engine)
1011
- Session = sessionmaker(bind=engine)
1012
- session = Session()
1013
-
1014
-
1015
- try:
1016
- with open(file_path, 'rb') as f:
1017
- file_hash = hashlib.md5(f.read()).hexdigest()
1018
- except:
1019
- file_hash = "unknown"
1020
-
1021
-
1022
- file_stem = Path(file_path).stem.lower()
1023
- vars_created = [k for k in locals_dict.keys() if not k.startswith('_') and file_stem in k.lower()]
1024
-
1025
- file_state = FileAnalysisState(
1026
- session_id=session_id,
1027
- file_path=file_path,
1028
- file_hash=file_hash,
1029
- analysis_summary=f"Loaded {Path(file_path).name} -> {len(vars_created)} variables",
1030
- variable_names=json.dumps(vars_created)
1031
- )
1032
-
1033
- session.add(file_state)
1034
- session.commit()
1035
- session.close()
1036
- print(f"📁 File state captured: {Path(file_path).name}")
1037
-
1038
- except Exception as e:
1039
- print(f"Error capturing file state: {e}")
1040
-
1041
- def _get_plot_context(session_id: str, db_path: str) -> str:
1042
- """Get plot context for LLM"""
1043
- try:
1044
- engine = create_engine(f'sqlite:///{db_path}')
1045
- Base.metadata.create_all(engine)
1046
- Session = sessionmaker(bind=engine)
1047
- session = Session()
1048
-
1049
- plots = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).limit(3).all()
1050
- session.close()
1051
-
1052
- if not plots:
1053
- return "No plots in session."
1054
-
1055
- context = "Recent plots:\n"
1056
- for i, plot in enumerate(plots):
1057
- if i == 0:
1058
- context += f"📊 CURRENT: {plot.plot_description}\n"
1059
- else:
1060
- context += f"📊 Previous: {plot.plot_description}\n"
1061
- return context
1062
-
1063
- except Exception as e:
1064
- return f"Error retrieving plot context: {e}"
1065
-
1066
- def _get_file_context(session_id: str, db_path: str) -> str:
1067
- """Get file context for LLM"""
1068
- try:
1069
- engine = create_engine(f'sqlite:///{db_path}')
1070
- Base.metadata.create_all(engine)
1071
- Session = sessionmaker(bind=engine)
1072
- session = Session()
1073
-
1074
- files = session.query(FileAnalysisState).filter(FileAnalysisState.session_id == session_id).order_by(FileAnalysisState.timestamp.desc()).all()
1075
- session.close()
1076
-
1077
- if not files:
1078
- return "No files analyzed."
1079
-
1080
- context = "Analyzed files:\n"
1081
- for file in files:
1082
- context += f"📁 {Path(file.file_path).name}: {file.analysis_summary}\n"
1083
- return context
1084
-
1085
- except Exception as e:
1086
- return f"Error retrieving file context: {e}"
1087
-
1088
-
1089
-
1090
- def _save_matplotlib_figures(npc_team_dir: Path) -> List[str]:
1091
- """Save all matplotlib figures to the plots directory and return paths"""
1092
- workspace_dirs = _get_workspace_dirs(npc_team_dir)
1093
- _ensure_workspace_dirs(workspace_dirs)
1094
-
1095
- saved_figures = []
1096
- if plt.get_fignums():
1097
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
1098
-
1099
- for i, fig_num in enumerate(plt.get_fignums()):
1100
- fig = plt.figure(fig_num)
1101
- fig_path = workspace_dirs["plots"] / f"{timestamp}_figure_{i+1}.png"
1102
- fig.savefig(fig_path, dpi=150, bbox_inches='tight')
1103
- saved_figures.append(str(fig_path))
1104
- print(f"📊 Saved figure to: {fig_path}")
1105
-
1106
- plt.close('all')
1107
-
1108
- return saved_figures
1109
-
6
+ import argparse
7
+ import os
1110
8
  import sys
1111
- from io import StringIO
1112
- from contextlib import redirect_stdout, redirect_stderr
1113
-
1114
-
1115
- def _get_guac_agent_emoji(failures: int, max_fail: int = 3) -> str:
1116
- """
1117
- Returns an avocado emoji representing the state based on consecutive failures.
1118
- Includes "puke" emoji for max_fail, and "skull" for exceeding max_fail + 20.
1119
- """
1120
- if failures == 0:
1121
- return "🥑"
1122
- elif failures == 1:
1123
- return "🥑🔪"
1124
- elif failures == 2:
1125
- return "🥑🥣"
1126
- elif failures == max_fail:
1127
- return "🥑🤢"
1128
- elif failures > max_fail + 20:
1129
- return "🥑💀"
1130
- elif failures > max_fail:
1131
- return "🥑🟤"
1132
- else:
1133
- return "🥑❓"
1134
-
1135
-
1136
-
1137
-
1138
- def _run_agentic_mode(command: str,
1139
- state: ShellState,
1140
- locals_dict: Dict[str, Any],
1141
- npc_team_dir: Path) -> Tuple[ShellState, Any]:
1142
- max_iterations = 5
1143
- iteration = 0
1144
- full_output = []
1145
- current_command = command
1146
- consecutive_failures = 0
1147
- max_consecutive_failures = 3
1148
-
1149
- if len(state.messages) > 15:
1150
- planning_state = {
1151
- "goal": "ongoing guac session",
1152
- "facts": [f"Working in {state.current_path}", f"Variables: {list(locals_dict.keys())[:10]}"],
1153
- "successes": [],
1154
- "mistakes": [],
1155
- "todos": [],
1156
- "constraints": ["Focus on Python code execution", "Use existing variables when possible"]
1157
- }
1158
- compressed_state = state.npc.compress_planning_state(planning_state)
1159
- state.messages = [{"role": "system", "content": f"Session context: {compressed_state}"}]
1160
-
1161
-
1162
-
1163
-
1164
- existing_vars_context = "EXISTING VARIABLES IN ENVIRONMENT:\n"
1165
- for var_name, var_value in locals_dict.items():
1166
- if not var_name.startswith('_') and var_name not in ['In', 'Out', 'exit', 'quit', 'get_ipython']:
1167
- try:
1168
- var_type = type(var_value).__name__
1169
- var_repr = repr(var_value)
1170
- if len(var_repr) > 100:
1171
- var_repr = var_repr[:97] + "..."
1172
- existing_vars_context += f"- {var_name} ({var_type}): {var_repr}\n"
1173
- except:
1174
- existing_vars_context += f"- {var_name} ({type(var_value).__name__}): <unrepresentable>\n"
1175
- previous_code = ''
1176
- next_step = ''
1177
- steps = []
1178
- while iteration < max_iterations and consecutive_failures < max_consecutive_failures:
1179
- iteration += 1
1180
- print(f"\n{_get_guac_agent_emoji(consecutive_failures, max_consecutive_failures)} Agentic iteration {iteration} ")
1181
-
1182
- prompt = f"""
1183
- USER REQUEST: {current_command} {next_step}
1184
-
1185
- Here is the existing variable context:
1186
-
1187
- ```
1188
- {existing_vars_context}
1189
- ```
1190
- PREVIOUS ATTEMPTS: ```{full_output[-1] if full_output else 'None'}```
1191
-
1192
- DO NOT SIMPLY COPY A PREVIOUS ATTEMPT.
1193
-
1194
- Your goal is to generate Python code that BUILDS ON EXISTING VARIABLES to respond to this task: USER TASK: "{current_command}", with this next step planned: `{next_step} `
1195
-
1196
- If there is no relevant code to build on or the user is simply asking a question, generate new code as needed to respond to their questions.
1197
-
1198
- You will notice in the local envs that there are functions for reading, editing, and loading files.
1199
- You should use these to your advantage as they will help you to clearly understand the user's system best.
1200
-
1201
- Here are all the previous steps: {steps}
1202
-
1203
- DO NOT redefine variables that already exist unless absolutely necessary.
1204
- Use the existing variables and add/modify as needed.
1205
- Be sure to generate logs and information that once executed provide us with enough information to keep moving forward.
1206
- log variables and behaviors so we can pinpoint fixes clearly rather than getting stuck in nonsensical problematic loops.
1207
-
1208
- Provide ONLY executable Python code without any explanations or markdown formatting.
1209
- Focus on incremental changes rather than rewriting everything. Do not re-write any functions that are currently within the existing vars context or which appear to have no need to be changed.
1210
-
1211
- Do not include any leading ```python. Begin directly with the code.
1212
- Do not write your code to include a __main__ check or portion unless the user asks.
1213
- These should be functional components and building blocks that you and the user will take and build a great
1214
- library of modules. Keep things straightforward and do not do unnecessary exception handling unless requested.
1215
- Failing fast in research is important and so it is necessary to
1216
- No try except blocks unless requested.
1217
- Determine and log information in a way that helps us move forward rather than by exception handling.
1218
- Do not simply generate code that resembles the previous code.
1219
- While this code may one day be `production` code with such error handling,
1220
- at the moment, we are simply in the business of experimentation.
1221
- Do not use the python `input()` function. if you have a question, ask directly by typing <request_for_input> request </request_for_input>
1222
-
1223
- users may ask you to edit code directly. do this by loading the code in and evaluating it. once it is evaluated, you may attempt to write changes to it.
1224
-
1225
- Always evaluate before attempting to fix. Read first. Gather information. Look at files. This will not be your final code, this is just part of
1226
- an ongoing workflow.
1227
-
1228
-
1229
- If a user is asking for help debugging, it's better to figure out what is wrong by attempting to run it yourself, and if they do not prefer that,
1230
- then it's best to use static parsing methods and arguments based on deduction rather than attempting to just fix everything over and over.
1231
-
1232
- Do not over- complicate the code.
1233
-
1234
- DO NOT include any '__name__'=='__main__' block.
1235
- """
1236
-
1237
- npc_model = state.npc.model if state.npc and state.npc.model else state.chat_model
1238
- npc_provider = state.npc.provider if state.npc and state.npc.provider else state.chat_provider
1239
-
1240
- print(state.npc.model)
1241
- print(state.chat_model)
1242
- llm_response = get_llm_response(prompt,
1243
- npc=state.npc,
1244
- stream=True,
1245
- messages=state.messages,
1246
- thinking=False)
1247
-
1248
- print(llm_response.get('response'))
1249
- print(npc_model, npc_provider)
1250
-
1251
- generated_code = print_and_process_stream(llm_response.get('response'),
1252
- npc_model,
1253
- npc_provider,
1254
- )
1255
-
1256
- state.messages.append({'role':'user', 'content':current_command })
1257
- state.messages.append({'role':'assistant', 'content': generated_code})
1258
-
1259
- if '<request_for_input>' in generated_code:
1260
-
1261
- generated_code = generated_code.split('>')[1].split('<')[0]
1262
- user_feedback = input("\n🤔 Agent requests feedback (press Enter to continue or type your input): ").strip()
1263
- current_command = f"{current_command} - User feedback: {user_feedback}"
1264
- max_iterations += int(max_iterations/2)
1265
- continue
1266
- if '<think>' in generated_code and '</think>' in generated_code:
1267
- generated_code = generated_code.split('</think>')[1]
1268
- if generated_code.startswith('```python'):
1269
- generated_code = generated_code[len('```python'):].strip()
1270
- if generated_code.endswith('```'):
1271
- generated_code = generated_code[:-3].strip()
1272
-
1273
- try:
1274
- stdout_capture = StringIO()
1275
- stderr_capture = StringIO()
1276
- with redirect_stdout(stdout_capture), redirect_stderr(stderr_capture):
1277
- state, exec_output = execute_python_code(generated_code,
1278
- state,
1279
- locals_dict)
1280
-
1281
- captured_stdout = stdout_capture.getvalue()
1282
- captured_stderr = stderr_capture.getvalue()
1283
- if exec_output: print(exec_output)
1284
- if captured_stdout:
1285
- print("\n📤 Captured stdout:\n", captured_stdout)
1286
- if captured_stderr:
1287
- print("\n❌ Captured stderr:\n", captured_stderr)
1288
-
1289
- combined_output = f"{exec_output}\nstdout:\n{captured_stdout}\nstderr:\n{captured_stderr}"
1290
- full_output.append(f"Iteration {iteration}:\nCode:\n{generated_code}\nOutput:\n{combined_output}")
1291
9
 
1292
- new_vars = []
1293
- for var_name, var_value in locals_dict.items():
1294
- if (not var_name.startswith('_') and
1295
- var_name not in existing_vars_context and
1296
- var_name not in ['In', 'Out', 'exit', 'quit', 'get_ipython']):
1297
- new_vars.append(var_name)
1298
- if new_vars:
1299
- existing_vars_context += f"\nNEW VARIABLES CREATED: {', '.join(new_vars)}\n"
10
+ from npcsh._state import setup_shell
1300
11
 
1301
- analysis_prompt = f"""
1302
- CODE EXECUTION RESULTS: {combined_output}
1303
- EXISTING VARIABLES: {existing_vars_context}
1304
- EXECUTED_CODE: {generated_code}
1305
- PREVIOUS_CODE: {previous_code}
1306
- PREVIOUS ATTEMPTS: ```{full_output[-3:] if full_output else 'None'}```
1307
- Here are the steps so far: {steps}
1308
- ANALYSIS:
1309
- - Is there MEANINGFUL PROGRESS? Return 'progress' if making good progress.
1310
- - Is there a PROBLEM? Return 'problem' if stuck or error occurred.
1311
- - Is there an AMBIGUITY that should be resolved? Return 'question'.
1312
- - Is the analysis COMPLETE enough to get feedback? If it's pretty much done, return 'complete'.
1313
- - Return ONLY one of these words followed by a brief explanation for the next step.
1314
- """
1315
- analysis_response = get_llm_response(analysis_prompt, npc=state.npc, stream=False)
1316
- analysis = analysis_response.get("response", "").strip().lower()
1317
- next_step = analysis[8:]
1318
- state.messages.append({'role':'assistant',
1319
- 'content':f'Is there progress? is there a problem/ is there ambiguity? is it complete?\n {analysis}'})
 
1321
- if analysis.startswith('complete'):
1322
- print(f" Task completed! {_get_guac_agent_emoji(0, max_consecutive_failures)}")
1323
- break
1324
- elif analysis.startswith('question'):
1325
- print(f"🤔 Agent has a question: {next_step} {_get_guac_agent_emoji(consecutive_failures, max_consecutive_failures)}")
1326
- break
1327
- elif analysis.startswith('progress'):
1328
- consecutive_failures = 0
1329
- print(f"➡️ Making progress... {_get_guac_agent_emoji(consecutive_failures, max_consecutive_failures)}")
1330
- current_command = next_step
1331
- elif analysis.startswith('problem'):
1332
- consecutive_failures += 1
1333
- print(f"⚠️ Problem detected ({consecutive_failures}/{max_consecutive_failures}) {_get_guac_agent_emoji(consecutive_failures, max_consecutive_failures)}")
1334
- current_command = f"{current_command} - PROBLEM: {analysis}"
1335
- else:
1336
- consecutive_failures += 1
1337
- print(f"❓ Unexpected analysis, counting as failure ({consecutive_failures}/{max_consecutive_failures}) {_get_guac_agent_emoji(consecutive_failures, max_consecutive_failures)}")
1338
- except KeyboardInterrupt:
1339
- user_input = input('User input: ')
1340
- current_command += user_input
1341
- except Exception as e:
1342
- consecutive_failures += 1
1343
- error_msg = f"Error in iteration {iteration}: {str(e)} {_get_guac_agent_emoji(consecutive_failures, max_consecutive_failures)}"
1344
- print(error_msg)
1345
- full_output.append(error_msg)
1346
- current_command = f"{current_command} - Error: {str(e)}"
1347
-
1348
- if consecutive_failures >= max_consecutive_failures:
1349
- print(f"❌ Too many consecutive failures, stopping. {_get_guac_agent_emoji(consecutive_failures, max_consecutive_failures)}")
1350
-
1351
- return state, "# Agentic execution completed\n" + '\n'.join(full_output)
1352
-
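The analysis step above steers the loop with a small keyword protocol: the model must answer with one of progress, problem, question, or complete, and everything after the keyword becomes the next instruction. A compact sketch of that dispatch (dispatch is a hypothetical helper, not an npcsh API):

    def dispatch(analysis: str, failures: int, max_failures: int):
        # analysis is expected to start with progress/problem/question/complete.
        analysis = analysis.strip().lower()
        note = analysis.split(' ', 1)[1] if ' ' in analysis else ''
        if analysis.startswith('complete'):
            return 'stop', note, 0
        if analysis.startswith('question'):
            return 'ask_user', note, failures
        if analysis.startswith('progress'):
            return 'continue', note, 0
        # 'problem' and anything unexpected both count as a failure.
        failures += 1
        action = 'stop' if failures >= max_failures else 'retry'
        return action, note, failures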
1353
- def print_guac_bowl():
1354
- bowl_art = """
1355
- 🟢🟢🟢🟢🟢
1356
- 🟢 🟢
1357
- 🟢
1358
- 🟢
1359
- 🟢
1360
- 🟢 🟢🟢🟢 🟢 🟢 🟢🟢🟢 🟢🟢🟢
1361
- 🟢 🟢 🟢 🟢 ⚫⚫🟢 🟢
1362
- 🟢 🟢 🟢 🟢 ⚫🥑🧅⚫ 🟢
1363
- 🟢 🟢 🟢 🟢 ⚫🥑🍅⚫ 🟢
1364
- 🟢🟢🟢🟢🟢🟢 🟢🟢🟢🟢 ⚫⚫🟢 🟢🟢🟢
1365
- """
1366
- print(bowl_art)
1367
-
1368
- def get_guac_prompt_char(command_count: int, guac_refresh_period = 100) -> str:
1369
- period = int(guac_refresh_period)
1370
- period = max(1, period)
1371
- stages = ["\U0001F951", "\U0001F951🔪", "\U0001F951🥣", "\U0001F951🥣🧂", "\U0001F958 REFRESH?"]
1372
- divisor = max(1, period // (len(stages)-1) if len(stages) > 1 else period)
1373
- stage_index = min(command_count // divisor, len(stages) - 1)
1374
- return stages[stage_index]
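With the default guac_refresh_period of 100 and the five stages above, the divisor works out to 100 // 4 = 25, so the prompt icon advances one stage every 25 commands:

    # Worked example against get_guac_prompt_char as defined above.
    for count in (0, 24, 25, 50, 75, 100, 250):
        print(count, get_guac_prompt_char(count))
    # 0-24 -> stage 0 (avocado), 25-49 -> stage 1, 50-74 -> stage 2,
    # 75-99 -> stage 3, and 100 or more -> the final "REFRESH?" stage.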
1375
-
1376
- def execute_guac_command(command: str, state: ShellState, locals_dict: Dict[str, Any], project_name: str, src_dir: Path, router) -> Tuple[ShellState, Any]:
1377
- stripped_command = command.strip()
1378
- output = None
1379
- try:
1380
- cmd_parts = shlex.split(stripped_command)
1381
- if cmd_parts and cmd_parts[0] in ["cd", "ls", "pwd"]:
1382
- return execute_command(stripped_command, state, review=False, router=router)
1383
- except Exception as e:
1384
- pass
1385
- npc_team_dir = Path(state.team.team_path) if state.team and hasattr(state.team, 'team_path') else Path.cwd() / "npc_team"
1386
-
1387
- if not stripped_command:
1388
- return state, None
1389
- if stripped_command.lower() in ["exit", "quit", "exit()", "quit()"]:
1390
- raise SystemExit("Exiting Guac Mode.")
1391
-
1392
- if stripped_command.startswith('run '):
1393
- file_path = stripped_command[4:].strip()
1394
- try:
1395
- resolved_path = Path(file_path).resolve()
1396
- if not resolved_path.exists():
1397
- return state, f"Error: File '{file_path}' not found"
1398
-
1399
- with open(resolved_path, 'r', encoding='utf-8') as f:
1400
- file_content = f.read()
1401
-
1402
- print(f"Running {resolved_path.name}...")
1403
- state, exec_output = execute_python_code(file_content, state, locals_dict)
1404
- return state, exec_output
1405
-
1406
- except Exception as e:
1407
- return state, f"Error running file: {e}"
1408
-
1409
-
1410
-
1411
-
1412
- if _detect_file_drop(stripped_command):
1413
- if stripped_command.startswith('run'):
1414
- pass
1415
- else:
1416
-
1417
- file_path = stripped_command.strip("'\"")
1418
- expanded_path = Path(file_path).resolve()
1419
-
1420
-
1421
- workspace_dirs = _get_workspace_dirs(npc_team_dir)
1422
- _ensure_workspace_dirs(workspace_dirs)
1423
-
1424
- ext = expanded_path.suffix[1:].upper() if expanded_path.suffix else "OTHERS"
1425
- category = EXTENSION_MAP.get(ext, "data_inputs")
1426
- target_dir = workspace_dirs.get(category, workspace_dirs["data_inputs"])
1427
-
1428
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
1429
- new_filename = f"{timestamp}_{expanded_path.name}"
1430
- target_path = target_dir / new_filename
1431
-
1432
- try:
1433
- shutil.copy2(expanded_path, target_path)
1434
- print(f"📁 Copied {expanded_path.name} to workspace: {target_path}")
1435
-
1436
-
1437
- loading_code = _generate_file_analysis_code(str(expanded_path), str(target_path))
1438
- print(f"\n# Auto-generated file loading code:\n---\n{loading_code}\n---\n")
1439
-
1440
- state, exec_output = execute_python_code(loading_code, state, locals_dict)
1441
- return state, exec_output
1442
- except Exception as e:
1443
- print(f"[ERROR] Failed to copy or load file: {e}")
1444
- return state, f"Error loading file: {e}"
1445
-
1446
-
1447
- processed_command, processed_files, file_paths = _handle_file_drop(stripped_command, npc_team_dir)
1448
- if processed_files:
1449
- print(f"📁 Processed {len(processed_files)} files")
1450
- stripped_command = processed_command + 'Here are the files associated with the request'
1451
-
1452
-
1453
- if stripped_command == "/refresh":
1454
- _handle_guac_refresh(state, project_name, src_dir)
1455
- return state, "Refresh process initiated."
1456
-
1457
-
1458
- if stripped_command in ["/agent", "/chat", "/cmd"]:
1459
- state.current_mode = stripped_command[1:]
1460
- return state, f"Switched to {state.current_mode.upper()} mode."
1461
-
1462
-
1463
-
1464
-
1465
- if stripped_command.startswith('/') and stripped_command not in ["/refresh", "/agent", "/chat", "/cmd"]:
1466
- return execute_command(stripped_command, state, review=True, router=router)
1467
- print(is_python_code(stripped_command))
1468
- if is_python_code(stripped_command):
1469
- try:
1470
- state, exec_output = execute_python_code(stripped_command, state, locals_dict)
1471
- return state, exec_output
1472
- except KeyboardInterrupt:
1473
- print("\nExecution interrupted by user")
1474
- return state, "Execution interrupted"
1475
- if state.current_mode == "agent":
1476
-
1477
- return _run_agentic_mode(stripped_command, state, locals_dict, npc_team_dir)
1478
- if state.current_mode == "cmd":
1479
-
1480
-
1481
- locals_context_string = "Current Python environment variables and functions:\n"
1482
- if locals_dict:
1483
- for k, v in locals_dict.items():
1484
- if not k.startswith('__'):
1485
- try:
1486
- value_repr = repr(v)
1487
- if len(value_repr) > 200:
1488
- value_repr = value_repr[:197] + "..."
1489
- locals_context_string += f"- {k} (type: {type(v).__name__}) = {value_repr}\n"
1490
- except Exception:
1491
- locals_context_string += f"- {k} (type: {type(v).__name__}) = <unrepresentable>\n"
1492
- locals_context_string += "\n--- End of Environment Context ---\n"
1493
- else:
1494
- locals_context_string += "(Environment is empty)\n"
1495
-
1496
-
1497
- enhanced_prompt = stripped_command
1498
- if any(word in stripped_command.lower() for word in ['plot', 'graph', 'chart', 'figure', 'visualiz']):
1499
- plot_context = _get_plot_context(state.conversation_id, state.command_history.db_path)
1500
- enhanced_prompt += f"\n\n{plot_context}"
1501
-
1502
- if any(word in stripped_command.lower() for word in ['file', 'data', 'load', 'variable', 'df']):
1503
- file_context = _get_file_context(state.conversation_id, state.command_history.db_path)
1504
- enhanced_prompt += f"\n\n{file_context}"
1505
-
1506
- prompt_cmd = f"""User input for Python CMD mode: '{enhanced_prompt}'.
1507
- Generate ONLY executable Python code required to fulfill this.
1508
- Do not include any explanations, leading markdown like ```python, or any text other than the Python code itself.
1509
- {locals_context_string}
1510
- Begin directly with the code
1511
- """
1512
-
1513
- npc_model = state.npc.model if state.npc and state.npc.model else state.chat_model
1514
- npc_provider = state.npc.provider if state.npc and state.npc.provider else state.chat_provider
1515
-
1516
- llm_response = get_llm_response(prompt_cmd,
1517
- npc=state.npc,
1518
- stream=True,
1519
- messages=state.messages)
1520
- response = print_and_process_stream(llm_response.get('response'),
1521
- npc_model,
1522
- npc_provider )
1523
-
1524
-
1525
- if response.startswith('```python'):
1526
- generated_code = response.strip()[len('```python'):].strip()
1527
- generated_code = generated_code.rsplit('```', 1)[0].strip()
1528
- else:
1529
- generated_code = response.strip()
1530
-
1531
- state.messages = llm_response.get("messages", state.messages)
1532
-
1533
- if generated_code and not generated_code.startswith("# Error:"):
1534
- print(f"\n# LLM Generated Code (Cmd Mode):\n---\n{generated_code}\n---\n")
1535
- try:
1536
- state, exec_output = execute_python_code(generated_code, state, locals_dict)
1537
- output = f"# Code executed.\n# Output:\n{exec_output if exec_output else '(No direct output)'}"
1538
- except KeyboardInterrupt:
1539
- print("\nExecution interrupted by user")
1540
- output = "Execution interrupted"
1541
- else:
1542
- output = generated_code if generated_code else "# Error: LLM did not generate Python code."
1543
-
1544
- if state.command_history:
1545
- state.command_history.add_command(stripped_command, [str(output if output else "")], "", state.current_path)
1546
-
1547
- return state, output
1548
-
1549
- return execute_command(stripped_command, state, review=True, router=router)
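Both the agent and cmd paths above have to strip an optional markdown fence (and, for some models, a <think>...</think> preamble) before handing the text to the executor. A small helper capturing that cleanup:

    def extract_code(response: str) -> str:
        text = response.strip()
        if '<think>' in text and '</think>' in text:
            text = text.split('</think>', 1)[1].strip()
        if text.startswith('```python'):
            text = text[len('```python'):].strip()
        elif text.startswith('```'):
            text = text[3:].strip()
        if text.endswith('```'):
            text = text[:-3].strip()
        return text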
1550
- def run_guac_repl(state: ShellState, project_name: str, package_root: Path, package_name: str):
1551
- from npcsh.routes import router
1552
-
1553
- npc_team_dir = Path.cwd() / "npc_team"
1554
- workspace_dirs = _get_workspace_dirs(npc_team_dir)
1555
- _ensure_workspace_dirs(workspace_dirs)
1556
-
1557
- locals_dict = {}
1558
- global _guac_monitor_thread, _guac_monitor_stop_event
1559
- if _guac_monitor_thread is None or not (_guac_monitor_thread.is_alive()):
1560
- _guac_monitor_stop_event = threading.Event()
1561
- _guac_monitor_thread = threading.Thread(
1562
- target=_file_drop_monitor,
1563
- args=(workspace_dirs['workspace'].parent, state, locals_dict),
1564
- kwargs={'poll_interval': 0.2},
1565
- daemon=True
1566
- )
1567
- _guac_monitor_thread.start()
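The REPL keeps a single daemon thread watching the workspace for dropped files, guarded by a module-level stop event so it can be shut down cleanly on exit. A minimal sketch of that lifecycle, with watch_dir standing in for _file_drop_monitor:

    import threading
    import time

    def watch_dir(path, stop_event, poll_interval=0.2):
        # Poll until the REPL sets the stop event; a real monitor would scan `path` here.
        while not stop_event.is_set():
            time.sleep(poll_interval)

    stop_event = threading.Event()
    monitor = threading.Thread(target=watch_dir, args=(".", stop_event), daemon=True)
    monitor.start()
    # ... later, on exit:
    stop_event.set()
    monitor.join(timeout=1.0)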
1568
-
1569
- try:
1570
- if str(package_root) not in sys.path:
1571
- sys.path.insert(0, str(package_root))
1572
-
1573
- try:
1574
- package_module = importlib.import_module(package_name)
1575
- for name in dir(package_module):
1576
- if not name.startswith('__'):
1577
- locals_dict[name] = getattr(package_module, name)
1578
- print(f"Loaded package: {package_name}")
1579
- except ImportError:
1580
- print(f"Warning: Could not import package {package_name}")
1581
-
1582
- except Exception as e:
1583
- print(f"Warning: Could not load package {package_name}: {e}", file=sys.stderr)
1584
-
1585
- def read_file(file_path, max_lines=10000, encoding='utf-8'):
1586
- path = Path(file_path).expanduser().resolve()
1587
-
1588
- if not path.exists():
1589
- print(f"File not found: {path}")
1590
- return None
1591
-
1592
- if not path.is_file():
1593
- print(f"Not a file: {path}")
1594
- return None
1595
-
1596
- try:
1597
- file_ext = path.suffix.upper().lstrip('.')
1598
- if file_ext in ['PDF', 'DOCX', 'PPTX', 'HTML', 'HTM', 'CSV', 'XLS', 'XLSX', 'JSON']:
1599
- chunks = load_file_contents(str(path), chunk_size=10000)
1600
- if chunks and not chunks[0].startswith("Error") and not chunks[0].startswith("Unsupported"):
1601
- content = '\n'.join(chunks)
1602
- lines = content.split('\n')
1603
-
1604
- if len(lines) > max_lines:
1605
- lines = lines[:max_lines]
1606
- print(f"File truncated at {max_lines} lines. Use windowed reading for larger files.")
1607
-
1608
- print(f"Reading {path.name} ({len(lines)} lines, {len(content)} chars)")
1609
- print("=" * 60)
1610
-
1611
- for i, line in enumerate(lines, 1):
1612
- print(f"{i:4d} | {line}")
1613
-
1614
- print("=" * 60)
1615
- print(f"End of {path.name}")
1616
- return content
1617
-
1618
- with open(path, 'r', encoding=encoding) as f:
1619
- lines = []
1620
- for i, line in enumerate(f, 1):
1621
- if i > max_lines:
1622
- print(f"File truncated at {max_lines} lines. Use windowed reading for larger files.")
1623
- break
1624
- lines.append(line.rstrip('\n\r'))
1625
-
1626
- content = '\n'.join(lines)
1627
-
1628
- print(f"Reading {path.name} ({len(lines)} lines, {len(content)} chars)")
1629
- print("=" * 60)
1630
-
1631
- for i, line in enumerate(lines, 1):
1632
- print(f"{i:4d} | {line}")
1633
-
1634
- print("=" * 60)
1635
- print(f"End of {path.name}")
1636
-
1637
- return content
1638
-
1639
- except UnicodeDecodeError:
1640
- try:
1641
- with open(path, 'rb') as f:
1642
- data = f.read(min(1024, max_lines * 80))
1643
- print(f"Binary file {path.name} ({len(data)} bytes)")
1644
- print("=" * 60)
1645
- print(data.hex()[:1000] + ("..." if len(data) > 500 else ""))
1646
- print("=" * 60)
1647
- return data
1648
- except Exception as e:
1649
- print(f"Error reading file: {e}")
1650
- return None
1651
- except Exception as e:
1652
- print(f"Error reading file: {e}")
1653
- return None
1654
-
1655
- def edit_file(file_path, content=None, line_number=None, new_line=None, insert_at=None, append=False, backup=True):
1656
- path = Path(file_path).expanduser().resolve()
1657
-
1658
- path.parent.mkdir(parents=True, exist_ok=True)
1659
-
1660
- if backup and path.exists():
1661
- backup_path = path.with_suffix(path.suffix + '.backup')
1662
- import shutil
1663
- shutil.copy2(path, backup_path)
1664
- print(f"Backup saved: {backup_path.name}")
1665
-
1666
- try:
1667
- existing_lines = []
1668
- if path.exists():
1669
- with open(path, 'r', encoding='utf-8') as f:
1670
- existing_lines = [line.rstrip('\n\r') for line in f]
1671
-
1672
- if content is not None:
1673
- if append:
1674
- with open(path, 'a', encoding='utf-8') as f:
1675
- f.write('\n' + content if existing_lines else content)
1676
- print(f"Appended to {path.name}")
1677
- else:
1678
- with open(path, 'w', encoding='utf-8') as f:
1679
- f.write(content)
1680
- print(f"Wrote {path.name} ({len(content)} chars)")
1681
-
1682
- elif line_number is not None and new_line is not None:
1683
- if line_number < 1:
1684
- print("Line numbers start at 1")
1685
- return False
1686
-
1687
- while len(existing_lines) < line_number:
1688
- existing_lines.append("")
1689
-
1690
- if line_number <= len(existing_lines):
1691
- old_line = existing_lines[line_number - 1] if line_number <= len(existing_lines) else ""
1692
- existing_lines[line_number - 1] = new_line
1693
-
1694
- with open(path, 'w', encoding='utf-8') as f:
1695
- f.write('\n'.join(existing_lines))
1696
-
1697
- print(f"Line {line_number} in {path.name}:")
1698
- print(f" - OLD: {old_line}")
1699
- print(f" + NEW: {new_line}")
1700
- else:
1701
- print(f"File only has {len(existing_lines)} lines")
1702
- return False
1703
-
1704
- elif insert_at is not None and new_line is not None:
1705
- if insert_at < 1:
1706
- insert_at = 1
1707
-
1708
- existing_lines.insert(insert_at - 1, new_line)
1709
-
1710
- with open(path, 'w', encoding='utf-8') as f:
1711
- f.write('\n'.join(existing_lines))
1712
-
1713
- print(f"Inserted at line {insert_at} in {path.name}: {new_line}")
1714
-
1715
- else:
1716
- print("Must specify either 'content', or 'line_number + new_line', or 'insert_at + new_line'")
1717
- return False
1718
-
1719
- return True
1720
-
1721
- except Exception as e:
1722
- print(f"Error editing file: {e}")
1723
- return False
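The edit_file helper above supports three mutually exclusive operations, always taking a .backup copy of an existing file first: whole-file writes (optionally appending), replacing one line, and inserting before a line. Typical calls look roughly like this:

    edit_file("notes.txt", content="first draft")               # overwrite (backup kept)
    edit_file("notes.txt", content="more text", append=True)    # append to the end
    edit_file("notes.txt", line_number=1, new_line="revised")   # replace line 1
    edit_file("notes.txt", insert_at=2, new_line="inserted")    # becomes the new line 2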
+ def main():
+ parser = argparse.ArgumentParser(description="guac - Python data analysis mode")
+ parser.add_argument("--model", "-m", type=str, help="LLM model to use")
+ parser.add_argument("--provider", "-p", type=str, help="LLM provider to use")
+ parser.add_argument("--plots-dir", type=str, help="Directory to save plots")
+ args = parser.parse_args()
 
1725
- def load_file(file_path):
1726
- path = Path(file_path).expanduser().resolve()
1727
-
1728
- if not path.exists():
1729
- print(f"File not found: {path}")
1730
- return None
1731
-
1732
- chunks = load_file_contents(str(path))
1733
- if chunks and not chunks[0].startswith("Error") and not chunks[0].startswith("Unsupported"):
1734
- content = '\n'.join(chunks)
1735
- print(f"Loaded {path.name} using npcpy loader")
1736
- return content
1737
- else:
1738
- print(f"Could not load {path.name}: {chunks[0] if chunks else 'Unknown error'}")
1739
- return None
1740
-
1741
- core_imports = {
1742
- 'pd': pd, 'np': np, 'plt': plt, 'datetime': datetime,
1743
- 'Path': Path, 'os': os, 'sys': sys, 'json': json,
1744
- 'yaml': yaml, 're': re, 'traceback': traceback,
1745
- 'edit_file': edit_file,
1746
- 'read_file':read_file,
1747
- 'load_file':load_file,
+ # Setup shell to get team and default NPC
+ command_history, team, default_npc = setup_shell()
+
+ if not team or "guac" not in team.jinxs_dict:
+ print("Error: guac jinx not found. Ensure npc_team/jinxs/modes/guac.jinx exists.")
+ sys.exit(1)
+
+ # Build context for jinx execution
+ context = {
+ "npc": default_npc,
+ "team": team,
+ "messages": [],
+ "model": args.model,
+ "provider": args.provider,
+ "plots_dir": args.plots_dir,
  }
 
1750
- locals_dict.update(core_imports)
1751
- locals_dict.update({f"guac_{k}": v for k, v in workspace_dirs.items()})
1752
-
1753
- print_guac_bowl()
1754
- print(f"Welcome to Guac Mode! Current mode: {state.current_mode.upper()}. Type /agent, /chat, or /cmd to switch modes.")
1755
- print(f"Workspace: {workspace_dirs['workspace']}")
1756
- print("💡 You can drag and drop files into the terminal to automatically import them!")
1757
-
1758
- command_count = 0
1759
-
1760
- try:
1761
- completer = make_completer(state, router)
1762
- readline.set_completer(completer)
1763
- except:
1764
- pass
1765
-
1766
- while True:
1767
- try:
1768
- try:
1769
- while True:
1770
- operation, code, exec_state, exec_locals = plot_queue.get_nowait()
1771
- if operation == 'execute_code':
1772
- print("\n[guac] Processing queued file drop...")
1773
- exec_state, exec_output = execute_python_code(code, exec_state, exec_locals)
1774
- if exec_output:
1775
- print(exec_output)
1776
- except queue.Empty:
1777
- pass
1778
-
1779
- state.current_path = os.getcwd()
1780
-
1781
- display_model = state.chat_model
1782
- if isinstance(state.npc, NPC) and state.npc.model:
1783
- display_model = state.npc.model
1784
- display_provider = state.chat_provider
1785
- if isinstance(state.npc, NPC) and state.npc.provider:
1786
- display_provider = state.npc.provider
1787
-
1788
- cwd_colored = colored(os.path.basename(state.current_path), "blue")
1789
- npc_name = state.npc.name if state.npc and state.npc.name else "guac"
1790
- prompt_char = get_guac_prompt_char(command_count)
1791
-
1792
- prompt_str = f"{cwd_colored}:{npc_name}:{display_model}@{display_provider}{prompt_char}> "
1793
- prompt = readline_safe_prompt(prompt_str)
1794
-
1795
- user_input = get_multiline_input(prompt).strip()
1796
-
1797
- if not user_input:
1798
- continue
1799
-
1800
- command_count += 1
1801
- state, result = execute_guac_command(user_input, state, locals_dict, project_name, package_root, router)
1802
-
1803
- process_result(user_input, state, result, state.command_history)
1804
-
1805
- except EOFError:
1806
- print("\nExiting Guac Mode...")
1807
- try:
1808
- readline.write_history_file(READLINE_HISTORY_FILE)
1809
- except:
1810
- pass
1811
- if _guac_monitor_stop_event:
1812
- _guac_monitor_stop_event.set()
1813
- if _guac_monitor_thread:
1814
- _guac_monitor_thread.join(timeout=1.0)
1815
- break
1816
- except SystemExit as e:
1817
- try:
1818
- readline.write_history_file(READLINE_HISTORY_FILE)
1819
- except:
1820
- pass
1821
- print(f"\n{e}")
1822
-
1823
- if _guac_monitor_stop_event:
1824
- _guac_monitor_stop_event.set()
1825
- if _guac_monitor_thread:
1826
- _guac_monitor_thread.join(timeout=1.0)
1827
- break
1828
-
1829
- except Exception:
1830
- print("An unexpected error occurred in the REPL:")
1831
- traceback.print_exc()
1832
- try:
1833
- readline.write_history_file(READLINE_HISTORY_FILE)
1834
- except:
1835
- pass
1836
-
1837
- if _guac_monitor_stop_event:
1838
- _guac_monitor_stop_event.set()
1839
- if _guac_monitor_thread:
1840
- _guac_monitor_thread.join(timeout=1.0)
1841
- break
1842
-
1843
-
1844
-
1845
- def enter_guac_mode(npc=None,
1846
- team=None,
1847
- config_dir=None,
1848
- plots_dir=None,
1849
- npc_team_dir=None,
1850
- refresh_period=None,
1851
- lang='python',
1852
- default_mode_choice=None):
1853
-
1854
- if refresh_period is not None:
1855
- try:
1856
- GUAC_REFRESH_PERIOD = int(refresh_period)
1857
- except ValueError:
1858
- pass
1859
-
1860
- setup_result = setup_guac_mode(
1861
- config_dir=config_dir,
1862
- plots_dir=plots_dir,
1863
- npc_team_dir=npc_team_dir,
1864
- lang=lang,
1865
- default_mode_choice=default_mode_choice
1866
- )
1867
-
1868
- project_name = setup_result.get("package_name", "project")
1869
- package_root = setup_result["package_root"]
1870
- package_name = setup_result.get("package_name", "project")
1871
- npc_team_dir = setup_result.get("npc_team_dir")
+ # Execute the jinx
+ guac_jinx = team.jinxs_dict["guac"]
+ result = guac_jinx.execute(context=context, npc=default_npc)
 
1873
-
1874
- command_history, default_team, default_npc = setup_shell()
1875
-
1876
-
1877
- state = ShellState(
1878
- conversation_id=start_new_conversation(),
1879
- stream_output=True,
1880
- current_mode=setup_result.get("default_mode", "cmd"),
1881
- chat_model=os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b"),
1882
- chat_provider=os.environ.get("NPCSH_CHAT_PROVIDER", "ollama"),
1883
- current_path=os.getcwd(),
1884
- npc=npc,
1885
- team=team or default_team
1886
- )
1887
-
1888
- state.command_history = command_history
1889
-
1890
- if npc is None and default_npc is None:
1891
- guac_npc_path = Path(npc_team_dir) / "guac.npc"
1892
- if guac_npc_path.exists():
1893
- npc = NPC(file=str(guac_npc_path),
1894
- db_conn=command_history.engine)
1895
- print(guac_npc_path, npc)
1896
-
1897
- team_ctx_path = get_team_ctx_path(str(npc_team_dir))
1898
- team_ctx = {}
1899
- if team_ctx_path and Path(team_ctx_path).exists():
1900
- with open(team_ctx_path, "r") as f:
1901
- team_ctx = yaml.safe_load(f) or {}
1902
- print(team_ctx, team_ctx_path)
1903
- team = Team(team_path=str(npc_team_dir),
1904
- forenpc=npc,
1905
- jinxs={})
1906
- team.name = team_ctx.get("team_name", "guac_global_team")
1907
- team.team_ctx = team_ctx
1908
- print(team)
1909
- if npc.model is None:
1910
- npc.model = team_ctx.get("model", state.chat_model)
1911
- if npc.provider is None:
1912
- npc.provider = team_ctx.get("provider", state.chat_provider)
1913
-
1914
- for npc_name, npc_obj in team.npcs.items():
1915
- if not npc_obj.model:
1916
- npc_obj.model = team_ctx.get("model", state.chat_model)
1917
- if not npc_obj.provider:
1918
- npc_obj.provider = team_ctx.get("provider", state.chat_provider)
1919
- else:
1920
- print("No local guac.npc found. Checking for global team...")
1921
- global_team_dir = ensure_global_guac_team()
1922
- global_guac_npc_path = global_team_dir / "guac.npc"
1923
-
1924
- if global_guac_npc_path.exists():
1925
- print("Using global guac team")
1926
- npc = NPC(file=str(global_guac_npc_path),
1927
- db_conn=command_history.engine)
1928
- team_ctx_path = global_team_dir / "team.ctx"
1929
- team_ctx = {}
1930
- if team_ctx_path.exists():
1931
- with open(team_ctx_path, "r") as f:
1932
- team_ctx = yaml.safe_load(f) or {}
1933
-
1934
- team = Team(team_path=str(global_team_dir),
1935
- forenpc=npc,
1936
- jinxs={})
1937
- team.name = team_ctx.get("team_name", "guac_global_team")
1938
- team.team_ctx = team_ctx
1939
-
1940
- if npc.model is None:
1941
- npc.model = team_ctx.get("model", state.chat_model)
1942
- if npc.provider is None:
1943
- npc.provider = team_ctx.get("provider", state.chat_provider)
1944
- else:
1945
- print("Could not find or create global guac team. Please run /init to set up guac properly.")
1946
- try:
1947
- user_choice = input("Would you like to initialize guac now? (y/n): ").strip().lower()
1948
- if user_choice == 'y':
1949
- setup_npc_team(Path(npc_team_dir), lang)
1950
- npc = NPC(file=str(Path(npc_team_dir) / "guac.npc"),
1951
- db_conn=command_history.engine)
1952
- team = Team(team_path=str(npc_team_dir), forenpc=npc, jinxs={})
1953
- else:
1954
- print("Exiting guac mode.")
1955
- return
1956
- except (KeyboardInterrupt, EOFError):
1957
- print("Initialization cancelled. Exiting guac mode.")
1958
- return
1959
- elif default_npc and npc is None:
1960
- npc = default_npc
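The branching above encodes a simple lookup order for the guac persona: a project-local npc_team/guac.npc wins, the global team is the fallback, and if neither exists the user is offered interactive initialization. A sketch of that resolution order (resolve_guac_npc is illustrative, not an npcsh function):

    from pathlib import Path

    def resolve_guac_npc(local_team_dir: Path, global_team_dir: Path):
        # Prefer a project-local guac.npc, then the global team copy;
        # returning None signals that setup/init is still needed.
        for candidate in (local_team_dir / "guac.npc", global_team_dir / "guac.npc"):
            if candidate.exists():
                return candidate
        return None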
1961
-
1962
-
1963
- state.npc = npc or default_npc
1964
- state.team = team or default_team
1965
-
1966
- state.plots_dir = setup_result.get("plots_dir")
1967
- state.config_dir = setup_result.get("config_dir")
1968
-
1969
- try:
1970
- readline.read_history_file(READLINE_HISTORY_FILE)
1971
- readline.set_history_length(1000)
1972
- readline.parse_and_bind("set enable-bracketed-paste on")
1973
- except FileNotFoundError:
1974
- pass
1975
- except OSError as e:
1976
- print(f"Warning: Could not read readline history file {READLINE_HISTORY_FILE}: {e}")
1977
-
1978
- run_guac_repl(state, project_name, package_root, package_name)
1979
-
1980
- def main():
1981
- parser = argparse.ArgumentParser(description="Enter Guac Mode - Interactive Python with LLM assistance.")
1982
- parser.add_argument("--config_dir", type=str, help="Guac configuration directory.")
1983
- parser.add_argument("--plots_dir", type=str, help="Directory to save plots.")
1984
- parser.add_argument("--npc_team_dir", type=str, default=None,
1985
- help="NPC team directory for Guac. Defaults to ./npc_team")
1986
- parser.add_argument("--refresh_period", type=int, help="Number of commands before suggesting /refresh.")
1987
- parser.add_argument("--default_mode", type=str, choices=["agent", "chat", "cmd"],
1988
- help="Default mode to start in.")
1989
-
1990
- args = parser.parse_args()
+ if isinstance(result, dict) and result.get("output"):
+ print(result["output"])
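Taken together, the added lines replace the large module removed above with a thin wrapper: parse the CLI flags, call setup_shell(), look up the guac jinx on the team, execute it with a context dict, and print any "output" it returns. Condensed into one place, using only the calls that appear in this diff (run_guac_cli is just an illustrative name):

    import argparse
    from npcsh._state import setup_shell

    def run_guac_cli(argv=None):
        parser = argparse.ArgumentParser(description="guac - Python data analysis mode")
        parser.add_argument("--model", "-m")
        parser.add_argument("--provider", "-p")
        parser.add_argument("--plots-dir")
        args = parser.parse_args(argv)

        command_history, team, default_npc = setup_shell()
        guac_jinx = team.jinxs_dict["guac"]
        context = {"npc": default_npc, "team": team, "messages": [],
                   "model": args.model, "provider": args.provider,
                   "plots_dir": args.plots_dir}
        result = guac_jinx.execute(context=context, npc=default_npc)
        if isinstance(result, dict) and result.get("output"):
            print(result["output"])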
 
1992
- enter_guac_mode(
1993
- config_dir=args.config_dir,
1994
- plots_dir=args.plots_dir,
1995
- npc_team_dir=args.npc_team_dir,
1996
- refresh_period=args.refresh_period,
1997
- default_mode_choice=args.default_mode
1998
- )
 
  if __name__ == "__main__":
  main()