npcsh 1.0.16__py3-none-any.whl → 1.0.17__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as published to a supported registry. It is provided for informational purposes only.
npcsh/guac.py CHANGED
@@ -1,92 +1,158 @@
1
- import re
1
+ from chroptiks.plotting_utils import *
2
+ from datetime import datetime
3
+ import json
4
+ import numpy as np
2
5
  import os
6
+ import pandas as pd
3
7
  import sys
4
- import code
8
+ import argparse
9
+ import importlib.metadata
10
+ import matplotlib.pyplot as plt
11
+
12
+ plt.ioff()
13
+
14
+ import platform
5
15
  import yaml
16
+ import re
6
17
  from pathlib import Path
7
- import atexit
8
- import traceback
9
18
  from typing import Optional, List, Dict, Any, Tuple
10
- from dataclasses import dataclass, field
11
- import json
12
- import pandas as pd
13
- import numpy as np
14
- import matplotlib.pyplot as plt
15
- import datetime
16
- import argparse
17
- import io
18
- import importlib.util
19
+ import traceback
20
+
21
+ try:
22
+ from termcolor import colored
23
+ except ImportError:
24
+ pass
19
25
 
20
- from npcpy.memory.command_history import CommandHistory
26
+ import sys
27
+
28
+ from npcpy.memory.command_history import CommandHistory, start_new_conversation
21
29
  from npcpy.npc_compiler import Team, NPC
22
- from npcpy.llm_funcs import get_llm_response, check_llm_command, execute_llm_command
23
- from npcsh._state import initial_state as npcsh_initial_state
24
- from npcpy.npc_sysenv import render_markdown, print_and_process_stream_with_markdown
30
+ from npcpy.llm_funcs import get_llm_response
31
+ from npcpy.npc_sysenv import render_markdown, print_and_process_stream_with_markdown
32
+
33
+
34
+ from npcsh._state import (
35
+ ShellState,
36
+ execute_command,
37
+ make_completer,
38
+ process_result,
39
+ readline_safe_prompt,
40
+ setup_shell,
41
+ get_multiline_input,
42
+ orange
43
+ )
44
+ import threading
45
+ import time
46
+ import ctypes
47
+ import ctypes.util
25
48
 
26
49
  try:
27
50
  import readline
28
51
  except ImportError:
29
- readline = None
52
+ print('No readline support; some features may not work as desired.')
53
+
54
+ try:
55
+ VERSION = importlib.metadata.version("npcsh")
56
+ except importlib.metadata.PackageNotFoundError:
57
+ VERSION = "unknown"
30
58
 
31
59
  GUAC_REFRESH_PERIOD = os.environ.get('GUAC_REFRESH_PERIOD', 100)
32
60
  READLINE_HISTORY_FILE = os.path.expanduser("~/.guac_readline_history")
33
- try:
34
- npcsh_initial_state.GUAC_REFRESH_PERIOD = int(GUAC_REFRESH_PERIOD)
35
- except ValueError:
36
- npcsh_initial_state.GUAC_REFRESH_PERIOD = 100
37
-
38
- @dataclass
39
- class GuacState:
40
- current_mode: str = "cmd"
41
- current_path: str = field(default_factory=os.getcwd)
42
- npc: Optional[NPC] = None
43
- team: Optional[Team] = None
44
- messages: List[Dict[str, str]] = field(default_factory=list)
45
- locals: Dict[str, Any] = field(default_factory=dict)
46
- command_history: Optional[CommandHistory] = None
47
- chat_model: Optional[str] = npcsh_initial_state.chat_model
48
- chat_provider: Optional[str] = npcsh_initial_state.chat_provider
49
- stream_output: bool = True
50
- config_dir: Optional[Path] = None
51
- src_dir: Optional[Path] = None
52
- command_count: int = 0
53
- compile_buffer: List[str] = field(default_factory=list)
54
-
55
- def get_multiline_input_guac(prompt_str: str, state: GuacState) -> str:
56
- lines = list(state.compile_buffer)
57
- current_prompt = prompt_str if not lines else "... "
58
- while True:
61
+ # File extension mapping for organization
62
+ EXTENSION_MAP = {
63
+ "PNG": "images", "JPG": "images", "JPEG": "images", "GIF": "images", "SVG": "images",
64
+ "MP4": "videos", "AVI": "videos", "MOV": "videos", "WMV": "videos", "MPG": "videos", "MPEG": "videos",
65
+ "DOC": "documents", "DOCX": "documents", "PDF": "documents", "PPT": "documents", "PPTX": "documents",
66
+ "XLS": "documents", "XLSX": "documents", "TXT": "documents", "CSV": "documents",
67
+ "ZIP": "archives", "RAR": "archives", "7Z": "archives", "TAR": "archives", "GZ": "archives", "BZ2": "archives",
68
+ "ISO": "archives", "NPY": "data", "NPZ": "data", "H5": "data", "HDF5": "data", "PKL": "data", "JOBLIB": "data"
69
+ }
70
+
71
+ _guac_monitor_thread = None
72
+ _guac_monitor_stop_event = None
73
+
74
+ def _clear_readline_buffer():
75
+ """Clear the current readline input buffer and redisplay prompt."""
76
+ try:
77
+ # Preferred: use Python readline API if available
78
+ if hasattr(readline, "replace_line") and hasattr(readline, "redisplay"):
79
+ readline.replace_line("", 0)
80
+ readline.redisplay()
81
+ return True
82
+ except Exception:
83
+ pass
84
+
85
+ # Fallback: call rl_replace_line and rl_redisplay from the linked readline/libedit
86
+ try:
87
+ libname = ctypes.util.find_library("readline") or ctypes.util.find_library("edit") or "readline"
88
+ rl = ctypes.CDLL(libname)
89
+ # rl_replace_line(char *text, int clear_undo)
90
+ rl.rl_replace_line.argtypes = [ctypes.c_char_p, ctypes.c_int]
91
+ rl.rl_redisplay.argtypes = []
92
+ rl.rl_replace_line(b"", 0)
93
+ rl.rl_redisplay()
94
+ return True
95
+ except Exception:
96
+ return False
97
+
98
+ def _file_drop_monitor(npc_team_dir: Path, state: ShellState, locals_dict: Dict[str, Any], poll_interval: float = 0.2):
99
+ """
100
+ Background thread: poll readline.get_line_buffer() and process file drops immediately.
101
+ """
102
+ processed_bufs = set()
103
+ stop_event = _guac_monitor_stop_event
104
+ while stop_event is None or not stop_event.is_set():
59
105
  try:
60
- line = input(current_prompt)
61
- lines.append(line)
62
- current_prompt = "... "
63
- if not line and len(lines) > 1 and not lines[-2].strip():
64
- lines.pop()
65
- lines.pop()
66
- break
67
- if not line and len(lines) == 1:
68
- lines.pop()
69
- break
70
- if len(lines) == 1 and line.strip():
71
- temp_line = line.strip()
72
- is_block_starter = re.match(r"^\s*(def|class|for|while|if|try|with|@)", temp_line)
73
- ends_with_colon_for_block = temp_line.endswith(":") and is_block_starter
74
- if not is_block_starter and not ends_with_colon_for_block:
75
- open_brackets = (temp_line.count('(') - temp_line.count(')') +
76
- temp_line.count('[') - temp_line.count(']') +
77
- temp_line.count('{') - temp_line.count('}'))
78
- if open_brackets <= 0:
79
- break
80
- except EOFError:
81
- print("\nGoodbye!")
82
- sys.exit(0)
83
- except KeyboardInterrupt:
84
- print("\nKeyboardInterrupt")
85
- state.compile_buffer.clear()
86
- return ""
87
- full_input = "\n".join(lines)
88
- state.compile_buffer.clear()
89
- return full_input
106
+ buf = ""
107
+ try:
108
+ buf = readline.get_line_buffer()
109
+ except Exception:
110
+ buf = ""
111
+ if not buf:
112
+ time.sleep(poll_interval)
113
+ continue
114
+
115
+ # Normalize buffer
116
+ candidate = buf.strip()
117
+ # If quoted, remove quotes
118
+ if (candidate.startswith("'") and candidate.endswith("'")) or (candidate.startswith('"') and candidate.endswith('"')):
119
+ inner = candidate[1:-1]
120
+ else:
121
+ inner = candidate
122
+
123
+ # quick check: must be single token and existing file
124
+ if " " not in inner and Path(inner.replace('~', str(Path.home()))).expanduser().exists() and Path(inner.replace('~', str(Path.home()))).expanduser().is_file():
125
+ # Avoid double-processing same buffer
126
+ if buf in processed_bufs:
127
+ time.sleep(poll_interval)
128
+ continue
129
+ processed_bufs.add(buf)
130
+
131
+ # Immediately process: copy and load
132
+ try:
133
+ # Reuse the existing multi-file handler so the copy follows the workspace directory structure;
134
+ # for a single dropped file, call _handle_file_drop first to copy it, then execute immediately
135
+ modified_input, processed_files = _handle_file_drop(buf, npc_team_dir)
136
+ if processed_files:
137
+ target_path = processed_files[0]
138
+ # Generate loading code based on original file (inner) and target_path
139
+ loading_code = _generate_file_analysis_code(inner, target_path)
140
+ # Execute via execute_python_code so the command is recorded in history
141
+ print("\n[guac] Detected file drop — processing automatically...")
142
+ # Note: execute_python_code expects state and locals_dict
143
+ _state, exec_output = execute_python_code(loading_code, state, locals_dict)
144
+ # Print the captured output returned by execute_python_code
145
+ if exec_output:
146
+ print(exec_output)
147
+ # Clear the current readline buffer so the user doesn't have to press Enter
148
+ _clear_readline_buffer()
149
+ except Exception as e:
150
+ print(f"[guac][ERROR] file drop processing failed: {e}")
151
+ except Exception:
152
+ # Be resilient: don't let the monitor thread die
153
+ pass
154
+ time.sleep(poll_interval)
155
+
90
156
 
91
157
  def is_python_code(text: str) -> bool:
92
158
  text = text.strip()
@@ -101,258 +167,208 @@ def is_python_code(text: str) -> bool:
101
167
  return True
102
168
  except SyntaxError:
103
169
  return False
104
- except (OverflowError, ValueError): # Other potential compile errors
170
+ except (OverflowError, ValueError):
105
171
  return False
172
+ def execute_python_code(code_str: str, state: ShellState, locals_dict: Dict[str, Any]) -> Tuple[ShellState, Any]:
173
+ import io
174
+ output_capture = io.StringIO()
175
+ original_stdout = sys.stdout
176
+ original_stderr = sys.stderr
177
+ final_output_str = None
178
+ is_expression = False
106
179
 
107
-
108
- def setup_guac_readline(history_file: str):
109
- if not readline:
110
- return
111
- try:
112
- readline.read_history_file(history_file)
113
- except FileNotFoundError:
114
- pass
115
- except OSError:
116
- pass
117
-
118
- try:
119
- if sys.stdin.isatty():
120
- readline.set_history_length(1000)
121
- try:
122
- readline.parse_and_bind("set enable-bracketed-paste on")
123
- except Exception:
124
- pass
125
- except Exception:
126
- pass
127
-
128
- def save_guac_readline_history(history_file: str):
129
- if not readline:
130
- return
131
180
  try:
132
- readline.write_history_file(history_file)
133
- except OSError:
134
- pass
135
- except Exception:
136
- pass
181
+ sys.stdout = output_capture
182
+ sys.stderr = output_capture
137
183
 
138
- def _load_guac_helpers_into_state(state: GuacState):
139
- if state.src_dir:
140
- main_module_path = state.src_dir / "main.py"
141
- if main_module_path.exists():
184
+ if '\n' not in code_str.strip() and not re.match(r"^\s*(def|class|for|while|if|try|with|import|from|@)", code_str.strip()):
142
185
  try:
143
- p_path = str(state.src_dir.parent)
144
- s_path = str(state.src_dir)
145
- if p_path not in sys.path:
146
- sys.path.insert(0, p_path)
147
- if s_path not in sys.path:
148
- sys.path.insert(0, s_path)
149
-
150
- spec = importlib.util.spec_from_file_location("guac_main_helpers", main_module_path)
151
- if spec and spec.loader:
152
- guac_main = importlib.util.module_from_spec(spec)
153
- spec.loader.exec_module(guac_main)
154
- for name in dir(guac_main):
155
- if not name.startswith('__'):
156
- state.locals[name] = getattr(guac_main, name)
157
-
158
- core_imports = {
159
- 'pd': pd, 'np': np, 'plt': plt, 'datetime': datetime,
160
- 'Path': Path, 'os': os, 'sys': sys, 'json': json,
161
- 'yaml': yaml, 're': re, 'traceback': traceback
162
- }
163
- state.locals.update(core_imports)
164
- except Exception as e:
165
- print(f"Warning: Could not load helpers from {main_module_path}: {e}", file=sys.stderr)
166
-
167
- def setup_guac_mode(config_dir=None,
168
- plots_dir=None,
169
- npc_team_dir=None,
170
- lang='python',
171
- ):
172
- home_dir = Path.home()
173
- config_dir = Path(config_dir) if config_dir else home_dir / ".npcsh" / "guac"
174
- plots_dir = Path(plots_dir) if plots_dir else config_dir / "plots"
175
- npc_team_dir = Path(npc_team_dir) if npc_team_dir else config_dir / "npc_team"
176
- src_dir = config_dir / "src"
177
-
178
- for p in [src_dir, plots_dir, npc_team_dir]:
179
- p.mkdir(parents=True, exist_ok=True)
180
-
181
-
182
- team_ctx = {
183
- "team_name": "guac_team",
184
- "description": f"A team of NPCs specialized in {lang} analysis",
185
- "forenpc": "guac",
186
- "model": os.environ.get("NPCSH_CHAT_MODEL", "llama3.2"),
187
- "provider": os.environ.get("NPCSH_CHAT_PROVIDER", "ollama")
188
- }
189
-
190
- with open(npc_team_dir / "team.ctx", "w") as f:
191
- yaml.dump(team_ctx, f, default_flow_style=False)
192
-
193
-
194
- if not (config_dir / "__init__.py").exists():
195
- (config_dir / "__init__.py").touch()
196
-
197
- config_file = config_dir / "config.json"
198
- default_mode_val = "cmd"
199
- current_config = {}
200
-
201
- if config_file.exists():
202
- try:
203
- with open(config_file, "r") as f:
204
- current_config = json.load(f)
205
- default_mode_val = current_config.get("default_mode", "cmd")
206
- except json.JSONDecodeError:
207
- pass
208
-
209
- if not current_config or \
210
- current_config.get("preferred_language") != lang or \
211
- current_config.get("default_mode") is None:
212
- current_config = {
213
- "preferred_language": lang,
214
- "plots_directory": str(plots_dir),
215
- "npc_team_directory": str(npc_team_dir),
216
- "default_mode": default_mode_val
217
- }
218
- with open(config_file, "w") as f:
219
- json.dump(current_config, f, indent=2)
186
+ compiled_expr = compile(code_str, "<input>", "eval")
187
+ exec_result = eval(compiled_expr, locals_dict)
188
+ if exec_result is not None and not output_capture.getvalue().strip():
189
+ print(repr(exec_result), file=sys.stdout)
190
+ is_expression = True
191
+ except SyntaxError:
192
+ is_expression = False
193
+ except Exception:
194
+ is_expression = False
195
+ raise
196
+
197
+ if not is_expression:
198
+ compiled_code = compile(code_str, "<input>", "exec")
199
+ exec(compiled_code, locals_dict)
220
200
 
221
- os.environ["NPCSH_GUAC_LANG"] = lang
222
- os.environ["NPCSH_GUAC_PLOTS"] = str(plots_dir)
223
- os.environ["NPCSH_GUAC_TEAM"] = str(npc_team_dir)
224
- npcsh_initial_state.GUAC_DEFAULT_MODE = default_mode_val
201
+ except SyntaxError:
202
+ exc_type, exc_value, _ = sys.exc_info()
203
+ error_lines = traceback.format_exception_only(exc_type, exc_value)
204
+ adjusted_error_lines = [line.replace('File "<input>"', 'Syntax error in input') for line in error_lines]
205
+ print("".join(adjusted_error_lines), file=output_capture, end="")
206
+ except Exception:
207
+ exc_type, exc_value, exc_tb = sys.exc_info()
208
+ traceback.print_exception(exc_type, exc_value, exc_tb, file=output_capture)
209
+ finally:
210
+ sys.stdout = original_stdout
211
+ sys.stderr = original_stderr
212
+ final_output_str = output_capture.getvalue().strip()
213
+ output_capture.close()
214
+
215
+ # Capture the current plot state after executing the code
216
+ _capture_plot_state(state.conversation_id, state.command_history.db_path, Path.cwd() / "npc_team")
217
+
218
+ if state.command_history:
219
+ state.command_history.add_command(code_str, [final_output_str if final_output_str else ""], "", state.current_path)
220
+ return state, final_output_str
225
221
 
226
- if not (src_dir / "__init__.py").exists():
227
- with open(src_dir / "__init__.py", "w") as f:
228
- f.write("# Guac source directory\n")
222
+ # Generate loading/analysis code for a dropped file, appending a state-capture call to each block
223
+ def _generate_file_analysis_code(file_path: str, target_path: str) -> str:
224
+ """Generate Python code to load and analyze the dropped file"""
225
+ ext = Path(file_path).suffix.lower()
226
+ file_var_name = f"file_{datetime.now().strftime('%H%M%S')}"
229
227
 
230
- main_py_content = """import pandas as pd
231
- import numpy as np
228
+ capture_code = f"""
229
+ # Capture file analysis state
230
+ _capture_file_state('{state.conversation_id}', '{state.command_history.db_path}', r'{target_path}', '''AUTO_GENERATED_CODE''', locals())
231
+ """
232
+
233
+ if ext == '.pdf':
234
+ return f"""
235
+ # Automatically loaded PDF file
236
+ import PyPDF2
237
+ import pandas as pd
238
+ try:
239
+ with open(r'{target_path}', 'rb') as file:
240
+ pdf_reader = PyPDF2.PdfReader(file)
241
+ {file_var_name}_text = ""
242
+ for page_num in range(len(pdf_reader.pages)):
243
+ {file_var_name}_text += pdf_reader.pages[page_num].extract_text()
244
+
245
+ print(f"📄 Loaded PDF: {{len(pdf_reader.pages)}} pages, {{len({file_var_name}_text)}} characters")
246
+ print("First 500 characters:")
247
+ print({file_var_name}_text[:500])
248
+ print("\\n--- PDF loaded as '{file_var_name}_text' variable ---")
249
+ {capture_code}
250
+ except Exception as e:
251
+ print(f"Error loading PDF: {{e}}")
252
+ {file_var_name}_text = None
253
+ """
254
+
255
+ elif ext in ['.csv']:
256
+ return f"""
257
+ # Automatically loaded CSV file
258
+ import pandas as pd
259
+ try:
260
+ {file_var_name}_df = pd.read_csv(r'{target_path}')
261
+ print(f"📊 Loaded CSV: {{len({file_var_name}_df)}} rows, {{len({file_var_name}_df.columns)}} columns")
262
+ print("Columns:", list({file_var_name}_df.columns))
263
+ print("\\nFirst 5 rows:")
264
+ print({file_var_name}_df.head())
265
+ print(f"\\n--- CSV loaded as '{file_var_name}_df' variable ---")
266
+ {capture_code}
267
+ except Exception as e:
268
+ print(f"Error loading CSV: {{e}}")
269
+ {file_var_name}_df = None
270
+ """
271
+
272
+ elif ext in ['.xlsx', '.xls']:
273
+ return f"""
274
+ # Automatically loaded Excel file
275
+ import pandas as pd
276
+ try:
277
+ {file_var_name}_df = pd.read_excel(r'{target_path}')
278
+ print(f"📊 Loaded Excel: {{len({file_var_name}_df)}} rows, {{len({file_var_name}_df.columns)}} columns")
279
+ print("Columns:", list({file_var_name}_df.columns))
280
+ print("\\nFirst 5 rows:")
281
+ print({file_var_name}_df.head())
282
+ print(f"\\n--- Excel loaded as '{file_var_name}_df' variable ---")
283
+ {capture_code}
284
+ except Exception as e:
285
+ print(f"Error loading Excel: {{e}}")
286
+ {file_var_name}_df = None
287
+ """
288
+
289
+ elif ext in ['.json']:
290
+ return f"""
291
+ # Automatically loaded JSON file
292
+ import json
293
+ try:
294
+ with open(r'{target_path}', 'r') as file:
295
+ {file_var_name}_data = json.load(file)
296
+ print(f"📄 Loaded JSON: {{type({file_var_name}_data)}}")
297
+ if isinstance({file_var_name}_data, dict):
298
+ print("Keys:", list({file_var_name}_data.keys()))
299
+ elif isinstance({file_var_name}_data, list):
300
+ print(f"List with {{len({file_var_name}_data)}} items")
301
+ print(f"\\n--- JSON loaded as '{file_var_name}_data' variable ---")
302
+ {capture_code}
303
+ except Exception as e:
304
+ print(f"Error loading JSON: {{e}}")
305
+ {file_var_name}_data = None
306
+ """
307
+
308
+ elif ext in ['.txt', '.md']:
309
+ return f"""
310
+ # Automatically loaded text file
311
+ try:
312
+ with open(r'{target_path}', 'r', encoding='utf-8') as file:
313
+ {file_var_name}_text = file.read()
314
+ print(f"📄 Loaded text file: {{len({file_var_name}_text)}} characters")
315
+ print("First 500 characters:")
316
+ print({file_var_name}_text[:500])
317
+ print(f"\\n--- Text loaded as '{file_var_name}_text' variable ---")
318
+ {capture_code}
319
+ except Exception as e:
320
+ print(f"Error loading text file: {{e}}")
321
+ {file_var_name}_text = None
322
+ """
323
+
324
+ elif ext in ['.png', '.jpg', '.jpeg', '.gif']:
325
+ return f"""
326
+ # Automatically loaded image file
232
327
  import matplotlib.pyplot as plt
233
- import os
234
- import datetime
235
- from pathlib import Path
236
-
237
- def save_plot(name=None, plots_dir=None):
238
- if plots_dir is None:
239
- plots_dir = os.environ.get("NPCSH_GUAC_PLOTS", Path.home() / ".npcsh" / "guac" / "plots")
240
- plots_dir = Path(plots_dir)
241
- plots_dir.mkdir(parents=True, exist_ok=True)
242
- timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
243
- filename = f"{timestamp}_{name}.png" if name else f"{timestamp}_plot.png"
244
- filepath = plots_dir / filename
245
- try:
246
- if plt.get_fignums():
247
- plt.savefig(filepath)
248
- print(f"Plot saved to {filepath}")
249
- else:
250
- print("No active matplotlib plot to save.")
251
- return None
252
- except Exception as e:
253
- print(f"Error saving plot: {e}")
254
- return None
255
- return filepath
256
-
257
- def read_img(img_path):
258
- try:
259
- from PIL import Image
260
- img = Image.open(img_path)
261
- img.show()
262
- except ImportError:
263
- print("PIL (Pillow) not available. Please install it: pip install Pillow")
264
- except FileNotFoundError:
265
- print(f"Image file not found: {img_path}")
266
- except Exception as e:
267
- print(f"Error reading image {img_path}: {e}")
268
- return img_path
328
+ from PIL import Image
329
+ import numpy as np
330
+ try:
331
+ {file_var_name}_img = Image.open(r'{target_path}')
332
+ {file_var_name}_array = np.array({file_var_name}_img)
333
+ print(f"🖼️ Loaded image: {{({file_var_name}_img.size)}} pixels, mode: {{{file_var_name}_img.mode}}")
334
+ print(f"Array shape: {{{file_var_name}_array.shape}}")
335
+
336
+ plt.figure(figsize=(8, 6))
337
+ plt.imshow({file_var_name}_img)
338
+ plt.axis('off')
339
+ plt.title('Loaded Image: {Path(file_path).name}')
340
+ plt.show()
341
+ print(f"\\n--- Image loaded as '{file_var_name}_img' and '{file_var_name}_array' variables ---")
342
+ {capture_code}
343
+ except Exception as e:
344
+ print(f"Error loading image: {{e}}")
345
+ {file_var_name}_img = None
346
+ {file_var_name}_array = None
269
347
  """
270
- if not (src_dir / "main.py").exists():
271
- with open(src_dir / "main.py", "w") as f:
272
- f.write(main_py_content)
273
348
 
274
- if str(config_dir) not in sys.path:
275
- sys.path.insert(0, str(config_dir))
276
- if str(config_dir.parent) not in sys.path:
277
- sys.path.insert(0, str(config_dir.parent))
278
-
279
- setup_npc_team(npc_team_dir, lang)
280
- return {
281
- "language": lang, "src_dir": src_dir, "config_path": config_file,
282
- "plots_dir": plots_dir, "npc_team_dir": npc_team_dir,
283
- "config_dir": config_dir, "default_mode": default_mode_val
284
- }
285
-
286
- def setup_npc_team(npc_team_dir, lang):
287
- guac_npc = {
288
- "name": "guac",
289
- "primary_directive": (
290
- f"You are guac, an AI assistant operating in a Python environment. "
291
- f"When asked to perform actions or generate code, prioritize Python. "
292
- f"For general queries, provide concise answers. "
293
- f"When routing tasks (agent mode), consider Python-based tools or direct Python code generation if appropriate. "
294
- f"If generating code directly (cmd mode), ensure it's Python."
295
- )
296
- }
297
- caug_npc = {
298
- "name": "caug",
299
- "primary_directive": f"You are caug, a specialist in big data statistical methods in {lang}."
300
- }
301
-
302
- parsely_npc = {
303
- "name": "parsely",
304
- "primary_directive": f"You are parsely, a specialist in mathematical methods in {lang}."
305
- }
306
-
307
- toon_npc = {
308
- "name": "toon",
309
- "primary_directive": f"You are toon, a specialist in brute force methods in {lang}."
310
- }
311
-
312
- for npc_data in [guac_npc, caug_npc, parsely_npc, toon_npc]:
313
- npc_file = npc_team_dir / f"{npc_data['name']}.npc"
314
- with open(npc_file, "w") as f:
315
- yaml.dump(npc_data, f, default_flow_style=False)
316
-
349
+ else:
350
+ return f"""
351
+ # Automatically loaded file (unknown type)
352
+ try:
353
+ with open(r'{target_path}', 'rb') as file:
354
+ {file_var_name}_data = file.read()
355
+ print(f"📄 Loaded binary file: {{len({file_var_name}_data)}} bytes")
356
+ print(f"File extension: {ext}")
357
+ print(f"\\n--- Binary data loaded as '{file_var_name}_data' variable ---")
358
+ {capture_code}
359
+ except Exception as e:
360
+ print(f"Error loading file: {{e}}")
361
+ {file_var_name}_data = None
362
+ """
317
363
 
318
- team_ctx_model = os.environ.get("NPCSH_CHAT_MODEL", npcsh_initial_state.chat_model or "llama3.2")
319
- team_ctx_provider = os.environ.get("NPCSH_CHAT_PROVIDER", npcsh_initial_state.chat_provider or "ollama")
320
- team_ctx = {
321
- "team_name": "guac_team", "description": f"A team for {lang} analysis", "foreman": "guac",
322
- "model": team_ctx_model, "provider": team_ctx_provider
323
- }
324
- npcsh_initial_state.chat_model = team_ctx_model
325
- npcsh_initial_state.chat_provider = team_ctx_provider
326
- with open(npc_team_dir / "team.ctx", "w") as f:
327
- yaml.dump(team_ctx, f, default_flow_style=False)
328
364
 
329
- def print_guac_bowl():
330
- bowl_art = """
331
- 🟢🟢🟢🟢🟢
332
- 🟢 🟢
333
- 🟢
334
- 🟢
335
- 🟢
336
- 🟢 🟢🟢🟢 🟢 🟢 🟢🟢🟢 🟢🟢🟢
337
- 🟢 🟢 🟢 🟢 ⚫⚫🟢 🟢
338
- 🟢 🟢 🟢 🟢 ⚫🥑🧅⚫ 🟢
339
- 🟢 🟢 🟢 🟢 ⚫🥑🍅⚫ 🟢
340
- 🟢🟢🟢🟢🟢🟢 🟢🟢🟢🟢 ⚫⚫🟢 🟢🟢🟢
341
- """
342
- print(bowl_art)
343
365
 
344
- def get_guac_prompt_char(command_count: int) -> str:
345
- period = int(npcsh_initial_state.GUAC_REFRESH_PERIOD)
346
- period = max(1, period)
347
- stages = ["\U0001F951", "\U0001F951🔪", "\U0001F951🥣", "\U0001F951🥣🧂", "\U0001F958 REFRESH?"]
348
- divisor = max(1, period // (len(stages)-1) if len(stages) > 1 else period)
349
- stage_index = min(command_count // divisor, len(stages) - 1)
350
- return stages[stage_index]
351
366
 
352
- def _handle_guac_refresh(state: GuacState):
367
+ def _handle_guac_refresh(state: ShellState, project_name: str, src_dir: Path):
353
368
  if not state.command_history or not state.npc:
354
369
  print("Cannot refresh: command history or NPC not available.")
355
370
  return
371
+
356
372
  history_entries = state.command_history.get_all()
357
373
  if not history_entries:
358
374
  print("No command history to analyze for refresh.")
@@ -379,7 +395,11 @@ def _handle_guac_refresh(state: GuacState):
379
395
  prompt = "\n".join(prompt_parts)
380
396
 
381
397
  try:
382
- response = get_llm_response(prompt, model=state.chat_model, provider=state.chat_provider, npc=state.npc, stream=False)
398
+ response = get_llm_response(prompt,
399
+ model=state.chat_model,
400
+ provider=state.chat_provider,
401
+ npc=state.npc,
402
+ stream=False)
383
403
  suggested_code_raw = response.get("response", "").strip()
384
404
  code_blocks = re.findall(r'```python\s*(.*?)\s*```', suggested_code_raw, re.DOTALL)
385
405
 
@@ -401,393 +421,1210 @@ def _handle_guac_refresh(state: GuacState):
401
421
 
402
422
  user_choice = input("Add these functions to your main.py? (y/n): ").strip().lower()
403
423
  if user_choice == 'y':
404
- main_py_path = state.src_dir / "main.py"
424
+ main_py_path = src_dir / "main.py"
405
425
  with open(main_py_path, "a") as f:
406
426
  f.write("\n\n# --- Functions suggested by /refresh ---\n")
407
427
  f.write(suggested_functions_code)
408
428
  f.write("\n# --- End of suggested functions ---\n")
409
429
  print(f"Functions appended to {main_py_path}.")
410
- print("To use them in the current session: import importlib; importlib.reload(guac.src.main); from guac.src.main import *")
430
+ print(f"To use them in the current session: import importlib; importlib.reload({project_name}.src.main); from {project_name}.src.main import *")
411
431
  else:
412
432
  print("Suggested functions not added.")
413
433
  except Exception as e:
414
434
  print(f"Error during /refresh: {e}")
415
435
  traceback.print_exc()
436
+ def setup_guac_mode(config_dir=None, plots_dir=None, npc_team_dir=None, lang='python', default_mode_choice=None):
437
+ base_dir = Path.cwd()
438
+
439
+ if config_dir is None:
440
+ config_dir = base_dir / ".guac"
441
+ else:
442
+ config_dir = Path(config_dir)
443
+
444
+ if plots_dir is None:
445
+ plots_dir = base_dir / "plots"
446
+ else:
447
+ plots_dir = Path(plots_dir)
448
+
449
+ if npc_team_dir is None:
450
+ npc_team_dir = base_dir / "npc_team"
451
+ else:
452
+ npc_team_dir = Path(npc_team_dir)
453
+
454
+ for p in [config_dir, plots_dir, npc_team_dir]:
455
+ p.mkdir(parents=True, exist_ok=True)
416
456
 
417
- def execute_python_code(code_str: str, state: GuacState) -> Tuple[GuacState, Any]:
418
- output_capture = io.StringIO()
419
- original_stdout = sys.stdout
420
- original_stderr = sys.stderr
421
- final_output_str = None
422
- is_expression = False
457
+ # Setup Guac workspace
458
+ workspace_dirs = _get_workspace_dirs(npc_team_dir)
459
+ _ensure_workspace_dirs(workspace_dirs)
423
460
 
424
- try:
425
- sys.stdout = output_capture
426
- sys.stderr = output_capture
461
+ # Load any existing team context so previously configured values are preserved
462
+ team_ctx_path = npc_team_dir / "team.ctx"
463
+ existing_ctx = {}
464
+
465
+ if team_ctx_path.exists():
466
+ try:
467
+ with open(team_ctx_path, "r") as f:
468
+ existing_ctx = yaml.safe_load(f) or {}
469
+ except Exception as e:
470
+ print(f"Warning: Could not read team.ctx: {e}")
427
471
 
428
- if '\n' not in code_str.strip() and not re.match(r"^\s*(def|class|for|while|if|try|with|import|from|@)", code_str.strip()):
429
- try:
430
- compiled_expr = compile(code_str, "<input>", "eval")
431
- exec_result = eval(compiled_expr, state.locals)
432
- if exec_result is not None and not output_capture.getvalue().strip():
433
- print(repr(exec_result), file=sys.stdout)
434
- is_expression = True
435
- except SyntaxError:
436
- is_expression = False
437
- except Exception:
438
- is_expression = False
439
- raise
440
-
441
- if not is_expression:
442
- compiled_code = compile(code_str, "<input>", "exec")
443
- exec(compiled_code, state.locals)
472
+ package_root = existing_ctx.get("GUAC_PACKAGE_ROOT")
473
+ package_name = existing_ctx.get("GUAC_PACKAGE_NAME")
474
+
475
+ if package_root is None or package_name is None:
476
+ try:
477
+ response = input("Enter the path to your Python package root (press Enter for current directory): ").strip()
478
+ package_root = response if response else str(base_dir)
479
+
480
+ response = input("Enter your package name (press Enter to use 'project'): ").strip()
481
+ package_name = response if response else "project"
482
+ except EOFError:
483
+ package_root = str(base_dir)
484
+ package_name = "project"
444
485
 
445
- except SyntaxError:
446
- exc_type, exc_value, _ = sys.exc_info()
447
- error_lines = traceback.format_exception_only(exc_type, exc_value)
448
- adjusted_error_lines = [line.replace('File "<input>"', 'Syntax error in input') for line in error_lines]
449
- print("".join(adjusted_error_lines), file=output_capture, end="")
450
- except Exception:
451
- exc_type, exc_value, exc_tb = sys.exc_info()
452
- traceback.print_exception(exc_type, exc_value, exc_tb, file=output_capture)
453
- finally:
454
- sys.stdout = original_stdout
455
- sys.stderr = original_stderr
456
- final_output_str = output_capture.getvalue().strip()
457
- output_capture.close()
486
+ project_name = existing_ctx.get("GUAC_PROJECT_NAME")
487
+ project_description = existing_ctx.get("GUAC_PROJECT_DESCRIPTION")
458
488
 
459
- if state.command_history:
460
- state.command_history.add_command(code_str, [final_output_str if final_output_str else ""], "", state.current_path)
461
- return state, final_output_str
489
+ if project_name is None:
490
+ try:
491
+ project_name = input("Enter the project name: ").strip() or "unknown_project"
492
+ except EOFError:
493
+ project_name = "unknown_project"
494
+ if project_description is None:
495
+ try:
496
+ project_description = input("Enter a short description of the project: ").strip() or "No description provided."
497
+ except EOFError:
498
+ project_description = "No description provided."
462
499
 
463
- def execute_guac_command(command: str, state: GuacState) -> Tuple[GuacState, Any]:
464
- stripped_command = command.strip()
465
- output = None
500
+ updated_ctx = {**existing_ctx}
501
+ updated_ctx.update({
502
+ "GUAC_TEAM_NAME": "guac_team",
503
+ "GUAC_DESCRIPTION": f"A team of NPCs specialized in {lang} analysis for project {project_name}",
504
+ "GUAC_FORENPC": "guac",
505
+ "GUAC_PROJECT_NAME": project_name,
506
+ "GUAC_PROJECT_DESCRIPTION": project_description,
507
+ "GUAC_LANG": lang,
508
+ "GUAC_PACKAGE_ROOT": package_root,
509
+ "GUAC_PACKAGE_NAME": package_name,
510
+ "GUAC_WORKSPACE_PATHS": {k: str(v) for k, v in workspace_dirs.items()},
511
+ })
512
+
513
+ with open(team_ctx_path, "w") as f:
514
+ yaml.dump(updated_ctx, f, default_flow_style=False)
515
+ print("Updated team.ctx with GUAC-specific information.")
516
+
517
+ default_mode_val = default_mode_choice or "agent"
518
+ setup_npc_team(npc_team_dir, lang)
466
519
 
467
- if not stripped_command:
468
- return state, None
469
- if stripped_command.lower() in ["exit", "quit", "exit()", "quit()"]:
470
- raise SystemExit("Exiting Guac Mode.")
520
+ print(f"\nGuac mode configured for package: {package_name} at {package_root}")
521
+ print(f"Workspace created at: {workspace_dirs['workspace']}")
522
+
523
+ return {
524
+ "language": lang,
525
+ "package_root": Path(package_root),
526
+ "config_path": config_dir / "config.json",
527
+ "plots_dir": plots_dir,
528
+ "npc_team_dir": npc_team_dir,
529
+ "config_dir": config_dir,
530
+ "default_mode": default_mode_val,
531
+ "project_name": project_name,
532
+ "project_description": project_description,
533
+ "package_name": package_name
534
+ }
535
+
536
+
537
+
538
+
539
+
540
+ def setup_npc_team(npc_team_dir, lang, is_subteam=False):
541
+ # Create Guac-specific NPCs
542
+ guac_npc = {
543
+ "name": "guac",
544
+ "primary_directive": (
545
+ f"You are guac, an AI assistant operating in a Python environment. "
546
+ f"When asked to perform actions or generate code, prioritize Python. "
547
+ f"For general queries, provide concise answers. "
548
+ f"When routing tasks (agent mode), consider Python-based tools or direct Python code generation if appropriate. "
549
+ f"If generating code directly (cmd mode), ensure it's Python."
550
+ )
551
+ }
552
+ caug_npc = {
553
+ "name": "caug",
554
+ "primary_directive": f"You are caug, a specialist in big data statistical methods in {lang}."
555
+ }
556
+
557
+ parsely_npc = {
558
+ "name": "parsely",
559
+ "primary_directive": f"You are parsely, a specialist in mathematical methods in {lang}."
560
+ }
561
+
562
+ toon_npc = {
563
+ "name": "toon",
564
+ "primary_directive": f"You are toon, a specialist in brute force methods in {lang}."
565
+ }
471
566
 
472
- # Check for shell-like commands first, before Python code detection
473
- parts = stripped_command.split(maxsplit=1)
474
- cmd_name = parts[0].lower()
475
- args = parts[1] if len(parts) > 1 else ""
567
+ for npc_data in [guac_npc, caug_npc, parsely_npc, toon_npc]:
568
+ npc_file = npc_team_dir / f"{npc_data['name']}.npc"
569
+ if not npc_file.exists(): # Don't overwrite existing NPCs
570
+ with open(npc_file, "w") as f:
571
+ yaml.dump(npc_data, f, default_flow_style=False)
572
+ print(f"Created NPC: {npc_data['name']}")
573
+ else:
574
+ print(f"NPC already exists: {npc_data['name']}")
575
+
576
+ # Only create team.ctx for subteams, otherwise use the main one
577
+ if is_subteam:
578
+ team_ctx_model = os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b")
579
+ team_ctx_provider = os.environ.get("NPCSH_CHAT_PROVIDER", "ollama")
580
+ team_ctx = {
581
+ "team_name": "guac_team",
582
+ "description": f"A subteam for {lang} analysis",
583
+ "forenpc": "guac",
584
+ "model": team_ctx_model,
585
+ "provider": team_ctx_provider
586
+ }
587
+ with open(npc_team_dir / "team.ctx", "w") as f:
588
+ yaml.dump(team_ctx, f, default_flow_style=False)
589
+
590
+ def _get_workspace_dirs(npc_team_dir: Path) -> Dict[str, Path]:
591
+ """Get workspace directories from the npc_team directory"""
592
+ workspace_dir = npc_team_dir / "guac_workspace"
593
+ return {
594
+ "workspace": workspace_dir,
595
+ "plots": workspace_dir / "plots",
596
+ "data_inputs": workspace_dir / "data_inputs",
597
+ "data_outputs": workspace_dir / "data_outputs"
598
+ }
599
+
600
+ def _ensure_workspace_dirs(workspace_dirs: Dict[str, Path]):
601
+ """Ensure all workspace directories exist"""
602
+ for directory in workspace_dirs.values():
603
+ directory.mkdir(parents=True, exist_ok=True)
604
+ import shutil
605
+
606
+ def _detect_file_drop(input_text: str) -> bool:
607
+ """Detect if input is just a file path (drag and drop)"""
476
608
 
477
- # Handle shell-like commands without / prefix
478
- if cmd_name == "ls":
479
- try:
480
- ls_path = args.strip() if args.strip() else state.current_path
481
- output = "\n".join(os.listdir(ls_path))
482
- except Exception as e:
483
- output = f"Error listing directory: {e}"
484
- if state.command_history:
485
- state.command_history.add_command(command, [str(output)], "", state.current_path)
486
- return state, output
487
- elif cmd_name == "pwd":
488
- output = state.current_path
489
- if state.command_history:
490
- state.command_history.add_command(command, [str(output)], "", state.current_path)
491
- return state, output
492
- elif cmd_name == "cd":
493
- target_dir = args.strip() if args.strip() else str(Path.home())
609
+ stripped = input_text.strip()
610
+
611
+ # Remove quotes if present
612
+ if stripped.startswith("'") and stripped.endswith("'"):
613
+ stripped = stripped[1:-1]
614
+ elif stripped.startswith('"') and stripped.endswith('"'):
615
+ stripped = stripped[1:-1]
616
+
617
+ # Must be a single token (no spaces) - this is key!
618
+ if len(stripped.split()) != 1:
619
+ return False
620
+
621
+ # Must not contain Python operators or syntax
622
+ python_indicators = ['(', ')', '[', ']', '{', '}', '=', '+', '-', '*', '/', '%', '&', '|', '^', '<', '>', '!', '?', ':', ';', ',']
623
+ if any(indicator in stripped for indicator in python_indicators):
624
+ return False
625
+
626
+ # Must not start with common Python keywords or look like Python
627
+ python_keywords = ['import', 'from', 'def', 'class', 'if', 'for', 'while', 'try', 'with', 'lambda', 'print', 'len', 'str', 'int', 'float', 'list', 'dict', 'set', 'tuple']
628
+ if any(stripped.startswith(keyword) for keyword in python_keywords):
629
+ return False
+
+ # Finally, the single remaining token must point to an existing file on disk
+ return Path(stripped).expanduser().exists() and Path(stripped).expanduser().is_file()
630
+
631
+
632
+ import hashlib
633
+ from sqlalchemy import create_engine, Column, Integer, String, Text, Float, DateTime, func
634
+ from sqlalchemy.ext.declarative import declarative_base
635
+ from sqlalchemy.orm import sessionmaker
636
+
637
+ # SQLAlchemy models for captured plot and file-analysis state
638
+ Base = declarative_base()
639
+
640
+ class PlotState(Base):
641
+ __tablename__ = 'plot_states'
642
+ id = Column(Integer, primary_key=True)
643
+ session_id = Column(String(255))
644
+ plot_hash = Column(String(32))
645
+ plot_description = Column(Text)
646
+ figure_path = Column(String(500))
647
+ data_summary = Column(String(500))
648
+ change_significance = Column(Float)
649
+ timestamp = Column(DateTime, default=func.now())
650
+
651
+ class FileAnalysisState(Base):
652
+ __tablename__ = 'file_analysis_states'
653
+ id = Column(Integer, primary_key=True)
654
+ session_id = Column(String(255))
655
+ file_path = Column(String(1000))
656
+ file_hash = Column(String(32))
657
+ analysis_summary = Column(Text)
658
+ variable_names = Column(Text)
659
+ timestamp = Column(DateTime, default=func.now())
660
+
661
+ def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
662
+ """Capture plot state if significant change"""
663
+ if not plt.get_fignums():
664
+ return
665
+
666
+ engine = create_engine(f'sqlite:///{db_path}')
667
+ Base.metadata.create_all(engine)
668
+ Session = sessionmaker(bind=engine)
669
+ session = Session()
670
+
671
+ # Get plot info
672
+ fig = plt.gcf()
673
+ axes = fig.get_axes()
674
+ data_points = sum(len(line.get_xdata()) for ax in axes for line in ax.get_lines())
675
+
676
+ # Create hash and check if different from last
677
+ plot_hash = hashlib.md5(f"{len(axes)}{data_points}".encode()).hexdigest()
678
+
679
+ last = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).first()
680
+ if last and last.plot_hash == plot_hash:
681
+ session.close()
682
+ return
683
+
684
+ # Save plot
685
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
686
+ workspace_dirs = _get_workspace_dirs(npc_team_dir)
687
+ plot_path = workspace_dirs["plots"] / f"state_{timestamp}.png"
688
+ plt.savefig(plot_path, dpi=150, bbox_inches='tight')
689
+
690
+ # Save to DB
691
+ plot_state = PlotState(
692
+ session_id=session_id,
693
+ plot_hash=plot_hash,
694
+ plot_description=f"Plot with {len(axes)} axes, {data_points} points",
695
+ figure_path=str(plot_path),
696
+ data_summary=f"{data_points} data points",
697
+ change_significance=1.0 if not last else 0.5
698
+ )
699
+
700
+ session.add(plot_state)
701
+ session.commit()
702
+ session.close()
703
+ print(f"📊 Plot state captured -> {plot_path.name}")
704
+
705
+ def _capture_file_state(session_id: str, db_path: str, file_path: str, analysis_code: str, locals_dict: Dict):
706
+ """Capture file analysis state"""
707
+ engine = create_engine(f'sqlite:///{db_path}')
708
+ Base.metadata.create_all(engine)
709
+ Session = sessionmaker(bind=engine)
710
+ session = Session()
711
+
712
+ # Get file hash
713
+ try:
714
+ with open(file_path, 'rb') as f:
715
+ file_hash = hashlib.md5(f.read()).hexdigest()
716
+ except:
717
+ file_hash = "unknown"
718
+
719
+ # Get variables created
720
+ file_stem = Path(file_path).stem.lower()
721
+ vars_created = [k for k in locals_dict.keys() if not k.startswith('_') and file_stem in k.lower()]
722
+
723
+ file_state = FileAnalysisState(
724
+ session_id=session_id,
725
+ file_path=file_path,
726
+ file_hash=file_hash,
727
+ analysis_summary=f"Loaded {Path(file_path).name} -> {len(vars_created)} variables",
728
+ variable_names=json.dumps(vars_created)
729
+ )
730
+
731
+ session.add(file_state)
732
+ session.commit()
733
+ session.close()
734
+ print(f"📁 File state captured: {Path(file_path).name}")
735
+
736
+ def _get_plot_context(session_id: str, db_path: str) -> str:
737
+ """Get plot context for LLM"""
738
+ engine = create_engine(f'sqlite:///{db_path}')
739
+ Session = sessionmaker(bind=engine)
740
+ session = Session()
741
+
742
+ plots = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).limit(3).all()
743
+ session.close()
744
+
745
+ if not plots:
746
+ return "No plots in session."
747
+
748
+ context = "Recent plots:\n"
749
+ for i, plot in enumerate(plots):
750
+ if i == 0:
751
+ context += f"📊 CURRENT: {plot.plot_description}\n"
752
+ else:
753
+ context += f"📊 Previous: {plot.plot_description}\n"
754
+ return context
755
+
756
+ def _get_file_context(session_id: str, db_path: str) -> str:
757
+ """Get file context for LLM"""
758
+ engine = create_engine(f'sqlite:///{db_path}')
759
+ Session = sessionmaker(bind=engine)
760
+ session = Session()
761
+
762
+ files = session.query(FileAnalysisState).filter(FileAnalysisState.session_id == session_id).order_by(FileAnalysisState.timestamp.desc()).all()
763
+ session.close()
764
+
765
+ if not files:
766
+ return "No files analyzed."
767
+
768
+ context = "Analyzed files:\n"
769
+ for file in files:
770
+ context += f"📁 {Path(file.file_path).name}: {file.analysis_summary}\n"
771
+ return context
772
+ def _generate_file_analysis_code(file_path: str, target_path: str) -> str:
773
+ """Generate Python code to load and analyze the dropped file"""
774
+ ext = Path(file_path).suffix.lower()
775
+ file_var_name = f"file_{datetime.now().strftime('%H%M%S')}"
776
+
777
+ if ext == '.pdf':
778
+ return f"""
779
+ # Automatically loaded PDF file
780
+ import PyPDF2
781
+ import pandas as pd
782
+ try:
783
+ with open(r'{target_path}', 'rb') as file:
784
+ pdf_reader = PyPDF2.PdfReader(file)
785
+ {file_var_name}_text = ""
786
+ for page_num in range(len(pdf_reader.pages)):
787
+ {file_var_name}_text += pdf_reader.pages[page_num].extract_text()
788
+
789
+ print(f"📄 Loaded PDF: {{len(pdf_reader.pages)}} pages, {{len({file_var_name}_text)}} characters")
790
+ print("First 500 characters:")
791
+ print({file_var_name}_text[:500])
792
+ print("\\n--- PDF loaded as '{file_var_name}_text' variable ---")
793
+ except Exception as e:
794
+ print(f"Error loading PDF: {{e}}")
795
+ {file_var_name}_text = None
796
+ """
797
+
798
+ elif ext in ['.csv']:
799
+ return f"""
800
+ # Automatically loaded CSV file
801
+ import pandas as pd
802
+ try:
803
+ {file_var_name}_df = pd.read_csv(r'{target_path}')
804
+ print(f"📊 Loaded CSV: {{len({file_var_name}_df)}} rows, {{len({file_var_name}_df.columns)}} columns")
805
+ print("Columns:", list({file_var_name}_df.columns))
806
+ print("\\nFirst 5 rows:")
807
+ print({file_var_name}_df.head())
808
+ print(f"\\n--- CSV loaded as '{file_var_name}_df' variable ---")
809
+ except Exception as e:
810
+ print(f"Error loading CSV: {{e}}")
811
+ {file_var_name}_df = None
812
+ """
813
+
814
+ elif ext in ['.xlsx', '.xls']:
815
+ return f"""
816
+ # Automatically loaded Excel file
817
+ import pandas as pd
818
+ try:
819
+ {file_var_name}_df = pd.read_excel(r'{target_path}')
820
+ print(f"📊 Loaded Excel: {{len({file_var_name}_df)}} rows, {{len({file_var_name}_df.columns)}} columns")
821
+ print("Columns:", list({file_var_name}_df.columns))
822
+ print("\\nFirst 5 rows:")
823
+ print({file_var_name}_df.head())
824
+ print(f"\\n--- Excel loaded as '{file_var_name}_df' variable ---")
825
+ except Exception as e:
826
+ print(f"Error loading Excel: {{e}}")
827
+ {file_var_name}_df = None
828
+ """
829
+
830
+ elif ext in ['.json']:
831
+ return f"""
832
+ # Automatically loaded JSON file
833
+ import json
834
+ try:
835
+ with open(r'{target_path}', 'r') as file:
836
+ {file_var_name}_data = json.load(file)
837
+ print(f"📄 Loaded JSON: {{type({file_var_name}_data)}}")
838
+ if isinstance({file_var_name}_data, dict):
839
+ print("Keys:", list({file_var_name}_data.keys()))
840
+ elif isinstance({file_var_name}_data, list):
841
+ print(f"List with {{len({file_var_name}_data)}} items")
842
+ print(f"\\n--- JSON loaded as '{file_var_name}_data' variable ---")
843
+ except Exception as e:
844
+ print(f"Error loading JSON: {{e}}")
845
+ {file_var_name}_data = None
846
+ """
847
+
848
+ elif ext in ['.txt', '.md']:
849
+ return f"""
850
+ # Automatically loaded text file
851
+ try:
852
+ with open(r'{target_path}', 'r', encoding='utf-8') as file:
853
+ {file_var_name}_text = file.read()
854
+ print(f"📄 Loaded text file: {{len({file_var_name}_text)}} characters")
855
+ print("First 500 characters:")
856
+ print({file_var_name}_text[:500])
857
+ print(f"\\n--- Text loaded as '{file_var_name}_text' variable ---")
858
+ except Exception as e:
859
+ print(f"Error loading text file: {{e}}")
860
+ {file_var_name}_text = None
861
+ """
862
+
863
+ elif ext in ['.png', '.jpg', '.jpeg', '.gif']:
864
+ return f"""
865
+ # Automatically loaded image file
866
+ import matplotlib.pyplot as plt
867
+ from PIL import Image
868
+ import numpy as np
869
+ try:
870
+ {file_var_name}_img = Image.open(r'{target_path}')
871
+ {file_var_name}_array = np.array({file_var_name}_img)
872
+ print(f"🖼️ Loaded image: {{({file_var_name}_img.size)}} pixels, mode: {{{file_var_name}_img.mode}}")
873
+ print(f"Array shape: {{{file_var_name}_array.shape}}")
874
+
875
+ plt.figure(figsize=(8, 6))
876
+ plt.imshow({file_var_name}_img)
877
+ plt.axis('off')
878
+ plt.title('Loaded Image: {Path(file_path).name}')
879
+ plt.show()
880
+ print(f"\\n--- Image loaded as '{file_var_name}_img' and '{file_var_name}_array' variables ---")
881
+ except Exception as e:
882
+ print(f"Error loading image: {{e}}")
883
+ {file_var_name}_img = None
884
+ {file_var_name}_array = None
885
+ """
886
+
887
+ else:
888
+ return f"""
889
+ # Automatically loaded file (unknown type)
890
+ try:
891
+ with open(r'{target_path}', 'rb') as file:
892
+ {file_var_name}_data = file.read()
893
+ print(f"📄 Loaded binary file: {{len({file_var_name}_data)}} bytes")
894
+ print(f"File extension: {ext}")
895
+ print(f"\\n--- Binary data loaded as '{file_var_name}_data' variable ---")
896
+ except Exception as e:
897
+ print(f"Error loading file: {{e}}")
898
+ {file_var_name}_data = None
899
+ """
900
+ def _handle_file_drop(input_text: str, npc_team_dir: Path) -> Tuple[str, List[str]]:
901
+ """Handle file drops by copying files to appropriate workspace directories"""
902
+ #print(f"[DEBUG] _handle_file_drop called with input: '{input_text}'")
903
+
904
+ # Immediately check if this is a single file path
905
+ stripped = input_text.strip("'\"")
906
+ if os.path.exists(stripped) and os.path.isfile(stripped):
907
+ print(f"[DEBUG] Direct file drop detected: {stripped}")
908
+
909
+ workspace_dirs = _get_workspace_dirs(npc_team_dir)
910
+ _ensure_workspace_dirs(workspace_dirs)
911
+
912
+ expanded_path = Path(stripped).resolve()
913
+
914
+ ext = expanded_path.suffix[1:].upper() if expanded_path.suffix else "OTHERS"
915
+ category = EXTENSION_MAP.get(ext, "data_inputs")
916
+ target_dir = workspace_dirs.get(category, workspace_dirs["data_inputs"])
917
+
918
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
919
+ new_filename = f"{timestamp}_{expanded_path.name}"
920
+ target_path = target_dir / new_filename
921
+
494
922
  try:
495
- os.chdir(target_dir)
496
- state.current_path = os.getcwd()
497
- output = f"Changed directory to {state.current_path}"
498
- except FileNotFoundError:
499
- output = f"Error: Directory not found: {target_dir}"
923
+ shutil.copy2(expanded_path, target_path)
924
+ print(f"📁 Copied {expanded_path.name} to workspace: {target_path}")
925
+
926
+ # Generate and execute loading code
927
+ loading_code = _generate_file_analysis_code(str(expanded_path), str(target_path))
928
+ print(f"\n# Auto-generated file loading code:\n---\n{loading_code}\n---\n")
929
+
930
+ # Actually execute the loading code
931
+ exec(loading_code)
932
+
933
+ return "", [str(target_path)]
500
934
  except Exception as e:
501
- output = f"Error changing directory: {e}"
502
- if state.command_history:
503
- state.command_history.add_command(command, [str(output)], "", state.current_path)
504
- return state, output
505
- elif cmd_name == "run" and args.strip().endswith(".py"):
506
- script_path = Path(args.strip())
507
- if script_path.exists():
935
+ print(f"[ERROR] Failed to process file drop: {e}")
936
+ return input_text, []
937
+
938
+ # Multi-file handling: copy each referenced file into the workspace
939
+ processed_files = []
940
+ file_paths = re.findall(r"'([^']+)'|\"([^\"]+)\"|(\S+)", input_text)
941
+ file_paths = [path for group in file_paths for path in group if path]
942
+
943
+ #print(f"[DEBUG] Found file paths: {file_paths}")
944
+
945
+ if not file_paths:
946
+
947
+ return input_text, processed_files
948
+
949
+ modified_input = input_text
950
+ for file_path in file_paths:
951
+ expanded_path = Path(file_path.replace('~', str(Path.home()))).resolve()
952
+
953
+ if expanded_path.exists() and expanded_path.is_file():
954
+ workspace_dirs = _get_workspace_dirs(npc_team_dir)
955
+ _ensure_workspace_dirs(workspace_dirs)
956
+
957
+ ext = expanded_path.suffix[1:].upper() if expanded_path.suffix else "OTHERS"
958
+ category = EXTENSION_MAP.get(ext, "data_inputs")
959
+ target_dir = workspace_dirs.get(category, workspace_dirs["data_inputs"])
960
+
961
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
962
+ new_filename = f"{timestamp}_{expanded_path.name}"
963
+ target_path = target_dir / new_filename
964
+
508
965
  try:
509
- with open(script_path, "r") as f:
510
- script_code = f.read()
511
- _, script_exec_output = execute_python_code(script_code, state)
512
- output = (f"Executed script '{script_path}'.\n"
513
- f"Output from script:\n{script_exec_output if script_exec_output else '(No direct output)'}")
966
+ shutil.copy2(expanded_path, target_path)
967
+ processed_files.append(str(target_path))
968
+ modified_input = modified_input.replace(file_path, str(target_path))
969
+ print(f"📁 Copied {expanded_path.name} to workspace: {target_path}")
514
970
  except Exception as e:
515
- output = f"Error running script {script_path}: {e}"
516
- else:
517
- output = f"Error: Script not found: {script_path}"
518
- if state.command_history:
519
- state.command_history.add_command(command, [str(output)], "", state.current_path)
520
- return state, output
971
+ print(f"[ERROR] Failed to copy file: {e}")
972
+
973
+ return modified_input, processed_files
521
974
 
522
- # Now check if it's Python code
523
- if is_python_code(stripped_command):
524
- state, output = execute_python_code(stripped_command, state)
525
- return state, output
526
975
 
527
- # Handle / prefixed commands
528
- if stripped_command.startswith("/"):
529
- parts = stripped_command.split(maxsplit=1)
530
- cmd_name = parts[0].lower()
531
- args = parts[1] if len(parts) > 1 else ""
532
- is_core_cmd = True
533
-
534
- if cmd_name == "/agent":
535
- state.current_mode = "agent"
536
- output = "Switched to AGENT mode."
537
- elif cmd_name == "/chat":
538
- state.current_mode = "chat"
539
- output = "Switched to CHAT mode."
540
- elif cmd_name == "/cmd":
541
- state.current_mode = "cmd"
542
- output = "Switched to CMD mode."
543
- elif cmd_name == "/ride":
544
- state.current_mode = "ride"
545
- output = "Switched to RIDE mode (placeholder)."
546
- elif cmd_name == "/refresh":
547
- _handle_guac_refresh(state)
548
- output = "Refresh process initiated."
549
- elif cmd_name == "/mode":
550
- output = f"Current mode: {state.current_mode.upper()}"
551
- elif cmd_name == "/show_vars":
552
- temp_output_list = ["Current Python Environment Variables:"]
553
- if state.locals:
554
- for k, v_obj in state.locals.items():
555
- if not k.startswith("__"):
556
- try:
557
- v_repr = repr(v_obj)
558
- temp_output_list.append(f" {k}: {v_repr[:100]}{'...' if len(v_repr) > 100 else ''}")
559
- except Exception:
560
- temp_output_list.append(f" {k}: <Error representing value>")
976
+ def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
977
+ """Capture plot state if significant change"""
978
+ if not plt.get_fignums():
979
+ return
980
+
981
+ try:
982
+ engine = create_engine(f'sqlite:///{db_path}')
983
+ Base.metadata.create_all(engine)
984
+ Session = sessionmaker(bind=engine)
985
+ session = Session()
986
+
987
+ # Get plot info
988
+ fig = plt.gcf()
989
+ axes = fig.get_axes()
990
+ data_points = sum(len(line.get_xdata()) for ax in axes for line in ax.get_lines())
991
+
992
+ # Create hash and check if different from last
993
+ plot_hash = hashlib.md5(f"{len(axes)}{data_points}".encode()).hexdigest()
994
+
995
+ last = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).first()
996
+ if last and last.plot_hash == plot_hash:
997
+ session.close()
998
+ return
999
+
1000
+ # Save plot
1001
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
1002
+ workspace_dirs = _get_workspace_dirs(npc_team_dir)
1003
+ plot_path = workspace_dirs["plots"] / f"state_{timestamp}.png"
1004
+ plt.savefig(plot_path, dpi=150, bbox_inches='tight')
1005
+
1006
+ # Save to DB
1007
+ plot_state = PlotState(
1008
+ session_id=session_id,
1009
+ plot_hash=plot_hash,
1010
+ plot_description=f"Plot with {len(axes)} axes, {data_points} points",
1011
+ figure_path=str(plot_path),
1012
+ data_summary=f"{data_points} data points",
1013
+ change_significance=1.0 if not last else 0.5
1014
+ )
1015
+
1016
+ session.add(plot_state)
1017
+ session.commit()
1018
+ session.close()
1019
+ print(f"📊 Plot state captured -> {plot_path.name}")
1020
+
1021
+ except Exception as e:
1022
+ print(f"Error capturing plot state: {e}")
1023
+
1024
+ def _capture_file_state(session_id: str, db_path: str, file_path: str, analysis_code: str, locals_dict: Dict):
1025
+ """Capture file analysis state"""
1026
+ try:
1027
+ engine = create_engine(f'sqlite:///{db_path}')
1028
+ Base.metadata.create_all(engine)
1029
+ Session = sessionmaker(bind=engine)
1030
+ session = Session()
1031
+
1032
+ # Get file hash
1033
+ try:
1034
+ with open(file_path, 'rb') as f:
1035
+ file_hash = hashlib.md5(f.read()).hexdigest()
1036
+ except:
1037
+ file_hash = "unknown"
1038
+
1039
+ # Get variables created
1040
+ file_stem = Path(file_path).stem.lower()
1041
+ vars_created = [k for k in locals_dict.keys() if not k.startswith('_') and file_stem in k.lower()]
1042
+
1043
+ file_state = FileAnalysisState(
1044
+ session_id=session_id,
1045
+ file_path=file_path,
1046
+ file_hash=file_hash,
1047
+ analysis_summary=f"Loaded {Path(file_path).name} -> {len(vars_created)} variables",
1048
+ variable_names=json.dumps(vars_created)
1049
+ )
1050
+
1051
+ session.add(file_state)
1052
+ session.commit()
1053
+ session.close()
1054
+ print(f"📁 File state captured: {Path(file_path).name}")
1055
+
1056
+ except Exception as e:
1057
+ print(f"Error capturing file state: {e}")
1058
+
1059
+ def _get_plot_context(session_id: str, db_path: str) -> str:
1060
+ """Get plot context for LLM"""
1061
+ try:
1062
+ engine = create_engine(f'sqlite:///{db_path}')
1063
+ Base.metadata.create_all(engine)
1064
+ Session = sessionmaker(bind=engine)
1065
+ session = Session()
1066
+
1067
+ plots = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).limit(3).all()
1068
+ session.close()
1069
+
1070
+ if not plots:
1071
+ return "No plots in session."
1072
+
1073
+ context = "Recent plots:\n"
1074
+ for i, plot in enumerate(plots):
1075
+ if i == 0:
1076
+ context += f"📊 CURRENT: {plot.plot_description}\n"
561
1077
  else:
562
- temp_output_list.append(" (empty)")
563
- output = "\n".join(temp_output_list)
1078
+ context += f"📊 Previous: {plot.plot_description}\n"
1079
+ return context
1080
+
1081
+ except Exception as e:
1082
+ return f"Error retrieving plot context: {e}"
564
1083
 
565
- else:
566
- is_core_cmd = False
1084
+ def _get_file_context(session_id: str, db_path: str) -> str:
1085
+ """Get file context for LLM"""
1086
+ try:
1087
+ engine = create_engine(f'sqlite:///{db_path}')
1088
+ Base.metadata.create_all(engine)
1089
+ Session = sessionmaker(bind=engine)
1090
+ session = Session()
567
1091
 
568
- if is_core_cmd:
569
- if state.command_history:
570
- state.command_history.add_command(command, [str(output if output else "")], "", state.current_path)
571
- return state, output
1092
+ files = session.query(FileAnalysisState).filter(FileAnalysisState.session_id == session_id).order_by(FileAnalysisState.timestamp.desc()).all()
1093
+ session.close()
1094
+
1095
+ if not files:
1096
+ return "No files analyzed."
1097
+
1098
+ context = "Analyzed files:\n"
1099
+ for file in files:
1100
+ context += f"📁 {Path(file.file_path).name}: {file.analysis_summary}\n"
1101
+ return context
1102
+
1103
+ except Exception as e:
1104
+ return f"Error retrieving file context: {e}"
1105
+
1106
+
1107
+
1108
+ def _save_matplotlib_figures(npc_team_dir: Path) -> List[str]:
1109
+ """Save all matplotlib figures to the plots directory and return paths"""
1110
+ workspace_dirs = _get_workspace_dirs(npc_team_dir)
1111
+ _ensure_workspace_dirs(workspace_dirs)
1112
+
1113
+ saved_figures = []
1114
+ if plt.get_fignums():
1115
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
1116
+
1117
+ for i, fig_num in enumerate(plt.get_fignums()):
1118
+ fig = plt.figure(fig_num)
1119
+ fig_path = workspace_dirs["plots"] / f"{timestamp}_figure_{i+1}.png"
1120
+ fig.savefig(fig_path, dpi=150, bbox_inches='tight')
1121
+ saved_figures.append(str(fig_path))
1122
+ print(f"📊 Saved figure to: {fig_path}")
1123
+
1124
+ plt.close('all')
572
1125
 
573
- nl_input_for_llm = stripped_command
1126
+ return saved_figures
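A self-contained version of the save-every-open-figure pattern used above, writing to a local ./plots directory instead of the guac workspace helpers:

```python
from datetime import datetime
from pathlib import Path
import matplotlib
matplotlib.use("Agg")  # headless backend for the example
import matplotlib.pyplot as plt

plots_dir = Path("plots")
plots_dir.mkdir(exist_ok=True)

plt.figure(); plt.plot([0, 1], [0, 1])   # two open figures to demonstrate the loop
plt.figure(); plt.plot([0, 1], [1, 0])

timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
for i, fig_num in enumerate(plt.get_fignums()):
    fig = plt.figure(fig_num)
    fig.savefig(plots_dir / f"{timestamp}_figure_{i + 1}.png", dpi=150, bbox_inches="tight")
plt.close("all")
```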
574
1127
 
575
- if state.current_mode == "agent":
576
- llm_result_dict = check_llm_command(
577
- command=nl_input_for_llm,
578
- model=state.chat_model,
579
- provider=state.chat_provider,
580
- npc=state.npc,
581
- team=state.team,
582
- messages=state.messages, # Pass current messages for context
583
- stream=state.stream_output,
584
- # tools and jinxs would be sourced from state.npc or state.team if check_llm_command uses them
585
- )
586
- output = llm_result_dict.get("output")
587
- state.messages = llm_result_dict.get("messages", state.messages) # Update messages from check_llm_command
1128
+
1129
+ def _run_agentic_mode(command: str,
1130
+ state: ShellState,
1131
+ locals_dict: Dict[str, Any],
1132
+ npc_team_dir: Path) -> Tuple[ShellState, Any]:
1133
+ """Run agentic mode with continuous iteration based on progress"""
1134
+ max_iterations = 10 # Higher maximum as a safety limit
1135
+ iteration = 0
1136
+ full_output = []
1137
+ current_command = command
1138
+ consecutive_failures = 0
1139
+ max_consecutive_failures = 2
1140
+
1141
+ # Build context of existing variables
1142
+ existing_vars_context = "EXISTING VARIABLES IN ENVIRONMENT:\n"
1143
+ for var_name, var_value in locals_dict.items():
1144
+ if not var_name.startswith('_') and var_name not in ['In', 'Out', 'exit', 'quit', 'get_ipython']:
1145
+ try:
1146
+ var_type = type(var_value).__name__
1147
+ var_repr = repr(var_value)
1148
+ if len(var_repr) > 100:
1149
+ var_repr = var_repr[:97] + "..."
1150
+ existing_vars_context += f"- {var_name} ({var_type}): {var_repr}\n"
1151
+ except Exception:
1152
+ existing_vars_context += f"- {var_name} ({type(var_value).__name__}): <unrepresentable>\n"
1153
+
1154
+ while iteration < max_iterations and consecutive_failures < max_consecutive_failures:
1155
+ iteration += 1
1156
+ print(f"\n🔄 Agentic iteration {iteration}")
588
1157
 
589
- history_output = str(output) if not (state.stream_output and hasattr(output, '__iter__') and not isinstance(output, (str,bytes))) else "[Streamed Agent Response]"
590
- if state.command_history:
591
- state.command_history.add_command(nl_input_for_llm, [history_output], "", state.current_path)
592
-
593
- elif state.current_mode == "chat":
594
- llm_response_dict = get_llm_response(
595
- nl_input_for_llm,
596
- model=state.chat_model,
597
- provider=state.chat_provider,
598
- npc=state.npc,
599
- messages=state.messages, # Pass current messages
600
- stream=state.stream_output
601
- )
602
- output = llm_response_dict.get("response")
603
- state.messages = llm_response_dict.get("messages", state.messages) # Update messages
1158
+ prompt = f"""
1159
+ USER REQUEST: {current_command}
604
1160
 
605
- history_output = str(output) if not (state.stream_output and hasattr(output, '__iter__') and not isinstance(output, (str,bytes))) else "[Streamed Chat Response]"
606
- if state.command_history:
607
- state.command_history.add_command(nl_input_for_llm, [history_output], "", state.current_path)
1161
+ {existing_vars_context}
1162
+
1163
+ PREVIOUS ATTEMPTS: {full_output[-1] if full_output else 'None'}
1164
+
1165
+ Generate Python code that BUILDS ON EXISTING VARIABLES to accomplish this task.
1166
+ DO NOT redefine variables that already exist unless absolutely necessary.
1167
+ Use the existing variables and add/modify as needed.
1168
+ Be sure to generate logs and information that, once executed, provide us with enough information to keep moving forward.
1169
+ Log variables and behaviors so we can pinpoint fixes clearly rather than getting stuck in unproductive loops.
1170
+
1171
+
1172
+ Provide ONLY executable Python code without any explanations or markdown formatting.
1173
+ Focus on incremental changes rather than rewriting everything. Do not rewrite any functions that are already in the existing variables context or that appear to need no changes.
1174
+
1175
+ Do not include any leading ```python. Begin directly with the code.
1176
+ """
1177
+
1178
+ llm_response = get_llm_response(prompt,
1179
+ npc=state.npc,
1180
+ stream=True)
1181
+
1182
+
1183
+ generated_code = print_and_process_stream_with_markdown(llm_response.get('response'),
1184
+ state.npc.model,
1185
+ state.npc.provider,
1186
+ show=True)
1187
+
1188
+ if generated_code.startswith('```python'):
1189
+ generated_code = generated_code[len('```python'):].strip()
1190
+ if generated_code.endswith('```'):
1191
+ generated_code = generated_code[:-3].strip()
1192
+
1193
+ print(f"\n# Generated Code (Iteration {iteration}):\n---\n{generated_code}\n---\n")
1194
+
1195
+ try:
1196
+ state, exec_output = execute_python_code(generated_code, state, locals_dict)
1197
+ full_output.append(f"Iteration {iteration}:\nCode:\n{generated_code}\nOutput:\n{exec_output}")
1198
+
1199
+ # Update the context with new variables
1200
+ new_vars = []
1201
+ for var_name, var_value in locals_dict.items():
1202
+ if (not var_name.startswith('_') and
1203
+ var_name not in existing_vars_context and
1204
+ var_name not in ['In', 'Out', 'exit', 'quit', 'get_ipython']):
1205
+ new_vars.append(var_name)
1206
+
1207
+ if new_vars:
1208
+ existing_vars_context += f"\nNEW VARIABLES CREATED: {', '.join(new_vars)}\n"
1209
+
1210
+ analysis_prompt = f"""
1211
+ CODE EXECUTION RESULTS: {exec_output}
1212
+
1213
+ EXISTING VARIABLES: {existing_vars_context}
1214
+
1215
+ ANALYSIS:
1216
+ - Is the TASK COMPLETE? Return 'complete' if the user request is fully accomplished
+ - Is there MEANINGFUL PROGRESS? Return 'progress' if making good progress
1217
+ - Is there a PROBLEM? Return 'problem' if stuck or error occurred
1218
+
1219
+ - Return ONLY one of these words followed by a brief explanation.
1220
+ """
1221
+
1222
+ analysis_response = get_llm_response(analysis_prompt,
1223
+ model=state.chat_model,
1224
+ provider=state.chat_provider,
1225
+ npc=state.npc,
1226
+ stream=False)
1227
+
1228
+ analysis = analysis_response.get("response", "").strip().lower()
1229
+ print(f"\n# Analysis:\n{analysis}")
1230
+
1231
+ if analysis.startswith('complete'):
1232
+ print("✅ Task completed successfully!")
1233
+ break
1234
+ elif analysis.startswith('progress'):
1235
+ consecutive_failures = 0 # Reset failure counter on progress
1236
+ print("➡️ Making progress, continuing to next iteration...")
1237
+ # Continue to next iteration
1238
+ elif analysis.startswith('problem'):
1239
+ consecutive_failures += 1
1240
+ print(f"⚠️ Problem detected ({consecutive_failures}/{max_consecutive_failures} consecutive failures)")
1241
+
1242
+ user_feedback = input("\n🤔 Agent requests feedback (press Enter to continue or type your response): ").strip()
1243
+ if user_feedback:
1244
+ current_command = f"{current_command} - User feedback: {user_feedback}"
1245
+ elif consecutive_failures >= max_consecutive_failures:
1246
+ print("❌ Too many consecutive failures, stopping iteration")
1247
+ break
1248
+ else:
1249
+ # Default behavior for unexpected responses
1250
+ consecutive_failures += 1
1251
+ print(f"❓ Unexpected analysis response, counting as failure ({consecutive_failures}/{max_consecutive_failures})")
1252
+
1253
+ except Exception as e:
1254
+ error_msg = f"Error in iteration {iteration}: {str(e)}"
1255
+ print(error_msg)
1256
+ full_output.append(error_msg)
1257
+ consecutive_failures += 1
1258
+ current_command = f"{current_command} - Error: {str(e)}"
1259
+
1260
+ if consecutive_failures >= max_consecutive_failures:
1261
+ print("❌ Too many consecutive errors, stopping iteration")
1262
+ break
1263
+
1264
+ return state, "# Agentic execution completed\n" + '\n'.join(full_output)
1265
+
1266
+
1267
+ def print_guac_bowl():
1268
+ bowl_art = """
1269
+ 🟢🟢🟢🟢🟢
1270
+ 🟢 🟢
1271
+ 🟢
1272
+ 🟢
1273
+ 🟢
1274
+ 🟢 🟢🟢🟢 🟢 🟢 🟢🟢🟢 🟢🟢🟢
1275
+ 🟢 🟢 🟢 🟢 ⚫⚫🟢 🟢
1276
+ 🟢 🟢 🟢 🟢 ⚫🥑🧅⚫ 🟢
1277
+ 🟢 🟢 🟢 🟢 ⚫🥑🍅⚫ 🟢
1278
+ 🟢🟢🟢🟢🟢🟢 🟢🟢🟢🟢 ⚫⚫🟢 🟢🟢🟢
1279
+ """
1280
+ print(bowl_art)
1281
+
1282
+ def get_guac_prompt_char(command_count: int, guac_refresh_period = 100) -> str:
1283
+ period = int(guac_refresh_period)
1284
+ period = max(1, period)
1285
+ stages = ["\U0001F951", "\U0001F951🔪", "\U0001F951🥣", "\U0001F951🥣🧂", "\U0001F958 REFRESH?"]
1286
+ divisor = max(1, period // (len(stages)-1) if len(stages) > 1 else period)
1287
+ stage_index = min(command_count // divisor, len(stages) - 1)
1288
+ return stages[stage_index]
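With the default refresh period of 100 the divisor works out to 25, so the avocado prompt "ripens" one stage every 25 commands. A quick usage example of the function just defined:

```python
for count in (0, 25, 50, 75, 100):
    print(count, get_guac_prompt_char(count, guac_refresh_period=100))
```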
1289
+
1290
+ def execute_guac_command(command: str, state: ShellState, locals_dict: Dict[str, Any], project_name: str, src_dir: Path, router) -> Tuple[ShellState, Any]:
1291
+ stripped_command = command.strip()
1292
+ output = None
1293
+
1294
+ if not stripped_command:
1295
+ return state, None
1296
+ if stripped_command.lower() in ["exit", "quit", "exit()", "quit()"]:
1297
+ raise SystemExit("Exiting Guac Mode.")
1298
+
1299
+ # Get npc_team_dir from current working directory
1300
+ npc_team_dir = Path.cwd() / "npc_team"
1301
+ if stripped_command.startswith('run '):
1302
+ file_path = stripped_command[4:].strip()
1303
+ try:
1304
+ resolved_path = Path(file_path).resolve()
1305
+ if not resolved_path.exists():
1306
+ return state, f"Error: File '{file_path}' not found"
1307
+
1308
+ with open(resolved_path, 'r', encoding='utf-8') as f:
1309
+ file_content = f.read()
1310
+
1311
+ print(f"Running {resolved_path.name}...")
1312
+ state, exec_output = execute_python_code(file_content, state, locals_dict)
1313
+ return state, exec_output
1314
+
1315
+ except Exception as e:
1316
+ return state, f"Error running file: {e}"
608
1317
 
609
- elif state.current_mode == "cmd":
1318
+
1319
+
1320
+ # Check if this is a file drop (single file path)
1321
+ if _detect_file_drop(stripped_command):
1322
+ if stripped_command.startswith('run'):
1323
+ pass
1324
+ else:
1325
+ # Clean the path
1326
+ file_path = stripped_command.strip("'\"")
1327
+ expanded_path = Path(file_path).resolve()
1328
+
1329
+ # Copy to workspace
1330
+ workspace_dirs = _get_workspace_dirs(npc_team_dir)
1331
+ _ensure_workspace_dirs(workspace_dirs)
1332
+
1333
+ ext = expanded_path.suffix[1:].upper() if expanded_path.suffix else "OTHERS"
1334
+ category = EXTENSION_MAP.get(ext, "data_inputs")
1335
+ target_dir = workspace_dirs.get(category, workspace_dirs["data_inputs"])
1336
+
1337
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
1338
+ new_filename = f"{timestamp}_{expanded_path.name}"
1339
+ target_path = target_dir / new_filename
1340
+
1341
+ try:
1342
+ shutil.copy2(expanded_path, target_path)
1343
+ print(f"📁 Copied {expanded_path.name} to workspace: {target_path}")
1344
+
1345
+ # Generate and execute loading code
1346
+ loading_code = _generate_file_analysis_code(str(expanded_path), str(target_path))
1347
+ print(f"\n# Auto-generated file loading code:\n---\n{loading_code}\n---\n")
1348
+
1349
+ state, exec_output = execute_python_code(loading_code, state, locals_dict)
1350
+ return state, exec_output
1351
+ except Exception as e:
1352
+ print(f"[ERROR] Failed to copy or load file: {e}")
1353
+ return state, f"Error loading file: {e}"
1354
+
1355
+ # Handle file drops in text (multiple files or files with other text)
1356
+ processed_command, processed_files = _handle_file_drop(stripped_command, npc_team_dir)
1357
+ if processed_files:
1358
+ print(f"📁 Processed {len(processed_files)} files")
1359
+ stripped_command = processed_command
1360
+
1361
+ # Handle /refresh command
1362
+ if stripped_command == "/refresh":
1363
+ _handle_guac_refresh(state, project_name, src_dir)
1364
+ return state, "Refresh process initiated."
1365
+
1366
+ # Handle mode switching commands
1367
+ if stripped_command in ["/agent", "/chat", "/cmd"]:
1368
+ state.current_mode = stripped_command[1:]
1369
+ return state, f"Switched to {state.current_mode.upper()} mode."
1370
+
1371
+
1372
+
1373
+ # Check if it's a router command (starts with / and not a built-in command)
1374
+ if stripped_command.startswith('/') and stripped_command not in ["/refresh", "/agent", "/chat", "/cmd"]:
1375
+ return execute_command(stripped_command, state, review=True, router=router)
1376
+ if is_python_code(stripped_command):
1377
+ try:
1378
+ state, exec_output = execute_python_code(stripped_command, state, locals_dict)
1379
+ return state, exec_output
1380
+ except KeyboardInterrupt:
1381
+ print("\nExecution interrupted by user")
1382
+ return state, "Execution interrupted"
1383
+ if state.current_mode == "agent":
1384
+ return _run_agentic_mode(stripped_command, state, locals_dict, npc_team_dir)
1385
+ if state.current_mode == "cmd":
1386
+
1387
+ # If not Python, use LLM to generate Python code
610
1388
  locals_context_string = "Current Python environment variables and functions:\n"
611
- if state.locals:
612
- for k, v in state.locals.items():
613
- if not k.startswith('__'): # Exclude Python built-ins and internal vars
1389
+ if locals_dict:
1390
+ for k, v in locals_dict.items():
1391
+ if not k.startswith('__'):
614
1392
  try:
615
- # Use repr() for a developer-friendly representation
616
1393
  value_repr = repr(v)
617
- # Truncate long representations to prevent context window bloat
618
1394
  if len(value_repr) > 200:
619
1395
  value_repr = value_repr[:197] + "..."
620
1396
  locals_context_string += f"- {k} (type: {type(v).__name__}) = {value_repr}\n"
621
1397
  except Exception:
622
1398
  locals_context_string += f"- {k} (type: {type(v).__name__}) = <unrepresentable>\n"
623
- # Add a clear separator for LLM to distinguish this context
624
1399
  locals_context_string += "\n--- End of Environment Context ---\n"
625
1400
  else:
626
1401
  locals_context_string += "(Environment is empty)\n"
627
1402
 
628
- prompt_cmd = (
629
- f"User input for Python CMD mode: '{nl_input_for_llm}'.\n"
630
- f"Generate ONLY executable Python code required to fulfill this.\n"
631
- f"Do not include any explanations, leading markdown like ```python, or any text other than the Python code itself.\n"
632
- )
1403
+ # ADD CONTEXT ENHANCEMENT HERE:
1404
+ enhanced_prompt = stripped_command
1405
+ if any(word in stripped_command.lower() for word in ['plot', 'graph', 'chart', 'figure', 'visualiz']):
1406
+ plot_context = _get_plot_context(state.conversation_id, state.command_history.db_path)
1407
+ enhanced_prompt += f"\n\n{plot_context}"
1408
+
1409
+ if any(word in stripped_command.lower() for word in ['file', 'data', 'load', 'variable', 'df']):
1410
+ file_context = _get_file_context(state.conversation_id, state.command_history.db_path)
1411
+ enhanced_prompt += f"\n\n{file_context}"
633
1412
 
634
- llm_response = get_llm_response(
635
- prompt_cmd,
636
- model=state.chat_model,
637
- provider=state.chat_provider,
638
- npc=state.npc,
639
- stream=False,
640
- messages=state.messages # Pass messages for context if LLM uses them
641
- )
642
- if llm_response.get('response').startswith('```python'):
1413
+ prompt_cmd = f"""User input for Python CMD mode: '{enhanced_prompt}'.
1414
+ Generate ONLY executable Python code required to fulfill this.
1415
+ Do not include any explanations, leading markdown like ```python, or any text other than the Python code itself.
1416
+ {locals_context_string}
1417
+ Begin directly with the code.
1418
+ """
1419
+
1420
+ llm_response = get_llm_response(prompt_cmd,
1421
+ model=state.chat_model,
1422
+ provider=state.chat_provider,
1423
+ npc=state.npc,
1424
+ stream=False,  # the response below is parsed as a plain string
1425
+ messages=state.messages)
1426
+
1427
+ if llm_response.get('response', '').startswith('```python'):
643
1428
  generated_code = llm_response.get("response", "").strip()[len('```python'):].strip()
644
1429
  generated_code = generated_code.rsplit('```', 1)[0].strip()
645
1430
  else:
646
1431
  generated_code = llm_response.get("response", "").strip()
1432
+
647
1433
  state.messages = llm_response.get("messages", state.messages)
648
1434
 
649
1435
  if generated_code and not generated_code.startswith("# Error:"):
650
1436
  print(f"\n# LLM Generated Code (Cmd Mode):\n---\n{generated_code}\n---\n")
651
- _, exec_output = execute_python_code(generated_code, state)
652
- output = f"# Code executed.\n# Output:\n{exec_output if exec_output else '(No direct output)'}"
1437
+ try:
1438
+ state, exec_output = execute_python_code(generated_code, state, locals_dict)
1439
+ output = f"# Code executed.\n# Output:\n{exec_output if exec_output else '(No direct output)'}"
1440
+ except KeyboardInterrupt:
1441
+ print("\nExecution interrupted by user")
1442
+ output = "Execution interrupted"
653
1443
  else:
654
1444
  output = generated_code if generated_code else "# Error: LLM did not generate Python code."
655
1445
 
656
1446
  if state.command_history:
657
- state.command_history.add_command(nl_input_for_llm, [str(output if output else "")], "", state.current_path)
1447
+ state.command_history.add_command(stripped_command, [str(output if output else "")], "", state.current_path)
1448
+
1449
+ return state, output
658
1450
 
659
- elif state.current_mode == "ride":
660
- output = "RIDE mode is not yet implemented. Your input was: " + nl_input_for_llm
661
- if state.command_history:
662
- state.command_history.add_command(nl_input_for_llm, [str(output)], "", state.current_path)
1451
+ return execute_command(stripped_command, state, review=True, router=router)
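execute_guac_command above tries a series of handlers in a fixed priority, and the first match wins. A stubbed sketch of that routing order (file-drop handling and `is_python_code` detection are omitted, and the returned labels are just placeholders, not real handlers):

```python
def route(stripped_command: str, current_mode: str = "cmd") -> str:
    if stripped_command.lower() in ("exit", "quit", "exit()", "quit()"):
        return "raise SystemExit"
    if stripped_command.startswith("run "):
        return "execute python file"
    if stripped_command == "/refresh":
        return "refresh helpers"
    if stripped_command in ("/agent", "/chat", "/cmd"):
        return "switch mode"
    if stripped_command.startswith("/"):
        return "router command"
    # is_python_code(...) would be checked here and executed directly
    if current_mode == "agent":
        return "agentic iteration"
    if current_mode == "cmd":
        return "LLM generates python"
    return "fall back to shell execute_command"

print(route("/chat"))                        # switch mode
print(route("summarize the data", "agent"))  # agentic iteration
```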
1452
+ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, package_name: str):
1453
+ from npcsh.routes import router
663
1454
 
664
- return state, output
1455
+
1456
+ # Get workspace info
1457
+ npc_team_dir = Path.cwd() / "npc_team"
1458
+ workspace_dirs = _get_workspace_dirs(npc_team_dir)
1459
+ _ensure_workspace_dirs(workspace_dirs)
1460
+
1461
+ locals_dict = {}
1462
+ global _guac_monitor_thread, _guac_monitor_stop_event
1463
+ if _guac_monitor_thread is None or not (_guac_monitor_thread.is_alive()):
1464
+ _guac_monitor_stop_event = threading.Event()
1465
+ _guac_monitor_thread = threading.Thread(
1466
+ target=_file_drop_monitor,
1467
+ args=(workspace_dirs['workspace'].parent, state, locals_dict),
1468
+ kwargs={'poll_interval': 0.2},
1469
+ daemon=True
1470
+ )
1471
+ _guac_monitor_thread.start()
665
1472
 
666
- def run_guac_repl(initial_guac_state: GuacState):
667
- state = initial_guac_state
668
- _load_guac_helpers_into_state(state)
1473
+ try:
1474
+ if str(package_root) not in sys.path:
1475
+ sys.path.insert(0, str(package_root))
1476
+
1477
+ try:
1478
+ package_module = importlib.import_module(package_name)
1479
+ for name in dir(package_module):
1480
+ if not name.startswith('__'):
1481
+ locals_dict[name] = getattr(package_module, name)
1482
+ print(f"Loaded package: {package_name}")
1483
+ except ImportError:
1484
+ print(f"Warning: Could not import package {package_name}")
1485
+
1486
+ except Exception as e:
1487
+ print(f"Warning: Could not load package {package_name}: {e}", file=sys.stderr)
1488
+
1489
+ core_imports = {
1490
+ 'pd': pd, 'np': np, 'plt': plt, 'datetime': datetime,
1491
+ 'Path': Path, 'os': os, 'sys': sys, 'json': json,
1492
+ 'yaml': yaml, 're': re, 'traceback': traceback
1493
+ }
1494
+ locals_dict.update(core_imports)
1495
+ locals_dict.update({f"guac_{k}": v for k, v in workspace_dirs.items()})
1496
+
669
1497
  print_guac_bowl()
670
1498
  print(f"Welcome to Guac Mode! Current mode: {state.current_mode.upper()}. Type /agent, /chat, or /cmd to switch modes.")
1499
+ print(f"Workspace: {workspace_dirs['workspace']}")
1500
+ print("💡 You can drag and drop files into the terminal to automatically import them!")
1501
+
1502
+ command_count = 0
1503
+
1504
+ try:
1505
+ completer = make_completer(state, router)
1506
+ readline.set_completer(completer)
1507
+ except:
1508
+ pass
671
1509
 
672
1510
  while True:
673
1511
  try:
674
1512
  state.current_path = os.getcwd()
675
- path_display = Path(state.current_path).name
676
- prompt_char = get_guac_prompt_char(state.command_count)
677
- mode_display = state.current_mode.upper()
678
- npc_display = f":{state.npc.name}" if state.npc and state.npc.name else ""
679
- prompt_str = f"[{path_display}|{mode_display}{npc_display}] {prompt_char} > "
680
-
681
- user_input = get_multiline_input_guac(prompt_str, state)
682
- if not user_input.strip() and not state.compile_buffer:
683
- if state.compile_buffer:
684
- state.compile_buffer.clear()
1513
+
1514
+ display_model = state.chat_model
1515
+ if isinstance(state.npc, NPC) and state.npc.model:
1516
+ display_model = state.npc.model
1517
+
1518
+ cwd_colored = colored(os.path.basename(state.current_path), "blue")
1519
+ npc_name = state.npc.name if state.npc and state.npc.name else "guac"
1520
+ prompt_char = get_guac_prompt_char(command_count)
1521
+
1522
+ prompt_str = f"{cwd_colored}:{npc_name}:{display_model}{prompt_char}> "
1523
+ prompt = readline_safe_prompt(prompt_str)
1524
+
1525
+ user_input = get_multiline_input(prompt).strip()
1526
+
1527
+ if not user_input:
685
1528
  continue
686
1529
 
687
- state.command_count +=1
688
- new_state, result = execute_guac_command(user_input, state)
689
- state = new_state
690
-
691
- if result is not None:
692
- if state.stream_output and hasattr(result, '__iter__') and not isinstance(result, (str, bytes, dict)):
693
- full_streamed_output_for_history = print_and_process_stream_with_markdown(result, state.chat_model, state.chat_provider)
694
- if (state.current_mode == "chat" or state.current_mode == "agent") and \
695
- state.messages and state.messages[-1].get("role") == "assistant":
696
- state.messages[-1]["content"] = full_streamed_output_for_history
697
-
698
- if state.command_history:
699
- try:
700
- last_entry_id = state.command_history.get_last_entry_id()
701
- if last_entry_id:
702
- state.command_history.update_command_output(last_entry_id, [full_streamed_output_for_history])
703
- except AttributeError:
704
- pass
705
- elif isinstance(result, str):
706
- if result.strip():
707
- render_markdown(result)
708
- elif not (state.stream_output and hasattr(result, '__iter__')):
709
- if result:
710
- print(str(result))
711
- print()
712
-
1530
+ command_count += 1
1531
+ state, result = execute_guac_command(user_input, state, locals_dict, project_name, package_root, router)
1532
+
1533
+ process_result(user_input, state, result, state.command_history)
1534
+
713
1535
  except (KeyboardInterrupt, EOFError):
714
1536
  print("\nExiting Guac Mode...")
1537
+ if _guac_monitor_stop_event:
1538
+ _guac_monitor_stop_event.set()
1539
+ if _guac_monitor_thread:
1540
+ _guac_monitor_thread.join(timeout=1.0)
1541
+ break
1542
+
715
1543
  break
716
1544
  except SystemExit as e:
717
1545
  print(f"\n{e}")
1546
+ if _guac_monitor_stop_event:
1547
+ _guac_monitor_stop_event.set()
1548
+ if _guac_monitor_thread:
1549
+ _guac_monitor_thread.join(timeout=1.0)
718
1550
  break
1551
+
719
1552
  except Exception:
720
1553
  print("An unexpected error occurred in the REPL:")
721
1554
  traceback.print_exc()
722
1555
 
723
- def enter_guac_mode(npc=None,
724
- team=None,
725
- config_dir=None,
726
- plots_dir=None,
1556
+ if _guac_monitor_stop_event:
1557
+ _guac_monitor_stop_event.set()
1558
+ if _guac_monitor_thread:
1559
+ _guac_monitor_thread.join(timeout=1.0)
1560
+ break
1561
+
1562
+
1563
+
1564
+
1565
+ def enter_guac_mode(npc=None,
1566
+ team=None,
1567
+ config_dir=None,
1568
+ plots_dir=None,
727
1569
  npc_team_dir=None,
728
- refresh_period=None,
729
- lang=None,
730
- default_mode_choice=None):
1570
+ refresh_period=None,
1571
+ lang='python',
1572
+ default_mode_choice=None):
731
1573
 
732
1574
  if refresh_period is not None:
733
1575
  try:
734
- npcsh_initial_state.GUAC_REFRESH_PERIOD = int(refresh_period)
1576
+ global GUAC_REFRESH_PERIOD
+ GUAC_REFRESH_PERIOD = int(refresh_period)
735
1577
  except ValueError:
736
1578
  pass
737
-
1579
+
738
1580
  setup_result = setup_guac_mode(
739
1581
  config_dir=config_dir,
740
1582
  plots_dir=plots_dir,
741
- npc_team_dir=npc_team_dir
1583
+ npc_team_dir=npc_team_dir,
1584
+ lang=lang,
1585
+ default_mode_choice=default_mode_choice
742
1586
  )
743
- guac_config_dir = setup_result["config_dir"]
744
- guac_src_dir = setup_result["src_dir"]
745
- guac_npc_team_dir = setup_result["npc_team_dir"]
746
- guac_default_mode = default_mode_choice or setup_result.get("default_mode", "cmd")
747
1587
 
748
- cmd_history = CommandHistory()
749
- current_npc = npc
750
- current_team = team
1588
+ project_name = setup_result.get("project_name", "project")
1589
+ package_root = setup_result["package_root"]
1590
+ package_name = setup_result.get("package_name", "project")
751
1591
 
752
- if current_npc is None and current_team is None:
753
- try:
754
- current_team = Team(team_path=str(guac_npc_team_dir), db_conn=None)
755
- if current_team and current_team.npcs:
756
- current_npc = current_team.get_npc("guac")
757
- if not current_npc:
758
- current_npc = current_team.get_foreman() or next(iter(current_team.npcs.values()), None)
759
- except Exception as e:
760
- print(f"Warning: Could not load Guac NPC team from {guac_npc_team_dir}: {e}", file=sys.stderr)
761
-
762
- initial_guac_state = GuacState(
763
- current_mode=guac_default_mode,
764
- npc=current_npc,
765
- team=current_team,
766
- command_history=cmd_history,
767
- chat_model=npcsh_initial_state.chat_model,
768
- chat_provider=npcsh_initial_state.chat_provider,
769
- config_dir=guac_config_dir,
770
- src_dir=guac_src_dir,
771
- locals={}
1592
+ command_history, default_team, default_npc = setup_shell()
1593
+
1594
+ state = ShellState(
1595
+ conversation_id=start_new_conversation(),
1596
+ stream_output=True,
1597
+ current_mode=setup_result.get("default_mode", "cmd"),
1598
+ chat_model=os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b"),
1599
+ chat_provider=os.environ.get("NPCSH_CHAT_PROVIDER", "ollama"),
1600
+ current_path=os.getcwd(),
1601
+ npc=npc or default_npc,
1602
+ team=team or default_team
772
1603
  )
1604
+
1605
+ state.command_history = command_history
773
1606
 
774
1607
  try:
775
- setup_guac_readline(READLINE_HISTORY_FILE)
776
- atexit.register(save_guac_readline_history, READLINE_HISTORY_FILE)
777
- except Exception as e:
778
- print(f'Could not set up readline: {e}', file=sys.stderr)
779
-
780
- atexit.register(cmd_history.close)
781
- run_guac_repl(initial_guac_state)
1608
+ readline.read_history_file(READLINE_HISTORY_FILE)
1609
+ readline.set_history_length(1000)
1610
+ readline.parse_and_bind("set enable-bracketed-paste on")
1611
+ except FileNotFoundError:
1612
+ pass
1613
+ except OSError as e:
1614
+ print(f"Warning: Could not read readline history file {READLINE_HISTORY_FILE}: {e}")
1615
+
1616
+ run_guac_repl(state, project_name, package_root, package_name)
782
1617
 
1618
+
1619
+
783
1620
  def main():
784
1621
  parser = argparse.ArgumentParser(description="Enter Guac Mode - Interactive Python with LLM assistance.")
785
1622
  parser.add_argument("--config_dir", type=str, help="Guac configuration directory.")
786
1623
  parser.add_argument("--plots_dir", type=str, help="Directory to save plots.")
787
- parser.add_argument("--npc_team_dir", type=str, default=os.path.expanduser('~/.npcsh/guac/npc_team/'),
788
- help="NPC team directory for Guac.")
1624
+ parser.add_argument("--npc_team_dir", type=str, default=None,
1625
+ help="NPC team directory for Guac. Defaults to ./npc_team")
789
1626
  parser.add_argument("--refresh_period", type=int, help="Number of commands before suggesting /refresh.")
790
- parser.add_argument("--default_mode", type=str, choices=["agent", "chat", "cmd", "ride"],
1627
+ parser.add_argument("--default_mode", type=str, choices=["agent", "chat", "cmd"],
791
1628
  help="Default mode to start in.")
792
1629
 
793
1630
  args = parser.parse_args()