npcsh 1.0.27__tar.gz → 1.0.29__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {npcsh-1.0.27 → npcsh-1.0.29}/PKG-INFO +1 -1
  2. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/_state.py +27 -20
  3. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/corca.py +116 -97
  4. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/guac.py +127 -140
  5. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npcsh.py +10 -4
  6. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh.egg-info/PKG-INFO +1 -1
  7. {npcsh-1.0.27 → npcsh-1.0.29}/setup.py +1 -1
  8. {npcsh-1.0.27 → npcsh-1.0.29}/LICENSE +0 -0
  9. {npcsh-1.0.27 → npcsh-1.0.29}/README.md +0 -0
  10. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/__init__.py +0 -0
  11. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/alicanto.py +0 -0
  12. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/mcp_helpers.py +0 -0
  13. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/mcp_server.py +0 -0
  14. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc.py +0 -0
  15. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/alicanto.npc +0 -0
  16. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/alicanto.png +0 -0
  17. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/corca.npc +0 -0
  18. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/corca.png +0 -0
  19. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/foreman.npc +0 -0
  20. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/frederic.npc +0 -0
  21. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/frederic4.png +0 -0
  22. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/guac.png +0 -0
  23. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/jinxs/bash_executer.jinx +0 -0
  24. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/jinxs/edit_file.jinx +0 -0
  25. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/jinxs/image_generation.jinx +0 -0
  26. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/jinxs/internet_search.jinx +0 -0
  27. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/jinxs/python_executor.jinx +0 -0
  28. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/jinxs/screen_cap.jinx +0 -0
  29. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/kadiefa.npc +0 -0
  30. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/kadiefa.png +0 -0
  31. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/npcsh.ctx +0 -0
  32. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/npcsh_sibiji.png +0 -0
  33. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/plonk.npc +0 -0
  34. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/plonk.png +0 -0
  35. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/plonkjr.npc +0 -0
  36. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/plonkjr.png +0 -0
  37. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/sibiji.npc +0 -0
  38. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/sibiji.png +0 -0
  39. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/spool.png +0 -0
  40. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/npc_team/yap.png +0 -0
  41. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/plonk.py +0 -0
  42. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/pti.py +0 -0
  43. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/routes.py +0 -0
  44. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/spool.py +0 -0
  45. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/wander.py +0 -0
  46. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh/yap.py +0 -0
  47. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh.egg-info/SOURCES.txt +0 -0
  48. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh.egg-info/dependency_links.txt +0 -0
  49. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh.egg-info/entry_points.txt +0 -0
  50. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh.egg-info/requires.txt +0 -0
  51. {npcsh-1.0.27 → npcsh-1.0.29}/npcsh.egg-info/top_level.txt +0 -0
  52. {npcsh-1.0.27 → npcsh-1.0.29}/setup.cfg +0 -0
npcsh/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.27
+Version: 1.0.29
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
npcsh/_state.py
@@ -5,11 +5,18 @@ import filecmp
 import os
 from pathlib import Path
 import platform
-import pty
 try:
+    import pty
+    import tty
+
+    import termios
+
     import readline
 except:
-    pass
+    readline = None
+    pty = None
+    tty = None
+
 import re
 import select
 import shlex
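With this change the Unix-only terminal modules become optional at import time, and downstream code checks them for None instead of failing on Windows. A standalone sketch of the same guarded-import pattern, not the package's exact code (this variant also resets termios, which the hunk above leaves to its try-block assignment):

    # Minimal sketch of the guarded-import pattern, assuming only stdlib modules.
    try:
        import pty
        import termios
        import tty
        import readline
    except ImportError:
        # On Windows (or stripped-down builds) these modules do not exist.
        pty = termios = tty = readline = None

    def unix_terminal_available() -> bool:
        # Callers test for None instead of risking an ImportError at startup.
        return None not in (pty, termios, tty)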
@@ -18,11 +25,8 @@ import signal
 import sqlite3
 import subprocess
 import sys
-from termcolor import colored
-import termios
 import time
 from typing import Dict, List, Any, Tuple, Union, Optional
-import tty
 import logging
 import textwrap
 from termcolor import colored
@@ -432,6 +436,12 @@ def get_shell_config_file() -> str:
     return os.path.expanduser("~/.bashrc")


+def get_team_ctx_path(team_path: str) -> Optional[str]:
+    """Find the first .ctx file in the team directory"""
+    team_dir = Path(team_path)
+    ctx_files = list(team_dir.glob("*.ctx"))
+    return str(ctx_files[0]) if ctx_files else None
+

 def add_npcshrc_to_shell_config() -> None:
     """
@@ -765,7 +775,7 @@ def start_interactive_session(command: str) -> int:
     Starts an interactive session. Only works on Unix. On Windows, print a message and return 1.
     """
     ON_WINDOWS = platform.system().lower().startswith("win")
-    if ON_WINDOWS or termios is None or tty is None or pty is None or select is None or signal is None:
+    if ON_WINDOWS or termios is None or tty is None or pty is None or select is None or signal is None or tty is None:
        print("Interactive terminal sessions are not supported on Windows.")
        return 1

@@ -2380,17 +2390,16 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:


     team_ctx = {}
-    for filename in os.listdir(team_dir):
-        if filename.endswith(".ctx"):
-            try:
-                with open(os.path.join(team_dir, filename), "r") as f:
-                    team_ctx = yaml.safe_load(f) or {}
-                break
-            except Exception as e:
-                print(f"Warning: Could not load context file {filename}: {e}")
-
+    team_ctx_path = get_team_ctx_path(team_dir)
+    if team_ctx_path:
+        try:
+            with open(team_ctx_path, "r") as f:
+                team_ctx = yaml.safe_load(f) or {}
+        except Exception as e:
+            print(f"Warning: Could not load context file {os.path.basename(team_ctx_path)}: {e}")
     forenpc_name = team_ctx.get("forenpc", default_forenpc_name)

+    print('forenpc_name:', forenpc_name)

     if team_ctx.get("use_global_jinxs", False):
         jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
@@ -2403,11 +2412,8 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     forenpc_obj = None
     forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")

+    print('forenpc_path:', forenpc_path)

-
-
-
-
     if os.path.exists(forenpc_path):
         forenpc_obj = NPC(file = forenpc_path,
                           jinxs=jinxs_list,
@@ -2554,7 +2560,8 @@ def process_result(
     characterization = summary.get('output')

     if characterization and result_state.team:
-        team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+
+        team_ctx_path = os.path.join(result_state.team.team_path, ".ctx")
         ctx_data = {}
         if os.path.exists(team_ctx_path):
            with open(team_ctx_path, 'r') as f:
npcsh/corca.py
@@ -33,6 +33,7 @@ from npcsh._state import (
     should_skip_kg_processing,
     NPCSH_CHAT_PROVIDER,
     NPCSH_CHAT_MODEL,
+    get_team_ctx_path
 )
 import yaml
 from pathlib import Path
@@ -179,23 +180,21 @@ def process_mcp_stream(stream_response, active_npc):
     tool_calls = []

     interrupted = False
-
     sys.stdout.write('\033[s')
     sys.stdout.flush()
+
     try:
         for chunk in stream_response:
             if hasattr(active_npc, 'provider') and active_npc.provider == "ollama" and 'gpt-oss' not in active_npc.model:
                 if hasattr(chunk, 'message') and hasattr(chunk.message, 'tool_calls') and chunk.message.tool_calls:
                     for tool_call in chunk.message.tool_calls:
-                        tool_call_data = {
-                            'id': getattr(tool_call, 'id', ''),
+                        tool_call_data = {'id': getattr(tool_call, 'id', ''),
                             'type': 'function',
                             'function': {
                                 'name': getattr(tool_call.function, 'name', '') if hasattr(tool_call, 'function') else '',
                                 'arguments': getattr(tool_call.function, 'arguments', {}) if hasattr(tool_call, 'function') else {}
                             }
                         }
-
                         if isinstance(tool_call_data['function']['arguments'], str):
                             try:
                                 tool_call_data['function']['arguments'] = json.loads(tool_call_data['function']['arguments'])
@@ -203,7 +202,6 @@ def process_mcp_stream(stream_response, active_npc):
                                 tool_call_data['function']['arguments'] = {'raw': tool_call_data['function']['arguments']}

                         tool_calls.append(tool_call_data)
-
                 if hasattr(chunk, 'message') and hasattr(chunk.message, 'content') and chunk.message.content:
                     collected_content += chunk.message.content
                     print(chunk.message.content, end='', flush=True)
@@ -230,7 +228,6 @@ def process_mcp_stream(stream_response, active_npc):

                     if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
                         tool_calls[idx]['id'] = tool_call_delta.id
-
                     if hasattr(tool_call_delta, 'function'):
                         if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
                             tool_calls[idx]['function']['name'] = tool_call_delta.function.name
@@ -239,66 +236,63 @@
                             tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
     except KeyboardInterrupt:
         interrupted = True
-        print('\n⚠️ Stream interrupted by user')
+        print('⚠️ Stream interrupted by user')

     sys.stdout.write('\033[u')
     sys.stdout.write('\033[J')
     sys.stdout.flush()

+    # Use the render_markdown function for proper markdown rendering
     render_markdown(collected_content)
-    print('\n')
     return collected_content, tool_calls
-
 def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
-    mcp_tools_for_llm = []
-
-    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
-        all_available_mcp_tools = state.mcp_client.available_tools_llm
-
-        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
-            mcp_tools_for_llm = [
-                tool_def for tool_def in all_available_mcp_tools
-                if tool_def['function']['name'] in selected_mcp_tools_names
-            ]
-            if not mcp_tools_for_llm:
-                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
-        else:
-            mcp_tools_for_llm = all_available_mcp_tools
-    else:
-        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
-
-    active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
-
-    response_dict = get_llm_response(
-        prompt=command,
-        npc=state.npc,
-        messages=state.messages,
-        tools=mcp_tools_for_llm,
-        auto_process_tool_calls=False,
-        stream=state.stream_output
-    )
-
-    stream_response = response_dict.get('response')
-    messages = response_dict.get('messages', state.messages)
+    mcp_tools_for_llm = []
+
+    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
+        all_available_mcp_tools = state.mcp_client.available_tools_llm
+
+        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
+            mcp_tools_for_llm = [
+                tool_def for tool_def in all_available_mcp_tools
+                if tool_def['function']['name'] in selected_mcp_tools_names
+            ]
+            if not mcp_tools_for_llm:
+                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
+        else:
+            mcp_tools_for_llm = all_available_mcp_tools
+    else:
+        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
+
+    active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
+
+    response_dict = get_llm_response(
+        prompt=command,
+        npc=state.npc,
+        messages=state.messages,
+        tools=mcp_tools_for_llm,
+        auto_process_tool_calls=False,
+        stream=state.stream_output,
+        team=state.team
+    )

-    print("DEBUG: Processing stream response...")
-    collected_content, tool_calls = process_mcp_stream(stream_response, active_npc)
+    stream_response = response_dict.get('response')
+    messages = response_dict.get('messages', state.messages)
+
+    collected_content, tool_calls = process_mcp_stream(stream_response, active_npc)

-    print(f"\nDEBUG: Final collected_content: {collected_content}")
-    print(f"DEBUG: Final tool_calls: {tool_calls}")
-
-    state.messages = messages
-    if collected_content or tool_calls:
-        assistant_message = {"role": "assistant", "content": collected_content}
-        if tool_calls:
-            assistant_message["tool_calls"] = tool_calls
-        state.messages.append(assistant_message)
-
-    return state, {
-        "output": collected_content,
-        "tool_calls": tool_calls,
-        "messages": state.messages
-    }
+
+    state.messages = messages
+    if collected_content or tool_calls:
+        assistant_message = {"role": "assistant", "content": collected_content}
+        if tool_calls:
+            assistant_message["tool_calls"] = tool_calls
+        state.messages.append(assistant_message)
+
+    return state, {
+        "output": collected_content,
+        "tool_calls": tool_calls,
+        "messages": state.messages
+    }


 def _resolve_and_copy_mcp_server_path(
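The tool-selection logic above filters OpenAI-style function definitions by name before passing them to get_llm_response, and an empty result degrades to a warning rather than an error. A minimal sketch of just that filter, with made-up tool names:

    # Illustrative tool definitions; only the shape matters here.
    all_available_mcp_tools = [
        {"type": "function", "function": {"name": "read_file", "parameters": {}}},
        {"type": "function", "function": {"name": "web_search", "parameters": {}}},
    ]
    selected_mcp_tools_names = ["read_file"]

    # Keep only the definitions whose function name was explicitly selected.
    mcp_tools_for_llm = [
        tool_def for tool_def in all_available_mcp_tools
        if tool_def["function"]["name"] in selected_mcp_tools_names
    ]
    print([t["function"]["name"] for t in mcp_tools_for_llm])  # ['read_file']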
@@ -306,7 +300,7 @@ def _resolve_and_copy_mcp_server_path(
     current_path: Optional[str],
     team_ctx_mcp_servers: Optional[List[Dict[str, str]]],
     interactive: bool = False,
-    auto_copy_bypass: bool = False # <-- New parameter
+    auto_copy_bypass: bool = False
 ) -> Optional[str]:
     default_mcp_server_name = "mcp_server.py"
     npcsh_default_template_path = Path(__file__).parent / default_mcp_server_name
@@ -318,13 +312,12 @@
         return None

     if not destination_file.exists():
-        # Check auto_copy_bypass first
-        if auto_copy_bypass or not interactive: # If bypass is true OR not interactive, auto-copy
+        if auto_copy_bypass or not interactive:
             destination_dir.mkdir(parents=True, exist_ok=True)
             shutil.copy(npcsh_default_template_path, destination_file)
             print(colored(f"Automatically copied default {default_mcp_server_name} to {destination_file}", "green"))
             return destination_file
-        else: # Only ask if interactive and no bypass
+        else:
             choice = input(colored(f"No {default_mcp_server_name} found in {description}. Copy default template to {destination_file}? (y/N): ", "yellow")).strip().lower()
             if choice == 'y':
                 destination_dir.mkdir(parents=True, exist_ok=True)
@@ -417,15 +410,31 @@ def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None
     )
     state.command_history = command_history

-    # Read NPCSH_CORCA_AUTO_COPY_MCP_SERVER from environment for non-interactive calls
+    team_ctx_mcp_servers = None
+    if team and hasattr(team, 'team_path'):
+        team_ctx = _load_team_context(team.team_path)
+        team_ctx_mcp_servers = team_ctx.get('mcp_servers', [])
+
+        if npc and isinstance(npc, NPC):
+            if not npc.model and team_ctx.get('model'):
+                npc.model = team_ctx['model']
+            if not npc.provider and team_ctx.get('provider'):
+                npc.provider = team_ctx['provider']
+
+        if not state.chat_model and team_ctx.get('model'):
+            state.chat_model = team_ctx['model']
+        if not state.chat_provider and team_ctx.get('provider'):
+            state.chat_provider = team_ctx['provider']
+
     auto_copy_bypass = os.getenv("NPCSH_CORCA_AUTO_COPY_MCP_SERVER", "false").lower() == "true"

     resolved_server_path = _resolve_and_copy_mcp_server_path(
         explicit_path=mcp_server_path_from_request,
         current_path=current_path,
-        team_ctx_mcp_servers=team.team_ctx.get('mcp_servers', []) if team and hasattr(team, 'team_ctx') else None,
-        interactive=False, # Always non-interactive for Flask API calls
-        auto_copy_bypass=auto_copy_bypass # Pass env var setting
+        team_ctx_mcp_servers=team_ctx_mcp_servers,
+        interactive=False,
+        auto_copy_bypass=auto_copy_bypass,
+        force_global=False
     )

     state.mcp_client = None
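The new team-context plumbing fills in model and provider only when they are unset, so explicit per-NPC settings take precedence over team.ctx values, which in turn fill gaps in the shell defaults. A standalone sketch of that precedence with illustrative values:

    # Illustrative "fill only if unset" precedence; values are stand-ins.
    team_ctx = {"model": "llama3.2", "provider": "ollama"}

    npc_model = None          # unset: inherits from team_ctx
    npc_provider = "openai"   # set per-NPC: kept as-is

    if not npc_model and team_ctx.get("model"):
        npc_model = team_ctx["model"]
    if not npc_provider and team_ctx.get("provider"):
        npc_provider = team_ctx["provider"]

    print(npc_model, npc_provider)  # llama3.2 openai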
@@ -500,7 +509,6 @@
         tool_args = {}

     try:
-        print(f" Calling MCP tool: {tool_name} with args: {tool_args}")

         loop = asyncio.get_event_loop()
         if loop.is_closed():
@@ -511,15 +519,12 @@
             result_state.mcp_client.session.call_tool(tool_name, tool_args)
         )

-        print(f"DEBUG: MCP result type: {type(mcp_result)}")
-        print(f"DEBUG: MCP result: {mcp_result}")
-        print(f"DEBUG: MCP result attributes: {dir(mcp_result)}")

         tool_content = ""
         if hasattr(mcp_result, 'content') and mcp_result.content:
-            print(f"DEBUG: content type: {type(mcp_result.content)}")
+
             for i, content_item in enumerate(mcp_result.content):
-                print(f"DEBUG: content_item[{i}]: {content_item} (type: {type(content_item)})")
+
                 if hasattr(content_item, 'text'):
                     tool_content += content_item.text
                 else:
@@ -527,8 +532,7 @@
         else:
             tool_content = str(mcp_result)

-        print(f"DEBUG: Extracted content length: {len(tool_content)}")
-        print(f"DEBUG: Extracted content preview: {tool_content[:200]}")
+

         tool_responses.append({
             "role": "tool",
@@ -615,26 +619,16 @@
             result_state.mcp_client.session.call_tool(tool_name, tool_args)
         )

-        print(f"DEBUG: MCP result type: {type(mcp_result)}")
-        print(f"DEBUG: MCP result: {mcp_result}")
-        print(f"DEBUG: MCP result.isError: {mcp_result.isError}")
-        print(f"DEBUG: MCP result.meta: {mcp_result.meta}")
-        print(f"DEBUG: MCP result.content length: {len(mcp_result.content)}")

         tool_content = ""
         if hasattr(mcp_result, 'content') and mcp_result.content:
             for i, content_item in enumerate(mcp_result.content):
-                print(f"DEBUG: content_item[{i}] full object: {repr(content_item)}")
-                print(f"DEBUG: content_item[{i}] text attribute: '{content_item.text}'")
-                print(f"DEBUG: content_item[{i}] text length: {len(content_item.text) if content_item.text else 0}")

                 if hasattr(content_item, 'text') and content_item.text:
                     tool_content += content_item.text
                 elif hasattr(content_item, 'data'):
-                    print(f"DEBUG: content_item[{i}] has data: {content_item.data}")
                     tool_content += str(content_item.data)
                 else:
-                    print(f"DEBUG: content_item[{i}] converting to string: {str(content_item)}")
                     tool_content += str(content_item)
         result_state.messages.append({
             "role": "tool",
@@ -717,11 +711,11 @@
     characterization = summary.get('output')

     if characterization and result_state.team:
-        team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
-        ctx_data = {}
-        if os.path.exists(team_ctx_path):
-            with open(team_ctx_path, 'r') as f:
-                ctx_data = yaml.safe_load(f) or {}
+        team_ctx_path = get_team_ctx_path(result_state.team.team_path)
+        if not team_ctx_path:
+            team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+
+        ctx_data = _load_team_context(result_state.team.team_path)
         current_context = ctx_data.get('context', '')

         prompt = f"""Based on this characterization: {characterization},
  prompt = f"""Based on this characterization: {characterization},
@@ -736,28 +730,39 @@ def process_corca_result(
736
730
  "suggestion": "Your sentence.
737
731
  }
738
732
  """
739
- response = get_llm_response(prompt, npc=active_npc, format="json")
733
+ response = get_llm_response(prompt,
734
+ npc=active_npc,
735
+ format="json",
736
+ team=result_state.team)
740
737
  suggestion = response.get("response", {}).get("suggestion")
741
738
 
742
739
  if suggestion:
743
740
  new_context = (current_context + " " + suggestion).strip()
744
741
  print(colored(f"{result_state.npc.name} suggests updating team context:", "yellow"))
745
742
  print(f" - OLD: {current_context}\n + NEW: {new_context}")
746
- if input("Apply? [y/N]: ").strip().lower() == 'y':
743
+
744
+ choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
745
+
746
+ if choice == 'y':
747
747
  ctx_data['context'] = new_context
748
748
  with open(team_ctx_path, 'w') as f:
749
749
  yaml.dump(ctx_data, f)
750
750
  print(colored("Team context updated.", "green"))
751
+ elif choice == 'e':
752
+ edited_context = input(f"Edit context [{new_context}]: ").strip()
753
+ if edited_context:
754
+ ctx_data['context'] = edited_context
755
+ else:
756
+ ctx_data['context'] = new_context
757
+ with open(team_ctx_path, 'w') as f:
758
+ yaml.dump(ctx_data, f)
759
+ print(colored("Team context updated with edits.", "green"))
751
760
  else:
752
- print("Suggestion declined.")
761
+ print("Suggestion declined.")
753
762
  except Exception as e:
754
763
  import traceback
755
764
  print(colored(f"Could not generate team suggestions: {e}", "yellow"))
756
765
  traceback.print_exc()
757
-
758
-
759
-
760
-
761
766
 
762
767
  def _read_npcsh_global_env() -> Dict[str, str]:
763
768
  global_env_file = Path(".npcsh_global")
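The new e(dit) branch lets the user amend the suggested context before it is persisted, with an empty edit falling back to the suggestion. A minimal sketch of just the prompt flow, with the YAML write elided (function name is illustrative):

    from typing import Optional

    def confirm_context_update(current: str, suggestion: str) -> Optional[str]:
        # Returns the context string to persist, or None if declined.
        new_context = (current + " " + suggestion).strip()
        choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
        if choice == "y":
            return new_context
        if choice == "e":
            edited = input(f"Edit context [{new_context}]: ").strip()
            return edited or new_context  # empty edit keeps the suggestion
        return None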
@@ -774,6 +779,20 @@ def _read_npcsh_global_env() -> Dict[str, str]:
             print(f"Warning: Could not read .npcsh_global: {e}")
     return env_vars

+def _load_team_context(team_path: str) -> Dict[str, Any]:
+    """Load team context from any .ctx file in the team directory"""
+    ctx_path = get_team_ctx_path(team_path)
+    if not ctx_path or not os.path.exists(ctx_path):
+        return {}
+
+    try:
+        with open(ctx_path, 'r') as f:
+            return yaml.safe_load(f) or {}
+    except Exception as e:
+        print(f"Warning: Could not load team context from {ctx_path}: {e}")
+        return {}
+
+
 def _write_to_npcsh_global(key: str, value: str) -> None:
     global_env_file = Path(".npcsh_global")
     env_vars = _read_npcsh_global_env()
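_load_team_context collapses both failure modes (no .ctx file, unparseable YAML) to an empty dict, so call sites can chain .get() without their own try/except. A usage sketch under that assumption, with an illustrative path:

    # Assumes _load_team_context is in scope (it is module-private in corca.py).
    ctx = _load_team_context("/tmp/demo_npc_team")
    mcp_servers = ctx.get("mcp_servers", [])   # [] when context is missing
    model = ctx.get("model")                   # None when not configured
    provider = ctx.get("provider")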
@@ -873,7 +893,6 @@

     cprint("No MCP server script found in any expected location.", "yellow")
     return None
-
 def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None, team=None,
                                       current_path=None, mcp_server_path_from_request: Optional[str] = None):
     from npcsh._state import ShellState
@@ -1032,7 +1051,7 @@
     elif os.path.exists(global_corca_path):
         default_npc = NPC(file=global_corca_path,
                           db_conn=command_history.engine)
-    print('Team Default: ', team.provider, team.model)
+
     if default_npc.model is None:
         if team.model is not None:
             default_npc.model = team.model
@@ -1063,6 +1082,6 @@
     }

     enter_corca_mode(**kwargs)
-
+
 if __name__ == "__main__":
     main()
npcsh/guac.py
@@ -1,4 +1,3 @@
-from chroptiks.plotting_utils import *
 from datetime import datetime
 import json
 import numpy as np
@@ -7,12 +6,24 @@ import pandas as pd
 import sys
 import argparse
 import importlib.metadata
-import matplotlib.pyplot as plt
+import matplotlib
+import platform
+import queue
+plot_queue = queue.Queue()
+
+if platform.system() == 'Darwin':
+    try:
+        matplotlib.use('TkAgg')
+    except ImportError:
+        matplotlib.use('Agg')
+else:
+    matplotlib.use('TkAgg')
+
+import matplotlib.pyplot as plt
+from chroptiks.plotting_utils import *

 import logging
-plt.ioff()
 import shlex
-import platform
 import yaml
 import re
 from pathlib import Path
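The backend has to be chosen before matplotlib.pyplot is imported, which is why the chroptiks star-import moves below the matplotlib.use call. The hunk's try/except assumes matplotlib.use raises ImportError when TkAgg is unavailable; an alternative sketch that probes for tkinter directly instead (an assumption, not the package's code):

    import importlib.util

    import matplotlib

    # Pick the interactive TkAgg backend only when tkinter is importable,
    # otherwise fall back to the headless Agg backend.
    backend = "TkAgg" if importlib.util.find_spec("tkinter") else "Agg"
    matplotlib.use(backend)

    import matplotlib.pyplot as plt  # import pyplot only after the backend is pinned

    print(matplotlib.get_backend())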
@@ -30,7 +41,7 @@ from npcpy.memory.command_history import CommandHistory, start_new_conversation
 from npcpy.npc_compiler import Team, NPC
 from npcpy.llm_funcs import get_llm_response
 from npcpy.npc_sysenv import render_markdown,print_and_process_stream
-
+from npcpy.data.load import load_file_contents

 from npcsh._state import (
     ShellState,
@@ -40,7 +51,8 @@ from npcsh._state import (
     readline_safe_prompt,
     setup_shell,
     get_multiline_input,
-    orange
+    orange,
+    get_team_ctx_path,
 )
 import threading
 import time
@@ -97,9 +109,6 @@ def _clear_readline_buffer():
     return False

 def _file_drop_monitor(npc_team_dir: Path, state: ShellState, locals_dict: Dict[str, Any], poll_interval: float = 0.2):
-    """
-    Background thread: poll readline.get_line_buffer() and process file drops immediately.
-    """
     processed_bufs = set()
     stop_event = _guac_monitor_stop_event
     while stop_event is None or not stop_event.is_set():
@@ -113,48 +122,35 @@ def _file_drop_monitor(npc_team_dir: Path, state: ShellState, locals_dict: Dict[
                time.sleep(poll_interval)
                continue

-
            candidate = buf.strip()
-
            if (candidate.startswith("'") and candidate.endswith("'")) or (candidate.startswith('"') and candidate.endswith('"')):
                inner = candidate[1:-1]
            else:
                inner = candidate

-
            if " " not in inner and Path(inner.replace('~', str(Path.home()))).expanduser().exists() and Path(inner.replace('~', str(Path.home()))).expanduser().is_file():
-
                if buf in processed_bufs:
                    time.sleep(poll_interval)
                    continue
                processed_bufs.add(buf)

-
                try:
-
-
                    modified_input, processed_files = _handle_file_drop(buf, npc_team_dir)
                    if processed_files:
                        target_path = processed_files[0]
-
                        loading_code = _generate_file_analysis_code(inner, target_path)
-
-                        print("\n[guac] Detected file drop — processing automatically...")
-
-                        _state, exec_output = execute_python_code(loading_code, state, locals_dict)
-
-                        if exec_output:
-                            print(exec_output)
-
+
+                        plot_queue.put(('execute_code', loading_code, state, locals_dict))
+                        print("\n[guac] Detected file drop — queued for processing...")
                    _clear_readline_buffer()
                except Exception as e:
                    print(f"[guac][ERROR] file drop processing failed: {e}")
        except Exception:
-
            pass
        time.sleep(poll_interval)


+
 def is_python_code(text: str) -> bool:
     text = text.strip()
     if not text:
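The monitor thread no longer executes dropped-file code itself; it hands the work to the main loop through plot_queue, which keeps execution (and any matplotlib calls) off the background thread. A stripped-down sketch of both halves of that handoff (names mirror the diff, the payload and exec call are illustrative stand-ins for execute_python_code):

    import queue
    import threading
    import time

    work_queue = queue.Queue()  # stands in for plot_queue

    def monitor():
        # Producer thread: enqueue the work item instead of executing it here.
        work_queue.put(("execute_code", "print('loaded dropped file')"))

    threading.Thread(target=monitor, daemon=True).start()
    time.sleep(0.2)  # give the monitor a moment to enqueue

    # Consumer (main thread): drain without blocking, as the REPL loop does.
    try:
        while True:
            operation, code = work_queue.get_nowait()
            if operation == "execute_code":
                exec(code)  # guac calls execute_python_code(...) here
    except queue.Empty:
        pass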
@@ -476,13 +472,43 @@ def ensure_global_guac_team():
     print(f"✅ Created global guac team.ctx at {ctx_path}")

     return team_dir
-def setup_guac_mode(config_dir=None, plots_dir=None, npc_team_dir=None,
-                    lang='python', default_mode_choice=None):
-    base_dir = Path.cwd()
+
+
+def setup_guac_mode(config_dir=None,
+                    plots_dir=None,
+                    npc_team_dir=None,
+                    lang='python',
+                    default_mode_choice=None):
+    base_dir = Path.cwd()

-
-    if GUAC_GLOBAL_FLAG_FILE.exists():
-        print("💡 Using global Guac team as default (previously set).")
+    local_npc_team = base_dir / "npc_team"
+    if local_npc_team.exists():
+        npc_team_dir = local_npc_team
+        workspace_dirs = _get_workspace_dirs(npc_team_dir)
+        _ensure_workspace_dirs(workspace_dirs)
+
+        team_ctx_path = npc_team_dir / "team.ctx"
+        existing_ctx = {}
+        if team_ctx_path.exists():
+            try:
+                with open(team_ctx_path, "r") as f:
+                    existing_ctx = yaml.safe_load(f) or {}
+            except Exception as e:
+                print(f"Warning: Could not read team.ctx: {e}")
+
+        package_root = existing_ctx.get("GUAC_PACKAGE_ROOT", str(base_dir))
+        package_name = existing_ctx.get("GUAC_PACKAGE_NAME", "project")
+        project_description = existing_ctx.get("GUAC_PROJECT_DESCRIPTION", "Local guac team")
+
+        return {
+            "language": lang, "package_root": Path(package_root), "plots_dir": plots_dir,
+            "npc_team_dir": npc_team_dir, "config_dir": config_dir, "default_mode": default_mode_choice or "agent",
+            "project_description": project_description, "package_name": package_name
+        }
+
+    global_flag_file = base_dir / ".npcsh_global"
+    if global_flag_file.exists() or os.environ.get("GUAC_USE_GLOBAL") == "1":
+        print("Using global Guac team")
         team_dir = ensure_global_guac_team()
         return {
             "language": lang, "package_root": team_dir, "plots_dir": plots_dir,
@@ -490,7 +516,6 @@ def setup_guac_mode(config_dir=None, plots_dir=None, npc_team_dir=None,
             "project_description": "Global guac team for analysis.", "package_name": "guac"
         }

-
     if npc_team_dir is None:
         npc_team_dir = base_dir / "npc_team"
     else:
@@ -518,8 +543,9 @@
             response = input("Enter package name (Enter for 'project'): ").strip()
             package_name = response if response else "project"
         except (KeyboardInterrupt, EOFError):
-            print("⚠️ Project setup interrupted. Falling back to global guac team...")
-            GUAC_GLOBAL_FLAG_FILE.touch()
+            print("Project setup interrupted. Falling back to global guac team...")
+            global_flag_file.touch()
+            os.environ["GUAC_USE_GLOBAL"] = "1"
             team_dir = ensure_global_guac_team()
             return {
                 "language": lang, "package_root": team_dir, "plots_dir": plots_dir,
@@ -573,6 +599,7 @@ setup(name="{package_name}", version="0.0.1", description="{desc}", packages=fin
         "npc_team_dir": npc_team_dir, "config_dir": config_dir, "default_mode": default_mode_val,
         "project_description": project_description, "package_name": package_name
     }
+
 def setup_npc_team(npc_team_dir, lang, is_subteam=False):

     guac_npc = {
@@ -694,50 +721,6 @@ class FileAnalysisState(Base):
     variable_names = Column(Text)
     timestamp = Column(DateTime, default=func.now())

-def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
-    """Capture plot state if significant change"""
-    if not plt.get_fignums():
-        return
-
-    engine = create_engine(f'sqlite:///{db_path}')
-    Base.metadata.create_all(engine)
-    Session = sessionmaker(bind=engine)
-    session = Session()
-
-
-    fig = plt.gcf()
-    axes = fig.get_axes()
-    data_points = sum(len(line.get_xdata()) for ax in axes for line in ax.get_lines())
-
-
-    plot_hash = hashlib.md5(f"{len(axes)}{data_points}".encode()).hexdigest()
-
-    last = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).first()
-    if last and last.plot_hash == plot_hash:
-        session.close()
-        return
-
-
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    workspace_dirs = _get_workspace_dirs(npc_team_dir)
-    plot_path = workspace_dirs["plots"] / f"state_{timestamp}.png"
-    plt.savefig(plot_path, dpi=150, bbox_inches='tight')
-
-
-    plot_state = PlotState(
-        session_id=session_id,
-        plot_hash=plot_hash,
-        plot_description=f"Plot with {len(axes)} axes, {data_points} points",
-        figure_path=str(plot_path),
-        data_summary=f"{data_points} data points",
-        change_significance=1.0 if not last else 0.5
-    )
-
-    session.add(plot_state)
-    session.commit()
-    session.close()
-    print(f"📊 Plot state captured -> {plot_path.name}")
-
 def _capture_file_state(session_id: str, db_path: str, file_path: str, analysis_code: str, locals_dict: Dict):
     """Capture file analysis state"""
     engine = create_engine(f'sqlite:///{db_path}')
@@ -986,24 +969,23 @@ def _handle_file_drop(input_text: str, npc_team_dir: Path) -> Tuple[str, List[st

     return modified_input, processed_files, file_paths

-
 def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
-    """Capture plot state if significant change"""
     if not plt.get_fignums():
         return

     try:
+        workspace_dirs = _get_workspace_dirs(npc_team_dir)
+        workspace_dirs["plots"].mkdir(parents=True, exist_ok=True)
+
         engine = create_engine(f'sqlite:///{db_path}')
         Base.metadata.create_all(engine)
         Session = sessionmaker(bind=engine)
         session = Session()

-
         fig = plt.gcf()
         axes = fig.get_axes()
         data_points = sum(len(line.get_xdata()) for ax in axes for line in ax.get_lines())

-
         plot_hash = hashlib.md5(f"{len(axes)}{data_points}".encode()).hexdigest()

         last = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).first()
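The capture logic fingerprints the current figure as the axis count plus the total number of line points, and skips saving when the digest matches the previous capture. A self-contained sketch of just that fingerprint (headless backend so it runs anywhere; data is illustrative):

    import hashlib

    import matplotlib
    matplotlib.use("Agg")  # headless for the sketch
    import matplotlib.pyplot as plt

    plt.plot([1, 2, 3], [4, 5, 6])
    fig = plt.gcf()
    axes = fig.get_axes()
    data_points = sum(len(line.get_xdata()) for ax in axes for line in ax.get_lines())

    # An identical digest means the figure is treated as unchanged and not re-saved.
    plot_hash = hashlib.md5(f"{len(axes)}{data_points}".encode()).hexdigest()
    print(len(axes), data_points, plot_hash)  # 1 3 <digest>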
@@ -1011,13 +993,10 @@ def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
         session.close()
         return

-
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    workspace_dirs = _get_workspace_dirs(npc_team_dir)
     plot_path = workspace_dirs["plots"] / f"state_{timestamp}.png"
     plt.savefig(plot_path, dpi=150, bbox_inches='tight')

-
     plot_state = PlotState(
         session_id=session_id,
         plot_hash=plot_hash,
@@ -1026,15 +1005,14 @@ def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
             data_summary=f"{data_points} data points",
             change_significance=1.0 if not last else 0.5
         )
-
+
         session.add(plot_state)
         session.commit()
         session.close()
-        print(f"📊 Plot state captured -> {plot_path.name}")
+        print(f"Plot state captured -> {plot_path.name}")

     except Exception as e:
         print(f"Error capturing plot state: {e}")
-
 def _capture_file_state(session_id: str, db_path: str, file_path: str, analysis_code: str, locals_dict: Dict):
     """Capture file analysis state"""
     try:
@@ -1165,7 +1143,7 @@ def _get_guac_agent_emoji(failures: int, max_fail: int = 3) -> str:
     return "🥑❓"


-GUAC_GLOBAL_FLAG_FILE = Path.home() / ".npcsh" / ".guac_use_global"
+


 def _run_agentic_mode(command: str,
@@ -1211,8 +1189,9 @@ def _run_agentic_mode(command: str,

    DO NOT SIMPLY COPY A PREVIOUS ATTEMPT.

-   Your goal is to generate Python code that BUILDS ON EXISTING VARIABLES to accomplish this task: {current_command}, with this next step planned: `{next_step} `
+   Your goal is to generate Python code that BUILDS ON EXISTING VARIABLES to respond to this task: USER TASK: "{current_command}", with this next step planned: `{next_step} `

+   If there is no relevant code to build on or the user is simply asking a question, generate new code as needed to respond to their questions.

    You will notice in the local envs that there are functions for reading, editing, and loading files.
    You should use these to your advantage as they will help you to clearly understand the user's system best.
@@ -1250,21 +1229,26 @@

    Do not over- complicate the code.

-   Do not include any '__name__'=='__main__' block.
+   DO NOT include any '__name__'=='__main__' block.
    """

     npc_model = state.npc.model if state.npc and state.npc.model else state.chat_model
     npc_provider = state.npc.provider if state.npc and state.npc.provider else state.chat_provider

+    print(state.npc.model)
+    print(state.chat_model)
     llm_response = get_llm_response(prompt,
                                     npc=state.npc,
                                     stream=True,
                                     messages=state.messages,
                                     thinking=False)

+    print(llm_response.get('response'))
+    print(npc_model, npc_provider)
+
     generated_code = print_and_process_stream(llm_response.get('response'),
                                               npc_model,
-                                              npc_provider
+                                              npc_provider,
                                               )

     state.messages.append({'role':'user', 'content':current_command })
@@ -1560,8 +1544,6 @@ def execute_guac_command(command: str, state: ShellState, locals_dict: Dict[str,
 def run_guac_repl(state: ShellState, project_name: str, package_root: Path, package_name: str):
     from npcsh.routes import router

-
-
     npc_team_dir = Path.cwd() / "npc_team"
     workspace_dirs = _get_workspace_dirs(npc_team_dir)
     _ensure_workspace_dirs(workspace_dirs)
@@ -1593,15 +1575,8 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack

     except Exception as e:
         print(f"Warning: Could not load package {package_name}: {e}", file=sys.stderr)
-
-    from npcpy.data.load import load_file_contents

     def read_file(file_path, max_lines=10000, encoding='utf-8'):
-        """
-        Read and print file contents up to max_lines.
-        Uses npcpy.data.load for specialized file types, falls back to text reading.
-        Returns the content as a string for further processing.
-        """
         path = Path(file_path).expanduser().resolve()

         if not path.exists():
@@ -1613,7 +1588,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
             return None

         try:
-
             file_ext = path.suffix.upper().lstrip('.')
             if file_ext in ['PDF', 'DOCX', 'PPTX', 'HTML', 'HTM', 'CSV', 'XLS', 'XLSX', 'JSON']:
                 chunks = load_file_contents(str(path), chunk_size=10000)
@@ -1635,7 +1609,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
                 print(f"End of {path.name}")
                 return content

-
         with open(path, 'r', encoding=encoding) as f:
             lines = []
             for i, line in enumerate(f, 1):
@@ -1674,19 +1647,10 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
             return None

     def edit_file(file_path, content=None, line_number=None, new_line=None, insert_at=None, append=False, backup=True):
-        """
-        Edit file contents in various ways:
-        - edit_file(path, content="new content") - replace entire file
-        - edit_file(path, line_number=5, new_line="new text") - replace specific line
-        - edit_file(path, insert_at=5, new_line="inserted text") - insert at line
-        - edit_file(path, append=True, content="appended") - append to file
-        """
         path = Path(file_path).expanduser().resolve()

-
         path.parent.mkdir(parents=True, exist_ok=True)

-
         if backup and path.exists():
             backup_path = path.with_suffix(path.suffix + '.backup')
             import shutil
1694
1658
  print(f"Backup saved: {backup_path.name}")
1695
1659
 
1696
1660
  try:
1697
-
1698
1661
  existing_lines = []
1699
1662
  if path.exists():
1700
1663
  with open(path, 'r', encoding='utf-8') as f:
@@ -1754,10 +1717,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
             return False

     def load_file(file_path):
-        """
-        Simple wrapper around npcpy's load_file_contents for direct data loading.
-        Returns the loaded data in appropriate format.
-        """
         path = Path(file_path).expanduser().resolve()

         if not path.exists():
@@ -1782,7 +1741,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
         'load_file':load_file,
     }

-
     locals_dict.update(core_imports)
     locals_dict.update({f"guac_{k}": v for k, v in workspace_dirs.items()})
@@ -1801,10 +1759,20 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack

     while True:
         try:
+            try:
+                while True:
+                    operation, code, exec_state, exec_locals = plot_queue.get_nowait()
+                    if operation == 'execute_code':
+                        print("\n[guac] Processing queued file drop...")
+                        exec_state, exec_output = execute_python_code(code, exec_state, exec_locals)
+                        if exec_output:
+                            print(exec_output)
+            except queue.Empty:
+                pass
+
             state.current_path = os.getcwd()

             display_model = state.chat_model
-
             if isinstance(state.npc, NPC) and state.npc.model:
                 display_model = state.npc.model
@@ -1865,7 +1833,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack



-
 def enter_guac_mode(npc=None,
                     team=None,
                     config_dir=None,
@@ -1897,25 +1864,6 @@ def enter_guac_mode(npc=None,

     command_history, default_team, default_npc = setup_shell()

-
-    if npc is None and default_npc is None:
-
-        guac_npc_path = Path(npc_team_dir) / "guac.npc"
-        if guac_npc_path.exists():
-            npc = NPC(file=str(guac_npc_path), db_conn=command_history.engine)
-
-            team_ctx_path = Path(npc_team_dir) / "team.ctx"
-            if team_ctx_path.exists():
-                with open(team_ctx_path, "r") as f:
-                    team_ctx = yaml.safe_load(f) or {}
-            team = Team(team_path=str(npc_team_dir), forenpc=npc, jinxs={})
-            team.name = team_ctx.get("team_name", "guac_global_team")
-        else:
-            raise RuntimeError(f"No NPC loaded and {guac_npc_path} not found!")
-    elif default_npc and npc is None:
-
-        npc = default_npc
-

     state = ShellState(
         conversation_id=start_new_conversation(),
@@ -1930,6 +1878,45 @@ def enter_guac_mode(npc=None,

     state.command_history = command_history

+    if npc is None and default_npc is None:
+        guac_npc_path = Path(npc_team_dir) / "guac.npc"
+        if guac_npc_path.exists():
+            npc = NPC(file=str(guac_npc_path),
+                      db_conn=command_history.engine)
+            print(guac_npc_path, npc)
+
+            team_ctx_path = get_team_ctx_path(str(npc_team_dir))
+            team_ctx = {}
+            if team_ctx_path and Path(team_ctx_path).exists():
+                with open(team_ctx_path, "r") as f:
+                    team_ctx = yaml.safe_load(f) or {}
+            print(team_ctx, team_ctx_path)
+            team = Team(team_path=str(npc_team_dir),
+                        forenpc=npc,
+                        jinxs={})
+            team.name = team_ctx.get("team_name", "guac_global_team")
+            team.team_ctx = team_ctx
+            print(team)
+            if npc.model is None:
+                npc.model = team_ctx.get("model", state.chat_model)
+            if npc.provider is None:
+                npc.provider = team_ctx.get("provider", state.chat_provider)
+
+            for npc_name, npc_obj in team.npcs.items():
+                if not npc_obj.model:
+                    npc_obj.model = team_ctx.get("model", state.chat_model)
+                if not npc_obj.provider:
+                    npc_obj.provider = team_ctx.get("provider", state.chat_provider)
+        else:
+            raise RuntimeError(f"No NPC loaded and {guac_npc_path} not found!")
+    elif default_npc and npc is None:
+        npc = default_npc
+    state.npc = npc or default_npc
+    state.team = team or default_team
+
+    state.plots_dir = setup_result.get("plots_dir")
+    state.config_dir = setup_result.get("config_dir")
+
     try:
         readline.read_history_file(READLINE_HISTORY_FILE)
         readline.set_history_length(1000)
npcsh/npcsh.py
@@ -213,13 +213,19 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
             )

         except KeyboardInterrupt:
-            if is_windows:
-                print("^C")
-                continue
-            else:
+            print("^C")
+            if input("\nExit? (y/n): ").lower().startswith('y'):
                 exit_shell(state)
+            continue
+
         except EOFError:
             exit_shell(state)
+        except Exception as e:
+            if is_windows and "EOF" in str(e).lower():
+                print("\nHint: On Windows, use Ctrl+Z then Enter for EOF, or type 'exit'")
+                continue
+            raise  # Re-raise if it's not the expected case
+
 def main() -> None:
     parser = argparse.ArgumentParser(description="npcsh - An NPC-powered shell.")
     parser.add_argument(
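The reworked handler now prints ^C and asks for confirmation before exiting on every platform, instead of exiting immediately on Unix and ignoring Ctrl-C on Windows. A minimal standalone REPL sketch of that shape (the loop body and exit call are illustrative):

    def repl() -> None:
        while True:
            try:
                line = input("> ")
                print(line)  # stand-in for command dispatch
            except KeyboardInterrupt:
                print("^C")
                if input("\nExit? (y/n): ").lower().startswith("y"):
                    break  # npcsh calls exit_shell(state) here
                continue
            except EOFError:
                break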
npcsh.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.27
+Version: 1.0.29
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
setup.py
@@ -78,7 +78,7 @@ extra_files = package_files("npcsh/npc_team/")

 setup(
     name="npcsh",
-    version="1.0.27",
+    version="1.0.29",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements,  # Only install base requirements by default
     extras_require={