npcsh 1.0.28__tar.gz → 1.0.29__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {npcsh-1.0.28 → npcsh-1.0.29}/PKG-INFO +1 -1
  2. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/_state.py +17 -14
  3. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/corca.py +115 -82
  4. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/guac.py +127 -140
  5. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh.egg-info/PKG-INFO +1 -1
  6. {npcsh-1.0.28 → npcsh-1.0.29}/setup.py +1 -1
  7. {npcsh-1.0.28 → npcsh-1.0.29}/LICENSE +0 -0
  8. {npcsh-1.0.28 → npcsh-1.0.29}/README.md +0 -0
  9. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/__init__.py +0 -0
  10. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/alicanto.py +0 -0
  11. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/mcp_helpers.py +0 -0
  12. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/mcp_server.py +0 -0
  13. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc.py +0 -0
  14. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/alicanto.npc +0 -0
  15. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/alicanto.png +0 -0
  16. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/corca.npc +0 -0
  17. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/corca.png +0 -0
  18. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/foreman.npc +0 -0
  19. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/frederic.npc +0 -0
  20. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/frederic4.png +0 -0
  21. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/guac.png +0 -0
  22. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/jinxs/bash_executer.jinx +0 -0
  23. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/jinxs/edit_file.jinx +0 -0
  24. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/jinxs/image_generation.jinx +0 -0
  25. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/jinxs/internet_search.jinx +0 -0
  26. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/jinxs/python_executor.jinx +0 -0
  27. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/jinxs/screen_cap.jinx +0 -0
  28. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/kadiefa.npc +0 -0
  29. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/kadiefa.png +0 -0
  30. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/npcsh.ctx +0 -0
  31. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/npcsh_sibiji.png +0 -0
  32. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/plonk.npc +0 -0
  33. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/plonk.png +0 -0
  34. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/plonkjr.npc +0 -0
  35. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/plonkjr.png +0 -0
  36. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/sibiji.npc +0 -0
  37. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/sibiji.png +0 -0
  38. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/spool.png +0 -0
  39. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npc_team/yap.png +0 -0
  40. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/npcsh.py +0 -0
  41. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/plonk.py +0 -0
  42. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/pti.py +0 -0
  43. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/routes.py +0 -0
  44. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/spool.py +0 -0
  45. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/wander.py +0 -0
  46. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh/yap.py +0 -0
  47. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh.egg-info/SOURCES.txt +0 -0
  48. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh.egg-info/dependency_links.txt +0 -0
  49. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh.egg-info/entry_points.txt +0 -0
  50. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh.egg-info/requires.txt +0 -0
  51. {npcsh-1.0.28 → npcsh-1.0.29}/npcsh.egg-info/top_level.txt +0 -0
  52. {npcsh-1.0.28 → npcsh-1.0.29}/setup.cfg +0 -0
{npcsh-1.0.28 → npcsh-1.0.29}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.28
+Version: 1.0.29
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
{npcsh-1.0.28 → npcsh-1.0.29}/npcsh/_state.py
@@ -436,6 +436,12 @@ def get_shell_config_file() -> str:
     return os.path.expanduser("~/.bashrc")
 
 
+def get_team_ctx_path(team_path: str) -> Optional[str]:
+    """Find the first .ctx file in the team directory"""
+    team_dir = Path(team_path)
+    ctx_files = list(team_dir.glob("*.ctx"))
+    return str(ctx_files[0]) if ctx_files else None
+
 
 def add_npcshrc_to_shell_config() -> None:
     """
@@ -2384,17 +2390,16 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
 
 
     team_ctx = {}
-    for filename in os.listdir(team_dir):
-        if filename.endswith(".ctx"):
-            try:
-                with open(os.path.join(team_dir, filename), "r") as f:
-                    team_ctx = yaml.safe_load(f) or {}
-                break
-            except Exception as e:
-                print(f"Warning: Could not load context file {filename}: {e}")
-
+    team_ctx_path = get_team_ctx_path(team_dir)
+    if team_ctx_path:
+        try:
+            with open(team_ctx_path, "r") as f:
+                team_ctx = yaml.safe_load(f) or {}
+        except Exception as e:
+            print(f"Warning: Could not load context file {os.path.basename(team_ctx_path)}: {e}")
     forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
 
+    print('forenpc_name:', forenpc_name)
 
     if team_ctx.get("use_global_jinxs", False):
         jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
@@ -2407,11 +2412,8 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     forenpc_obj = None
     forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
 
+    print('forenpc_path:', forenpc_path)
 
-
-
-
-
     if os.path.exists(forenpc_path):
         forenpc_obj = NPC(file = forenpc_path,
                           jinxs=jinxs_list,
@@ -2558,7 +2560,8 @@ def process_result(
         characterization = summary.get('output')
 
         if characterization and result_state.team:
-            team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+
+            team_ctx_path = os.path.join(result_state.team.team_path, ".ctx")
             ctx_data = {}
             if os.path.exists(team_ctx_path):
                 with open(team_ctx_path, 'r') as f:
{npcsh-1.0.28 → npcsh-1.0.29}/npcsh/corca.py
@@ -33,6 +33,7 @@ from npcsh._state import (
     should_skip_kg_processing,
     NPCSH_CHAT_PROVIDER,
     NPCSH_CHAT_MODEL,
+    get_team_ctx_path
 )
 import yaml
 from pathlib import Path
@@ -179,23 +180,21 @@ def process_mcp_stream(stream_response, active_npc):
     tool_calls = []
 
     interrupted = False
-
     sys.stdout.write('\033[s')
     sys.stdout.flush()
+
     try:
         for chunk in stream_response:
             if hasattr(active_npc, 'provider') and active_npc.provider == "ollama" and 'gpt-oss' not in active_npc.model:
                 if hasattr(chunk, 'message') and hasattr(chunk.message, 'tool_calls') and chunk.message.tool_calls:
                     for tool_call in chunk.message.tool_calls:
-                        tool_call_data = {
-                            'id': getattr(tool_call, 'id', ''),
+                        tool_call_data = {'id': getattr(tool_call, 'id', ''),
                             'type': 'function',
                             'function': {
                                 'name': getattr(tool_call.function, 'name', '') if hasattr(tool_call, 'function') else '',
                                 'arguments': getattr(tool_call.function, 'arguments', {}) if hasattr(tool_call, 'function') else {}
                             }
                         }
-
                         if isinstance(tool_call_data['function']['arguments'], str):
                             try:
                                 tool_call_data['function']['arguments'] = json.loads(tool_call_data['function']['arguments'])
@@ -203,7 +202,6 @@ def process_mcp_stream(stream_response, active_npc):
                                 tool_call_data['function']['arguments'] = {'raw': tool_call_data['function']['arguments']}
 
                         tool_calls.append(tool_call_data)
-
             if hasattr(chunk, 'message') and hasattr(chunk.message, 'content') and chunk.message.content:
                 collected_content += chunk.message.content
                 print(chunk.message.content, end='', flush=True)
@@ -230,7 +228,6 @@ def process_mcp_stream(stream_response, active_npc):
 
                         if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
                             tool_calls[idx]['id'] = tool_call_delta.id
-
                         if hasattr(tool_call_delta, 'function'):
                             if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
                                 tool_calls[idx]['function']['name'] = tool_call_delta.function.name
@@ -239,66 +236,63 @@ def process_mcp_stream(stream_response, active_npc):
                                 tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
     except KeyboardInterrupt:
         interrupted = True
-        print('\n⚠️ Stream interrupted by user')
+        print('⚠️ Stream interrupted by user')
 
     sys.stdout.write('\033[u')
     sys.stdout.write('\033[J')
     sys.stdout.flush()
 
+    # Use the render_markdown function for proper markdown rendering
    render_markdown(collected_content)
-    print('\n')
     return collected_content, tool_calls
-
 def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
-    mcp_tools_for_llm = []
-
-    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
-        all_available_mcp_tools = state.mcp_client.available_tools_llm
-
-        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
-            mcp_tools_for_llm = [
-                tool_def for tool_def in all_available_mcp_tools
-                if tool_def['function']['name'] in selected_mcp_tools_names
-            ]
-            if not mcp_tools_for_llm:
-                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
-        else:
-            mcp_tools_for_llm = all_available_mcp_tools
-    else:
-        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
-
-    active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
-
-    response_dict = get_llm_response(
-        prompt=command,
-        npc=state.npc,
-        messages=state.messages,
-        tools=mcp_tools_for_llm,
-        auto_process_tool_calls=False,
-        stream=state.stream_output
-    )
-
-    stream_response = response_dict.get('response')
-    messages = response_dict.get('messages', state.messages)
+    mcp_tools_for_llm = []
+
+    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
+        all_available_mcp_tools = state.mcp_client.available_tools_llm
+
+        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
+            mcp_tools_for_llm = [
+                tool_def for tool_def in all_available_mcp_tools
+                if tool_def['function']['name'] in selected_mcp_tools_names
+            ]
+            if not mcp_tools_for_llm:
+                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
+        else:
+            mcp_tools_for_llm = all_available_mcp_tools
+    else:
+        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
+
+    active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
+
+    response_dict = get_llm_response(
+        prompt=command,
+        npc=state.npc,
+        messages=state.messages,
+        tools=mcp_tools_for_llm,
+        auto_process_tool_calls=False,
+        stream=state.stream_output,
+        team=state.team
+    )
 
-    print("DEBUG: Processing stream response...")
-    collected_content, tool_calls = process_mcp_stream(stream_response, active_npc)
+    stream_response = response_dict.get('response')
+    messages = response_dict.get('messages', state.messages)
+
+    collected_content, tool_calls = process_mcp_stream(stream_response, active_npc)
 
-    print(f"\nDEBUG: Final collected_content: {collected_content}")
-    print(f"DEBUG: Final tool_calls: {tool_calls}")
-
-    state.messages = messages
-    if collected_content or tool_calls:
-        assistant_message = {"role": "assistant", "content": collected_content}
-        if tool_calls:
-            assistant_message["tool_calls"] = tool_calls
-        state.messages.append(assistant_message)
-
-    return state, {
-        "output": collected_content,
-        "tool_calls": tool_calls,
-        "messages": state.messages
-    }
+
+    state.messages = messages
+    if collected_content or tool_calls:
+        assistant_message = {"role": "assistant", "content": collected_content}
+        if tool_calls:
+            assistant_message["tool_calls"] = tool_calls
+        state.messages.append(assistant_message)
+
+    return state, {
+        "output": collected_content,
+        "tool_calls": tool_calls,
+        "messages": state.messages
+    }
 
 
 def _resolve_and_copy_mcp_server_path(
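
Note: the rewritten execute_command_corca keeps the prior tool-selection behavior: when the caller names specific MCP tools, the available tool definitions are filtered by function name before being passed to the LLM. The filter in isolation, with hypothetical tool definitions:

    all_tools = [
        {"function": {"name": "list_files"}},  # hypothetical MCP tool defs
        {"function": {"name": "run_query"}},
    ]
    selected = ["run_query"]

    # Keep only the definitions whose names were explicitly selected.
    tools_for_llm = [t for t in all_tools if t["function"]["name"] in selected]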
@@ -306,7 +300,7 @@ def _resolve_and_copy_mcp_server_path(
     current_path: Optional[str],
     team_ctx_mcp_servers: Optional[List[Dict[str, str]]],
     interactive: bool = False,
-    auto_copy_bypass: bool = False # <-- New parameter
+    auto_copy_bypass: bool = False
 ) -> Optional[str]:
     default_mcp_server_name = "mcp_server.py"
     npcsh_default_template_path = Path(__file__).parent / default_mcp_server_name
@@ -318,13 +312,12 @@ def _resolve_and_copy_mcp_server_path(
         return None
 
     if not destination_file.exists():
-        # Check auto_copy_bypass first
-        if auto_copy_bypass or not interactive: # If bypass is true OR not interactive, auto-copy
+        if auto_copy_bypass or not interactive:
             destination_dir.mkdir(parents=True, exist_ok=True)
             shutil.copy(npcsh_default_template_path, destination_file)
             print(colored(f"Automatically copied default {default_mcp_server_name} to {destination_file}", "green"))
             return destination_file
-        else: # Only ask if interactive and no bypass
+        else:
             choice = input(colored(f"No {default_mcp_server_name} found in {description}. Copy default template to {destination_file}? (y/N): ", "yellow")).strip().lower()
             if choice == 'y':
                 destination_dir.mkdir(parents=True, exist_ok=True)
@@ -417,15 +410,31 @@ def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None
     )
     state.command_history = command_history
 
-    # Read NPCSH_CORCA_AUTO_COPY_MCP_SERVER from environment for non-interactive calls
+    team_ctx_mcp_servers = None
+    if team and hasattr(team, 'team_path'):
+        team_ctx = _load_team_context(team.team_path)
+        team_ctx_mcp_servers = team_ctx.get('mcp_servers', [])
+
+        if npc and isinstance(npc, NPC):
+            if not npc.model and team_ctx.get('model'):
+                npc.model = team_ctx['model']
+            if not npc.provider and team_ctx.get('provider'):
+                npc.provider = team_ctx['provider']
+
+        if not state.chat_model and team_ctx.get('model'):
+            state.chat_model = team_ctx['model']
+        if not state.chat_provider and team_ctx.get('provider'):
+            state.chat_provider = team_ctx['provider']
+
    auto_copy_bypass = os.getenv("NPCSH_CORCA_AUTO_COPY_MCP_SERVER", "false").lower() == "true"
 
     resolved_server_path = _resolve_and_copy_mcp_server_path(
         explicit_path=mcp_server_path_from_request,
         current_path=current_path,
-        team_ctx_mcp_servers=team.team_ctx.get('mcp_servers', []) if team and hasattr(team, 'team_ctx') else None,
-        interactive=False, # Always non-interactive for Flask API calls
-        auto_copy_bypass=auto_copy_bypass # Pass env var setting
+        team_ctx_mcp_servers=team_ctx_mcp_servers,
+        interactive=False,
+        auto_copy_bypass=auto_copy_bypass,
+        force_global=False
     )
 
     state.mcp_client = None
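
Note: the new block gives team.ctx values the lowest precedence: a model or provider already set on the NPC or the shell state is kept, and the context only fills in missing values. The same pattern reduced to a sketch with hypothetical values:

    team_ctx = {"model": "llama3.2", "provider": "ollama"}  # hypothetical .ctx contents

    npc_model = None                            # unset on the NPC itself
    if not npc_model and team_ctx.get("model"):
        npc_model = team_ctx["model"]           # filled from team.ctx

    chat_model = "gpt-4o-mini"                  # already set on the state
    if not chat_model and team_ctx.get("model"):
        chat_model = team_ctx["model"]          # not reached; the state value wins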
@@ -513,9 +522,9 @@ def process_corca_result(
 
     tool_content = ""
     if hasattr(mcp_result, 'content') and mcp_result.content:
-        print(f"DEBUG: content type: {type(mcp_result.content)}")
+
         for i, content_item in enumerate(mcp_result.content):
-            print(f"DEBUG: content_item[{i}]: {content_item} (type: {type(content_item)})")
+
             if hasattr(content_item, 'text'):
                 tool_content += content_item.text
             else:
@@ -523,8 +532,7 @@ def process_corca_result(
     else:
         tool_content = str(mcp_result)
 
-    print(f"DEBUG: Extracted content length: {len(tool_content)}")
-    print(f"DEBUG: Extracted content preview: {tool_content[:200]}")
+
 
     tool_responses.append({
         "role": "tool",
@@ -703,11 +711,11 @@ def process_corca_result(
         characterization = summary.get('output')
 
         if characterization and result_state.team:
-            team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
-            ctx_data = {}
-            if os.path.exists(team_ctx_path):
-                with open(team_ctx_path, 'r') as f:
-                    ctx_data = yaml.safe_load(f) or {}
+            team_ctx_path = get_team_ctx_path(result_state.team.team_path)
+            if not team_ctx_path:
+                team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+
+            ctx_data = _load_team_context(result_state.team.team_path)
             current_context = ctx_data.get('context', '')
 
             prompt = f"""Based on this characterization: {characterization},
@@ -722,28 +730,39 @@ def process_corca_result(
     "suggestion": "Your sentence.
 }
 """
-            response = get_llm_response(prompt, npc=active_npc, format="json")
+            response = get_llm_response(prompt,
+                                        npc=active_npc,
+                                        format="json",
+                                        team=result_state.team)
             suggestion = response.get("response", {}).get("suggestion")
 
             if suggestion:
                 new_context = (current_context + " " + suggestion).strip()
                 print(colored(f"{result_state.npc.name} suggests updating team context:", "yellow"))
                 print(f" - OLD: {current_context}\n + NEW: {new_context}")
-                if input("Apply? [y/N]: ").strip().lower() == 'y':
+
+                choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
+
+                if choice == 'y':
                     ctx_data['context'] = new_context
                     with open(team_ctx_path, 'w') as f:
                         yaml.dump(ctx_data, f)
                     print(colored("Team context updated.", "green"))
+                elif choice == 'e':
+                    edited_context = input(f"Edit context [{new_context}]: ").strip()
+                    if edited_context:
+                        ctx_data['context'] = edited_context
+                    else:
+                        ctx_data['context'] = new_context
+                    with open(team_ctx_path, 'w') as f:
+                        yaml.dump(ctx_data, f)
+                    print(colored("Team context updated with edits.", "green"))
                 else:
-                    print("Suggestion declined.")
+                    print("Suggestion declined.")
     except Exception as e:
         import traceback
         print(colored(f"Could not generate team suggestions: {e}", "yellow"))
         traceback.print_exc()
-
-
-
-
 
 def _read_npcsh_global_env() -> Dict[str, str]:
     global_env_file = Path(".npcsh_global")
@@ -760,6 +779,20 @@ def _read_npcsh_global_env() -> Dict[str, str]:
             print(f"Warning: Could not read .npcsh_global: {e}")
     return env_vars
 
+def _load_team_context(team_path: str) -> Dict[str, Any]:
+    """Load team context from any .ctx file in the team directory"""
+    ctx_path = get_team_ctx_path(team_path)
+    if not ctx_path or not os.path.exists(ctx_path):
+        return {}
+
+    try:
+        with open(ctx_path, 'r') as f:
+            return yaml.safe_load(f) or {}
+    except Exception as e:
+        print(f"Warning: Could not load team context from {ctx_path}: {e}")
+        return {}
+
+
 def _write_to_npcsh_global(key: str, value: str) -> None:
     global_env_file = Path(".npcsh_global")
     env_vars = _read_npcsh_global_env()
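
Note: _load_team_context wraps get_team_ctx_path so callers always get a dict back, even when no .ctx file exists or parsing fails; the create_corca_state_and_mcp_client and process_corca_result hunks above rely on that. Usage collapses to a single call (hypothetical path):

    ctx_data = _load_team_context("/path/to/npc_team")  # {} on any failure
    mcp_servers = ctx_data.get("mcp_servers", [])
    context = ctx_data.get("context", "")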
@@ -772,6 +805,7 @@ def _write_to_npcsh_global(key: str, value: str) -> None:
     except Exception as e:
         print(f"Warning: Could not write to .npcsh_global: {e}")
 
+
 def _resolve_and_copy_mcp_server_path(
     explicit_path: Optional[str],
     current_path: Optional[str],
@@ -859,7 +893,6 @@ def _resolve_and_copy_mcp_server_path(
 
     cprint("No MCP server script found in any expected location.", "yellow")
     return None
-
 def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None, team=None,
                                       current_path=None, mcp_server_path_from_request: Optional[str] = None):
     from npcsh._state import ShellState
@@ -1018,7 +1051,7 @@ def main():
     elif os.path.exists(global_corca_path):
         default_npc = NPC(file=global_corca_path,
                           db_conn=command_history.engine)
-        print('Team Default: ', team.provider, team.model)
+
     if default_npc.model is None:
         if team.model is not None:
             default_npc.model = team.model
{npcsh-1.0.28 → npcsh-1.0.29}/npcsh/guac.py
@@ -1,4 +1,3 @@
-from chroptiks.plotting_utils import *
 from datetime import datetime
 import json
 import numpy as np
@@ -7,12 +6,24 @@ import pandas as pd
 import sys
 import argparse
 import importlib.metadata
-import matplotlib.pyplot as plt
+import matplotlib
+import platform
+import queue
+plot_queue = queue.Queue()
+
+if platform.system() == 'Darwin':
+    try:
+        matplotlib.use('TkAgg')
+    except ImportError:
+        matplotlib.use('Agg')
+else:
+    matplotlib.use('TkAgg')
+
+import matplotlib.pyplot as plt
+from chroptiks.plotting_utils import *
 
 import logging
-plt.ioff()
 import shlex
-import platform
 import yaml
 import re
 from pathlib import Path
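
Note: the import reordering matters because matplotlib.use() only takes effect reliably when it runs before matplotlib.pyplot is first imported, which is why the pyplot import and the chroptiks star-import (which presumably pulls in pyplot) now both come after the backend selection. The same pattern in isolation:

    import platform
    import matplotlib

    # Pick the backend before pyplot is imported anywhere.
    if platform.system() == 'Darwin':
        try:
            matplotlib.use('TkAgg')  # GUI backend; requires Tk to be available
        except ImportError:
            matplotlib.use('Agg')    # headless fallback
    else:
        matplotlib.use('TkAgg')

    import matplotlib.pyplot as plt  # now bound to the chosen backend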
@@ -30,7 +41,7 @@ from npcpy.memory.command_history import CommandHistory, start_new_conversation
 from npcpy.npc_compiler import Team, NPC
 from npcpy.llm_funcs import get_llm_response
 from npcpy.npc_sysenv import render_markdown,print_and_process_stream
-
+from npcpy.data.load import load_file_contents
 
 from npcsh._state import (
     ShellState,
@@ -40,7 +51,8 @@ from npcsh._state import (
     readline_safe_prompt,
     setup_shell,
     get_multiline_input,
-    orange
+    orange,
+    get_team_ctx_path,
 )
 import threading
 import time
@@ -97,9 +109,6 @@ def _clear_readline_buffer():
     return False
 
 def _file_drop_monitor(npc_team_dir: Path, state: ShellState, locals_dict: Dict[str, Any], poll_interval: float = 0.2):
-    """
-    Background thread: poll readline.get_line_buffer() and process file drops immediately.
-    """
     processed_bufs = set()
     stop_event = _guac_monitor_stop_event
     while stop_event is None or not stop_event.is_set():
@@ -113,48 +122,35 @@ def _file_drop_monitor(npc_team_dir: Path, state: ShellState, locals_dict: Dict[
                time.sleep(poll_interval)
                continue
 
-
            candidate = buf.strip()
-
            if (candidate.startswith("'") and candidate.endswith("'")) or (candidate.startswith('"') and candidate.endswith('"')):
                inner = candidate[1:-1]
            else:
                inner = candidate
 
-
            if " " not in inner and Path(inner.replace('~', str(Path.home()))).expanduser().exists() and Path(inner.replace('~', str(Path.home()))).expanduser().is_file():
-
                if buf in processed_bufs:
                    time.sleep(poll_interval)
                    continue
                processed_bufs.add(buf)
 
-
                try:
-
-
                    modified_input, processed_files = _handle_file_drop(buf, npc_team_dir)
                    if processed_files:
                        target_path = processed_files[0]
-
                        loading_code = _generate_file_analysis_code(inner, target_path)
-
-                        print("\n[guac] Detected file drop — processing automatically...")
-
-                        _state, exec_output = execute_python_code(loading_code, state, locals_dict)
-
-                        if exec_output:
-                            print(exec_output)
-
+
+                        plot_queue.put(('execute_code', loading_code, state, locals_dict))
+                        print("\n[guac] Detected file drop — queued for processing...")
                    _clear_readline_buffer()
                except Exception as e:
                    print(f"[guac][ERROR] file drop processing failed: {e}")
        except Exception:
-
            pass
        time.sleep(poll_interval)
 
 
+
 def is_python_code(text: str) -> bool:
     text = text.strip()
     if not text:
@@ -476,13 +472,43 @@ def ensure_global_guac_team():
         print(f"✅ Created global guac team.ctx at {ctx_path}")
 
     return team_dir
-def setup_guac_mode(config_dir=None, plots_dir=None, npc_team_dir=None,
-                    lang='python', default_mode_choice=None):
-    base_dir = Path.cwd()
+
+
+def setup_guac_mode(config_dir=None,
+                    plots_dir=None,
+                    npc_team_dir=None,
+                    lang='python',
+                    default_mode_choice=None):
+    base_dir = Path.cwd()
 
-
-    if GUAC_GLOBAL_FLAG_FILE.exists():
-        print("💡 Using global Guac team as default (previously set).")
+    local_npc_team = base_dir / "npc_team"
+    if local_npc_team.exists():
+        npc_team_dir = local_npc_team
+        workspace_dirs = _get_workspace_dirs(npc_team_dir)
+        _ensure_workspace_dirs(workspace_dirs)
+
+        team_ctx_path = npc_team_dir / "team.ctx"
+        existing_ctx = {}
+        if team_ctx_path.exists():
+            try:
+                with open(team_ctx_path, "r") as f:
+                    existing_ctx = yaml.safe_load(f) or {}
+            except Exception as e:
+                print(f"Warning: Could not read team.ctx: {e}")
+
+        package_root = existing_ctx.get("GUAC_PACKAGE_ROOT", str(base_dir))
+        package_name = existing_ctx.get("GUAC_PACKAGE_NAME", "project")
+        project_description = existing_ctx.get("GUAC_PROJECT_DESCRIPTION", "Local guac team")
+
+        return {
+            "language": lang, "package_root": Path(package_root), "plots_dir": plots_dir,
+            "npc_team_dir": npc_team_dir, "config_dir": config_dir, "default_mode": default_mode_choice or "agent",
+            "project_description": project_description, "package_name": package_name
+        }
+
+    global_flag_file = base_dir / ".npcsh_global"
+    if global_flag_file.exists() or os.environ.get("GUAC_USE_GLOBAL") == "1":
+        print("Using global Guac team")
         team_dir = ensure_global_guac_team()
         return {
             "language": lang, "package_root": team_dir, "plots_dir": plots_dir,
@@ -490,7 +516,6 @@ def setup_guac_mode(config_dir=None,
             "project_description": "Global guac team for analysis.", "package_name": "guac"
         }
 
-
     if npc_team_dir is None:
         npc_team_dir = base_dir / "npc_team"
     else:
@@ -518,8 +543,9 @@ def setup_guac_mode(config_dir=None,
             response = input("Enter package name (Enter for 'project'): ").strip()
             package_name = response if response else "project"
         except (KeyboardInterrupt, EOFError):
-            print("⚠️ Project setup interrupted. Falling back to global guac team...")
-            GUAC_GLOBAL_FLAG_FILE.touch()
+            print("Project setup interrupted. Falling back to global guac team...")
+            global_flag_file.touch()
+            os.environ["GUAC_USE_GLOBAL"] = "1"
             team_dir = ensure_global_guac_team()
             return {
                 "language": lang, "package_root": team_dir, "plots_dir": plots_dir,
@@ -573,6 +599,7 @@ setup(name="{package_name}", version="0.0.1", description="{desc}", packages=fin
         "npc_team_dir": npc_team_dir, "config_dir": config_dir, "default_mode": default_mode_val,
         "project_description": project_description, "package_name": package_name
     }
+
 def setup_npc_team(npc_team_dir, lang, is_subteam=False):
 
     guac_npc = {
@@ -694,50 +721,6 @@ class FileAnalysisState(Base):
     variable_names = Column(Text)
     timestamp = Column(DateTime, default=func.now())
 
-def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
-    """Capture plot state if significant change"""
-    if not plt.get_fignums():
-        return
-
-    engine = create_engine(f'sqlite:///{db_path}')
-    Base.metadata.create_all(engine)
-    Session = sessionmaker(bind=engine)
-    session = Session()
-
-
-    fig = plt.gcf()
-    axes = fig.get_axes()
-    data_points = sum(len(line.get_xdata()) for ax in axes for line in ax.get_lines())
-
-
-    plot_hash = hashlib.md5(f"{len(axes)}{data_points}".encode()).hexdigest()
-
-    last = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).first()
-    if last and last.plot_hash == plot_hash:
-        session.close()
-        return
-
-
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    workspace_dirs = _get_workspace_dirs(npc_team_dir)
-    plot_path = workspace_dirs["plots"] / f"state_{timestamp}.png"
-    plt.savefig(plot_path, dpi=150, bbox_inches='tight')
-
-
-    plot_state = PlotState(
-        session_id=session_id,
-        plot_hash=plot_hash,
-        plot_description=f"Plot with {len(axes)} axes, {data_points} points",
-        figure_path=str(plot_path),
-        data_summary=f"{data_points} data points",
-        change_significance=1.0 if not last else 0.5
-    )
-
-    session.add(plot_state)
-    session.commit()
-    session.close()
-    print(f"📊 Plot state captured -> {plot_path.name}")
-
 def _capture_file_state(session_id: str, db_path: str, file_path: str, analysis_code: str, locals_dict: Dict):
     """Capture file analysis state"""
     engine = create_engine(f'sqlite:///{db_path}')
@@ -986,24 +969,23 @@ def _handle_file_drop(input_text: str, npc_team_dir: Path) -> Tuple[str, List[st
 
     return modified_input, processed_files, file_paths
 
-
 def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
-    """Capture plot state if significant change"""
     if not plt.get_fignums():
         return
 
     try:
+        workspace_dirs = _get_workspace_dirs(npc_team_dir)
+        workspace_dirs["plots"].mkdir(parents=True, exist_ok=True)
+
         engine = create_engine(f'sqlite:///{db_path}')
         Base.metadata.create_all(engine)
         Session = sessionmaker(bind=engine)
         session = Session()
 
-
         fig = plt.gcf()
         axes = fig.get_axes()
         data_points = sum(len(line.get_xdata()) for ax in axes for line in ax.get_lines())
 
-
         plot_hash = hashlib.md5(f"{len(axes)}{data_points}".encode()).hexdigest()
 
         last = session.query(PlotState).filter(PlotState.session_id == session_id).order_by(PlotState.timestamp.desc()).first()
@@ -1011,13 +993,10 @@ def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
             session.close()
             return
 
-
         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-        workspace_dirs = _get_workspace_dirs(npc_team_dir)
         plot_path = workspace_dirs["plots"] / f"state_{timestamp}.png"
         plt.savefig(plot_path, dpi=150, bbox_inches='tight')
 
-
         plot_state = PlotState(
             session_id=session_id,
             plot_hash=plot_hash,
@@ -1026,15 +1005,14 @@
             data_summary=f"{data_points} data points",
             change_significance=1.0 if not last else 0.5
         )
-
+
         session.add(plot_state)
         session.commit()
         session.close()
-        print(f"📊 Plot state captured -> {plot_path.name}")
+        print(f"Plot state captured -> {plot_path.name}")
 
     except Exception as e:
         print(f"Error capturing plot state: {e}")
-
 def _capture_file_state(session_id: str, db_path: str, file_path: str, analysis_code: str, locals_dict: Dict):
     """Capture file analysis state"""
     try:
@@ -1165,7 +1143,7 @@ def _get_guac_agent_emoji(failures: int, max_fail: int = 3) -> str:
     return "🥑❓"
 
 
-GUAC_GLOBAL_FLAG_FILE = Path.home() / ".npcsh" / ".guac_use_global"
+
 
 
 def _run_agentic_mode(command: str,
@@ -1211,8 +1189,9 @@ def _run_agentic_mode(command: str,
 
    DO NOT SIMPLY COPY A PREVIOUS ATTEMPT.
 
-   Your goal is to generate Python code that BUILDS ON EXISTING VARIABLES to accomplish this task: {current_command}, with this next step planned: `{next_step} `
+   Your goal is to generate Python code that BUILDS ON EXISTING VARIABLES to respond to this task: USER TASK: "{current_command}", with this next step planned: `{next_step} `
 
+   If there is no relevant code to build on or the user is simply asking a question, generate new code as needed to respond to their questions.
 
    You will notice in the local envs that there are functions for reading, editing, and loading files.
    You should use these to your advantage as they will help you to clearly understand the user's system best.
@@ -1250,21 +1229,26 @@ def _run_agentic_mode(command: str,
 
    Do not over- complicate the code.
 
-   Do not include any '__name__'=='__main__' block.
+   DO NOT include any '__name__'=='__main__' block.
    """
 
    npc_model = state.npc.model if state.npc and state.npc.model else state.chat_model
    npc_provider = state.npc.provider if state.npc and state.npc.provider else state.chat_provider
 
+   print(state.npc.model)
+   print(state.chat_model)
    llm_response = get_llm_response(prompt,
                                    npc=state.npc,
                                    stream=True,
                                    messages=state.messages,
                                    thinking=False)
 
+   print(llm_response.get('response'))
+   print(npc_model, npc_provider)
+
    generated_code = print_and_process_stream(llm_response.get('response'),
                                              npc_model,
-                                             npc_provider
+                                             npc_provider,
                                              )
 
    state.messages.append({'role':'user', 'content':current_command })
@@ -1560,8 +1544,6 @@ def execute_guac_command(command: str, state: ShellState, locals_dict: Dict[str
 def run_guac_repl(state: ShellState, project_name: str, package_root: Path, package_name: str):
     from npcsh.routes import router
 
-
-
     npc_team_dir = Path.cwd() / "npc_team"
     workspace_dirs = _get_workspace_dirs(npc_team_dir)
     _ensure_workspace_dirs(workspace_dirs)
@@ -1593,15 +1575,8 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
 
     except Exception as e:
         print(f"Warning: Could not load package {package_name}: {e}", file=sys.stderr)
-
-    from npcpy.data.load import load_file_contents
 
     def read_file(file_path, max_lines=10000, encoding='utf-8'):
-        """
-        Read and print file contents up to max_lines.
-        Uses npcpy.data.load for specialized file types, falls back to text reading.
-        Returns the content as a string for further processing.
-        """
         path = Path(file_path).expanduser().resolve()
 
         if not path.exists():
@@ -1613,7 +1588,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
             return None
 
         try:
-
             file_ext = path.suffix.upper().lstrip('.')
             if file_ext in ['PDF', 'DOCX', 'PPTX', 'HTML', 'HTM', 'CSV', 'XLS', 'XLSX', 'JSON']:
                 chunks = load_file_contents(str(path), chunk_size=10000)
@@ -1635,7 +1609,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
                 print(f"End of {path.name}")
                 return content
 
-
             with open(path, 'r', encoding=encoding) as f:
                 lines = []
                 for i, line in enumerate(f, 1):
@@ -1674,19 +1647,10 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
         return None
 
     def edit_file(file_path, content=None, line_number=None, new_line=None, insert_at=None, append=False, backup=True):
-        """
-        Edit file contents in various ways:
-        - edit_file(path, content="new content") - replace entire file
-        - edit_file(path, line_number=5, new_line="new text") - replace specific line
-        - edit_file(path, insert_at=5, new_line="inserted text") - insert at line
-        - edit_file(path, append=True, content="appended") - append to file
-        """
         path = Path(file_path).expanduser().resolve()
 
-
         path.parent.mkdir(parents=True, exist_ok=True)
 
-
         if backup and path.exists():
             backup_path = path.with_suffix(path.suffix + '.backup')
             import shutil
@@ -1694,7 +1658,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
             print(f"Backup saved: {backup_path.name}")
 
         try:
-
             existing_lines = []
             if path.exists():
                 with open(path, 'r', encoding='utf-8') as f:
@@ -1754,10 +1717,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
         return False
 
     def load_file(file_path):
-        """
-        Simple wrapper around npcpy's load_file_contents for direct data loading.
-        Returns the loaded data in appropriate format.
-        """
         path = Path(file_path).expanduser().resolve()
 
         if not path.exists():
@@ -1782,7 +1741,6 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
         'load_file':load_file,
     }
 
-
     locals_dict.update(core_imports)
     locals_dict.update({f"guac_{k}": v for k, v in workspace_dirs.items()})
 
@@ -1801,10 +1759,20 @@
 
     while True:
         try:
+            try:
+                while True:
+                    operation, code, exec_state, exec_locals = plot_queue.get_nowait()
+                    if operation == 'execute_code':
+                        print("\n[guac] Processing queued file drop...")
+                        exec_state, exec_output = execute_python_code(code, exec_state, exec_locals)
+                        if exec_output:
+                            print(exec_output)
+            except queue.Empty:
+                pass
+
             state.current_path = os.getcwd()
 
             display_model = state.chat_model
-
             if isinstance(state.npc, NPC) and state.npc.model:
                 display_model = state.npc.model
 
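
Note: this drain loop is the consumer half of the plot_queue handoff introduced at the top of guac.py: the _file_drop_monitor background thread now only enqueues work, and the REPL's main thread executes it, keeping code execution (and any matplotlib GUI calls) on the main thread. The pattern in isolation, with a hypothetical work item:

    import queue

    work_queue = queue.Queue()

    # Producer (background thread): hand work off instead of running it.
    work_queue.put(('execute_code', "print('hello')"))

    # Consumer (main-thread loop): drain whatever is pending without blocking.
    while True:
        try:
            operation, code = work_queue.get_nowait()
        except queue.Empty:
            break
        if operation == 'execute_code':
            exec(code)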
@@ -1865,7 +1833,6 @@
 
 
 
-
 def enter_guac_mode(npc=None,
                     team=None,
                     config_dir=None,
@@ -1897,25 +1864,6 @@ def enter_guac_mode(npc=None,
 
     command_history, default_team, default_npc = setup_shell()
 
-
-    if npc is None and default_npc is None:
-
-        guac_npc_path = Path(npc_team_dir) / "guac.npc"
-        if guac_npc_path.exists():
-            npc = NPC(file=str(guac_npc_path), db_conn=command_history.engine)
-
-            team_ctx_path = Path(npc_team_dir) / "team.ctx"
-            if team_ctx_path.exists():
-                with open(team_ctx_path, "r") as f:
-                    team_ctx = yaml.safe_load(f) or {}
-            team = Team(team_path=str(npc_team_dir), forenpc=npc, jinxs={})
-            team.name = team_ctx.get("team_name", "guac_global_team")
-        else:
-            raise RuntimeError(f"No NPC loaded and {guac_npc_path} not found!")
-    elif default_npc and npc is None:
-
-        npc = default_npc
-
 
     state = ShellState(
         conversation_id=start_new_conversation(),
@@ -1930,6 +1878,45 @@ def enter_guac_mode(npc=None,
 
     state.command_history = command_history
 
+    if npc is None and default_npc is None:
+        guac_npc_path = Path(npc_team_dir) / "guac.npc"
+        if guac_npc_path.exists():
+            npc = NPC(file=str(guac_npc_path),
+                      db_conn=command_history.engine)
+            print(guac_npc_path, npc)
+
+            team_ctx_path = get_team_ctx_path(str(npc_team_dir))
+            team_ctx = {}
+            if team_ctx_path and Path(team_ctx_path).exists():
+                with open(team_ctx_path, "r") as f:
+                    team_ctx = yaml.safe_load(f) or {}
+            print(team_ctx, team_ctx_path)
+            team = Team(team_path=str(npc_team_dir),
+                        forenpc=npc,
+                        jinxs={})
+            team.name = team_ctx.get("team_name", "guac_global_team")
+            team.team_ctx = team_ctx
+            print(team)
+            if npc.model is None:
+                npc.model = team_ctx.get("model", state.chat_model)
+            if npc.provider is None:
+                npc.provider = team_ctx.get("provider", state.chat_provider)
+
+            for npc_name, npc_obj in team.npcs.items():
+                if not npc_obj.model:
+                    npc_obj.model = team_ctx.get("model", state.chat_model)
+                if not npc_obj.provider:
+                    npc_obj.provider = team_ctx.get("provider", state.chat_provider)
+        else:
+            raise RuntimeError(f"No NPC loaded and {guac_npc_path} not found!")
+    elif default_npc and npc is None:
+        npc = default_npc
+    state.npc = npc or default_npc
+    state.team = team or default_team
+
+    state.plots_dir = setup_result.get("plots_dir")
+    state.config_dir = setup_result.get("config_dir")
+
     try:
         readline.read_history_file(READLINE_HISTORY_FILE)
         readline.set_history_length(1000)
{npcsh-1.0.28 → npcsh-1.0.29}/npcsh.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.28
+Version: 1.0.29
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
{npcsh-1.0.28 → npcsh-1.0.29}/setup.py
@@ -78,7 +78,7 @@ extra_files = package_files("npcsh/npc_team/")
 
 setup(
     name="npcsh",
-    version="1.0.28",
+    version="1.0.29",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements, # Only install base requirements by default
     extras_require={