npcsh 1.0.28__tar.gz → 1.0.30__tar.gz

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (52)
  1. {npcsh-1.0.28 → npcsh-1.0.30}/PKG-INFO +1 -1
  2. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/_state.py +18 -15
  3. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/corca.py +130 -84
  4. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/guac.py +157 -141
  5. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npcsh.py +15 -22
  6. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh.egg-info/PKG-INFO +1 -1
  7. {npcsh-1.0.28 → npcsh-1.0.30}/setup.py +1 -1
  8. {npcsh-1.0.28 → npcsh-1.0.30}/LICENSE +0 -0
  9. {npcsh-1.0.28 → npcsh-1.0.30}/README.md +0 -0
  10. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/__init__.py +0 -0
  11. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/alicanto.py +0 -0
  12. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/mcp_helpers.py +0 -0
  13. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/mcp_server.py +0 -0
  14. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc.py +0 -0
  15. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/alicanto.npc +0 -0
  16. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/alicanto.png +0 -0
  17. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/corca.npc +0 -0
  18. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/corca.png +0 -0
  19. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/foreman.npc +0 -0
  20. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/frederic.npc +0 -0
  21. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/frederic4.png +0 -0
  22. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/guac.png +0 -0
  23. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/jinxs/bash_executer.jinx +0 -0
  24. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/jinxs/edit_file.jinx +0 -0
  25. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/jinxs/image_generation.jinx +0 -0
  26. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/jinxs/internet_search.jinx +0 -0
  27. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/jinxs/python_executor.jinx +0 -0
  28. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/jinxs/screen_cap.jinx +0 -0
  29. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/kadiefa.npc +0 -0
  30. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/kadiefa.png +0 -0
  31. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/npcsh.ctx +0 -0
  32. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/npcsh_sibiji.png +0 -0
  33. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/plonk.npc +0 -0
  34. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/plonk.png +0 -0
  35. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/plonkjr.npc +0 -0
  36. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/plonkjr.png +0 -0
  37. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/sibiji.npc +0 -0
  38. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/sibiji.png +0 -0
  39. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/spool.png +0 -0
  40. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/npc_team/yap.png +0 -0
  41. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/plonk.py +0 -0
  42. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/pti.py +0 -0
  43. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/routes.py +0 -0
  44. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/spool.py +0 -0
  45. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/wander.py +0 -0
  46. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh/yap.py +0 -0
  47. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh.egg-info/SOURCES.txt +0 -0
  48. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh.egg-info/dependency_links.txt +0 -0
  49. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh.egg-info/entry_points.txt +0 -0
  50. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh.egg-info/requires.txt +0 -0
  51. {npcsh-1.0.28 → npcsh-1.0.30}/npcsh.egg-info/top_level.txt +0 -0
  52. {npcsh-1.0.28 → npcsh-1.0.30}/setup.cfg +0 -0
{npcsh-1.0.28 → npcsh-1.0.30}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.28
+Version: 1.0.30
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
{npcsh-1.0.28 → npcsh-1.0.30}/npcsh/_state.py

@@ -436,6 +436,12 @@ def get_shell_config_file() -> str:
     return os.path.expanduser("~/.bashrc")


+def get_team_ctx_path(team_path: str) -> Optional[str]:
+    """Find the first .ctx file in the team directory"""
+    team_dir = Path(team_path)
+    ctx_files = list(team_dir.glob("*.ctx"))
+    return str(ctx_files[0]) if ctx_files else None
+

 def add_npcshrc_to_shell_config() -> None:
     """
@@ -2012,7 +2018,7 @@ def process_pipeline_command(
     stdin_input: Optional[str],
     state: ShellState,
     stream_final: bool,
-    review = True,
+    review = False,
     router = None,
     ) -> Tuple[ShellState, Any]:
     '''
@@ -2384,17 +2390,16 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:


     team_ctx = {}
-    for filename in os.listdir(team_dir):
-        if filename.endswith(".ctx"):
-            try:
-                with open(os.path.join(team_dir, filename), "r") as f:
-                    team_ctx = yaml.safe_load(f) or {}
-                break
-            except Exception as e:
-                print(f"Warning: Could not load context file {filename}: {e}")
-
+    team_ctx_path = get_team_ctx_path(team_dir)
+    if team_ctx_path:
+        try:
+            with open(team_ctx_path, "r") as f:
+                team_ctx = yaml.safe_load(f) or {}
+        except Exception as e:
+            print(f"Warning: Could not load context file {os.path.basename(team_ctx_path)}: {e}")
     forenpc_name = team_ctx.get("forenpc", default_forenpc_name)

+    print('forenpc_name:', forenpc_name)

     if team_ctx.get("use_global_jinxs", False):
         jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
@@ -2407,11 +2412,8 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     forenpc_obj = None
     forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")

+    print('forenpc_path:', forenpc_path)

-
-
-
-
     if os.path.exists(forenpc_path):
         forenpc_obj = NPC(file = forenpc_path,
                           jinxs=jinxs_list,
@@ -2558,7 +2560,8 @@ def process_result(
     characterization = summary.get('output')

     if characterization and result_state.team:
-        team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+
+        team_ctx_path = os.path.join(result_state.team.team_path, ".ctx")
         ctx_data = {}
         if os.path.exists(team_ctx_path):
             with open(team_ctx_path, 'r') as f:
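
Note the literal in the replacement line: os.path.join(team_path, ".ctx") points at a file literally named .ctx (a dotfile), not at a *.ctx suffix match. A quick check of the resulting paths with a hypothetical team path:

import os

team_path = "/projects/my_team/npc_team"  # hypothetical
os.path.join(team_path, "team.ctx")  # '/projects/my_team/npc_team/team.ctx'
os.path.join(team_path, ".ctx")      # '/projects/my_team/npc_team/.ctx'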
{npcsh-1.0.28 → npcsh-1.0.30}/npcsh/corca.py

@@ -33,6 +33,7 @@ from npcsh._state import (
     should_skip_kg_processing,
     NPCSH_CHAT_PROVIDER,
     NPCSH_CHAT_MODEL,
+    get_team_ctx_path
 )
 import yaml
 from pathlib import Path
@@ -179,23 +180,21 @@ def process_mcp_stream(stream_response, active_npc):
     tool_calls = []

     interrupted = False
-
     sys.stdout.write('\033[s')
     sys.stdout.flush()
+
     try:
         for chunk in stream_response:
             if hasattr(active_npc, 'provider') and active_npc.provider == "ollama" and 'gpt-oss' not in active_npc.model:
                 if hasattr(chunk, 'message') and hasattr(chunk.message, 'tool_calls') and chunk.message.tool_calls:
                     for tool_call in chunk.message.tool_calls:
-                        tool_call_data = {
-                            'id': getattr(tool_call, 'id', ''),
+                        tool_call_data = {'id': getattr(tool_call, 'id', ''),
                             'type': 'function',
                             'function': {
                                 'name': getattr(tool_call.function, 'name', '') if hasattr(tool_call, 'function') else '',
                                 'arguments': getattr(tool_call.function, 'arguments', {}) if hasattr(tool_call, 'function') else {}
                             }
                         }
-
                         if isinstance(tool_call_data['function']['arguments'], str):
                             try:
                                 tool_call_data['function']['arguments'] = json.loads(tool_call_data['function']['arguments'])
@@ -203,7 +202,6 @@ def process_mcp_stream(stream_response, active_npc):
                                 tool_call_data['function']['arguments'] = {'raw': tool_call_data['function']['arguments']}

                         tool_calls.append(tool_call_data)
-
             if hasattr(chunk, 'message') and hasattr(chunk.message, 'content') and chunk.message.content:
                 collected_content += chunk.message.content
                 print(chunk.message.content, end='', flush=True)
@@ -230,7 +228,6 @@ def process_mcp_stream(stream_response, active_npc):

                     if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
                         tool_calls[idx]['id'] = tool_call_delta.id
-
                     if hasattr(tool_call_delta, 'function'):
                         if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
                             tool_calls[idx]['function']['name'] = tool_call_delta.function.name
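
This branch assembles tool calls incrementally from streamed deltas keyed by index: the id and function name arrive once, while argument JSON arrives in fragments that must be concatenated before parsing. A self-contained sketch of the accumulation pattern (the delta dict shape mirrors OpenAI-style streaming chunks and is an assumption, not the npcpy API):

import json
from typing import Dict, List

def accumulate_delta(tool_calls: List[Dict], idx: int, delta: Dict) -> None:
    # Grow the list until the delta's index exists, then merge fields in.
    while len(tool_calls) <= idx:
        tool_calls.append({'id': '', 'type': 'function',
                           'function': {'name': '', 'arguments': ''}})
    if delta.get('id'):
        tool_calls[idx]['id'] = delta['id']
    fn = delta.get('function', {})
    if fn.get('name'):
        tool_calls[idx]['function']['name'] = fn['name']
    if fn.get('arguments'):
        # Fragments concatenate; json.loads only succeeds once complete.
        tool_calls[idx]['function']['arguments'] += fn['arguments']

tool_calls: List[Dict] = []
accumulate_delta(tool_calls, 0, {'id': 'call_1', 'function': {'name': 'search'}})
accumulate_delta(tool_calls, 0, {'function': {'arguments': '{"query": '}})
accumulate_delta(tool_calls, 0, {'function': {'arguments': '"npcsh"}'}})
json.loads(tool_calls[0]['function']['arguments'])  # {'query': 'npcsh'}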
@@ -242,71 +239,81 @@ def process_mcp_stream(stream_response, active_npc):
         print('\n⚠️ Stream interrupted by user')

     sys.stdout.write('\033[u')
-    sys.stdout.write('\033[J')
+    sys.stdout.write('\033[0J')
     sys.stdout.flush()

-    render_markdown(collected_content)
-    print('\n')
+    if collected_content:
+        render_markdown(collected_content)
+
     return collected_content, tool_calls

-def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
-    mcp_tools_for_llm = []
-
-    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
-        all_available_mcp_tools = state.mcp_client.available_tools_llm
-
-        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
-            mcp_tools_for_llm = [
-                tool_def for tool_def in all_available_mcp_tools
-                if tool_def['function']['name'] in selected_mcp_tools_names
-            ]
-            if not mcp_tools_for_llm:
-                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
-        else:
-            mcp_tools_for_llm = all_available_mcp_tools
-    else:
-        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
-
-    active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
-
-    response_dict = get_llm_response(
-        prompt=command,
-        npc=state.npc,
-        messages=state.messages,
-        tools=mcp_tools_for_llm,
-        auto_process_tool_calls=False,
-        stream=state.stream_output
-    )
-
-    stream_response = response_dict.get('response')
-    messages = response_dict.get('messages', state.messages)
-
-    print("DEBUG: Processing stream response...")
-    collected_content, tool_calls = process_mcp_stream(stream_response, active_npc)

-    print(f"\nDEBUG: Final collected_content: {collected_content}")
-    print(f"DEBUG: Final tool_calls: {tool_calls}")
-
-    state.messages = messages
-    if collected_content or tool_calls:
-        assistant_message = {"role": "assistant", "content": collected_content}
-        if tool_calls:
-            assistant_message["tool_calls"] = tool_calls
-        state.messages.append(assistant_message)
-
-    return state, {
-        "output": collected_content,
-        "tool_calls": tool_calls,
-        "messages": state.messages
-    }


+def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
+    mcp_tools_for_llm = []
+
+    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
+        all_available_mcp_tools = state.mcp_client.available_tools_llm
+
+        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
+            mcp_tools_for_llm = [
+                tool_def for tool_def in all_available_mcp_tools
+                if tool_def['function']['name'] in selected_mcp_tools_names
+            ]
+            if not mcp_tools_for_llm:
+                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
+        else:
+            mcp_tools_for_llm = all_available_mcp_tools
+    else:
+        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
+
+    active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
+
+    if len(state.messages) > 20:
+        compressed_state = active_npc.compress_planning_state({
+            "goal": "ongoing session",
+            "facts": [],
+            "successes": [],
+            "mistakes": [],
+            "todos": [],
+            "constraints": []
+        })
+        state.messages = [{"role": "system", "content": f"Session context: {compressed_state}"}]
+
+    response_dict = get_llm_response(
+        prompt=command,
+        npc=state.npc,
+        messages=state.messages,
+        tools=mcp_tools_for_llm,
+        auto_process_tool_calls=False,
+        stream=state.stream_output,
+        team=state.team
+    )
+
+    stream_response = response_dict.get('response')
+    messages = response_dict.get('messages', state.messages)
+
+    collected_content, tool_calls = process_mcp_stream(stream_response, active_npc)
+
+    state.messages = messages
+    if collected_content or tool_calls:
+        assistant_message = {"role": "assistant", "content": collected_content}
+        if tool_calls:
+            assistant_message["tool_calls"] = tool_calls
+        state.messages.append(assistant_message)
+
+    return state, {
+        "output": collected_content,
+        "tool_calls": tool_calls,
+        "messages": state.messages
+    }
 def _resolve_and_copy_mcp_server_path(
     explicit_path: Optional[str],
     current_path: Optional[str],
     team_ctx_mcp_servers: Optional[List[Dict[str, str]]],
     interactive: bool = False,
-    auto_copy_bypass: bool = False # <-- New parameter
+    auto_copy_bypass: bool = False
 ) -> Optional[str]:
     default_mcp_server_name = "mcp_server.py"
     npcsh_default_template_path = Path(__file__).parent / default_mcp_server_name
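
Besides the switch from \033[J to the explicit \033[0J erase sequence, the big change in this hunk is a context-window guard in the rewritten execute_command_corca: once the history exceeds 20 messages, it is collapsed into a single system message built from the NPC's compress_planning_state output. The guard in isolation looks like this (summarize stands in for compress_planning_state, whose call shape we take from the diff, not from npcpy docs):

from typing import Callable, Dict, List

def compact_history(messages: List[Dict],
                    summarize: Callable[[], str],
                    limit: int = 20) -> List[Dict]:
    # Short histories pass through; long ones are replaced wholesale by
    # one summary system message, as in the diff above.
    if len(messages) <= limit:
        return messages
    return [{"role": "system", "content": f"Session context: {summarize()}"}]

The trade-off is that the turn-by-turn history is discarded once compressed: anything the summary misses is no longer visible to the model.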
@@ -318,13 +325,12 @@ def _resolve_and_copy_mcp_server_path(
         return None

     if not destination_file.exists():
-        # Check auto_copy_bypass first
-        if auto_copy_bypass or not interactive: # If bypass is true OR not interactive, auto-copy
+        if auto_copy_bypass or not interactive:
             destination_dir.mkdir(parents=True, exist_ok=True)
             shutil.copy(npcsh_default_template_path, destination_file)
             print(colored(f"Automatically copied default {default_mcp_server_name} to {destination_file}", "green"))
             return destination_file
-        else: # Only ask if interactive and no bypass
+        else:
             choice = input(colored(f"No {default_mcp_server_name} found in {description}. Copy default template to {destination_file}? (y/N): ", "yellow")).strip().lower()
             if choice == 'y':
                 destination_dir.mkdir(parents=True, exist_ok=True)
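
The cleaned-up conditionals reduce to a two-flag gate: copy the default server automatically when the bypass flag is set or when the call is non-interactive, and only prompt when a human can answer. As a one-liner (the helper name is ours, not the package's):

def should_autocopy(auto_copy_bypass: bool, interactive: bool) -> bool:
    # Prompt only when interactive and not bypassed; otherwise copy.
    return auto_copy_bypass or not interactive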
@@ -417,15 +423,31 @@ def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None
     )
     state.command_history = command_history

-    # Read NPCSH_CORCA_AUTO_COPY_MCP_SERVER from environment for non-interactive calls
+    team_ctx_mcp_servers = None
+    if team and hasattr(team, 'team_path'):
+        team_ctx = _load_team_context(team.team_path)
+        team_ctx_mcp_servers = team_ctx.get('mcp_servers', [])
+
+        if npc and isinstance(npc, NPC):
+            if not npc.model and team_ctx.get('model'):
+                npc.model = team_ctx['model']
+            if not npc.provider and team_ctx.get('provider'):
+                npc.provider = team_ctx['provider']
+
+        if not state.chat_model and team_ctx.get('model'):
+            state.chat_model = team_ctx['model']
+        if not state.chat_provider and team_ctx.get('provider'):
+            state.chat_provider = team_ctx['provider']
+
     auto_copy_bypass = os.getenv("NPCSH_CORCA_AUTO_COPY_MCP_SERVER", "false").lower() == "true"

     resolved_server_path = _resolve_and_copy_mcp_server_path(
         explicit_path=mcp_server_path_from_request,
         current_path=current_path,
-        team_ctx_mcp_servers=team.team_ctx.get('mcp_servers', []) if team and hasattr(team, 'team_ctx') else None,
-        interactive=False, # Always non-interactive for Flask API calls
-        auto_copy_bypass=auto_copy_bypass # Pass env var setting
+        team_ctx_mcp_servers=team_ctx_mcp_servers,
+        interactive=False,
+        auto_copy_bypass=auto_copy_bypass,
+        force_global=False
     )

     state.mcp_client = None
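
The new block resolves model and provider with an "explicit setting wins, team context is the fallback" rule, applied to both the NPC object and the shell state. Reduced to its core (the ctx contents here are hypothetical):

def fill_default(current, team_ctx, key):
    # Keep an explicit value; otherwise fall back to the team context.
    return current or team_ctx.get(key)

team_ctx = {"model": "llama3.2", "provider": "ollama"}  # hypothetical .ctx content
fill_default(None, team_ctx, "model")      # -> 'llama3.2'
fill_default("gpt-4o", team_ctx, "model")  # -> 'gpt-4o'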
@@ -513,9 +535,9 @@ def process_corca_result(

     tool_content = ""
     if hasattr(mcp_result, 'content') and mcp_result.content:
-        print(f"DEBUG: content type: {type(mcp_result.content)}")
+
         for i, content_item in enumerate(mcp_result.content):
-            print(f"DEBUG: content_item[{i}]: {content_item} (type: {type(content_item)})")
+
             if hasattr(content_item, 'text'):
                 tool_content += content_item.text
             else:
@@ -523,8 +545,7 @@ def process_corca_result(
     else:
         tool_content = str(mcp_result)

-    print(f"DEBUG: Extracted content length: {len(tool_content)}")
-    print(f"DEBUG: Extracted content preview: {tool_content[:200]}")
+

     tool_responses.append({
         "role": "tool",
@@ -703,11 +724,11 @@ def process_corca_result(
     characterization = summary.get('output')

     if characterization and result_state.team:
-        team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
-        ctx_data = {}
-        if os.path.exists(team_ctx_path):
-            with open(team_ctx_path, 'r') as f:
-                ctx_data = yaml.safe_load(f) or {}
+        team_ctx_path = get_team_ctx_path(result_state.team.team_path)
+        if not team_ctx_path:
+            team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+
+        ctx_data = _load_team_context(result_state.team.team_path)
         current_context = ctx_data.get('context', '')

         prompt = f"""Based on this characterization: {characterization},
@@ -722,28 +743,39 @@ def process_corca_result(
             "suggestion": "Your sentence.
         }
         """
-        response = get_llm_response(prompt, npc=active_npc, format="json")
+        response = get_llm_response(prompt,
+                                    npc=active_npc,
+                                    format="json",
+                                    team=result_state.team)
         suggestion = response.get("response", {}).get("suggestion")

         if suggestion:
             new_context = (current_context + " " + suggestion).strip()
             print(colored(f"{result_state.npc.name} suggests updating team context:", "yellow"))
             print(f" - OLD: {current_context}\n + NEW: {new_context}")
-            if input("Apply? [y/N]: ").strip().lower() == 'y':
+
+            choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
+
+            if choice == 'y':
                 ctx_data['context'] = new_context
                 with open(team_ctx_path, 'w') as f:
                     yaml.dump(ctx_data, f)
                 print(colored("Team context updated.", "green"))
+            elif choice == 'e':
+                edited_context = input(f"Edit context [{new_context}]: ").strip()
+                if edited_context:
+                    ctx_data['context'] = edited_context
+                else:
+                    ctx_data['context'] = new_context
+                with open(team_ctx_path, 'w') as f:
+                    yaml.dump(ctx_data, f)
+                print(colored("Team context updated with edits.", "green"))
             else:
-               print("Suggestion declined.")
+                print("Suggestion declined.")
     except Exception as e:
         import traceback
         print(colored(f"Could not generate team suggestions: {e}", "yellow"))
         traceback.print_exc()
-
-
-
-

 def _read_npcsh_global_env() -> Dict[str, str]:
     global_env_file = Path(".npcsh_global")
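
The Apply? prompt now has three outcomes instead of two: accept the suggested context, edit it inline (an empty edit keeps the suggestion), or decline. Factored out of the diff as a standalone flow (the function name is ours, not the package's):

from typing import Optional

def confirm_or_edit(proposed: str) -> Optional[str]:
    # y -> accept as-is; e -> inline edit (empty input keeps the proposal);
    # anything else -> decline (None).
    choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
    if choice == 'y':
        return proposed
    if choice == 'e':
        edited = input(f"Edit context [{proposed}]: ").strip()
        return edited or proposed
    return None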
@@ -760,6 +792,20 @@ def _read_npcsh_global_env() -> Dict[str, str]:
             print(f"Warning: Could not read .npcsh_global: {e}")
     return env_vars

+def _load_team_context(team_path: str) -> Dict[str, Any]:
+    """Load team context from any .ctx file in the team directory"""
+    ctx_path = get_team_ctx_path(team_path)
+    if not ctx_path or not os.path.exists(ctx_path):
+        return {}
+
+    try:
+        with open(ctx_path, 'r') as f:
+            return yaml.safe_load(f) or {}
+    except Exception as e:
+        print(f"Warning: Could not load team context from {ctx_path}: {e}")
+        return {}
+
+
 def _write_to_npcsh_global(key: str, value: str) -> None:
     global_env_file = Path(".npcsh_global")
     env_vars = _read_npcsh_global_env()
@@ -772,6 +818,7 @@ def _write_to_npcsh_global(key: str, value: str) -> None:
     except Exception as e:
         print(f"Warning: Could not write to .npcsh_global: {e}")

+
 def _resolve_and_copy_mcp_server_path(
     explicit_path: Optional[str],
     current_path: Optional[str],
@@ -859,7 +906,6 @@ def _resolve_and_copy_mcp_server_path(

     cprint("No MCP server script found in any expected location.", "yellow")
     return None
-
 def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None, team=None,
                                       current_path=None, mcp_server_path_from_request: Optional[str] = None):
     from npcsh._state import ShellState
@@ -1018,7 +1064,7 @@ def main():
     elif os.path.exists(global_corca_path):
         default_npc = NPC(file=global_corca_path,
                           db_conn=command_history.engine)
-        print('Team Default: ', team.provider, team.model)
+
         if default_npc.model is None:
             if team.model is not None:
                 default_npc.model = team.model