npcsh 1.0.10.tar.gz → 1.0.12.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.10
+Version: 1.0.12
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
@@ -1044,6 +1044,7 @@ class ShellState:
     current_path: str = field(default_factory=os.getcwd)
     stream_output: bool = NPCSH_STREAM_OUTPUT
     attachments: Optional[List[Any]] = None
+    turn_count: int = 0
     def get_model_for_command(self, model_type: str = "chat"):
         if model_type == "chat":
             return self.chat_model, self.chat_provider
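The new turn_count field backs the periodic team-context check added to process_result later in this diff, which fires on every tenth turn. A minimal sketch of that modulo-10 trigger, using a stand-in dataclass rather than the real ShellState:

from dataclasses import dataclass

@dataclass
class StateSketch:  # illustrative stand-in for ShellState
    turn_count: int = 0

state = StateSketch()
for _ in range(25):  # simulate 25 REPL turns
    state.turn_count += 1
    if state.turn_count % 10 == 0:
        print(f"turn {state.turn_count}: check for team improvements")
# fires at turns 10 and 20, matching the trigger in process_result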
@@ -25,7 +25,9 @@ try:
 except ImportError:
     chromadb = None
 import shutil
-
+import json
+import sqlite3
+import copy
 import yaml
 
 # Local Application Imports
@@ -53,10 +55,22 @@ from npcpy.data.image import capture_screenshot
 from npcpy.memory.command_history import (
     CommandHistory,
     save_conversation_message,
+    load_kg_from_db,
+    save_kg_to_db,
 )
 from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory
-from npcpy.llm_funcs import check_llm_command, get_llm_response, execute_llm_command
+from npcpy.llm_funcs import (
+    check_llm_command,
+    get_llm_response,
+    execute_llm_command,
+    breathe
+)
+from npcpy.memory.knowledge_graph import (
+    kg_initial,
+    kg_evolve_incremental
+)
 from npcpy.gen.embeddings import get_embeddings
+
 try:
     import readline
 except:
@@ -561,6 +575,15 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
     """Executes slash commands using the router or checking NPC/Team jinxs."""
     command_parts = command.split()
     command_name = command_parts[0].lstrip('/')
+
+    if command_name in ['n', 'npc']:
+        npc_to_switch_to = command_parts[1] if len(command_parts) > 1 else None
+        if npc_to_switch_to and state.team and npc_to_switch_to in state.team.npcs:
+            state.npc = state.team.npcs[npc_to_switch_to]
+            return state, f"Switched to NPC: {npc_to_switch_to}"
+        else:
+            available_npcs = list(state.team.npcs.keys()) if state.team else []
+            return state, colored(f"NPC '{npc_to_switch_to}' not found. Available NPCs: {', '.join(available_npcs)}", "red")
     handler = router.get_route(command_name)
     #print(handler)
     if handler:
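Handling /npc and /n here, before the router lookup, lets the shell mutate state.npc directly; the router-based switch_npc_handler removed later in this diff could only return a SWITCH_NPC: sentinel, since route handlers never see the shell state. A runnable sketch of the same dispatch shape, with toy stand-ins (the classes and NPC names are illustrative, not npcsh's real API):

# Toy stand-ins; the real ShellState and Team come from npcsh/npcpy.
class ToyTeam:
    def __init__(self, npcs):
        self.npcs = npcs

class ToyState:
    def __init__(self, team):
        self.team = team
        self.npc = None

def switch_npc(command, state):
    parts = command.split()
    name = parts[1] if len(parts) > 1 else None
    if name and state.team and name in state.team.npcs:
        state.npc = state.team.npcs[name]  # mutate state in place
        return f"Switched to NPC: {name}"
    available = list(state.team.npcs.keys()) if state.team else []
    return f"NPC '{name}' not found. Available NPCs: {', '.join(available)}"

state = ToyState(ToyTeam({"sibiji": object(), "helper": object()}))
print(switch_npc("/npc sibiji", state))  # Switched to NPC: sibiji
print(switch_npc("/npc ghost", state))   # not found; lists sibiji, helper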
@@ -646,8 +669,14 @@ def process_pipeline_command(
     if not cmd_to_process:
         return state, stdin_input
 
-    exec_model = model_override or state.chat_model
-    exec_provider = provider_override or state.chat_provider
+    # --- Corrected Model Resolution ---
+    # Priority: 1. Inline Override, 2. NPC Model, 3. Global Model
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+
+    exec_model = model_override or npc_model or state.chat_model
+    exec_provider = provider_override or npc_provider or state.chat_provider
+    # --- End of Correction ---
 
     if cmd_to_process.startswith("/"):
         return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
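The resolution chain leans on Python's or returning its first truthy operand, so a None override or an NPC with no model of its own falls through to the next tier. A small self-contained sketch of the behavior (the model names are placeholders):

def resolve(override, npc_value, global_value):
    # First truthy value wins: inline override > NPC setting > global default.
    return override or npc_value or global_value

assert resolve("inline-model", "npc-model", "global-model") == "inline-model"  # override wins
assert resolve(None, "npc-model", "global-model") == "npc-model"               # NPC model wins
assert resolve(None, None, "global-model") == "global-model"                   # global fallback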
@@ -671,8 +700,8 @@ def process_pipeline_command(
         fixer_prompt = f"The command '{cmd_to_process}' failed with the error: '{result}'. Provide the correct command."
         response = execute_llm_command(
             fixer_prompt,
-            model=exec_model,
-            provider=exec_provider,
+            model=exec_model, # Uses corrected model
+            provider=exec_provider, # Uses corrected provider
             npc=state.npc,
             stream=stream_final,
             messages=state.messages
@@ -688,8 +717,8 @@ def process_pipeline_command(
 
     llm_result = check_llm_command(
         full_llm_cmd,
-        model=exec_model,
-        provider=exec_provider,
+        model=exec_model, # Uses corrected model
+        provider=exec_provider, # Uses corrected provider
         api_url=state.api_url,
         api_key=state.api_key,
         npc=state.npc,
@@ -705,7 +734,8 @@ def process_pipeline_command(
         output = llm_result.get("output")
         return state, output
     else:
-        return state, llm_result
+        return state, llm_result
+
 def check_mode_switch(command:str , state: ShellState):
     if command in ['/cmd', '/agent', '/chat', '/ride']:
         state.current_mode = command[1:]
@@ -728,6 +758,10 @@ def execute_command(
     stdin_for_next = None
     final_output = None
     current_state = state
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+    active_model = npc_model or state.chat_model
+    active_provider = npc_provider or state.chat_provider
 
     if state.current_mode == 'agent':
         for i, cmd_segment in enumerate(commands):
@@ -812,8 +846,8 @@ def execute_command(
         # Otherwise, treat as chat (LLM)
         response = get_llm_response(
             command,
-            model=state.chat_model,
-            provider=state.chat_provider,
+            model=active_model,
+            provider=active_provider,
             npc=state.npc,
             stream=state.stream_output,
             messages=state.messages
@@ -824,8 +858,8 @@ def execute_command(
     elif state.current_mode == 'cmd':
 
         response = execute_llm_command(command,
-                                       model = state.chat_model,
-                                       provider = state.chat_provider,
+                                       model=active_model,
+                                       provider=active_provider,
                                        npc = state.npc,
                                        stream = state.stream_output,
                                        messages = state.messages)
@@ -1247,7 +1281,7 @@ def print_welcome_message():
     print(
         """
 Welcome to \033[1;94mnpc\033[0m\033[1;38;5;202msh\033[0m!
-\033[1;94m \033[0m\033[1;38;5;202m \\\\
+\033[1;94m \033[0m\033[1;38;5;202m _ \\\\
 \033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
 \033[1;94m| '_ \\ | ' \\ / __|\033[0m\033[1;38;5;202m / __/ | |_ _| \\\\
 \033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m \\_ \\ | | | | //
@@ -1270,6 +1304,7 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     os.makedirs(os.path.dirname(db_path), exist_ok=True)
     command_history = CommandHistory(db_path)
 
+
     try:
         history_file = setup_readline()
         atexit.register(save_readline_history)
@@ -1315,7 +1350,7 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     if use_jinxs == "c":
         global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
         if os.path.exists(global_jinxs_dir):
-            shutil.copytree(global_jinxs_dir, os.path.join(team_dir, "jinxs"), dirs_exist_ok=True)
+            shutil.copytree(global_jinxs_dir, team_dir, dirs_exist_ok=True)
     else:
         team_ctx_data["use_global_jinxs"] = True
@@ -1343,7 +1378,7 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
             print(f"Warning: Could not load context file {filename}: {e}")
 
     forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
-    print(f"Using forenpc: {forenpc_name}")
+    #render_markdown(f"- Using forenpc: {forenpc_name}")
 
     if team_ctx.get("use_global_jinxs", False):
         jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
@@ -1355,110 +1390,244 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
 
     forenpc_obj = None
     forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
-    #print('forenpc_path', forenpc_path)
-    #print('jinx list', jinxs_list)
-    if os.path.exists(forenpc_path):
 
-        forenpc_obj = NPC(file = forenpc_path, jinxs=jinxs_list)
+
+    #render_markdown('- Loaded team context' + json.dumps(team_ctx, indent=2))
+
+
+
+    if os.path.exists(forenpc_path):
+        forenpc_obj = NPC(file=forenpc_path,
+                          jinxs=jinxs_list)
+        if forenpc_obj.model is None:
+            forenpc_obj.model = team_ctx.get("model", initial_state.chat_model)
+        if forenpc_obj.provider is None:
+            forenpc_obj.provider = team_ctx.get('provider', initial_state.chat_provider)
+
     else:
         print(f"Warning: Forenpc file '{forenpc_name}.npc' not found in {team_dir}.")
 
-    team = Team(team_path=team_dir, forenpc=forenpc_obj, jinxs=jinxs_dict)
+    team = Team(team_path=team_dir,
+                forenpc=forenpc_obj,
+                jinxs=jinxs_dict)
+
+    for npc_name, npc_obj in team.npcs.items():
+        if not npc_obj.model:
+            npc_obj.model = initial_state.chat_model
+        if not npc_obj.provider:
+            npc_obj.provider = initial_state.chat_provider
+
+    # Also apply to the forenpc specifically
+    if team.forenpc and isinstance(team.forenpc, NPC):
+        if not team.forenpc.model:
+            team.forenpc.model = initial_state.chat_model
+        if not team.forenpc.provider:
+            team.forenpc.provider = initial_state.chat_provider
+    team_name_from_ctx = team_ctx.get("name")
+    if team_name_from_ctx:
+        team.name = team_name_from_ctx
+    elif team_dir and os.path.basename(team_dir) != 'npc_team':
+        team.name = os.path.basename(team_dir)
+    else:
+        team.name = "global_team"  # fallback for ~/.npcsh/npc_team
+
     return command_history, team, forenpc_obj
 
+# In your main npcsh.py file
+
 def process_result(
     user_input: str,
     result_state: ShellState,
     output: Any,
-    command_history: CommandHistory):
+    command_history: CommandHistory
+):
+    # --- Part 1: Save Conversation & Determine Output ---
+
+    # Define team and NPC names early for consistent logging
+    team_name = result_state.team.name if result_state.team else "__none__"
+    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
+
+    # Determine the actual NPC object to use for this turn's operations
+    active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
+        name="default",
+        model=result_state.chat_model,
+        provider=result_state.chat_provider
+    )
 
-    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else result_state.npc
-    team_name = result_state.team.name if isinstance(result_state.team, Team) else result_state.team
     save_conversation_message(
         command_history,
         result_state.conversation_id,
         "user",
         user_input,
         wd=result_state.current_path,
-        model=result_state.chat_model, # Log primary chat model? Or specific used one?
-        provider=result_state.chat_provider,
+        model=active_npc.model,
+        provider=active_npc.provider,
         npc=npc_name,
         team=team_name,
         attachments=result_state.attachments,
     )
-
-    result_state.attachments = None # Clear attachments after logging user message
+    result_state.attachments = None
 
     final_output_str = None
-    if user_input == '/help':
-        render_markdown(output)
-    elif result_state.stream_output:
-
-        if isinstance(output, dict):
-            output_gen = output.get('output')
-            model = output.get('model', result_state.chat_model)
-            provider = output.get('provider', result_state.chat_provider)
-        else:
-            output_gen = output
-            model = result_state.chat_model
-            provider = result_state.chat_provider
-        print('processing stream output with markdown...')
-
-        final_output_str = print_and_process_stream_with_markdown(output_gen,
-                                                                  model,
-                                                                  provider)
-
-    elif output is not None:
-        final_output_str = str(output)
-        render_markdown('str not none: ', final_output_str)
-    if final_output_str and result_state.messages and result_state.messages[-1].get("role") != "assistant":
-        result_state.messages.append({"role": "assistant", "content": final_output_str})
-
-    #print(result_state.messages)
-
-
+    output_content = output.get('output') if isinstance(output, dict) else output
+
+    if result_state.stream_output and isgenerator(output_content):
+        final_output_str = print_and_process_stream_with_markdown(output_content, active_npc.model, active_npc.provider)
+    elif output_content is not None:
+        final_output_str = str(output_content)
+        render_markdown(final_output_str)
 
+    # --- Part 2: Process Output and Evolve Knowledge ---
     if final_output_str:
+        # Append assistant message to state for context continuity
+        if result_state.messages and (not result_state.messages or result_state.messages[-1].get("role") != "assistant"):
+            result_state.messages.append({"role": "assistant", "content": final_output_str})
+
+        # Save assistant message to the database
         save_conversation_message(
             command_history,
             result_state.conversation_id,
             "assistant",
             final_output_str,
             wd=result_state.current_path,
-            model=result_state.chat_model,
-            provider=result_state.chat_provider,
+            model=active_npc.model,
+            provider=active_npc.provider,
             npc=npc_name,
             team=team_name,
         )
 
+        # --- Hierarchical Knowledge Graph Evolution ---
+        conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
+        conn = command_history.conn
+
+        try:
+
+            npc_kg = load_kg_from_db(conn, team_name, npc_name, "__npc_global__")
+            evolved_npc_kg, _ = kg_evolve_incremental(
+                existing_kg=npc_kg, new_content_text=conversation_turn_text,
+                model=active_npc.model, provider=active_npc.provider
+            )
+            save_kg_to_db(conn, evolved_npc_kg, team_name, npc_name, result_state.current_path)
+        except Exception as e:
+            print(colored(f"Error during real-time KG evolution: {e}", "red"))
+
+        # --- Part 3: Periodic Team Context Suggestions ---
+        result_state.turn_count += 1
+        if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
+            print(colored("\nChecking for potential team improvements...", "cyan"))
+            try:
+                summary = breathe(messages=result_state.messages[-20:], npc=active_npc)
+                key_facts = summary.get('output', {}).get('facts', [])
+
+                if key_facts and result_state.team:
+                    team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+                    ctx_data = {}
+                    if os.path.exists(team_ctx_path):
+                        with open(team_ctx_path, 'r') as f:
+                            ctx_data = yaml.safe_load(f) or {}
+                    current_context = ctx_data.get('context', '')
+
+                    prompt = f"""Based on these key topics: {key_facts},
+                    suggest changes (additions, deletions, edits) to the team's context.
+                    Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.
+
+                    Current Context: "{current_context}".
+
+                    Respond with JSON: {{"suggestion": "Your sentence."}}"""
+                    response = get_llm_response(prompt, npc=active_npc, format="json")
+                    suggestion = response.get("response", {}).get("suggestion")
+
+                    if suggestion:
+                        new_context = (current_context + " " + suggestion).strip()
+                        print(colored("AI suggests updating team context:", "yellow"))
+                        print(f" - OLD: {current_context}\n + NEW: {new_context}")
+                        if input("Apply? [y/N]: ").strip().lower() == 'y':
+                            ctx_data['context'] = new_context
+                            with open(team_ctx_path, 'w') as f:
+                                yaml.dump(ctx_data, f)
+                            print(colored("Team context updated.", "green"))
+                        else:
+                            print("Suggestion declined.")
+            except Exception as e:
+                import traceback
+                print(colored(f"Could not generate team suggestions: {e}", "yellow"))
+                traceback.print_exc()
+
+
+
 def run_repl(command_history: CommandHistory, initial_state: ShellState):
     state = initial_state
     print_welcome_message()
-    print(f'Using {state.current_mode} mode. Use /agent, /cmd, /chat, or /ride to switch to other modes')
-    print(f'To switch to a different NPC, type /<npc_name>')
+
+
+    render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd, /chat, or /ride to switch to other modes')
+    render_markdown(f'- To switch to a different NPC, type /npc <npc_name> or /n <npc_name> to switch to that NPC.')
+    render_markdown('\n- Here are the current NPCs available in your team: ' + ', '.join([npc_name for npc_name in state.team.npcs.keys()]))
+
+
     is_windows = platform.system().lower().startswith("win")
     try:
         completer = make_completer(state)
         readline.set_completer(completer)
     except:
         pass
+    session_scopes = set()
+
 
-    def exit_shell(state):
+    def exit_shell(current_state: ShellState):
+        """
+        On exit, iterates through all active scopes from the session and
+        creates/updates the specific knowledge graph for each one.
+        """
         print("\nGoodbye!")
-        # update the team ctx file to update the context and the preferences
+        print(colored("Processing and archiving all session knowledge...", "cyan"))
+
+        conn = command_history.conn
+        integrator_npc = NPC(name="integrator", model=current_state.chat_model, provider=current_state.chat_provider)
 
+        # Process each unique scope that was active during the session
+        for team_name, npc_name, path in session_scopes:
+            try:
+                print(f" -> Archiving knowledge for: T='{team_name}', N='{npc_name}', P='{path}'")
+
+                # Get all messages for the current conversation that happened in this specific path
+                convo_id = current_state.conversation_id
+                all_messages = command_history.get_conversations_by_id(convo_id)
+
+                scope_messages = [
+                    m for m in all_messages
+                    if m.get('directory_path') == path and m.get('team') == team_name and m.get('npc') == npc_name
+                ]
+
+                full_text = "\n".join([f"{m['role']}: {m['content']}" for m in scope_messages if m.get('content')])
 
+                if not full_text.strip():
+                    print(" ...No content for this scope, skipping.")
+                    continue
 
+                # Load the existing KG for this specific, real scope
+                current_kg = load_kg_from_db(conn, team_name, npc_name, path)
+
+                # Evolve it with the full text from the session for this scope
+                evolved_kg, _ = kg_evolve_incremental(
+                    existing_kg=current_kg,
+                    new_content_text=full_text,
+                    model=integrator_npc.model,
+                    provider=integrator_npc.provider
+                )
+
+                # Save the updated KG back to the database under the same exact scope
+                save_kg_to_db(conn, evolved_kg, team_name, npc_name, path)
 
+            except Exception as e:
+                import traceback
+                print(colored(f"Failed to process KG for scope ({team_name}, {npc_name}, {path}): {e}", "red"))
+                traceback.print_exc()
 
-        #print('beginning knowledge consolidation')
-        #try:
-        #    breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
-        #    print(breathe_result)
-        #except KeyboardInterrupt:
-        #    print("Knowledge consolidation interrupted. Exiting immediately.")
         sys.exit(0)
 
+
+
     while True:
         try:
             try:
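Both the per-turn evolution in process_result and the exit-time archive in exit_shell follow the same scope-keyed contract: load a knowledge graph by its (team, npc, path) key, evolve it with new text, and save it back under the identical key. A minimal sketch of that load-evolve-save cycle against an in-memory SQLite table; the kg_store schema and the stubbed evolve step are assumptions for illustration, not npcpy's actual schema or its LLM-backed kg_evolve_incremental:

import json
import sqlite3

def load_kg(conn, team, npc, path):
    # Scope key is the (team, npc, path) triple; unknown scopes start empty.
    row = conn.execute(
        "SELECT kg_json FROM kg_store WHERE team=? AND npc=? AND path=?",
        (team, npc, path),
    ).fetchone()
    return json.loads(row[0]) if row else {"facts": []}

def save_kg(conn, kg, team, npc, path):
    conn.execute(
        "INSERT OR REPLACE INTO kg_store (team, npc, path, kg_json) VALUES (?, ?, ?, ?)",
        (team, npc, path, json.dumps(kg)),
    )
    conn.commit()

def evolve(kg, new_text):
    # Stub: the real kg_evolve_incremental extracts facts with an LLM.
    kg["facts"].append(new_text)
    return kg

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE kg_store (team TEXT, npc TEXT, path TEXT, kg_json TEXT, "
    "PRIMARY KEY (team, npc, path))"
)

scope = ("global_team", "sibiji", "/home/user/project")  # placeholder scope
kg = load_kg(conn, *scope)
kg = evolve(kg, "User: hi\nAssistant: hello")
save_kg(conn, kg, *scope)
print(load_kg(conn, *scope))  # {'facts': ['User: hi\nAssistant: hello']}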
@@ -1467,17 +1636,21 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
             except:
                 pass
 
+            display_model = state.chat_model
+            if isinstance(state.npc, NPC) and state.npc.model:
+                display_model = state.npc.model
+
             if is_windows:
                 cwd_part = os.path.basename(state.current_path)
                 if isinstance(state.npc, NPC):
-                    prompt_end = f":{state.npc.name}> "
+                    prompt_end = f":{state.npc.name}:{display_model}> "
                 else:
                     prompt_end = ":npcsh> "
                 prompt = f"{cwd_part}{prompt_end}"
             else:
                 cwd_colored = colored(os.path.basename(state.current_path), "blue")
                 if isinstance(state.npc, NPC):
-                    prompt_end = f":🤖{orange(state.npc.name)}:{state.chat_model}> "
+                    prompt_end = f":🤖{orange(state.npc.name)}:{display_model}> "
                 else:
                     prompt_end = f":🤖{colored('npc', 'blue', attrs=['bold'])}{colored('sh', 'yellow')}> "
                 prompt = readline_safe_prompt(f"{cwd_colored}{prompt_end}")
@@ -1497,11 +1670,13 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
                 continue
             else:
                 exit_shell(state)
+            team_name = state.team.name if state.team else "__none__"
+            npc_name = state.npc.name if isinstance(state.npc, NPC) else "__none__"
+            session_scopes.add((team_name, npc_name, state.current_path))
 
-            state.current_path = os.getcwd()
             state, output = execute_command(user_input, state)
             process_result(user_input, state, output, command_history)
-
+
         except KeyboardInterrupt:
             if is_windows:
                 # On Windows, Ctrl+C cancels the current input line, show prompt again
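session_scopes is a plain set of (team, npc, path) tuples, so revisiting the same scope on later turns deduplicates automatically and exit_shell archives each scope exactly once. A quick illustration (names are placeholders):

session_scopes = set()

# Three turns, two of them in the same scope.
session_scopes.add(("global_team", "sibiji", "/home/user"))
session_scopes.add(("global_team", "sibiji", "/home/user"))   # duplicate, ignored
session_scopes.add(("global_team", "helper", "/home/user/proj"))

print(len(session_scopes))  # 2 -> exit_shell archives two scopes, not three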
@@ -1513,8 +1688,6 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
         except EOFError:
             # Ctrl+D: exit shell cleanly
             exit_shell(state)
-
-
 def main() -> None:
     parser = argparse.ArgumentParser(description="npcsh - An NPC-powered shell.")
     parser.add_argument(
@@ -258,27 +258,7 @@ def init_handler(command: str, **kwargs):
         traceback.print_exc()
         output = f"Error initializing project: {e}"
     return {"output": output, "messages": messages}
-# Add these route handlers after the existing imports (around line 50):
-@router.route("n")
-@router.route("npc")
-def switch_npc_handler(command: str, **kwargs) -> dict:
-    """Switch to a different NPC"""
-    team = kwargs.get('team')
-    parts = command.split()
-
-    if len(parts) < 2:
-        if team:
-            available_npcs = list(team.npcs.keys())
-            return {"output": f"Available NPCs: {', '.join(available_npcs)}"}
-        return {"output": "No team loaded or no NPC specified"}
-
-    npc_name = parts[1]
-    if team and npc_name in team.npcs:
-        # We can't directly modify the state here, so return a special signal
-        return {"output": f"SWITCH_NPC:{npc_name}"}
-    else:
-        available_npcs = list(team.npcs.keys()) if team else []
-        return {"output": f"NPC '{npc_name}' not found. Available: {', '.join(available_npcs)}"}
+
 
 
 
@@ -289,8 +269,10 @@ def ots_handler(command: str, **kwargs):
     npc = safe_get(kwargs, 'npc')
     vision_model = safe_get(kwargs, 'model', NPCSH_VISION_MODEL)
     vision_provider = safe_get(kwargs, 'provider', NPCSH_VISION_PROVIDER)
-    if vision_model == NPCSH_CHAT_MODEL: vision_model = NPCSH_VISION_MODEL
-    if vision_provider == NPCSH_CHAT_PROVIDER: vision_provider = NPCSH_VISION_PROVIDER
+    if vision_model == NPCSH_CHAT_MODEL:
+        vision_model = NPCSH_VISION_MODEL
+    if vision_provider == NPCSH_CHAT_PROVIDER:
+        vision_provider = NPCSH_VISION_PROVIDER
 
     messages = safe_get(kwargs, 'messages', [])
     stream = safe_get(kwargs, 'stream', NPCSH_STREAM_OUTPUT)
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.10
+Version: 1.0.12
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
@@ -84,14 +84,14 @@ extra_files = package_files("npcpy/npc_team/")
 extra_files = package_files("npcpy/npc_team/")
 
 setup(
     name="npcsh",
-    version="1.0.10",
+    version="1.0.12",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements,  # Only install base requirements by default
     extras_require={
         "lite": api_requirements,  # Just API integrations
         "local": local_requirements,  # Local AI/ML features
         "yap": voice_requirements,  # Voice/Audio features
-        "mcp": mcp_requirements, # MCP integration
+        "mcp": mcp_requirements,  # MCP integration
         "all": api_requirements + local_requirements + voice_requirements + mcp_requirements,  # Everything
     },
     entry_points={
(15 additional files without changes)