npcsh 1.0.11__py3-none-any.whl → 1.0.12__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
npcsh/_state.py CHANGED
@@ -1044,6 +1044,7 @@ class ShellState:
     current_path: str = field(default_factory=os.getcwd)
     stream_output: bool = NPCSH_STREAM_OUTPUT
     attachments: Optional[List[Any]] = None
+    turn_count: int = 0
     def get_model_for_command(self, model_type: str = "chat"):
         if model_type == "chat":
             return self.chat_model, self.chat_provider
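The new turn_count field is a plain dataclass default; npcsh.py (below) increments it once per processed result and uses it to gate periodic maintenance every tenth turn. A minimal, self-contained sketch of that counter-gating pattern (the ShellState name and the every-10-turns cadence come from this diff; everything else is illustrative):

    from dataclasses import dataclass

    @dataclass
    class ShellState:
        # Illustrative subset of the real ShellState
        turn_count: int = 0

    def process_result(state: ShellState) -> None:
        state.turn_count += 1
        # Every tenth turn, run the periodic maintenance hook
        if state.turn_count % 10 == 0:
            print(f"turn {state.turn_count}: checking for team improvements...")

    state = ShellState()
    for _ in range(20):
        process_result(state)  # the hook fires at turns 10 and 20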
npcsh/npcsh.py CHANGED
@@ -25,7 +25,9 @@ try:
 except ImportError:
     chromadb = None
 import shutil
-
+import json
+import sqlite3
+import copy
 import yaml

 # Local Application Imports
@@ -53,10 +55,22 @@ from npcpy.data.image import capture_screenshot
 from npcpy.memory.command_history import (
     CommandHistory,
     save_conversation_message,
+    load_kg_from_db,
+    save_kg_to_db,
 )
 from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory
-from npcpy.llm_funcs import check_llm_command, get_llm_response, execute_llm_command
+from npcpy.llm_funcs import (
+    check_llm_command,
+    get_llm_response,
+    execute_llm_command,
+    breathe
+)
+from npcpy.memory.knowledge_graph import (
+    kg_initial,
+    kg_evolve_incremental
+)
 from npcpy.gen.embeddings import get_embeddings
+
 try:
     import readline
 except:
@@ -655,8 +669,14 @@ def process_pipeline_command(
     if not cmd_to_process:
         return state, stdin_input

-    exec_model = model_override or state.chat_model
-    exec_provider = provider_override or state.chat_provider
+    # --- Corrected Model Resolution ---
+    # Priority: 1. Inline Override, 2. NPC Model, 3. Global Model
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+
+    exec_model = model_override or npc_model or state.chat_model
+    exec_provider = provider_override or npc_provider or state.chat_provider
+    # --- End of Correction ---

     if cmd_to_process.startswith("/"):
         return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
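The resolution above is a plain or-chain: the first truthy value among the inline override, the NPC's own model, and the global default wins, which is why unset NPC models are normalized to None first. A minimal sketch of the same pattern (the function and model names here are illustrative, not npcsh API):

    from typing import Optional

    def resolve(override: Optional[str], npc_value: Optional[str], global_value: str) -> str:
        # First truthy value wins; None and "" both fall through
        return override or npc_value or global_value

    assert resolve("gpt-4o-mini", "llama3.2", "gemma2") == "gpt-4o-mini"  # inline override
    assert resolve(None, "llama3.2", "gemma2") == "llama3.2"              # NPC model
    assert resolve(None, None, "gemma2") == "gemma2"                      # global fallback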
@@ -680,8 +700,8 @@
         fixer_prompt = f"The command '{cmd_to_process}' failed with the error: '{result}'. Provide the correct command."
         response = execute_llm_command(
             fixer_prompt,
-            model=exec_model,
-            provider=exec_provider,
+            model=exec_model,  # Uses corrected model
+            provider=exec_provider,  # Uses corrected provider
             npc=state.npc,
             stream=stream_final,
             messages=state.messages
@@ -697,8 +717,8 @@

     llm_result = check_llm_command(
         full_llm_cmd,
-        model=exec_model,
-        provider=exec_provider,
+        model=exec_model,  # Uses corrected model
+        provider=exec_provider,  # Uses corrected provider
         api_url=state.api_url,
         api_key=state.api_key,
         npc=state.npc,
@@ -714,7 +734,8 @@
         output = llm_result.get("output")
         return state, output
     else:
-        return state, llm_result
+        return state, llm_result
+
 def check_mode_switch(command:str , state: ShellState):
     if command in ['/cmd', '/agent', '/chat', '/ride']:
         state.current_mode = command[1:]
@@ -737,6 +758,10 @@ def execute_command(
     stdin_for_next = None
     final_output = None
     current_state = state
+    npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+    npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+    active_model = npc_model or state.chat_model
+    active_provider = npc_provider or state.chat_provider

     if state.current_mode == 'agent':
         for i, cmd_segment in enumerate(commands):
@@ -821,8 +846,8 @@
         # Otherwise, treat as chat (LLM)
         response = get_llm_response(
             command,
-            model=state.chat_model,
-            provider=state.chat_provider,
+            model=active_model,
+            provider=active_provider,
             npc=state.npc,
             stream=state.stream_output,
             messages=state.messages
@@ -833,8 +858,8 @@
     elif state.current_mode == 'cmd':

         response = execute_llm_command(command,
-                                       model = state.chat_model,
-                                       provider = state.chat_provider,
+                                       model=active_model,
+                                       provider=active_provider,
                                        npc = state.npc,
                                        stream = state.stream_output,
                                        messages = state.messages)
@@ -1279,6 +1304,7 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     os.makedirs(os.path.dirname(db_path), exist_ok=True)
     command_history = CommandHistory(db_path)

+
     try:
         history_file = setup_readline()
         atexit.register(save_readline_history)
@@ -1324,7 +1350,7 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
         if use_jinxs == "c":
             global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
             if os.path.exists(global_jinxs_dir):
-                shutil.copytree(global_jinxs_dir, os.path.join(team_dir, "jinxs"), dirs_exist_ok=True)
+                shutil.copytree(global_jinxs_dir, team_dir, dirs_exist_ok=True)
         else:
             team_ctx_data["use_global_jinxs"] = True

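Note the destination change above: the global jinxs tree is now copied directly into team_dir rather than into a team_dir/jinxs subdirectory. With dirs_exist_ok=True (available since Python 3.8), shutil.copytree merges into an existing directory instead of raising FileExistsError. A small standalone illustration (the /tmp paths and file contents are made up):

    import shutil
    from pathlib import Path

    src = Path("/tmp/demo_global_jinxs")
    dst = Path("/tmp/demo_team_dir")
    src.mkdir(parents=True, exist_ok=True)
    (src / "example.jinx").write_text("name: example\n")
    dst.mkdir(parents=True, exist_ok=True)  # destination already exists

    # Merges src into the existing dst instead of raising FileExistsError
    shutil.copytree(src, dst, dirs_exist_ok=True)
    print([p.name for p in dst.iterdir()])  # ['example.jinx']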
@@ -1352,7 +1378,7 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
                 print(f"Warning: Could not load context file {filename}: {e}")

     forenpc_name = team_ctx.get("forenpc", default_forenpc_name)
-    print(f"Using forenpc: {forenpc_name}")
+    #render_markdown(f"- Using forenpc: {forenpc_name}")

     if team_ctx.get("use_global_jinxs", False):
         jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
@@ -1364,83 +1390,171 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:

     forenpc_obj = None
     forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
-    #print('forenpc_path', forenpc_path)
-    #print('jinx list', jinxs_list)
-    if os.path.exists(forenpc_path):

-        forenpc_obj = NPC(file = forenpc_path, jinxs=jinxs_list)
+
+    #render_markdown('- Loaded team context'+ json.dumps(team_ctx, indent=2))
+
+
+
+    if os.path.exists(forenpc_path):
+        forenpc_obj = NPC(file = forenpc_path,
+                          jinxs=jinxs_list)
+        if forenpc_obj.model is None:
+            forenpc_obj.model = team_ctx.get("model", initial_state.chat_model)
+        if forenpc_obj.provider is None:
+            forenpc_obj.provider = team_ctx.get('provider', initial_state.chat_provider)
+
     else:
         print(f"Warning: Forenpc file '{forenpc_name}.npc' not found in {team_dir}.")

-    team = Team(team_path=team_dir, forenpc=forenpc_obj, jinxs=jinxs_dict)
+    team = Team(team_path=team_dir,
+                forenpc=forenpc_obj,
+                jinxs=jinxs_dict)
+
+    for npc_name, npc_obj in team.npcs.items():
+        if not npc_obj.model:
+            npc_obj.model = initial_state.chat_model
+        if not npc_obj.provider:
+            npc_obj.provider = initial_state.chat_provider
+
+    # Also apply to the forenpc specifically
+    if team.forenpc and isinstance(team.forenpc, NPC):
+        if not team.forenpc.model:
+            team.forenpc.model = initial_state.chat_model
+        if not team.forenpc.provider:
+            team.forenpc.provider = initial_state.chat_provider
+    team_name_from_ctx = team_ctx.get("name")
+    if team_name_from_ctx:
+        team.name = team_name_from_ctx
+    elif team_dir and os.path.basename(team_dir) != 'npc_team':
+        team.name = os.path.basename(team_dir)
+    else:
+        team.name = "global_team"  # fallback for ~/.npcsh/npc_team
+
     return command_history, team, forenpc_obj

+# In your main npcsh.py file
+
 def process_result(
     user_input: str,
     result_state: ShellState,
     output: Any,
-    command_history: CommandHistory):
+    command_history: CommandHistory
+    ):
+    # --- Part 1: Save Conversation & Determine Output ---
+
+    # Define team and NPC names early for consistent logging
+    team_name = result_state.team.name if result_state.team else "__none__"
+    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
+
+    # Determine the actual NPC object to use for this turn's operations
+    active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
+        name="default",
+        model=result_state.chat_model,
+        provider=result_state.chat_provider
+    )

-    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else result_state.npc
-    team_name = result_state.team.name if isinstance(result_state.team, Team) else result_state.team
     save_conversation_message(
         command_history,
         result_state.conversation_id,
         "user",
         user_input,
         wd=result_state.current_path,
-        model=result_state.chat_model, # Log primary chat model? Or specific used one?
-        provider=result_state.chat_provider,
+        model=active_npc.model,
+        provider=active_npc.provider,
         npc=npc_name,
         team=team_name,
         attachments=result_state.attachments,
     )
-
-    result_state.attachments = None # Clear attachments after logging user message
+    result_state.attachments = None

     final_output_str = None
-    if user_input =='/help':
-        render_markdown(output)
-
-    elif result_state.stream_output:
-
-        if isinstance(output, dict):
-            output_gen = output.get('output')
-            model = output.get('model', result_state.chat_model)
-            provider = output.get('provider', result_state.chat_provider)
-        else:
-            output_gen = output
-            model = result_state.chat_model
-            provider = result_state.chat_provider
-        print('processing stream output with markdown...')
-
-        final_output_str = print_and_process_stream_with_markdown(output_gen,
-                                                                  model,
-                                                                  provider)
-
-    elif output is not None:
-        final_output_str = str(output)
-        render_markdown( final_output_str)
-    if final_output_str and result_state.messages and result_state.messages[-1].get("role") != "assistant":
-        result_state.messages.append({"role": "assistant", "content": final_output_str})
-
-    #print(result_state.messages)
-
-
+    output_content = output.get('output') if isinstance(output, dict) else output
+
+    if result_state.stream_output and isgenerator(output_content):
+        final_output_str = print_and_process_stream_with_markdown(output_content, active_npc.model, active_npc.provider)
+    elif output_content is not None:
+        final_output_str = str(output_content)
+        render_markdown(final_output_str)

+    # --- Part 2: Process Output and Evolve Knowledge ---
     if final_output_str:
+        # Append assistant message to state for context continuity
+        if result_state.messages and (not result_state.messages or result_state.messages[-1].get("role") != "assistant"):
+            result_state.messages.append({"role": "assistant", "content": final_output_str})
+
+        # Save assistant message to the database
         save_conversation_message(
             command_history,
             result_state.conversation_id,
             "assistant",
             final_output_str,
             wd=result_state.current_path,
-            model=result_state.chat_model,
-            provider=result_state.chat_provider,
+            model=active_npc.model,
+            provider=active_npc.provider,
             npc=npc_name,
             team=team_name,
         )

+        # --- Hierarchical Knowledge Graph Evolution ---
+        conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
+        conn = command_history.conn
+
+        try:
+            npc_kg = load_kg_from_db(conn, team_name, npc_name, "__npc_global__")
+            evolved_npc_kg, _ = kg_evolve_incremental(
+                existing_kg=npc_kg, new_content_text=conversation_turn_text,
+                model=active_npc.model, provider=active_npc.provider
+            )
+            save_kg_to_db(conn, evolved_npc_kg, team_name, npc_name, result_state.current_path)
+        except Exception as e:
+            print(colored(f"Error during real-time KG evolution: {e}", "red"))
+
+        # --- Part 3: Periodic Team Context Suggestions ---
+        result_state.turn_count += 1
+        if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
+            print(colored("\nChecking for potential team improvements...", "cyan"))
+            try:
+                summary = breathe(messages=result_state.messages[-20:], npc=active_npc)
+                key_facts = summary.get('output', {}).get('facts', [])
+
+                if key_facts and result_state.team:
+                    team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+                    ctx_data = {}
+                    if os.path.exists(team_ctx_path):
+                        with open(team_ctx_path, 'r') as f:
+                            ctx_data = yaml.safe_load(f) or {}
+                    current_context = ctx_data.get('context', '')
+
+                    prompt = f"""Based on these key topics: {key_facts},
+                    suggest changes (additions, deletions, edits) to the team's context.
+                    Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.
+
+                    Current Context: "{current_context}".
+
+                    Respond with JSON: {{"suggestion": "Your sentence."}}"""
+                    response = get_llm_response(prompt, npc=active_npc, format="json")
+                    suggestion = response.get("response", {}).get("suggestion")
+
+                    if suggestion:
+                        new_context = (current_context + " " + suggestion).strip()
+                        print(colored("AI suggests updating team context:", "yellow"))
+                        print(f" - OLD: {current_context}\n + NEW: {new_context}")
+                        if input("Apply? [y/N]: ").strip().lower() == 'y':
+                            ctx_data['context'] = new_context
+                            with open(team_ctx_path, 'w') as f:
+                                yaml.dump(ctx_data, f)
+                            print(colored("Team context updated.", "green"))
+                        else:
+                            print("Suggestion declined.")
+            except Exception as e:
+                import traceback
+                print(colored(f"Could not generate team suggestions: {e}", "yellow"))
+                traceback.print_exc()
+
+
 def run_repl(command_history: CommandHistory, initial_state: ShellState):
     state = initial_state
     print_welcome_message()
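Part 2 of process_result applies an incremental knowledge-graph update on every completed turn: load the KG for the current scope, evolve it with the new turn text, and write it back. A minimal sketch of that load/evolve/save round-trip; the call shapes are taken from this diff, but the sqlite schema and the stand-in bodies below are illustrative, not the real npcpy implementations:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("""CREATE TABLE kg_store (
        team TEXT, npc TEXT, path TEXT, kg TEXT,
        PRIMARY KEY (team, npc, path))""")

    def load_kg_from_db(conn, team, npc, path):
        # Stand-in: return the stored KG text, or an empty graph
        row = conn.execute(
            "SELECT kg FROM kg_store WHERE team=? AND npc=? AND path=?",
            (team, npc, path)).fetchone()
        return row[0] if row else ""

    def kg_evolve_incremental(existing_kg, new_content_text, model, provider):
        # Stand-in: the real helper calls an LLM to extract facts;
        # here we simply append the new turn and return (kg, extras)
        return existing_kg + "\n" + new_content_text, None

    def save_kg_to_db(conn, kg, team, npc, path):
        conn.execute(
            "INSERT OR REPLACE INTO kg_store (team, npc, path, kg) VALUES (?, ?, ?, ?)",
            (team, npc, path, kg))

    # The per-turn round-trip used in process_result:
    scope = ("demo_team", "demo_npc", "__npc_global__")
    kg = load_kg_from_db(conn, *scope)
    kg, _ = kg_evolve_incremental(kg, "User: hi\nAssistant: hello", "demo-model", "demo-provider")
    save_kg_to_db(conn, kg, *scope)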
@@ -1457,23 +1571,63 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
             readline.set_completer(completer)
         except:
             pass
+    session_scopes = set()

-    def exit_shell(state):
+
+    def exit_shell(current_state: ShellState):
+        """
+        On exit, iterates through all active scopes from the session and
+        creates/updates the specific knowledge graph for each one.
+        """
         print("\nGoodbye!")
-        # update the team ctx file to update the context and the preferences
+        print(colored("Processing and archiving all session knowledge...", "cyan"))
+
+        conn = command_history.conn
+        integrator_npc = NPC(name="integrator", model=current_state.chat_model, provider=current_state.chat_provider)

+        # Process each unique scope that was active during the session
+        for team_name, npc_name, path in session_scopes:
+            try:
+                print(f" -> Archiving knowledge for: T='{team_name}', N='{npc_name}', P='{path}'")
+
+                # Get all messages for the current conversation that happened in this specific path
+                convo_id = current_state.conversation_id
+                all_messages = command_history.get_conversations_by_id(convo_id)
+
+                scope_messages = [
+                    m for m in all_messages
+                    if m.get('directory_path') == path and m.get('team') == team_name and m.get('npc') == npc_name
+                ]
+
+                full_text = "\n".join([f"{m['role']}: {m['content']}" for m in scope_messages if m.get('content')])

+                if not full_text.strip():
+                    print("    ...No content for this scope, skipping.")
+                    continue

+                # Load the existing KG for this specific, real scope
+                current_kg = load_kg_from_db(conn, team_name, npc_name, path)
+
+                # Evolve it with the full text from the session for this scope
+                evolved_kg, _ = kg_evolve_incremental(
+                    existing_kg=current_kg,
+                    new_content_text=full_text,
+                    model=integrator_npc.model,
+                    provider=integrator_npc.provider
+                )
+
+                # Save the updated KG back to the database under the same exact scope
+                save_kg_to_db(conn, evolved_kg, team_name, npc_name, path)

+            except Exception as e:
+                import traceback
+                print(colored(f"Failed to process KG for scope ({team_name}, {npc_name}, {path}): {e}", "red"))
+                traceback.print_exc()

-        #print('beginning knowledge consolidation')
-        #try:
-        #    breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
-        #    print(breathe_result)
-        #except KeyboardInterrupt:
-        #    print("Knowledge consolidation interrupted. Exiting immediately.")
         sys.exit(0)

+
+
     while True:
         try:
             try:
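exit_shell drives its archiving loop off session_scopes, a set of (team, npc, path) tuples that the REPL loop fills in (see the hunk further below); because it is a set, a scope that saw many turns is still archived exactly once. A tiny illustration of the deduplication (the tuple values are made up):

    session_scopes = set()

    # Each REPL turn records its scope; duplicates collapse automatically
    session_scopes.add(("demo_team", "demo_npc", "/home/user/project"))
    session_scopes.add(("demo_team", "demo_npc", "/home/user/project"))
    session_scopes.add(("demo_team", "demo_npc", "/tmp"))

    for team_name, npc_name, path in sorted(session_scopes):
        print(f"Archiving knowledge for: T='{team_name}', N='{npc_name}', P='{path}'")
    # Two scopes are archived, not three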
@@ -1482,17 +1636,21 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
         except:
             pass

+        display_model = state.chat_model
+        if isinstance(state.npc, NPC) and state.npc.model:
+            display_model = state.npc.model
+
         if is_windows:
             cwd_part = os.path.basename(state.current_path)
             if isinstance(state.npc, NPC):
-                prompt_end = f":{state.npc.name}> "
+                prompt_end = f":{state.npc.name}:{display_model}> "
             else:
                 prompt_end = ":npcsh> "
             prompt = f"{cwd_part}{prompt_end}"
         else:
             cwd_colored = colored(os.path.basename(state.current_path), "blue")
             if isinstance(state.npc, NPC):
-                prompt_end = f":🤖{orange(state.npc.name)}:{state.chat_model}> "
+                prompt_end = f":🤖{orange(state.npc.name)}:{display_model}> "
             else:
                 prompt_end = f":🤖{colored('npc', 'blue', attrs=['bold'])}{colored('sh', 'yellow')}> "
             prompt = readline_safe_prompt(f"{cwd_colored}{prompt_end}")
@@ -1512,11 +1670,13 @@
                 continue
             else:
                 exit_shell(state)
+            team_name = state.team.name if state.team else "__none__"
+            npc_name = state.npc.name if isinstance(state.npc, NPC) else "__none__"
+            session_scopes.add((team_name, npc_name, state.current_path))

-            state.current_path = os.getcwd()
             state, output = execute_command(user_input, state)
             process_result(user_input, state, output, command_history)
-
+
         except KeyboardInterrupt:
             if is_windows:
                 # On Windows, Ctrl+C cancels the current input line, show prompt again
@@ -1528,8 +1688,6 @@
         except EOFError:
             # Ctrl+D: exit shell cleanly
             exit_shell(state)
-
-
 def main() -> None:
     parser = argparse.ArgumentParser(description="npcsh - An NPC-powered shell.")
     parser.add_argument(
npcsh-1.0.11.dist-info/METADATA → npcsh-1.0.12.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.11
+Version: 1.0.12
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
npcsh-1.0.11.dist-info/RECORD → npcsh-1.0.12.dist-info/RECORD CHANGED
@@ -1,21 +1,21 @@
 npcsh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-npcsh/_state.py,sha256=GCMUIwgIBlS7LEBLYlfBiPNKVaK19ZyxT833NFU-djU,31109
+npcsh/_state.py,sha256=c8KhDE16SGJiz5eR7t3zlQdrkTV24b-xIf5-23e7Stk,31132
 npcsh/alicanto.py,sha256=F-zZGjBTo3a_PQHvPC8-DNF6t4mJELo_zx7gBGvDehg,44611
 npcsh/guac.py,sha256=Ocmk_c4NUtGsC3JOtmkbgLvD6u-XtBPRFRYcckpgUJU,33099
 npcsh/mcp_helpers.py,sha256=Ktd2yXuBnLL2P7OMalgGLj84PXJSzaucjqmJVvWx6HA,12723
 npcsh/mcp_npcsh.py,sha256=SfmplH62GS9iI6q4vuQLVUS6tkrok6L7JxODx_iH7ps,36158
 npcsh/mcp_server.py,sha256=l2Ra0lpFrUu334pvp0Q9ajF2n73KvZswFi0FgbDhh9k,5884
 npcsh/npc.py,sha256=7ujKrMQFgkeGJ4sX5Kn_dB5tjrPN58xeC91PNt453aM,7827
-npcsh/npcsh.py,sha256=_vpphATMKlM2FtUZR3RrwKoRh-eg9QiAp7axIIDAITg,59986
+npcsh/npcsh.py,sha256=_43iSHmq13Td_n0Yr4LOZLOI1ZUpEXX2a0fPWuhldDc,67452
 npcsh/plonk.py,sha256=U2e9yUJZN95Girzzvgrh-40zOdl5zO3AHPsIjoyLv2M,15261
 npcsh/pti.py,sha256=jGHGE5SeIcDkV8WlOEHCKQCnYAL4IPS-kUBHrUz0oDA,10019
 npcsh/routes.py,sha256=5u23bFTbdXXJ3V7I8BJMq42wWUZFeMbzItwBf8WHlpY,36917
 npcsh/spool.py,sha256=GhnSFX9uAtrB4m_ijuyA5tufH12DrWdABw0z8FmiCHc,11497
 npcsh/wander.py,sha256=BiN6eYyFnEsFzo8MFLRkdZ8xS9sTKkQpjiCcy9chMcc,23225
 npcsh/yap.py,sha256=h5KNt9sNOrDPhGe_zfn_yFIeQhizX09zocjcPWH7m3k,20905
-npcsh-1.0.11.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
-npcsh-1.0.11.dist-info/METADATA,sha256=kv7abgprmCXu31NjmmaYZhfLz_x1zIVlzUZAIb1RliI,22748
-npcsh-1.0.11.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-npcsh-1.0.11.dist-info/entry_points.txt,sha256=qxOYTm3ym3JWyWf2nv2Mk71uMcJIdUoNEJ8VYMkyHiY,214
-npcsh-1.0.11.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
-npcsh-1.0.11.dist-info/RECORD,,
+npcsh-1.0.12.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
+npcsh-1.0.12.dist-info/METADATA,sha256=BolEHapVe72ZJAaF9kiKavX0eNX7ZP4PISBYSQDhLpY,22748
+npcsh-1.0.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+npcsh-1.0.12.dist-info/entry_points.txt,sha256=qxOYTm3ym3JWyWf2nv2Mk71uMcJIdUoNEJ8VYMkyHiY,214
+npcsh-1.0.12.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
+npcsh-1.0.12.dist-info/RECORD,,