npcsh 1.0.14__py3-none-any.whl → 1.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcsh/npc.py CHANGED
@@ -1,7 +1,6 @@
1
1
  import argparse
2
2
  import sys
3
3
  import os
4
- import sqlite3
5
4
  import traceback
6
5
  from typing import Optional
7
6
 
@@ -11,7 +10,8 @@ from npcsh._state import (
11
10
  NPCSH_API_URL,
12
11
  NPCSH_DB_PATH,
13
12
  NPCSH_STREAM_OUTPUT,
14
- )
13
+ initial_state,
14
+ )
15
15
  from npcpy.npc_sysenv import (
16
16
  print_and_process_stream_with_markdown,
17
17
  render_markdown,
@@ -19,8 +19,18 @@ from npcpy.npc_sysenv import (
19
19
  from npcpy.npc_compiler import NPC, Team
20
20
  from npcsh.routes import router
21
21
  from npcpy.llm_funcs import check_llm_command
22
+ from sqlalchemy import create_engine
23
+
24
+ # Import the key functions from npcsh
25
+ from npcsh.npcsh import (
26
+ setup_shell,
27
+ execute_slash_command,
28
+ execute_command,
29
+ process_pipeline_command,
30
+ )
22
31
 
23
32
  def load_npc_by_name(npc_name: str = "sibiji", db_path: str = NPCSH_DB_PATH) -> Optional[NPC]:
33
+ """Load NPC by name, with fallback logic matching npcsh"""
24
34
  if not npc_name:
25
35
  npc_name = "sibiji"
26
36
 
@@ -37,7 +47,7 @@ def load_npc_by_name(npc_name: str = "sibiji", db_path: str = NPCSH_DB_PATH) ->
37
47
 
38
48
  if chosen_path:
39
49
  try:
40
- db_conn = sqlite3.connect(db_path)
50
+ db_conn = create_engine(f'sqlite:///{NPCSH_DB_PATH}')
41
51
  npc = NPC(file=chosen_path, db_conn=db_conn)
42
52
  return npc
43
53
  except Exception as e:
@@ -64,34 +74,38 @@ def main():
64
74
  "-n", "--npc", help="Name of the NPC to use (default: sibiji)", type=str, default="sibiji"
65
75
  )
66
76
 
67
- # No subparsers setup at first - we'll conditionally create them
68
-
69
- # First, get any arguments without parsing commands
77
+ # Parse arguments
70
78
  args, all_args = parser.parse_known_args()
71
79
  global_model = args.model
72
80
  global_provider = args.provider
73
81
 
74
- # Check if the first argument is a known command
75
82
  is_valid_command = False
76
83
  command_name = None
77
- if all_args and all_args[0] in router.get_commands():
78
- is_valid_command = True
79
- command_name = all_args[0]
80
- all_args = all_args[1:] # Remove the command from arguments
84
+
85
+ if all_args:
86
+ first_arg = all_args[0]
87
+ if first_arg.startswith('/'):
88
+ is_valid_command = True
89
+ command_name = first_arg
90
+ all_args = all_args[1:]
91
+ elif first_arg in router.get_commands():
92
+ is_valid_command = True
93
+ command_name = '/' + first_arg
94
+ all_args = all_args[1:]
95
+
96
+
81
97
 
82
- # Only set up subparsers if we have a valid command
83
98
  if is_valid_command:
84
99
  subparsers = parser.add_subparsers(dest="command", title="Available Commands",
85
100
  help="Run 'npc <command> --help' for command-specific help")
86
101
 
87
102
  for cmd_name, help_text in router.help_info.items():
88
-
89
103
  cmd_parser = subparsers.add_parser(cmd_name, help=help_text, add_help=False)
90
104
  cmd_parser.add_argument('command_args', nargs=argparse.REMAINDER,
91
105
  help='Arguments passed directly to the command handler')
92
106
 
93
107
  # Re-parse with command subparsers
94
- args = parser.parse_args([command_name] + all_args)
108
+ args = parser.parse_args([command_name.lstrip('/')] + all_args)
95
109
  command_args = args.command_args if hasattr(args, 'command_args') else []
96
110
  unknown_args = []
97
111
  else:
@@ -104,67 +118,86 @@ def main():
104
118
  args.model = global_model
105
119
  if args.provider is None:
106
120
  args.provider = global_provider
107
- # --- END OF FIX ---
108
- npc_instance = load_npc_by_name(args.npc, NPCSH_DB_PATH)
109
-
110
- effective_model = args.model or NPCSH_CHAT_MODEL
111
- effective_provider = args.provider or NPCSH_CHAT_PROVIDER
112
-
113
121
 
122
+ # Use npcsh's setup_shell to get proper team and NPC setup
123
+ try:
124
+ command_history, team, forenpc_obj = setup_shell()
125
+ except Exception as e:
126
+ print(f"Warning: Could not set up full npcsh environment: {e}", file=sys.stderr)
127
+ print("Falling back to basic NPC loading...", file=sys.stderr)
128
+ team = None
129
+ forenpc_obj = load_npc_by_name(args.npc, NPCSH_DB_PATH)
130
+
131
+ # Determine which NPC to use
132
+ npc_instance = None
133
+ if team and args.npc in team.npcs:
134
+ npc_instance = team.npcs[args.npc]
135
+ elif team and args.npc == team.forenpc.name if team.forenpc else False:
136
+ npc_instance = team.forenpc
137
+ else:
138
+ npc_instance = load_npc_by_name(args.npc, NPCSH_DB_PATH)
114
139
 
115
- extras = {}
140
+ if not npc_instance:
141
+ print(f"Error: Could not load NPC '{args.npc}'", file=sys.stderr)
142
+ sys.exit(1)
116
143
 
117
- # Process command args if we have a valid command
118
- if is_valid_command:
119
- # Parse command args properly
120
- if command_args:
121
- i = 0
122
- while i < len(command_args):
123
- arg = command_args[i]
124
- if arg.startswith("--"):
125
- param = arg[2:] # Remove --
126
- if "=" in param:
127
- param_name, param_value = param.split("=", 1)
128
- extras[param_name] = param_value
129
- i += 1
130
- elif i + 1 < len(command_args) and not command_args[i+1].startswith("--"):
131
- extras[param] = command_args[i+1]
132
- i += 2
133
- else:
134
- extras[param] = True
135
- i += 1
136
- else:
137
- i += 1
138
-
139
- handler = router.get_route(command_name)
140
- if not handler:
141
- print(f"Error: Command '{command_name}' recognized but no handler found.", file=sys.stderr)
142
- sys.exit(1)
143
-
144
- full_command_str = command_name
145
- if command_args:
146
- full_command_str += " " + " ".join(command_args)
144
+ # Now check for jinxs if we haven't identified a command yet
145
+ if not is_valid_command and all_args:
146
+ first_arg = all_args[0]
147
147
 
148
- handler_kwargs = {
149
- "model": effective_model,
150
- "provider": effective_provider,
151
- "npc": npc_instance,
152
- "api_url": NPCSH_API_URL,
153
- "stream": NPCSH_STREAM_OUTPUT,
154
- "messages": [],
155
- "team": None,
156
- "current_path": os.getcwd(),
157
- **extras
158
- }
159
-
160
- try:
161
- result = handler(command=full_command_str, **handler_kwargs)
148
+ # Check if first argument is a jinx name
149
+ jinx_found = False
150
+ if team and first_arg in team.jinxs_dict:
151
+ jinx_found = True
152
+ elif isinstance(npc_instance, NPC) and hasattr(npc_instance, 'jinxs_dict') and first_arg in npc_instance.jinxs_dict:
153
+ jinx_found = True
154
+
155
+ if jinx_found:
156
+ is_valid_command = True
157
+ command_name = '/' + first_arg
158
+ all_args = all_args[1:]
159
+
160
+ # Create a shell state object similar to npcsh
161
+ shell_state = initial_state
162
+ shell_state.npc = npc_instance
163
+ shell_state.team = team
164
+ shell_state.current_path = os.getcwd()
165
+ shell_state.stream_output = NPCSH_STREAM_OUTPUT
166
+
167
+ # Override model/provider if specified
168
+ effective_model = args.model or (npc_instance.model if npc_instance.model else NPCSH_CHAT_MODEL)
169
+ effective_provider = args.provider or (npc_instance.provider if npc_instance.provider else NPCSH_CHAT_PROVIDER)
170
+
171
+ # Update the NPC's model/provider for this session if overridden
172
+ if args.model:
173
+ npc_instance.model = effective_model
174
+ if args.provider:
175
+ npc_instance.provider = effective_provider
176
+
177
+ try:
178
+ if is_valid_command:
179
+ # Handle slash command using npcsh's execute_slash_command
180
+ full_command_str = command_name
181
+ if command_args:
182
+ full_command_str += " " + " ".join(command_args)
183
+
184
+ print(f"Executing command: {full_command_str}")
185
+
186
+ updated_state, result = execute_slash_command(
187
+ full_command_str,
188
+ stdin_input=None,
189
+ state=shell_state,
190
+ stream=NPCSH_STREAM_OUTPUT
191
+ )
162
192
 
193
+ # Process and display the result
163
194
  if isinstance(result, dict):
164
195
  output = result.get("output") or result.get("response")
196
+ model_for_stream = result.get('model', effective_model)
197
+ provider_for_stream = result.get('provider', effective_provider)
165
198
 
166
199
  if NPCSH_STREAM_OUTPUT and not isinstance(output, str):
167
- print_and_process_stream_with_markdown(output, effective_model, effective_provider)
200
+ print_and_process_stream_with_markdown(output, model_for_stream, provider_for_stream)
168
201
  elif output is not None:
169
202
  render_markdown(str(output))
170
203
  elif result is not None:
@@ -172,45 +205,38 @@ def main():
172
205
  else:
173
206
  print(f"Command '{command_name}' executed.")
174
207
 
175
- except Exception as e:
176
- print(f"Error executing command '{command_name}': {e}", file=sys.stderr)
177
- traceback.print_exc()
178
- sys.exit(1)
179
- else:
180
- # Process as a prompt
181
- prompt = " ".join(unknown_args)
208
+ else:
209
+ # Process as a regular prompt using npcsh's execution logic
210
+ prompt = " ".join(unknown_args)
182
211
 
183
- if not prompt:
184
- # If no prompt and no command, show help
185
- parser.print_help()
186
- sys.exit(1)
212
+ if not prompt:
213
+ # If no prompt and no command, show help
214
+ parser.print_help()
215
+ sys.exit(1)
187
216
 
188
- print(f"Processing prompt: '{prompt}' with NPC: '{args.npc}'...")
189
- try:
190
- response_data = check_llm_command(
191
- command=prompt,
192
- model=effective_model,
193
- provider=effective_provider,
194
- npc=npc_instance,
195
- stream=NPCSH_STREAM_OUTPUT,
196
- messages=[],
197
- team=None,
198
- api_url=NPCSH_API_URL,
199
- )
217
+ print(f"Processing prompt: '{prompt}' with NPC: '{args.npc}'...")
218
+
219
+ # Use npcsh's execute_command but force it to chat mode for simple prompts
220
+ shell_state.current_mode = 'chat'
221
+ updated_state, result = execute_command(prompt, shell_state)
200
222
 
201
- if isinstance(response_data, dict):
202
- output = response_data.get("output")
223
+ # Process and display the result
224
+ if isinstance(result, dict):
225
+ output = result.get("output")
226
+ model_for_stream = result.get('model', effective_model)
227
+ provider_for_stream = result.get('provider', effective_provider)
228
+
203
229
  if NPCSH_STREAM_OUTPUT and hasattr(output, '__iter__') and not isinstance(output, (str, bytes, dict, list)):
204
- print_and_process_stream_with_markdown(output, effective_model, effective_provider)
230
+ print_and_process_stream_with_markdown(output, model_for_stream, provider_for_stream)
205
231
  elif output is not None:
206
232
  render_markdown(str(output))
207
- elif response_data is not None:
208
- render_markdown(str(response_data))
233
+ elif result is not None:
234
+ render_markdown(str(result))
209
235
 
210
- except Exception as e:
211
- print(f"Error processing prompt: {e}", file=sys.stderr)
212
- traceback.print_exc()
213
- sys.exit(1)
236
+ except Exception as e:
237
+ print(f"Error executing command: {e}", file=sys.stderr)
238
+ traceback.print_exc()
239
+ sys.exit(1)
214
240
 
215
241
  if __name__ == "__main__":
216
242
  main()
npcsh/npcsh.py CHANGED
@@ -664,11 +664,12 @@ def should_skip_kg_processing(user_input: str, assistant_output: str) -> bool:
664
664
  return False
665
665
 
666
666
 
667
-
668
667
  def execute_slash_command(command: str, stdin_input: Optional[str], state: ShellState, stream: bool) -> Tuple[ShellState, Any]:
669
668
  """Executes slash commands using the router or checking NPC/Team jinxs."""
670
669
  all_command_parts = shlex.split(command)
671
670
  command_name = all_command_parts[0].lstrip('/')
671
+
672
+ # Handle NPC switching commands
672
673
  if command_name in ['n', 'npc']:
673
674
  npc_to_switch_to = all_command_parts[1] if len(all_command_parts) > 1 else None
674
675
  if npc_to_switch_to and state.team and npc_to_switch_to in state.team.npcs:
@@ -677,10 +678,11 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
677
678
  else:
678
679
  available_npcs = list(state.team.npcs.keys()) if state.team else []
679
680
  return state, colored(f"NPC '{npc_to_switch_to}' not found. Available NPCs: {', '.join(available_npcs)}", "red")
681
+
682
+ # Check router commands first
680
683
  handler = router.get_route(command_name)
681
684
  if handler:
682
685
  parsed_flags, positional_args = parse_generic_command_flags(all_command_parts[1:])
683
-
684
686
  normalized_flags = normalize_and_expand_flags(parsed_flags)
685
687
 
686
688
  handler_kwargs = {
@@ -705,38 +707,37 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
705
707
  'igmodel': state.image_gen_model,
706
708
  'igprovider': state.image_gen_provider,
707
709
  'vgmodel': state.video_gen_model,
708
- 'vgprovider':state.video_gen_provider,
710
+ 'vgprovider': state.video_gen_provider,
709
711
  'vmodel': state.vision_model,
710
712
  'vprovider': state.vision_provider,
711
713
  'rmodel': state.reasoning_model,
712
714
  'rprovider': state.reasoning_provider,
713
715
  }
714
716
 
715
- if len(normalized_flags)>0:
717
+ if len(normalized_flags) > 0:
716
718
  kwarg_part = 'with kwargs: \n -' + '\n -'.join(f'{key}={item}' for key, item in normalized_flags.items())
717
719
  else:
718
720
  kwarg_part = ''
719
721
 
720
- # 4. Merge the clean, normalized flags. This will correctly overwrite defaults.
721
722
  render_markdown(f'- Calling {command_name} handler {kwarg_part} ')
723
+
724
+ # Handle model/provider inference
722
725
  if 'model' in normalized_flags and 'provider' not in normalized_flags:
723
- # Call your existing, centralized lookup_provider function
724
726
  inferred_provider = lookup_provider(normalized_flags['model'])
725
727
  if inferred_provider:
726
- # Update the provider that will be used for this command.
727
728
  handler_kwargs['provider'] = inferred_provider
728
729
  print(colored(f"Info: Inferred provider '{inferred_provider}' for model '{normalized_flags['model']}'.", "cyan"))
730
+
729
731
  if 'provider' in normalized_flags and 'model' not in normalized_flags:
730
- # loop up mhandler_kwargs model's provider
731
732
  current_provider = lookup_provider(handler_kwargs['model'])
732
733
  if current_provider != normalized_flags['provider']:
733
- print(f'Please specify a model for the provider: {normalized_flags['provider']}')
734
- handler_kwargs.update(normalized_flags)
734
+ prov = normalized_flags['provider']
735
+ print(f'Please specify a model for the provider: {prov}')
735
736
 
737
+ handler_kwargs.update(normalized_flags)
736
738
 
737
739
  try:
738
740
  result_dict = handler(command=command, **handler_kwargs)
739
- # add the output model and provider for the print_and_process_stream downstream processing
740
741
  if isinstance(result_dict, dict):
741
742
  state.messages = result_dict.get("messages", state.messages)
742
743
  return state, result_dict
@@ -747,39 +748,64 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
747
748
  print(f"Error executing slash command '{command_name}':", file=sys.stderr)
748
749
  traceback.print_exc()
749
750
  return state, colored(f"Error executing slash command '{command_name}': {e}", "red")
751
+
752
+ # Check for jinxs in active NPC
750
753
  active_npc = state.npc if isinstance(state.npc, NPC) else None
751
754
  jinx_to_execute = None
752
755
  executor = None
753
- if active_npc and command_name in active_npc.jinxs_dict:
756
+
757
+ if active_npc and hasattr(active_npc, 'jinxs_dict') and command_name in active_npc.jinxs_dict:
754
758
  jinx_to_execute = active_npc.jinxs_dict[command_name]
755
759
  executor = active_npc
756
- elif state.team and command_name in state.team.jinxs_dict:
760
+ elif state.team and hasattr(state.team, 'jinxs_dict') and command_name in state.team.jinxs_dict:
757
761
  jinx_to_execute = state.team.jinxs_dict[command_name]
758
762
  executor = state.team
759
-
760
763
  if jinx_to_execute:
761
- args = command_parts[1:]
764
+ args = all_command_parts[1:] # Fix: use all_command_parts instead of command_parts
762
765
  try:
763
- jinx_output = jinx_to_execute.run(
764
- *args,
765
- state=state,
766
- stdin_input=stdin_input,
767
- messages=state.messages # Pass messages explicitly if needed
768
- )
769
- return state, jinx_output
766
+ # Create input dictionary from args based on jinx inputs
767
+ input_values = {}
768
+ if hasattr(jinx_to_execute, 'inputs') and jinx_to_execute.inputs:
769
+ for i, input_name in enumerate(jinx_to_execute.inputs):
770
+ if i < len(args):
771
+ input_values[input_name] = args[i]
772
+
773
+ # Execute the jinx with proper parameters
774
+ if isinstance(executor, NPC):
775
+ jinx_output = jinx_to_execute.execute(
776
+ input_values=input_values,
777
+ jinxs_dict=executor.jinxs_dict if hasattr(executor, 'jinxs_dict') else {},
778
+ npc=executor,
779
+ messages=state.messages
780
+ )
781
+ else: # Team executor
782
+ jinx_output = jinx_to_execute.execute(
783
+ input_values=input_values,
784
+ jinxs_dict=executor.jinxs_dict if hasattr(executor, 'jinxs_dict') else {},
785
+ npc=active_npc or state.npc,
786
+ messages=state.messages
787
+ )
788
+ if isinstance(jinx_output, dict) and 'messages' in jinx_output:
789
+ state.messages = jinx_output['messages']
790
+ return state, str(jinx_output.get('output', jinx_output))
791
+ elif isinstance(jinx_output, dict):
792
+ return state, str(jinx_output.get('output', jinx_output))
793
+ else:
794
+ return state, jinx_output
795
+
770
796
  except Exception as e:
771
797
  import traceback
772
798
  print(f"Error executing jinx '{command_name}':", file=sys.stderr)
773
799
  traceback.print_exc()
774
800
  return state, colored(f"Error executing jinx '{command_name}': {e}", "red")
775
801
 
802
+ # Check if it's an NPC name for switching
776
803
  if state.team and command_name in state.team.npcs:
777
804
  new_npc = state.team.npcs[command_name]
778
- state.npc = new_npc # Update state directly
805
+ state.npc = new_npc
779
806
  return state, f"Switched to NPC: {new_npc.name}"
780
807
 
781
- return state, colored(f"Unknown slash command or jinx: {command_name}", "red")
782
-
808
+ return state, colored(f"Unknown slash command, jinx, or NPC: {command_name}", "red")
783
809
 
784
810
  def process_pipeline_command(
785
811
  cmd_segment: str,
@@ -893,7 +919,8 @@ def execute_command(
893
919
  active_provider = npc_provider or state.chat_provider
894
920
 
895
921
  if state.current_mode == 'agent':
896
- print(len(commands), commands)
922
+ print('# of parsed commands: ', len(commands))
923
+ print('Commands:' '\n'.join(commands))
897
924
  for i, cmd_segment in enumerate(commands):
898
925
 
899
926
  render_markdown(f'- executing command {i+1}/{len(commands)}')
@@ -970,7 +997,7 @@ def execute_command(
970
997
  else:
971
998
  try:
972
999
  bash_state, bash_output = handle_bash_command(cmd_parts, command, None, state)
973
- return bash_state, bash_output
1000
+ return state, bash_output
974
1001
  except Exception as bash_err:
975
1002
  return state, colored(f"Bash execution failed: {bash_err}", "red")
976
1003
  except Exception:
@@ -1170,7 +1197,8 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
1170
1197
 
1171
1198
  if os.path.exists(forenpc_path):
1172
1199
  forenpc_obj = NPC(file = forenpc_path,
1173
- jinxs=jinxs_list)
1200
+ jinxs=jinxs_list,
1201
+ db_conn=command_history.engine)
1174
1202
  if forenpc_obj.model is None:
1175
1203
  forenpc_obj.model= team_ctx.get("model", initial_state.chat_model)
1176
1204
  if forenpc_obj.provider is None:
@@ -1213,9 +1241,7 @@ def process_result(
1213
1241
  output: Any,
1214
1242
  command_history: CommandHistory
1215
1243
  ):
1216
- # --- Part 1: Save Conversation & Determine Output ---
1217
1244
 
1218
- # Define team and NPC names early for consistent logging
1219
1245
  team_name = result_state.team.name if result_state.team else "__none__"
1220
1246
  npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
1221
1247
 
@@ -1223,9 +1249,8 @@ def process_result(
1223
1249
  active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
1224
1250
  name="default",
1225
1251
  model=result_state.chat_model,
1226
- provider=result_state.chat_provider
1227
- )
1228
-
1252
+ provider=result_state.chat_provider,
1253
+ db_conn=command_history.engine)
1229
1254
  save_conversation_message(
1230
1255
  command_history,
1231
1256
  result_state.conversation_id,
@@ -1377,14 +1402,13 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
1377
1402
  print(colored("Processing and archiving all session knowledge...", "cyan"))
1378
1403
 
1379
1404
  engine = command_history.engine
1380
- integrator_npc = NPC(name="integrator", model=current_state.chat_model, provider=current_state.chat_provider)
1381
1405
 
1382
- # Process each unique scope that was active during the session
1406
+
1383
1407
  for team_name, npc_name, path in session_scopes:
1384
1408
  try:
1385
1409
  print(f" -> Archiving knowledge for: T='{team_name}', N='{npc_name}', P='{path}'")
1386
1410
 
1387
- # Get all messages for the current conversation that happened in this specific path
1411
+
1388
1412
  convo_id = current_state.conversation_id
1389
1413
  all_messages = command_history.get_conversations_by_id(convo_id)
1390
1414
 
@@ -1399,15 +1423,16 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
1399
1423
  print(" ...No content for this scope, skipping.")
1400
1424
  continue
1401
1425
 
1402
- # Load the existing KG for this specific, real scope
1426
+
1403
1427
  current_kg = load_kg_from_db(engine, team_name, npc_name, path)
1404
1428
 
1405
- # Evolve it with the full text from the session for this scope
1429
+
1406
1430
  evolved_kg, _ = kg_evolve_incremental(
1407
1431
  existing_kg=current_kg,
1408
1432
  new_content_text=full_text,
1409
- model=integrator_npc.model,
1410
- provider=integrator_npc.provider,
1433
+ model=current_state.npc.model,
1434
+ provider=current_state.npc.provider,
1435
+ npc= current_state.npc,
1411
1436
  get_concepts=True,
1412
1437
  link_concepts_facts = True,
1413
1438
  link_concepts_concepts = True,
@@ -1416,7 +1441,11 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
1416
1441
  )
1417
1442
 
1418
1443
  # Save the updated KG back to the database under the same exact scope
1419
- save_kg_to_db(engine, evolved_kg, team_name, npc_name, path)
1444
+ save_kg_to_db(engine,
1445
+ evolved_kg,
1446
+ team_name,
1447
+ npc_name,
1448
+ path)
1420
1449
 
1421
1450
  except Exception as e:
1422
1451
  import traceback
@@ -1474,7 +1503,8 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
1474
1503
  session_scopes.add((team_name, npc_name, state.current_path))
1475
1504
 
1476
1505
  state, output = execute_command(user_input, state)
1477
- process_result(user_input, state,
1506
+ process_result(user_input,
1507
+ state,
1478
1508
  output,
1479
1509
  command_history)
1480
1510
 
npcsh/routes.py CHANGED
@@ -18,7 +18,6 @@ from npcpy.llm_funcs import (
18
18
  gen_video,
19
19
  breathe,
20
20
  )
21
- from npcpy.npc_compiler import NPC, Team, Jinx
22
21
  from npcpy.npc_compiler import initialize_npc_project
23
22
  from npcpy.npc_sysenv import render_markdown
24
23
  from npcpy.work.plan import execute_plan_command
@@ -29,8 +28,7 @@ from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_k
29
28
  from npcpy.serve import start_flask_server
30
29
  from npcpy.mix.debate import run_debate
31
30
  from npcpy.data.image import capture_screenshot
32
- from npcpy.npc_compiler import NPC, Team, Jinx
33
- from npcpy.npc_compiler import initialize_npc_project
31
+ from npcpy.npc_compiler import NPC, Team, Jinx,initialize_npc_project
34
32
  from npcpy.data.web import search_web
35
33
  from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process
36
34
 
@@ -446,13 +444,13 @@ def plan_handler(command: str, **kwargs):
446
444
  user_command = " ".join(command.split()[1:])
447
445
  if not user_command:
448
446
  return {"output": "Usage: /plan <description_of_plan>", "messages": messages}
449
- try:
450
- return execute_plan_command(command=user_command, **kwargs)
451
- except NameError:
452
- return {"output": "Plan function (execute_plan_command) not available.", "messages": messages}
453
- except Exception as e:
454
- traceback.print_exc()
455
- return {"output": f"Error executing plan: {e}", "messages": messages}
447
+ #try:
448
+ return execute_plan_command(command=user_command, **kwargs)
449
+
450
+ #return {"output": "Plan function (execute_plan_command) not available.", "messages": messages}
451
+ #except Exception as e:
452
+ # traceback.print_exc()
453
+ # return {"output": f"Error executing plan: {e}", "messages": messages}
456
454
 
457
455
  @router.route("pti", "Use pardon-the-interruption mode to interact with the LLM")
458
456
  def pti_handler(command: str, **kwargs):
@@ -500,17 +498,18 @@ def brainblast_handler(command: str, **kwargs):
500
498
  parts = shlex.split(command)
501
499
  search_query = " ".join(parts[1:]) if len(parts) > 1 else ""
502
500
 
503
-
504
501
  if not search_query:
505
502
  return {"output": "Usage: /brainblast <search_terms>", "messages": messages}
506
503
 
507
504
  # Get the command history instance
508
505
  command_history = kwargs.get('command_history')
509
506
  if not command_history:
507
+ #print('no command history provided to brainblast')
510
508
  # Create a new one if not provided
511
509
  db_path = safe_get(kwargs, "history_db_path", os.path.expanduser('~/npcsh_history.db'))
512
510
  try:
513
511
  command_history = CommandHistory(db_path)
512
+ kwargs['command_history'] = command_history
514
513
  except Exception as e:
515
514
  return {"output": f"Error connecting to command history: {e}", "messages": messages}
516
515
 
@@ -521,13 +520,8 @@ def brainblast_handler(command: str, **kwargs):
521
520
 
522
521
  # Execute the brainblast command
523
522
  return execute_brainblast_command(
524
- command=search_query,
525
- command_history=command_history,
526
- messages=messages,
527
- top_k=safe_get(kwargs, 'top_k', 5),
528
- **kwargs
529
- )
530
-
523
+ command=search_query,
524
+ **kwargs)
531
525
  except Exception as e:
532
526
  traceback.print_exc()
533
527
  return {"output": f"Error executing brainblast command: {e}", "messages": messages}
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: npcsh
3
- Version: 1.0.14
3
+ Version: 1.0.16
4
4
  Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
5
5
  Home-page: https://github.com/NPC-Worldwide/npcsh
6
6
  Author: Christopher Agostino
@@ -16,6 +16,7 @@ Requires-Dist: litellm
16
16
  Requires-Dist: docx
17
17
  Requires-Dist: scipy
18
18
  Requires-Dist: numpy
19
+ Requires-Dist: thefuzz
19
20
  Requires-Dist: imagehash
20
21
  Requires-Dist: requests
21
22
  Requires-Dist: matplotlib
@@ -40,6 +41,7 @@ Requires-Dist: flask_cors
40
41
  Requires-Dist: redis
41
42
  Requires-Dist: psycopg2-binary
42
43
  Requires-Dist: flask_sse
44
+ Requires-Dist: wikipedia
43
45
  Provides-Extra: lite
44
46
  Requires-Dist: anthropic; extra == "lite"
45
47
  Requires-Dist: openai; extra == "lite"
@@ -150,11 +152,11 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
150
152
  ```
151
153
  <p align="center">
152
154
  <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/test_data/search_example.png" alt="example of search results", width=600>
153
- </p>
155
+ </p>
154
156
 
155
157
  - **Computer Use**
156
158
  ```bash
157
- /plonk -n 'npc_name' -sp 'task for plonk to carry out'
159
+ /plonk 'find out the latest news on cnn'
158
160
  ```
159
161
 
160
162
  - **Generate Image**
@@ -168,9 +170,10 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
168
170
  ```bash
169
171
  /roll 'generate a video of a hat riding a dog'
170
172
  ```
173
+ <!--
171
174
  <p align="center">
172
175
  <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/test_data/hat_video.mp4" alt="video of a hat riding a dog", width=250>
173
- </p>
176
+ </p> -->
174
177
 
175
178
  - **Serve an NPC Team**
176
179
  ```bash
@@ -196,7 +199,8 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
196
199
  - `/init` - Initialize NPC project
197
200
  - `/jinxs` - Show available jinxs for the current NPC/Team
198
201
  - `/ots` - Take screenshot and analyze with vision model
199
- - `/plan` - Execute a plan command\n\n/plonk - Use vision model to interact with GUI. Usage: /plonk <task description>
202
+ - `/plan` - Execute a plan command
203
+ - `/plonk` - Use vision model to interact with GUI. Usage: /plonk <task description>
200
204
  - `/pti` - Use pardon-the-interruption mode to interact with reasoning model LLM
201
205
  - `/rag` - Execute a RAG command using ChromaDB embeddings with optional file input (-f/--file)
202
206
  - `/roll` - generate a video with video generation model
@@ -211,13 +215,13 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
211
215
  - `/wander` - A method for LLMs to think on a problem by switching between states of high temperature and low temperature
212
216
  - `/yap` - Enter voice chat (yap) mode
213
217
 
214
- ## Common Command-Line Flags\n\nThe shortest unambiguous prefix works (e.g., `-t` for `--temperature`).
218
+ ## Common Command-Line Flags:
215
219
 
216
220
  ```
217
221
  Flag Shorthand | Flag Shorthand | Flag Shorthand | Flag Shorthand
218
222
  ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------
219
223
  --attachments (-a) | --height (-h) | --num_npcs (-num_n) | --team (-tea)
220
- --config_dir (-con) | --igmodel (-igm) | --output_file (-o) | --temperature (-tem)
224
+ --config_dir (-con) | --igmodel (-igm) | --output_file (-o) | --temperature (-t)
221
225
  --cors (-cor) | --igprovider (-igp) | --plots_dir (-pl) | --top_k
222
226
  --creativity (-cr) | --lang (-l) | --port (-po) | --top_p
223
227
  --depth (-d) | --max_tokens (-ma) | --provider (-pr) | --vmodel (-vm)
@@ -228,7 +232,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
228
232
  ```
229
233
  '
230
234
 
231
- - ## alicanto: a research exploration agent flow.
235
+ - ## `/alicanto`: a research exploration agent flow.
232
236
 
233
237
  <p align="center"><a href ="https://github.com/npc-worldwide/npcsh/blob/main/docs/alicanto.md">
234
238
  <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/npcsh/npc_team/alicanto.png" alt="logo for deep research", width=250></a>
@@ -245,41 +249,44 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
245
249
  # bash
246
250
  npc alicanto "What ethical considerations should guide AI development?" --max_facts_per_chain 0.5 --max_thematic_groups 3 --max_criticisms_per_group 3 --max_conceptual_combinations 3 --max_experiments 10
247
251
 
248
- npc alicanto "What is the future of remote work?" --format report
252
+ npc alicanto "What is the future of remote work?" --format report # NOTE: Report generation and formatting requires latex installed.
249
253
  ```
250
- - ## Brainblast: searching through past messages:
254
+ - ## `/brainblast`: searching through past messages (soon to incorporate options for knowledge graph)
251
255
  ```bash
252
256
  # npcsh
253
257
  /brainblast 'subtle summer winds' --top_k 10
254
258
  ```
255
259
  ```bash
256
260
  # bash
257
- npc brainblast 'python dictionaries'
261
+ npc brainblast 'executing a mirror in the wonderous moon'
258
262
  ```
259
- - ## Breathe: Condense conversation context (shell only):
263
+ - ## `/breathe`: Condense conversation context (shell only):
260
264
  ```bash
261
265
  # npcsh
262
266
  /breathe
263
267
  /breathe -p ollama -m qwen3:latest
264
268
  ```
265
- - ## Compile: render npcs for use without re-loading npcsh
269
+ - ## `/compile`: render npcs for use without re-loading npcsh
266
270
  ```bash
267
271
  # npcsh
268
- /compile /path/to/npc
272
+ /compile ./npc_team/sibiji.npc
269
273
  ```
270
- - ## flush: flush context (shell only):
274
+ - ## `/flush`: flush context (shell only):
275
+ If you're in the NPC shell and have been in a conversation that's going nowhere and you want to start over... just flush the context.
271
276
  ```bash
272
277
  /flush
273
278
  ```
274
279
 
275
280
 
276
- - ## `guac`
281
+ - ## `/guac`
277
282
 
278
283
  <p align="center"><a href ="https://github.com/npc-worldwide/npcsh/blob/main/docs/guac.md">
279
284
  <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/npcsh/npc_team/guac.png" alt="npcsh logo of a solarpunk sign", width=250></a>
280
285
  </p>
281
286
 
282
- - a replacement shell for interpreters like python/r/node/julia with an avocado input marker 🥑 that brings a pomodoro-like approach to interactive coding.
287
+ - a replacement shell for interpreters like python/r/node/julia with an avocado input marker 🥑 that brings a pomodoro-like approach to interactive coding.
288
+ - available as a standalone program runnable via the `guac` command after `npcsh` has been installed via pip.
289
+
283
290
  - Simulation:
284
291
  `🥑 Make a markov chain simulation of a random walk in 2D space with 1000 steps and visualize`
285
292
  ```
@@ -342,13 +349,61 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
342
349
  A guac session progresses through a series of stages, each of equal length. Each stage adjusts the emoji input prompt. Once the stages have passed, it is time to refresh. Stage 1: `🥑`, Stage 2: `🥑🔪` Stage 3: `🥑🥣` Stage:4 `🥑🥣🧂`, `Stage 5: 🥘 TIME TO REFRESH`. At stage 5, the user is reminded to refresh with the /refresh macro. This will evaluate the session so far and suggest and implement new functions or automations that will aid in future sessions, with the ultimate approval of the user.
343
350
 
344
351
 
345
- - ## help:/ Show help for commands, NPCs, or Jinxs.
346
- ```bash
347
- /help
348
- ```
349
- - ## init - Initialize NPC project
350
- - ## jinxs : show available jinxs
351
- - ## ots: Over-the-shoulder screen shot analysis
352
+ - ## `/help`: Show help for commands, NPCs, or Jinxs.
353
+ ```bash
354
+ /help
355
+ ```
356
+ ```
357
+ npc help
358
+ ```
359
+ - ## `/init` - Initialize NPC project
360
+ - set up bare-bones infra for an npc team
361
+ ```bash
362
+ # npcsh
363
+ /init
364
+ ```
365
+ ```bash
366
+ # bash
367
+ npc init
368
+ ```
369
+
370
+
371
+ - ## `/jinxs` : show available jinxs for team
372
+ Jinxs are Jinja execution templates that let users develop small programs that can build on each other and reference each other through Jinja templating. Jinx methods allow us to give smaller LLMs the scaffolding to perform `tool calling`, so to speak, reliably.
373
+ ```bash
374
+ # npcsh
375
+ /jinxs
376
+ # bash
377
+ npc jinxs
378
+ ```
379
+
380
+ ```python
381
+ Available Jinxs:
382
+ --- Jinxs for NPC: sibiji ---
383
+
384
+ • /bash_executor: Execute bash queries.
385
+
386
+ • /calc: A jinx to simplify and evaluate mathematical expressions (/calc 1+5, /calc 47233*234234)
387
+
388
+ • /data_pull: Execute queries on the ~/npcsh_history.db to pull data. The database contains only information about conversations and other user-provided data. It does not store any information about individual files (/data_pull 'select * from conversation_history limit 10')
389
+
390
+
391
+ • /file_editor: Examines a file, determines what changes are needed, and applies those changes. (/file_editor filename.py 'instructions for carrying out the editing')
392
+
393
+ • /image_generation_jinx: Generates images based on a text prompt. (/image_generation_jinx 'prompt for llm' output_name )
394
+
395
+ • /internet_search: Searches the web for information based on a query in order to verify timely details (e.g. current events) or to corroborate information in uncertain situations. Should be mainly only used when users
396
+ specifically request a search, otherwise an LLM's basic knowledge should be sufficient. ( /internet_search 'cost of cubs tickets' )
397
+ • /local_search: Searches files in current and downstream directories to find items related to the users query using fuzzy matching. (/local_search 'class NPC')
398
+ Returns only relevant snippets (10 lines around matches) to avoid including too much irrelevant content. Intended for fuzzy searches, not for understanding file sizes.
399
+
400
+ • /python_executor: Execute scripts with python. Set the ultimate result as the "output" variable. It must be a string. Do not add unnecessary print statements. (/python_executor 'import numpy as np; print(np.arange(1000))')
401
+ • /screen_capture_analysis_jinx: Captures the whole screen and sends the image for analysis (mostly redundant with /ots.)
402
+ ```
403
+
404
+
405
+
406
+ - ## `/ots`: Over-the-shoulder screen shot analysis
352
407
  - Screenshot analysis:
353
408
  ```bash
354
409
  #npcsh
@@ -359,17 +414,17 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
359
414
  #bash
360
415
  npc ots ...
361
416
  ```
362
- - ## `plan`: set up cron jobs:
417
+ - ## `/plan`: set up cron jobs:
363
418
  ```bash
364
419
  # npcsh
365
- /plan 'a description of a cron job to implement' -m gemma3:27b -p ollama
420
+ /plan 'set up a cron job that reminds me to stretch every thirty minutes' -m gemma3:27b -p ollama
366
421
  ```
367
422
  ```bash
368
423
  # bash
369
- npc plan
424
+ npc plan 'record my cpu usage percentage every 45 minutes'
370
425
  ```
371
426
 
372
- - ## `plonk`: Computer use:
427
+ - ## `/plonk`: Computer use:
373
428
  ```bash
374
429
  # npcsh
375
430
  /plonk -n 'npc_name' -sp 'task for plonk to carry out '
@@ -377,7 +432,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
377
432
  #bash
378
433
  npc plonk
379
434
  ```
380
- - ## `pti`: a reasoning REPL loop with interruptions
435
+ - ## `/pti`: a reasoning REPL loop with interruptions
381
436
 
382
437
  ```npcsh
383
438
  /pti -n frederic -m qwen3:latest -p ollama
@@ -391,14 +446,14 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
391
446
  <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/npcsh/npc_team/frederic4.png" alt="npcsh logo of frederic the bear and the pti logo", width=250></a>
392
447
  </p>
393
448
 
394
- - ## `rag`: embedding search through chroma db, optional file input
395
- - ## `roll`: your video generation assistant
449
+ - ## `/rag`: embedding search through chroma db, optional file input
450
+ - ## `/roll`: your video generation assistant
396
451
  -
397
452
  ```npcsh
398
453
  /roll --provider ollama --model llama3
399
454
  ```
400
455
 
401
- - ## sample: one-shot sampling from LLMs with specific parameters
456
+ - ## `/sample`: one-shot sampling from LLMs with specific parameters
402
457
  ```bash
403
458
  # npcsh
404
459
  /sample 'prompt'
@@ -417,7 +472,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
417
472
  npc sample model=gpt-4o-mini "What are the primary colors?" --provider openai
418
473
  ```
419
474
 
420
- - ## search: use an internet search provider
475
+ - ## `/search`: use an internet search provider
421
476
  ```npcsh
422
477
  /search -sp perplexity 'cal bears football schedule'
423
478
  /search --sprovider duckduckgo 'beef tongue'
@@ -429,7 +484,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
429
484
  ```
430
485
 
431
486
 
432
- - ## serve: serve an npc team
487
+ - ## `/serve`: serve an npc team
433
488
  ```bash
434
489
  /serve
435
490
  /serve ....
@@ -440,7 +495,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
440
495
  npc serve
441
496
  ```
442
497
 
443
- - ## set: change current model, env params
498
+ - ## `/set`: change current model, env params
444
499
  ```bash
445
500
  /set model ...
446
501
  /set provider ...
@@ -450,7 +505,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
450
505
  ```bash
451
506
  npc set ...
452
507
  ```
453
- - ## sleep: prune and evolve the current knowledge graph
508
+ - ## `/sleep`: prune and evolve the current knowledge graph
454
509
  ```bash
455
510
  /sleep
456
511
  /sleep --dream
@@ -460,7 +515,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
460
515
  ```bash
461
516
  npc sleep
462
517
  ```
463
- - ## `spool`
518
+ - ## `/spool`
464
519
  <p align="center"><a href ="https://github.com/npc-worldwide/npcsh/blob/main/docs/spool.md">
465
520
  <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/npcsh/npc_team/spool.png" alt="logo for spool", width=250></a>
466
521
  </p>
@@ -489,7 +544,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
489
544
 
490
545
 
491
546
 
492
- - ## Vixynt: Image generation and editing:
547
+ - ## `/vixynt`: Image generation and editing:
493
548
  ```bash
494
549
  npcsh
495
550
  /vixynt 'an image of a dog eating a hat'
@@ -508,7 +563,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
508
563
 
509
564
 
510
565
 
511
- - ## `wander`: daydreaming for LLMs
566
+ - ## `/wander`: daydreaming for LLMs
512
567
 
513
568
  <p align="center"><a href ="https://github.com/npc-worldwide/npcsh/blob/main/docs/wander.md">
514
569
  <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/npcsh/npc_team/kadiefa.png" alt="logo for wander", width=250></a>
@@ -544,7 +599,7 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
544
599
  interruption-likelihood=.1
545
600
  ```
546
601
 
547
- - ## `yap`: an agentic voice control loop
602
+ - ## `/yap`: an agentic voice control loop
548
603
 
549
604
 
550
605
  <p align="center"><a href ="https://github.com/npc-worldwide/npcsh/blob/main/docs/yap.md">
@@ -562,20 +617,9 @@ Once installed, the following CLI tools will be available: `npcsh`, `guac`, `npc
562
617
  yap
563
618
  npc yap
564
619
  ```
565
- - Show available Jinja Execution Templates:
566
- ```bash
567
- # npcsh
568
- /jinxs
569
- ```
570
- ```bash
571
- # bash
572
- npc jinxs
573
- ```
574
-
575
-
576
620
 
577
621
  ## Inference Capabilities
578
- - `npcsh` works with local and enterprise LLM providers through its LiteLLM integration, allowing users to run inference from Ollama, LMStudio, OpenAI, Anthropic, Gemini, and Deepseek, making it a versatile tool for both simple commands and sophisticated AI-driven tasks.
622
+ - `npcsh` works with local and enterprise LLM providers through its LiteLLM integration, allowing users to run inference from Ollama, LMStudio, vLLM, MLX, OpenAI, Anthropic, Gemini, and Deepseek, making it a versatile tool for both simple commands and sophisticated AI-driven tasks.
579
623
 
580
624
  ## Read the Docs
581
625
 
@@ -751,6 +795,10 @@ export PERPLEXITY_API_KEY='your_perplexity_key'
751
795
  ├── npc_team/ # Global NPCs
752
796
  │ ├── jinxs/ # Global tools
753
797
  │ └── assembly_lines/ # Workflow pipelines
798
+ │ └── example.npc # globally available npc
799
+ │ └── global.ctx # global context file
800
+
801
+
754
802
 
755
803
  ```
756
804
  For cases where you wish to set up a project specific set of NPCs, jinxs, and assembly lines, add a `npc_team` directory to your project and `npcsh` should be able to pick up on its presence, like so:
@@ -5,17 +5,17 @@ npcsh/guac.py,sha256=Ocmk_c4NUtGsC3JOtmkbgLvD6u-XtBPRFRYcckpgUJU,33099
5
5
  npcsh/mcp_helpers.py,sha256=Ktd2yXuBnLL2P7OMalgGLj84PXJSzaucjqmJVvWx6HA,12723
6
6
  npcsh/mcp_npcsh.py,sha256=SfmplH62GS9iI6q4vuQLVUS6tkrok6L7JxODx_iH7ps,36158
7
7
  npcsh/mcp_server.py,sha256=l2Ra0lpFrUu334pvp0Q9ajF2n73KvZswFi0FgbDhh9k,5884
8
- npcsh/npc.py,sha256=7ujKrMQFgkeGJ4sX5Kn_dB5tjrPN58xeC91PNt453aM,7827
9
- npcsh/npcsh.py,sha256=uxs_5k-zmuDjdvKMxoBZwdefdgKGESd-EIGCXYNgx0Y,59571
8
+ npcsh/npc.py,sha256=4RI1pI85dU65ARIOwPX6bYfTDVjg-0EFmIb-QlAxYqg,9068
9
+ npcsh/npcsh.py,sha256=bpysELlhR1R_0hIXwo25odBCvfQgjK6I9qcaH2CAV5k,60575
10
10
  npcsh/plonk.py,sha256=7w7J2bht5QXOyV2UK045nAPDmrSrTGLX-sh56KQ3-k0,14653
11
11
  npcsh/pti.py,sha256=jGHGE5SeIcDkV8WlOEHCKQCnYAL4IPS-kUBHrUz0oDA,10019
12
- npcsh/routes.py,sha256=gsKHhdbzcZnE91w86ydr3ABNTxL12Ta0Oa9z3qwEh54,44470
12
+ npcsh/routes.py,sha256=033Lh0vEO3jQ3kSVT64XfFYqN7FL4K99VkY4YdgiZA8,44391
13
13
  npcsh/spool.py,sha256=QF1SuIhj_PWiOYNkAK31f1W_wS8yYxC5XvM2GU7VJMM,9495
14
14
  npcsh/wander.py,sha256=BiN6eYyFnEsFzo8MFLRkdZ8xS9sTKkQpjiCcy9chMcc,23225
15
15
  npcsh/yap.py,sha256=ipkY3uMDw8gNrPSZ9qJFWVQ_fXtLmQ2oz_6_WZt2hew,21097
16
- npcsh-1.0.14.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
17
- npcsh-1.0.14.dist-info/METADATA,sha256=W0GyU5aR_MP9qUrp7BHWV54p0cgNNrY6E7wzChyvU_4,31027
18
- npcsh-1.0.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
19
- npcsh-1.0.14.dist-info/entry_points.txt,sha256=qxOYTm3ym3JWyWf2nv2Mk71uMcJIdUoNEJ8VYMkyHiY,214
20
- npcsh-1.0.14.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
21
- npcsh-1.0.14.dist-info/RECORD,,
16
+ npcsh-1.0.16.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
17
+ npcsh-1.0.16.dist-info/METADATA,sha256=UQikBr7efMSpf-2swWyvPBeUJ_mgTr6kmGTUvr47GWQ,34873
18
+ npcsh-1.0.16.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
19
+ npcsh-1.0.16.dist-info/entry_points.txt,sha256=qxOYTm3ym3JWyWf2nv2Mk71uMcJIdUoNEJ8VYMkyHiY,214
20
+ npcsh-1.0.16.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
21
+ npcsh-1.0.16.dist-info/RECORD,,
File without changes