npcsh 1.0.12__py3-none-any.whl → 1.0.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcsh/_state.py CHANGED
@@ -1,45 +1,35 @@
1
1
 
2
2
  from colorama import Fore, Back, Style
3
-
4
-
5
- from datetime import datetime
6
- from dotenv import load_dotenv
7
-
8
- import re
3
+ from dataclasses import dataclass, field
4
+ import filecmp
9
5
  import os
10
- from termcolor import colored
11
-
12
-
13
-
14
- from typing import Dict, List
15
- import subprocess
16
- import termios
17
- import tty
6
+ import platform
18
7
  import pty
8
+ import re
19
9
  import select
10
+ import shutil
20
11
  import signal
21
- import time
22
- import os
23
- import re
24
12
  import sqlite3
25
- from datetime import datetime
13
+ import subprocess
14
+ import sys
15
+ from termcolor import colored
16
+ import termios
17
+ import time
18
+ from typing import Dict, List, Any, Tuple, Union, Optional
19
+ import tty
26
20
  import logging
27
21
  import textwrap
28
22
  from termcolor import colored
29
- import sys
30
- import platform
31
-
23
+ from npcpy.memory.command_history import (
24
+ start_new_conversation,
25
+ )
26
+ from npcpy.npc_compiler import NPC, Team
32
27
 
33
28
  def get_npc_path(npc_name: str, db_path: str) -> str:
34
- # First, check in project npc_team directory
35
29
  project_npc_team_dir = os.path.abspath("./npc_team")
36
30
  project_npc_path = os.path.join(project_npc_team_dir, f"{npc_name}.npc")
37
-
38
- # Then, check in global npc_team directory
39
31
  user_npc_team_dir = os.path.expanduser("~/.npcsh/npc_team")
40
32
  global_npc_path = os.path.join(user_npc_team_dir, f"{npc_name}.npc")
41
-
42
- # Check database for compiled NPCs
43
33
  try:
44
34
  with sqlite3.connect(db_path) as conn:
45
35
  cursor = conn.cursor()
@@ -105,9 +95,6 @@ def initialize_base_npcs_if_needed(db_path: str) -> None:
105
95
  package_dir = os.path.dirname(__file__)
106
96
  package_npc_team_dir = os.path.join(package_dir, "npc_team")
107
97
 
108
-
109
-
110
- # User's global npc_team directory
111
98
  user_npc_team_dir = os.path.expanduser("~/.npcsh/npc_team")
112
99
 
113
100
  user_jinxs_dir = os.path.join(user_npc_team_dir, "jinxs")
@@ -115,7 +102,7 @@ def initialize_base_npcs_if_needed(db_path: str) -> None:
115
102
  os.makedirs(user_npc_team_dir, exist_ok=True)
116
103
  os.makedirs(user_jinxs_dir, exist_ok=True)
117
104
  os.makedirs(user_templates_dir, exist_ok=True)
118
- # Copy NPCs from package to user directory
105
+
119
106
  for filename in os.listdir(package_npc_team_dir):
120
107
  if filename.endswith(".npc"):
121
108
  source_path = os.path.join(package_npc_team_dir, filename)
@@ -243,11 +230,11 @@ def ensure_npcshrc_exists() -> str:
243
230
  npcshrc.write("# NPCSH Configuration File\n")
244
231
  npcshrc.write("export NPCSH_INITIALIZED=0\n")
245
232
  npcshrc.write("export NPCSH_DEFAULT_MODE='agent'\n")
233
+ npcshrc.write("export NPCSH_BUILD_KG=1")
246
234
  npcshrc.write("export NPCSH_CHAT_PROVIDER='ollama'\n")
247
- npcshrc.write("export NPCSH_CHAT_MODEL='llama3.2'\n")
235
+ npcshrc.write("export NPCSH_CHAT_MODEL='gemma3:4b'\n")
248
236
  npcshrc.write("export NPCSH_REASONING_PROVIDER='ollama'\n")
249
237
  npcshrc.write("export NPCSH_REASONING_MODEL='deepseek-r1'\n")
250
-
251
238
  npcshrc.write("export NPCSH_EMBEDDING_PROVIDER='ollama'\n")
252
239
  npcshrc.write("export NPCSH_EMBEDDING_MODEL='nomic-embed-text'\n")
253
240
  npcshrc.write("export NPCSH_VISION_PROVIDER='ollama'\n")
@@ -287,6 +274,92 @@ def setup_npcsh_config() -> None:
287
274
  add_npcshrc_to_shell_config()
288
275
 
289
276
 
277
+
278
# Canonical long-form flag names, in priority order: when an abbreviated flag
# is expanded, the FIRST entry here that the abbreviation prefixes wins.
CANONICAL_ARGS = [
    'model',
    'provider',
    'output_file',
    'attachments',
    'format',
    'temperature',
    'top_k',
    'top_p',
    'max_tokens',
    'messages',
    'npc',
    'team',
    'height',
    'width',
    'num_frames',
    'sprovider',
    'emodel',
    'eprovider',
    'igmodel',
    'igprovider',
    'vmodel',
    'vprovider',
    'rmodel',
    'rprovider',
    'num_npcs',
    'depth',
    'exploration',
    'creativity',
    'port',
    'cors',
    'config_dir',
    'plots_dir',
    'refresh_period',
    'lang',
]


def get_argument_help() -> Dict[str, List[str]]:
    """
    Map each canonical argument to all of its unambiguous shorthands.

    A shorthand is a proper prefix of exactly one entry in CANONICAL_ARGS.
    Ambiguous prefixes are excluded: e.g. 'm' matches 'model', 'messages'
    and 'max_tokens', so it is NOT a shorthand for any of them.

    Returns:
        Dict mapping canonical name -> list of valid shorthands, shortest
        first, e.g. {'model': ['mo', 'mod', 'mode'], 'width': ['w', 'wi', ...]}.
    """
    arg_map: Dict[str, List[str]] = {arg: [] for arg in CANONICAL_ARGS}

    for arg in CANONICAL_ARGS:
        # Try every proper prefix of this argument (never the full name).
        for i in range(1, len(arg)):
            prefix = arg[:i]
            # A prefix is a valid shorthand only if no OTHER canonical
            # argument also starts with it.
            matches = [c for c in CANONICAL_ARGS if c.startswith(prefix)]
            if len(matches) == 1 and matches[0] == arg:
                arg_map[arg].append(prefix)

    return arg_map
337
+
338
+
339
+
340
+
341
def normalize_and_expand_flags(parsed_flags: Dict[str, Any]) -> Dict[str, Any]:
    """
    Resolve possibly-abbreviated flag names to their canonical forms.

    A key already present in CANONICAL_ARGS is kept as-is. Any other key is
    treated as a prefix and expanded to the FIRST canonical argument it
    matches (list order of CANONICAL_ARGS is the priority order). Keys that
    match nothing pass through unchanged. If the same canonical argument is
    produced more than once, the last value wins and a warning is printed.
    """
    resolved: Dict[str, Any] = {}

    for flag, flag_value in parsed_flags.items():
        if flag in CANONICAL_ARGS:
            # Exact canonical name: no expansion needed.
            if flag in resolved:
                print(colored(f"Warning: Argument '{flag}' specified multiple times. Using last value.", "yellow"))
            resolved[flag] = flag_value
            continue

        # Expand the abbreviation to the first canonical name it prefixes.
        target = None
        for candidate in CANONICAL_ARGS:
            if candidate.startswith(flag):
                target = candidate
                break

        if target is None:
            # Unknown flag: keep it untouched for downstream handling.
            resolved[flag] = flag_value
        else:
            if target in resolved:
                print(colored(f"Warning: Argument '{target}' specified multiple times (via alias '{flag}'). Using last value.", "yellow"))
            resolved[target] = flag_value

    return resolved
361
+
362
+
290
363
  BASH_COMMANDS = [
291
364
  "npc",
292
365
  "npm",
@@ -973,12 +1046,13 @@ NPCSH_REASONING_PROVIDER = os.environ.get("NPCSH_REASONING_PROVIDER", "ollama")
973
1046
  NPCSH_STREAM_OUTPUT = eval(os.environ.get("NPCSH_STREAM_OUTPUT", "0")) == 1
974
1047
  NPCSH_API_URL = os.environ.get("NPCSH_API_URL", None)
975
1048
  NPCSH_SEARCH_PROVIDER = os.environ.get("NPCSH_SEARCH_PROVIDER", "duckduckgo")
976
-
1049
+ NPCSH_BUILD_KG = os.environ.get("NPCSH_BUILD_KG") == "1"
977
1050
  READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_history")
978
1051
 
979
1052
 
980
1053
 
981
1054
  def setup_readline() -> str:
1055
+ import readline
982
1056
  if readline is None:
983
1057
  return None
984
1058
  try:
@@ -1011,14 +1085,6 @@ def save_readline_history():
1011
1085
 
1012
1086
 
1013
1087
 
1014
-
1015
- from npcpy.memory.command_history import (
1016
- start_new_conversation,
1017
- )
1018
- from dataclasses import dataclass, field
1019
- from typing import Optional, List, Dict, Any, Tuple, Union
1020
- from npcpy.npc_compiler import NPC, Team
1021
- import os
1022
1088
  @dataclass
1023
1089
  class ShellState:
1024
1090
  npc: Optional[Union[NPC, str]] = None
@@ -1034,11 +1100,13 @@ class ShellState:
1034
1100
  embedding_provider: str = NPCSH_EMBEDDING_PROVIDER
1035
1101
  reasoning_model: str = NPCSH_REASONING_MODEL
1036
1102
  reasoning_provider: str = NPCSH_REASONING_PROVIDER
1103
+ search_provider: str = NPCSH_SEARCH_PROVIDER
1037
1104
  image_gen_model: str = NPCSH_IMAGE_GEN_MODEL
1038
1105
  image_gen_provider: str = NPCSH_IMAGE_GEN_PROVIDER
1039
1106
  video_gen_model: str = NPCSH_VIDEO_GEN_MODEL
1040
1107
  video_gen_provider: str = NPCSH_VIDEO_GEN_PROVIDER
1041
1108
  current_mode: str = NPCSH_DEFAULT_MODE
1109
+ build_kg: bool = NPCSH_BUILD_KG,
1042
1110
  api_key: Optional[str] = None
1043
1111
  api_url: Optional[str] = NPCSH_API_URL
1044
1112
  current_path: str = field(default_factory=os.getcwd)
@@ -1076,5 +1144,6 @@ initial_state = ShellState(
1076
1144
  image_gen_provider=NPCSH_IMAGE_GEN_PROVIDER,
1077
1145
  video_gen_model=NPCSH_VIDEO_GEN_MODEL,
1078
1146
  video_gen_provider=NPCSH_VIDEO_GEN_PROVIDER,
1147
+ build_kg=NPCSH_BUILD_KG,
1079
1148
  api_url=NPCSH_API_URL,
1080
1149
  )
npcsh/alicanto.py CHANGED
@@ -20,7 +20,10 @@ from npcpy.npc_sysenv import print_and_process_stream_with_markdown
20
20
 
21
21
 
22
22
 
23
- def generate_random_npcs(num_npcs: int, model: str, provider: str, request: str) -> List[NPC]:
23
+ def generate_random_npcs(num_npcs: int,
24
+ model: str,
25
+ provider: str,
26
+ request: str) -> List[NPC]:
24
27
  """
25
28
  Generate a diverse set of NPCs with different expertise and perspectives
26
29
  related to the research request.
@@ -135,10 +138,14 @@ def generate_random_npcs(num_npcs: int, model: str, provider: str, request: str)
135
138
 
136
139
  return npcs
137
140
 
138
- def generate_research_chain(request: str, npc: NPC, depth: int, memory: int = 3,
139
- context: str = None, model: str = None, provider: str = None,
140
- exploration_factor: float = 0.3,
141
- creativity_factor: float = 0.5) -> List[str]:
141
+ def generate_research_chain(request: str,
142
+ npc: NPC, depth: int,
143
+ memory: int = 3,
144
+ context: str = None,
145
+ model: str = None,
146
+ provider: str = None,
147
+ exploration_factor: float = 0.3,
148
+ creativity_factor: float = 0.5) -> List[str]:
142
149
  """
143
150
  Generate a chain of research thoughts from a single NPC, diving deeper with each step.
144
151
 
@@ -209,7 +216,11 @@ def format_facts_list(facts: List[str]) -> str:
209
216
  """Format a list of facts for display in a report"""
210
217
  return "\n".join([f"• {fact}" for fact in facts])
211
218
 
212
- def simulate_experiments(research: Dict[str, Any], request: str, model: str = None, provider: str = None, max_experiments: int = None) -> Dict[str, Dict[str, Any]]:
219
+ def simulate_experiments(research: Dict[str, Any],
220
+ request: str,
221
+ model: str = None,
222
+ provider: str = None,
223
+ max_experiments: int = None) -> Dict[str, Dict[str, Any]]:
213
224
  """
214
225
  Simulate thought experiments based on research findings
215
226
 
@@ -269,7 +280,11 @@ def simulate_experiments(research: Dict[str, Any], request: str, model: str = No
269
280
  simulations, thought experiments, and interdisciplinary methods.
270
281
  """
271
282
 
272
- response = get_llm_response(prompt=prompt, model=model, provider=provider, temperature=0.8, format="json")
283
+ response = get_llm_response(prompt=prompt,
284
+ model=model,
285
+ provider=provider,
286
+ temperature=0.8,
287
+ format="json")
273
288
  experiments = response.get("response", {})
274
289
 
275
290
  # Limit experiments if needed