npcsh 1.0.12__py3-none-any.whl → 1.0.13__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- npcsh/_state.py +88 -1
- npcsh/alicanto.py +22 -7
- npcsh/npcsh.py +239 -455
- npcsh/plonk.py +300 -367
- npcsh/routes.py +367 -162
- npcsh/spool.py +162 -221
- npcsh-1.0.13.dist-info/METADATA +775 -0
- npcsh-1.0.13.dist-info/RECORD +21 -0
- npcsh-1.0.12.dist-info/METADATA +0 -596
- npcsh-1.0.12.dist-info/RECORD +0 -21
- {npcsh-1.0.12.dist-info → npcsh-1.0.13.dist-info}/WHEEL +0 -0
- {npcsh-1.0.12.dist-info → npcsh-1.0.13.dist-info}/entry_points.txt +0 -0
- {npcsh-1.0.12.dist-info → npcsh-1.0.13.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.12.dist-info → npcsh-1.0.13.dist-info}/top_level.txt +0 -0
npcsh/_state.py
CHANGED
@@ -11,7 +11,7 @@ from termcolor import colored
 
 
 
-from typing import Dict, List
+from typing import Dict, List, Any
 import subprocess
 import termios
 import tty
@@ -287,6 +287,92 @@ def setup_npcsh_config() -> None:
     add_npcshrc_to_shell_config()
 
 
+
+CANONICAL_ARGS = [
+    'model',
+    'provider',
+    'output_file',
+    'attachments',
+    'format',
+    'temperature',
+    'top_k',
+    'top_p',
+    'max_tokens',
+    'messages',
+    'npc',
+    'team',
+    'height',
+    'width',
+    'num_frames',
+    'sprovider',
+    'emodel',
+    'eprovider',
+    'igmodel',
+    'igprovider',
+    'vmodel',
+    'vprovider',
+    'rmodel',
+    'rprovider',
+    'num_npcs',
+    'depth',
+    'exploration',
+    'creativity',
+    'port',
+    'cors',
+    'config_dir',
+    'plots_dir',
+    'refresh_period',
+    'lang',
+]
+
+def get_argument_help() -> Dict[str, List[str]]:
+    """
+    Analyzes CANONICAL_ARGS to generate a map of canonical arguments
+    to all their possible shorthands.
+
+    Returns -> {'model': ['m', 'mo', 'mod', 'mode'], 'provider': ['p', 'pr', ...]}
+    """
+    arg_map = {arg: [] for arg in CANONICAL_ARGS}
+
+    for arg in CANONICAL_ARGS:
+        # Generate all possible prefixes for this argument
+        for i in range(1, len(arg)):
+            prefix = arg[:i]
+
+            # Check if this prefix is an unambiguous shorthand
+            matches = [canonical for canonical in CANONICAL_ARGS if canonical.startswith(prefix)]
+
+            # If this prefix uniquely resolves to our current argument, it's a valid shorthand
+            if len(matches) == 1 and matches[0] == arg:
+                arg_map[arg].append(prefix)
+
+    return arg_map
+
+
+
+
+def normalize_and_expand_flags(parsed_flags: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    Expands argument aliases based on the priority order of CANONICAL_ARGS.
+    The first matching prefix in the list wins.
+    """
+    normalized = {}
+    for key, value in parsed_flags.items():
+        if key in CANONICAL_ARGS:
+            if key in normalized:
+                print(colored(f"Warning: Argument '{key}' specified multiple times. Using last value.", "yellow"))
+            normalized[key] = value
+            continue
+        first_match = next((arg for arg in CANONICAL_ARGS if arg.startswith(key)), None)
+        if first_match:
+            if first_match in normalized:
+                print(colored(f"Warning: Argument '{first_match}' specified multiple times (via alias '{key}'). Using last value.", "yellow"))
+            normalized[first_match] = value
+        else:
+            normalized[key] = value
+    return normalized
+
+
 BASH_COMMANDS = [
     "npc",
     "npm",
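To make the new alias handling concrete, here is a minimal usage sketch (assuming the helpers are imported directly from npcsh._state; the flag values are illustrative):

    from npcsh._state import get_argument_help, normalize_and_expand_flags

    # 'mo' is the shortest unambiguous prefix of 'model'; a bare 'm' also
    # matches 'max_tokens' and 'messages', so the help map omits it.
    print(get_argument_help()['model'])   # ['mo', 'mod', 'mode']

    # normalize_and_expand_flags is more permissive: an ambiguous prefix such
    # as 'm' resolves to the first match in CANONICAL_ARGS, i.e. 'model'.
    print(normalize_and_expand_flags({'m': 'llama3.2', 'pr': 'ollama'}))
    # -> {'model': 'llama3.2', 'provider': 'ollama'}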
@@ -1034,6 +1120,7 @@ class ShellState:
     embedding_provider: str = NPCSH_EMBEDDING_PROVIDER
     reasoning_model: str = NPCSH_REASONING_MODEL
     reasoning_provider: str = NPCSH_REASONING_PROVIDER
+    search_provider: str = NPCSH_SEARCH_PROVIDER
     image_gen_model: str = NPCSH_IMAGE_GEN_MODEL
     image_gen_provider: str = NPCSH_IMAGE_GEN_PROVIDER
     video_gen_model: str = NPCSH_VIDEO_GEN_MODEL
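Since every visible field carries a default, the new attribute should be overridable like its siblings; a hypothetical sketch, assuming ShellState is a dataclass as the annotated-default field syntax suggests:

    from npcsh._state import ShellState

    # Hypothetical override; left unset, the field falls back to the
    # NPCSH_SEARCH_PROVIDER value from the npcsh configuration.
    state = ShellState(search_provider="duckduckgo")  # value illustrative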
npcsh/alicanto.py
CHANGED
@@ -20,7 +20,10 @@ from npcpy.npc_sysenv import print_and_process_stream_with_markdown
 
 
 
-def generate_random_npcs(num_npcs: int, model: str, provider: str, request: str) -> List[NPC]:
+def generate_random_npcs(num_npcs: int,
+                         model: str,
+                         provider: str,
+                         request: str) -> List[NPC]:
     """
     Generate a diverse set of NPCs with different expertise and perspectives
     related to the research request.
@@ -135,10 +138,14 @@ def generate_random_npcs(num_npcs: int, model: str, provider: str, request: str)
 
     return npcs
 
-def generate_research_chain(request: str,
-
-
-
+def generate_research_chain(request: str,
+                            npc: NPC, depth: int,
+                            memory: int = 3,
+                            context: str = None,
+                            model: str = None,
+                            provider: str = None,
+                            exploration_factor: float = 0.3,
+                            creativity_factor: float = 0.5) -> List[str]:
     """
     Generate a chain of research thoughts from a single NPC, diving deeper with each step.
 
@@ -209,7 +216,11 @@ def format_facts_list(facts: List[str]) -> str:
     """Format a list of facts for display in a report"""
     return "\n".join([f"• {fact}" for fact in facts])
 
-def simulate_experiments(research: Dict[str, Any],
+def simulate_experiments(research: Dict[str, Any],
+                         request: str,
+                         model: str = None,
+                         provider: str = None,
+                         max_experiments: int = None) -> Dict[str, Dict[str, Any]]:
     """
     Simulate thought experiments based on research findings
 
@@ -269,7 +280,11 @@ def simulate_experiments(research: Dict[str, Any], request: str, model: str = No
     simulations, thought experiments, and interdisciplinary methods.
     """
 
-    response = get_llm_response(prompt=prompt,
+    response = get_llm_response(prompt=prompt,
+                                model=model,
+                                provider=provider,
+                                temperature=0.8,
+                                format="json")
     experiments = response.get("response", {})
 
     # Limit experiments if needed
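The rewrapped call also shows the pattern this release appears to standardize on: the caller's model and provider are passed explicitly to get_llm_response, and format="json" makes the parsed object available under the "response" key (as the context line above shows). A standalone sketch, with an assumed import path and illustrative values:

    from npcpy.llm_funcs import get_llm_response  # import path assumed

    response = get_llm_response(
        prompt="Propose one thought experiment about memory decay.",
        model="llama3.2",    # illustrative; None falls back to configured defaults
        provider="ollama",   # illustrative
        temperature=0.8,
        format="json",       # parsed JSON is returned under the "response" key
    )
    experiments = response.get("response", {})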