npcsh 1.0.33__py3-none-any.whl → 1.0.35__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- npcsh/_state.py +214 -42
- npcsh/alicanto.py +215 -131
- npcsh/corca.py +17 -11
- npcsh/routes.py +9 -2
- {npcsh-1.0.33.dist-info → npcsh-1.0.35.dist-info}/METADATA +1 -1
- {npcsh-1.0.33.dist-info → npcsh-1.0.35.dist-info}/RECORD +36 -36
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/bash_executer.jinx +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/image_generation.jinx +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/internet_search.jinx +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/python_executor.jinx +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/screen_cap.jinx +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.0.33.data → npcsh-1.0.35.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.0.33.dist-info → npcsh-1.0.35.dist-info}/WHEEL +0 -0
- {npcsh-1.0.33.dist-info → npcsh-1.0.35.dist-info}/entry_points.txt +0 -0
- {npcsh-1.0.33.dist-info → npcsh-1.0.35.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.33.dist-info → npcsh-1.0.35.dist-info}/top_level.txt +0 -0
npcsh/_state.py
CHANGED
@@ -2013,6 +2013,9 @@ def execute_slash_command(command: str,
 
     return state, colored(f"Unknown slash command, jinx, or NPC: {command_name}", "red")
 
+
+
+
 def process_pipeline_command(
     cmd_segment: str,
     stdin_input: Optional[str],
@@ -2454,24 +2457,115 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
 
 
 
+
+from npcpy.memory.memory_processor import MemoryApprovalQueue, MemoryItem, memory_approval_ui
+from npcpy.ft.memory_trainer import MemoryTrainer
+from npcpy.llm_funcs import get_facts
+
+_memory_queue = None
+
+def get_memory_queue(command_history):
+    global _memory_queue
+    if _memory_queue is None:
+        _memory_queue = MemoryApprovalQueue(command_history)
+        _memory_queue.start_background_processing()
+    return _memory_queue
+
+def format_memory_context(memory_examples):
+    if not memory_examples:
+        return ""
+
+    context_parts = []
+
+    approved_examples = memory_examples.get("approved", [])
+    rejected_examples = memory_examples.get("rejected", [])
+
+    if approved_examples:
+        context_parts.append("EXAMPLES OF GOOD MEMORIES:")
+        for ex in approved_examples[:5]:
+            final = ex.get("final_memory") or ex.get("initial_memory")
+            context_parts.append(f"- {final}")
+
+    if rejected_examples:
+        context_parts.append("\nEXAMPLES OF POOR MEMORIES TO AVOID:")
+        for ex in rejected_examples[:3]:
+            context_parts.append(f"- {ex.get('initial_memory')}")
+
+    if context_parts:
+        context_parts.append("\nLearn from these examples to generate similar high-quality memories.")
+        return "\n".join(context_parts)
+
+    return ""
+
+def process_memory_approvals(command_history, memory_queue):
+    pending_memories = memory_queue.get_approval_batch(max_items=5)
+
+    if not pending_memories:
+        return
+
+    print(f"\n🧠 Processing {len(pending_memories)} memories...")
+
+    try:
+        trainer = MemoryTrainer()
+        auto_processed = []
+        need_human_review = []
+
+        for memory in pending_memories:
+            result = trainer.auto_approve_memory(
+                memory['content'],
+                memory['context'],
+                confidence_threshold=0.85
+            )
+
+            if result['auto_processed']:
+                auto_processed.append((memory, result))
+            else:
+                need_human_review.append(memory)
+
+        for memory, result in auto_processed:
+            command_history.update_memory_status(
+                memory['memory_id'],
+                result['action']
+            )
+            print(f"  Auto-{result['action']}: {memory['content'][:50]}... (confidence: {result['confidence']:.2f})")
+
+        if need_human_review:
+            approvals = memory_approval_ui(need_human_review)
+
+            for approval in approvals:
+                command_history.update_memory_status(
+                    approval['memory_id'],
+                    approval['decision'],
+                    approval.get('final_memory')
+                )
+
+    except Exception as e:
+        print(f"Auto-approval failed: {e}")
+        approvals = memory_approval_ui(pending_memories)
+
+        for approval in approvals:
+            command_history.update_memory_status(
+                approval['memory_id'],
+                approval['decision'],
+                approval.get('final_memory')
+            )
+
 def process_result(
     user_input: str,
     result_state: ShellState,
     output: Any,
-    command_history: CommandHistory,
-
-
+    command_history: CommandHistory,
 ):
-
     team_name = result_state.team.name if result_state.team else "__none__"
     npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
 
-
     active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
         name="default",
         model=result_state.chat_model,
         provider=result_state.chat_provider,
-        db_conn=command_history.engine
+        db_conn=command_history.engine
+    )
+
     save_conversation_message(
         command_history,
         result_state.conversation_id,
@@ -2492,23 +2586,27 @@ def process_result(
     provider_for_stream = output.get('provider', active_npc.provider) if isinstance(output, dict) else active_npc.provider
 
     print('\n')
-    if user_input =='/help':
+    if user_input == '/help':
         render_markdown(output.get('output'))
     elif result_state.stream_output:
-
-
-
-
-
+        final_output_str = print_and_process_stream_with_markdown(
+            output_content,
+            model_for_stream,
+            provider_for_stream,
+            show=True
+        )
     elif output_content is not None:
         final_output_str = str(output_content)
         render_markdown(final_output_str)
 
     if final_output_str:
         if result_state.messages:
-            if result_state.messages[-1].get("role") != "assistant":
-                result_state.messages.append({
-
+            if not result_state.messages or result_state.messages[-1].get("role") != "assistant":
+                result_state.messages.append({
+                    "role": "assistant",
+                    "content": final_output_str
+                })
+
         save_conversation_message(
             command_history,
             result_state.conversation_id,
@@ -2524,80 +2622,154 @@ def process_result(
         conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
         engine = command_history.engine
 
+        memory_examples = command_history.get_memory_examples_for_context(
+            npc=npc_name,
+            team=team_name,
+            directory_path=result_state.current_path
+        )
+
+        memory_context = format_memory_context(memory_examples)
+
+        approved_facts = []
+        try:
+            facts = get_facts(
+                conversation_turn_text,
+                model=active_npc.model,
+                provider=active_npc.provider,
+                npc=active_npc,
+                context=memory_context
+            )
+
+            if facts:
+                memories_for_approval = []
+                for i, fact in enumerate(facts):
+                    memories_for_approval.append({
+                        "memory_id": f"temp_{i}",
+                        "content": fact['statement'],
+                        "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
+                        "npc": npc_name,
+                        "fact_data": fact
+                    })
+
+                approvals = memory_approval_ui(memories_for_approval)
+
+                for approval in approvals:
+                    fact_data = next(m['fact_data'] for m in memories_for_approval
+                                     if m['memory_id'] == approval['memory_id'])
+
+                    command_history.add_memory_to_database(
+                        message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
+                        conversation_id=result_state.conversation_id,
+                        npc=npc_name,
+                        team=team_name,
+                        directory_path=result_state.current_path,
+                        initial_memory=fact_data['statement'],
+                        status=approval['decision'],
+                        model=active_npc.model,
+                        provider=active_npc.provider,
+                        final_memory=approval.get('final_memory')
+                    )
+
+                    if approval['decision'] in ['human-approved', 'human-edited']:
+                        approved_fact = {
+                            'statement': approval.get('final_memory') or fact_data['statement'],
+                            'source_text': fact_data.get('source_text', ''),
+                            'type': fact_data.get('type', 'explicit'),
+                            'generation': 0
+                        }
+                        approved_facts.append(approved_fact)
+
+        except Exception as e:
+            print(colored(f"Memory generation error: {e}", "yellow"))
 
-        if result_state.build_kg:
-            import pdb
-            pdb.set_trace()
+        if result_state.build_kg and approved_facts:
             try:
                 if not should_skip_kg_processing(user_input, final_output_str):
                     npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
                     evolved_npc_kg, _ = kg_evolve_incremental(
                         existing_kg=npc_kg,
-
+                        new_facts=approved_facts,
                         model=active_npc.model,
                         provider=active_npc.provider,
+                        npc=active_npc,
                         get_concepts=True,
-                        link_concepts_facts
-                        link_concepts_concepts
-                        link_facts_facts
+                        link_concepts_facts=False,
+                        link_concepts_concepts=False,
+                        link_facts_facts=False,
+                    )
+                    save_kg_to_db(
+                        engine,
+                        evolved_npc_kg,
+                        team_name,
+                        npc_name,
+                        result_state.current_path
                     )
-                    save_kg_to_db(engine,
-                                  evolved_npc_kg,
-                                  team_name,
-                                  npc_name,
-                                  result_state.current_path)
             except Exception as e:
                 print(colored(f"Error during real-time KG evolution: {e}", "red"))
 
-
         result_state.turn_count += 1
 
-        if result_state.turn_count
+        if result_state.turn_count % 10 == 0:
             print(colored("\nChecking for potential team improvements...", "cyan"))
             try:
-                summary = breathe(messages=result_state.messages[-20:],
-                                  npc=active_npc)
+                summary = breathe(messages=result_state.messages[-20:], npc=active_npc)
                 characterization = summary.get('output')
 
                 if characterization and result_state.team:
-
-
+                    team_ctx_path = get_team_ctx_path(result_state.team.team_path)
+                    if not team_ctx_path:
+                        team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+
                     ctx_data = {}
                     if os.path.exists(team_ctx_path):
                         with open(team_ctx_path, 'r') as f:
                             ctx_data = yaml.safe_load(f) or {}
+
                     current_context = ctx_data.get('context', '')
 
                     prompt = f"""Based on this characterization: {characterization},
-
 suggest changes (additions, deletions, edits) to the team's context.
 Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.
 
 Current Context: "{current_context}".
 
-Respond with JSON: {{"suggestion": "Your sentence."
-
-                    response = get_llm_response(
-
-
+Respond with JSON: {{"suggestion": "Your sentence."}}"""
+
+                    response = get_llm_response(
+                        prompt,
+                        npc=active_npc,
+                        format="json"
+                    )
                     suggestion = response.get("response", {}).get("suggestion")
 
                     if suggestion:
                         new_context = (current_context + " " + suggestion).strip()
-                        print(colored(f"{
+                        print(colored(f"{npc_name} suggests updating team context:", "yellow"))
                         print(f"  - OLD: {current_context}\n  + NEW: {new_context}")
-
+
+                        choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
+
+                        if choice == 'y':
                             ctx_data['context'] = new_context
                             with open(team_ctx_path, 'w') as f:
                                 yaml.dump(ctx_data, f)
                             print(colored("Team context updated.", "green"))
+                        elif choice == 'e':
+                            edited_context = input(f"Edit context [{new_context}]: ").strip()
+                            if edited_context:
+                                ctx_data['context'] = edited_context
+                            else:
+                                ctx_data['context'] = new_context
+                            with open(team_ctx_path, 'w') as f:
+                                yaml.dump(ctx_data, f)
+                            print(colored("Team context updated with edits.", "green"))
                         else:
                             print("Suggestion declined.")
             except Exception as e:
                 import traceback
                 print(colored(f"Could not generate team suggestions: {e}", "yellow"))
                 traceback.print_exc()
-
+
 initial_state = ShellState(
     conversation_id=start_new_conversation(),
     stream_output=NPCSH_STREAM_OUTPUT,
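
The hunks above wire a human-in-the-loop memory pipeline into the shell: facts are extracted from each turn with get_facts, auto-triaged by MemoryTrainer at a 0.85 confidence threshold, and anything uncertain is routed to the interactive memory_approval_ui. A minimal sketch of how the new module-level helpers compose, assuming an existing CommandHistory instance (the driver function itself is hypothetical, not part of the package):

    # Hypothetical driver for the memory helpers added in 1.0.35.
    from npcsh._state import get_memory_queue, process_memory_approvals

    def drain_pending_memories(command_history):
        # get_memory_queue lazily creates a singleton MemoryApprovalQueue
        # and starts its background processing thread on first use.
        queue = get_memory_queue(command_history)

        # process_memory_approvals pulls up to 5 pending items, lets
        # MemoryTrainer auto-approve/reject at >= 0.85 confidence, and
        # falls back to memory_approval_ui for the rest.
        process_memory_approvals(command_history, queue)
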
npcsh/alicanto.py
CHANGED
@@ -133,23 +133,7 @@ def load_and_combine_datasets() -> pd.DataFrame:
     except Exception as e:
         print(f"Failed to load CShorten/ML-ArXiv-Papers: {e}")
 
-
-        astro_papers = load_dataset("ashishkgpian/astrorag_papers", split="train")
-        for paper in astro_papers:
-            all_papers.append({
-                'title': paper.get('title', ''),
-                'abstract': paper.get('abstract', ''),
-                'authors': paper.get('authors', []),
-                'year': paper.get('year', None),
-                'venue': paper.get('venue', ''),
-                'url': paper.get('url', ''),
-                'paperId': paper.get('id', ''),
-                'citationCount': 0,
-                'source': 'astrorag'
-            })
-    except Exception as e:
-        print(f"Failed to load ashishkgpian/astrorag_papers: {e}")
-
+
     df = pd.DataFrame(all_papers)
     df = df.dropna(subset=['title', 'abstract'])
     df = df[df['abstract'].str.len() > 50]
@@ -172,7 +156,7 @@ def initialize_dataset_search():
     import time
 
 LAST_S2_REQUEST_TIME = 0
-S2_RATE_LIMIT_DELAY =
+S2_RATE_LIMIT_DELAY = 30
 
 def search_semantic_scholar(query: str, limit: int = 10) -> List[Dict[str, Any]]:
     global LAST_S2_REQUEST_TIME
@@ -186,8 +170,8 @@ def search_semantic_scholar(query: str, limit: int = 10) -> List[Dict[str, Any]]
 
     if time_since_last < S2_RATE_LIMIT_DELAY:
         sleep_time = S2_RATE_LIMIT_DELAY - time_since_last
-        print(f"Rate limiting:
-
+        print(f"Rate limiting: still need {sleep_time:.2f}s before S2 request")
+        return None
 
     LAST_S2_REQUEST_TIME = time.time()
 
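
Worth noting in this hunk: rather than sleeping out the remaining delay, search_semantic_scholar now returns None when a request arrives inside the 30-second window (S2_RATE_LIMIT_DELAY), so callers must treat None as "rate limited, try again later". A self-contained sketch of the same non-blocking guard pattern (names here are illustrative, not the package's):

    import time

    LAST_REQUEST_TIME = 0.0
    RATE_LIMIT_DELAY = 30  # seconds, matching S2_RATE_LIMIT_DELAY in 1.0.35

    def rate_limited_call(fn):
        global LAST_REQUEST_TIME
        elapsed = time.time() - LAST_REQUEST_TIME
        if elapsed < RATE_LIMIT_DELAY:
            # Non-blocking: signal the caller instead of sleeping.
            return None
        LAST_REQUEST_TIME = time.time()
        return fn()
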
@@ -198,10 +182,11 @@ def search_semantic_scholar(query: str, limit: int = 10) -> List[Dict[str, Any]]
         "limit": limit,
         "fields": "title,abstract,authors,year,citationCount,url,tldr"
     }
-
+    print('Semantic SCholar calls')
     try:
         response = requests.get(url, headers=headers, params=params,
                                 timeout=30)
+        print('semantic scholar response')
         response.raise_for_status()
         return response.json().get('data', [])
     except requests.exceptions.RequestException as e:
@@ -605,7 +590,7 @@ Do not use seaborn. On matplotlib plots, do not use grids or titles.
     all_actions = []
     all_outcomes = []
 
-    for micro_step in range(
+    for micro_step in range(11):
         print(f"\n--- Micro-step {micro_step + 1}/4 ---")
 
         if micro_step == 0:
@@ -823,20 +808,24 @@ def format_paper_as_latex(paper: Paper, authors: List[str]) -> str:
 \\end{{document}}
 """
 
-
-
 def alicanto(
     query: str,
     num_agents: int = 3,
     max_steps: int = 10,
     model: str = NPCSH_CHAT_MODEL,
     provider: str = NPCSH_CHAT_PROVIDER,
+    skip_research: bool = True,
     **kwargs
 ) -> None:
 
     print("=== ALICANTO RESEARCH SYSTEM STARTING ===")
     print(f"Query: {query}")
-
+
+    if skip_research:
+        print("SKIPPING RESEARCH - GOING DIRECTLY TO PAPER WRITING")
+    else:
+        print(f"Agents: {num_agents}, Max steps per agent: {max_steps}")
+
     print(f"Model: {model}, Provider: {provider}")
 
     def wander_wrapper_coordinator(problem_description: str) -> str:
@@ -865,9 +854,14 @@ def alicanto(
     ]
     )
 
-
-
-
+    messages = []
+    summarized_history = []
+    file_provenance = {}
+
+    if not skip_research:
+        print("\n--- Step 1: Generating hypotheses and personas ---")
+
+        one_shot_example_hypotheses = """
     "example_input": "Investigate the impact of quantum annealing on protein folding.",
     "example_output": {
         "hypotheses": [
@@ -877,7 +871,7 @@ def alicanto(
         ]
     }
 """
-
+        hypotheses_prompt = f"""Based on the following research topic, generate a list of {num_agents} distinct, specific, and empirically testable hypotheses.
 
 TOPIC: "{query}"
 
@@ -888,73 +882,73 @@ Here is an example of the expected input and output format:
 
 Return ONLY the JSON object.
 """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    query,
-    num_agents,
-    model,
-    provider,
-    alicanto_coordinator
-    )
-
-    if not personas:
-        print("ERROR: No personas generated")
-        return
-
-    print(f"Generated {len(personas)} personas:")
-    for i, p in enumerate(personas):
-        print(f"  {i+1}. {p.get('name')}: {p.get('persona')}")
-
-    print("\n--- Step 2: Delegating hypotheses to Sub-Agents for serial execution ---")
-
-    all_traces = []
-    for i, hypo in enumerate(hypotheses):
-        persona = personas[i % len(personas)]
-        print(f"\nStarting sub-agent {i+1}/{len(hypotheses)}")
-        trace = sub_agent_trace(
-            hypo,
-            persona,
+
+        print("Generating hypotheses...")
+        response = get_llm_response(
+            hypotheses_prompt,
+            model=model,
+            provider=provider,
+            npc=alicanto_coordinator,
+            format='json'
+        )
+
+        if not response or not response.get('response'):
+            print("ERROR: Failed to get hypotheses response")
+            return
+
+        hypotheses = response.get('response').get('hypotheses')
+        if not hypotheses:
+            print("ERROR: No hypotheses generated")
+            return
+
+        print(f"Generated {len(hypotheses)} hypotheses:")
+        for i, h in enumerate(hypotheses):
+            print(f"  {i+1}. {h}")
+
+        print("\nGenerating agent personas...")
+        personas = generate_sub_agent_personas(
             query,
+            num_agents,
             model,
             provider,
-
+            alicanto_coordinator
         )
-
-
-
-
-
-
+
+        if not personas:
+            print("ERROR: No personas generated")
+            return
+
+        print(f"Generated {len(personas)} personas:")
+        for i, p in enumerate(personas):
+            print(f"  {i+1}. {p.get('name')}: {p.get('persona')}")
 
-
-
-
-
-
-
+        print("\n--- Step 2: Delegating hypotheses to Sub-Agents for serial execution ---")
+
+        all_traces = []
+        for i, hypo in enumerate(hypotheses):
+            persona = personas[i % len(personas)]
+            print(f"\nStarting sub-agent {i+1}/{len(hypotheses)}")
+            trace = sub_agent_trace(
+                hypo,
+                persona,
+                query,
+                model,
+                provider,
+                max_steps
+            )
+            all_traces.append(trace)
+            print(f"Sub-agent {i+1} completed. Success: {trace.was_successful}")
+
+        print(f"\nAll sub-agents completed. Saving traces...")
+        save_trace_for_training(all_traces)
+        compressed_research = compress_traces_for_synthesis(all_traces, model, provider, alicanto_coordinator)
+
+        print("\n--- Step 3: Creating initial paper structure ---")
+
+        author_list = [trace.agent_name for trace in all_traces]
+        author_string = ", ".join(author_list)
+
+        initial_latex = f"""\\documentclass{{article}}
 \\title{{% TODO: TITLE}}
 \\author{{{author_string}}}
 \\date{{\\today}}
@@ -979,59 +973,106 @@ Return ONLY the JSON object.
 
 \\end{{document}}"""
 
-
+        create_file("paper.tex", initial_latex)
+    else:
+        print("\n--- Skipping research phase - loading existing data ---")
+
+        if os.path.exists("paper.tex"):
+            print("Found existing paper.tex")
+        else:
+            print("No existing paper.tex found, creating basic template...")
+            basic_latex = f"""\\documentclass{{article}}
+\\title{{{query.title()}}}
+\\author{{Research Team}}
+\\date{{\\today}}
+\\begin{{document}}
+\\maketitle
+
+\\begin{{abstract}}
+% TODO: ABSTRACT
+\\end{{abstract}}
+
+\\section{{Introduction}}
+% TODO: INTRODUCTION
+
+\\section{{Methods}}
+% TODO: METHODS
+
+\\section{{Results}}
+% TODO: RESULTS
+
+\\section{{Discussion}}
+% TODO: DISCUSSION
+
+\\end{{document}}"""
+            create_file("paper.tex", basic_latex)
+
+        compressed_research = f"Research topic: {query}. Previous research data should be available in local files."
 
     print("\n--- Step 4: Iterative paper writing ---")
 
-
-
-    for section_round in range(len(todo_sections)):
+    for section_round in range(25):
         print(f"\n--- Section Round {section_round + 1} ---")
 
-
-        sections_status = {section: "EMPTY" if f"% TODO: {section}" in current_paper else "COMPLETE"
-                           for section in todo_sections}
-
-        print(f"Section status: {sections_status}")
-
-        # Find next section to work on
-        next_section = None
-        for section in todo_sections:
-            if sections_status[section] == "EMPTY":
-                next_section = section
-                break
-
-        if not next_section:
-            print("All sections complete")
-            break
+        fs_before = get_filesystem_state()
 
-
+        provenance_summary = []
+        for filename, prov in file_provenance.items():
+            history = "; ".join([f"Step {step}: {action} ({checksum}) - {changes}" for step, action, checksum, changes in prov.step_history])
+            provenance_summary.append(f"{filename}: {history}")
 
-
-
+        history_str = "\n".join(summarized_history)
+        current_paper = read_file("paper.tex")
 
-        initial_prompt = f"""You are writing a research paper about: "{query}"
+        initial_prompt = f"""You are writing a research paper about: "{query}" located at ./paper.tex
 
 Research data from sub-agents: {compressed_research}
 
 Current paper content:
 {current_paper}
 
-
+FILE PROVENANCE HISTORY:
+{chr(10).join(provenance_summary)}
+
+COMPLETE ACTION HISTORY:
+BEGIN HISTORY
+{history_str}
+END HISTORY
+
+Ensure the paper contains the following sections and that they have a coherent narrative by the end of your work.
+work iteratively, so do not worry about making it all in one step.
+
+SECTIONS: Title, Abstract, Intro, Methods, Results, Discussion, Conclusions,
+
+You may choose to add subsections as you wish, but do not do so for the introduction.
+
+You must ensure citations are properly included in your results and cited with the \cite{{author_year}} format , keeping in mind
+to also start and maintain a .bib file separate from any currently provided. be sure to reference this as well.
+
+Your title short be short, informative, and eye-catching.
+Every section and paragraph should be written in a formal academic style, motivating pieces of information and ensuring
+each sentence must flow well into the last, and the paper must have a strong motivation with substantial literature review to establish
+the need for the investigation. The paper should focus only on 1-2 major findings, with 5-10 minor findings detailed in the conclusions.
+The discussion should primarily focus on commenting on how previous work may be re-interpreted in light of your findings. Do not simply splatter text
+into a discussion but be thoughtful and helpful. The discussion should connect to broader works and discuss specifics of those works. Do not simply regurgitate the
 
 Use replace_in_file to update the paper. Use search_papers or search_web if you need more information.
 
-
+Write 2-4 paragraphs of substantial academic content. Include figures and tables based on the results of the experiments.
 
-Available tools: replace_in_file, read_file, search_papers, search_web"""
+Available tools: replace_in_file, read_file, search_papers, search_web, list_files"""
+
+        all_thoughts = []
+        all_actions = []
+        all_outcomes = []
 
-        for micro_step in range(5):
-            print(f"\n--- Micro-step {micro_step + 1}/5
+        for micro_step in range(5):
+            print(f"\n--- Micro-step {micro_step + 1}/5 ---")
 
             if micro_step == 0:
                 current_prompt = initial_prompt
             else:
-                current_prompt = f"
+                current_prompt = f"continue "
 
             try:
                 response = alicanto_coordinator.get_llm_response(
@@ -1039,11 +1080,54 @@ Available tools: replace_in_file, read_file, search_papers, search_web"""
                 messages=messages,
                 auto_process_tool_calls=True
             )
+            print('response: ', response['response'])
+            print('tool calls: ', response['tool_calls'])
+            print('tool results: ', response['tool_results'])
+
+            messages = response.get('messages', [])
+
+            thought = response.get('response') or ""  # Handle None case
+            all_thoughts.append(thought)
+
+            if response.get('tool_results'):
+                tool_results = response['tool_results']
+                action_str = ", ".join([f"{res['tool_name']}({res.get('arguments', {})})" for res in tool_results])
+                outcomes = [str(res.get('result', '')) for res in tool_results]
+                outcome_str = " | ".join(outcomes)
+                all_actions.append(action_str)
+                all_outcomes.append(outcome_str)
+
             except (Timeout, ContextWindowExceededError):
                 break
-
-
-
+            except Exception as e:
+                print(f"Error in micro-step: {e}")
+                break
+
+        fs_after = get_filesystem_state()
+
+        combined_thought = " ".join(filter(None, all_thoughts))  # Filter out None values
+        combined_action = " | ".join(filter(None, all_actions))
+        combined_outcome = " | ".join(filter(None, all_outcomes))
+
+        print(f"\nCOMPRESSING WRITING SESSION...")
+        print(f"THOUGHTS: {len(all_thoughts)} messages")
+        print(f"ACTIONS: {len(all_actions)} tool uses")
+
+        summary = summarize_step(combined_thought,
+                                 combined_action,
+                                 combined_outcome,
+                                 fs_before,
+                                 fs_after,
+                                 file_provenance,
+                                 section_round + 1,
+                                 model,
+                                 provider,
+                                 alicanto_coordinator)
+
+        print(f"SUMMARY: {summary.get('summary', 'No summary')}")
+        print(f"NEXT STEP: {summary.get('next_step', 'No next step')}")
+
+        summarized_history.append(f"Round {section_round + 1}: {summary.get('summary')} ")
 
     final_paper = read_file("paper.tex")
     print(f"\n{'='*60}")
@@ -1051,8 +1135,9 @@ Available tools: replace_in_file, read_file, search_papers, search_web"""
     print("="*60)
     print(final_paper)
     print(f"\nPaper saved as paper.tex")
-
-
+
+
+
 def main():
     parser = argparse.ArgumentParser(description="Alicanto Multi-Agent Research System")
     parser.add_argument("topic", help="Research topic to investigate")
@@ -1060,6 +1145,7 @@ def main():
    parser.add_argument("--max-steps", type=int, default=10, help="Maximum steps for each sub-agent.")
    parser.add_argument("--model", default=NPCSH_CHAT_MODEL, help="LLM model to use")
    parser.add_argument("--provider", default=NPCSH_CHAT_PROVIDER, help="LLM provider to use")
+    parser.add_argument("--skip-research", action="store_true", help="Skip research phase and go directly to paper writing")
 
     args = parser.parse_args()
 
@@ -1068,8 +1154,6 @@ def main():
         num_agents=args.num_agents,
         max_steps=args.max_steps,
         model=args.model,
-        provider=args.provider
-
-
-if __name__ == "__main__":
-    main()
+        provider=args.provider,
+        skip_research=args.skip_research
+    )
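
The alicanto() entry point now takes skip_research (defaulting to True in this release), which bypasses hypothesis generation and the sub-agent research traces and goes straight to iterative paper writing against an existing or freshly templated paper.tex. A minimal invocation sketch, assuming npcsh 1.0.35 is installed (the query string is the illustrative example from the package's own prompt):

    from npcsh.alicanto import alicanto

    # Full pipeline: hypotheses -> personas -> sub-agent traces -> paper.
    alicanto("Investigate the impact of quantum annealing on protein folding.",
             num_agents=3, max_steps=10, skip_research=False)

    # Writing-only pass: reuses ./paper.tex (or creates a TODO template).
    alicanto("Investigate the impact of quantum annealing on protein folding.",
             skip_research=True)

From the CLI, the same switch is exposed as --skip-research on the alicanto argument parser.
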
npcsh/corca.py
CHANGED
@@ -250,8 +250,6 @@ def process_mcp_stream(stream_response, active_npc):
 
 
 
-
-
 def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
     mcp_tools_for_llm = []
 
@@ -272,6 +270,13 @@ def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
 
     active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
 
+    if not state.messages or not any("working directory" in msg.get("content", "").lower() for msg in state.messages):
+        context_message = {
+            "role": "system",
+            "content": f"You are currently operating in the directory: {state.current_path}. All file operations should be relative to this location unless explicitly specified otherwise."
+        }
+        state.messages.insert(0, context_message)
+
     if len(state.messages) > 50:
         compressed_state = active_npc.compress_planning_state(state.messages)
         state.messages = [{"role": "system", "content": compressed_state}]
@@ -328,7 +333,6 @@ def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
         team=state.team
     )
 
-
     stream_response = response_dict.get('response')
     messages = response_dict.get('messages', state.messages)
 
@@ -1065,15 +1069,17 @@ def enter_corca_mode(command: str, **kwargs):
 
             if not user_input:
                 continue
-
-
-
-            process_corca_result(user_input,
-                                 state,
-                                 output,
-                                 command_history,
-                                 )
+            try:
+                state, output = execute_command_corca(user_input, state, command_history)
 
+                process_corca_result(user_input,
+                                     state,
+                                     output,
+                                     command_history,
+                                     )
+            except Exception as e:
+                print(f'An Exception has occurred {e}')
+
         except KeyboardInterrupt:
             print()
             continue
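
Two behavioral changes land in corca: execute_command_corca now seeds the conversation with a system message naming the working directory (skipped if any existing message already mentions "working directory"), and the REPL loop in enter_corca_mode wraps command execution in a try/except so a failing command no longer kills the session. A sketch of the injection guard in isolation, with a plain messages list standing in for the real ShellState (the helper function itself is hypothetical):

    def ensure_directory_context(messages, current_path):
        # Mirrors the 1.0.35 guard: inject once, skip if any message
        # already mentions the working directory.
        if not messages or not any(
            "working directory" in m.get("content", "").lower() for m in messages
        ):
            messages.insert(0, {
                "role": "system",
                "content": f"You are currently operating in the directory: {current_path}. "
                           "All file operations should be relative to this location "
                           "unless explicitly specified otherwise.",
            })
        return messages
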
npcsh/routes.py
CHANGED
@@ -232,7 +232,13 @@ def compile_handler(command: str, **kwargs):
 
 @router.route("corca", "Enter the Corca MCP-powered agentic shell. Usage: /corca [--mcp-server-path path]")
 def corca_handler(command: str, **kwargs):
-
+    from npcsh._state import initial_state, setup_shell
+    command_history, team, default_npc = setup_shell()
+
+
+    return enter_corca_mode(command=command,
+                            command_history = command_history,
+                            shell_state=initial_state)
 
 @router.route("flush", "Flush the last N messages")
 def flush_handler(command: str, **kwargs):
@@ -1131,7 +1137,7 @@ def yap_handler(command: str, **kwargs):
 def alicanto_handler(command: str, **kwargs):
     messages = safe_get(kwargs, "messages", [])
     parts = shlex.split(command)
-
+    skip_research = safe_get(kwargs, "skip_research", True)
     query = ""
     num_npcs = safe_get(kwargs, 'num_npcs', 5)
     depth = safe_get(kwargs, 'depth', 3)
@@ -1228,6 +1234,7 @@ def alicanto_handler(command: str, **kwargs):
         model=model,
         provider=provider,
         max_steps = safe_get(kwargs, 'max_steps', 20),
+        skip_research = skip_research
 
     )
 
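
The route changes are plumbing: /corca now builds its own shell state via setup_shell() before entering corca mode, and /alicanto reads a skip_research flag from its kwargs (defaulting to True, matching the new alicanto() signature) and forwards it through. A hypothetical call illustrating the passthrough (the keyword arguments shown are assumptions about how a caller would supply them):

    from npcsh.routes import alicanto_handler

    # skip_research reaches alicanto() via safe_get(kwargs, "skip_research", True).
    alicanto_handler("/alicanto quantum annealing for protein folding",
                     skip_research=False, num_npcs=5, depth=3)
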
{npcsh-1.0.33.dist-info → npcsh-1.0.35.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 npcsh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-npcsh/_state.py,sha256=
-npcsh/alicanto.py,sha256=
-npcsh/corca.py,sha256=
+npcsh/_state.py,sha256=96NR32dMOytt1XIUvvO4CGODx3i4hMnaPJ-IlMJAMUE,93032
+npcsh/alicanto.py,sha256=xcYlKZY6_NcnPUADosGKRuY5SVeZ_4msw82-fE2JZN8,42117
+npcsh/corca.py,sha256=ZABu9d7HmKR3S-YF5NNP25gqvAhoOPRNJDxKqdOO-OM,49835
 npcsh/guac.py,sha256=sVdLYVkzkQw6TE7crtLUX0QJZFkzQthWZTe1p7IfAQE,80412
 npcsh/mcp_helpers.py,sha256=9TsCfcquGu_vX4WaKlY3J3P13-uxruQKrXng-jJ5YyY,11176
 npcsh/mcp_server.py,sha256=krc1rhiSU9gI76w99Ph3Mk7OyUVzfiEXKMvVid-7-Ik,5201
@@ -9,7 +9,7 @@ npcsh/npc.py,sha256=OjCDu03hAcXyqPbzMeBBr1cBnmQsB1FBhd7kLNsWALc,8330
 npcsh/npcsh.py,sha256=3WLkZxHGzRekVGxHiJUA4VFyhC3A1jwX11aB0kgzE9s,8818
 npcsh/plonk.py,sha256=IfOuiE5FBvk-EIsrWFjGy0SrNywDpn4a49E7seBtEmY,14246
 npcsh/pti.py,sha256=UciiiH2Kz4ERQFy0-FX6BQEU2VxYQEUril-_Cvj76Y0,7853
-npcsh/routes.py,sha256=
+npcsh/routes.py,sha256=MMsHSoVp6r8OeoABO6stM4lSba4fOGz_QpzEIWS1nos,44437
 npcsh/spool.py,sha256=oCive2dbn1o3UGUJnFMzfON6g4bOnauuzyyQBgul6RI,9839
 npcsh/wander.py,sha256=8WOX8az8BXjizXGraEvu-ZVphi6PECKZzo9alTK4gmA,21730
 npcsh/yap.py,sha256=QU-j9eg8zixXG6nyjoIYXsanJ4FjPnzhS4aJ241HLxw,18467
@@ -39,35 +39,35 @@ npcsh/npc_team/jinxs/image_generation.jinx,sha256=bQxZdEm0_eqvf_OJSHw3qarp8Klp3L
 npcsh/npc_team/jinxs/internet_search.jinx,sha256=s8zVxwKFUAof_VzQrwB5dpAQdeJ6hUBkbIdRsT-I5mo,1267
 npcsh/npc_team/jinxs/python_executor.jinx,sha256=vZz3pZaj1BnPFHMs_wpyjRc0b32JR4rLhZfulCMDF1s,398
 npcsh/npc_team/jinxs/screen_cap.jinx,sha256=-4DG0EiEe61N_kMhVqqHKhLVGOLuZQT9ax6IZk20NjI,960
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
-npcsh-1.0.
+npcsh-1.0.35.data/data/npcsh/npc_team/alicanto.npc,sha256=y9yDY3lq8ZwxQxpnrgle8w5IJwZqvxDepZFU4OaZCtg,148
+npcsh-1.0.35.data/data/npcsh/npc_team/alicanto.png,sha256=A7xeMbcoKGjlkELxJEVifCEZLVWbOKZarTN5ZFJG-FM,3519858
+npcsh-1.0.35.data/data/npcsh/npc_team/bash_executer.jinx,sha256=C_oQccOY8cKevMMPLRdznlMOccQvLgyzyOIThXvmrD8,692
+npcsh-1.0.35.data/data/npcsh/npc_team/corca.npc,sha256=HI3Bs6KlUBPMz7icF1TRE8-V3f3EdU_VxvQxEpru3L4,662
+npcsh-1.0.35.data/data/npcsh/npc_team/corca.png,sha256=0lF70hKu6tY-37YmIPVF2cuaPzvnQ4-UtQOzuAbKEf4,1666776
+npcsh-1.0.35.data/data/npcsh/npc_team/edit_file.jinx,sha256=4PaJs8g_cdeDpbQwQSBycU5RDA0rczEC_NpLfLjo74Y,3490
+npcsh-1.0.35.data/data/npcsh/npc_team/foreman.npc,sha256=WqB8jLfBToGmr8c1vip1KOnTHxfXlGXwDUGnZoDMQr0,327
+npcsh-1.0.35.data/data/npcsh/npc_team/frederic.npc,sha256=EE2dOUItp-VKuW3ZMSHffmIEO4evjPcU2W_C4P3WXbY,362
+npcsh-1.0.35.data/data/npcsh/npc_team/frederic4.png,sha256=ll8uoV0npnPp5HVJWv7h0xDSeuq4pqsk_gYGBHLS0VY,1590744
+npcsh-1.0.35.data/data/npcsh/npc_team/guac.png,sha256=MCE7eJuEJwLJEzc9FS7lL62Mm-38jQRHkxXogPfOTuw,211470
+npcsh-1.0.35.data/data/npcsh/npc_team/image_generation.jinx,sha256=bQxZdEm0_eqvf_OJSHw3qarp8Klp3LlBDv1_HY3COo4,1307
+npcsh-1.0.35.data/data/npcsh/npc_team/internet_search.jinx,sha256=s8zVxwKFUAof_VzQrwB5dpAQdeJ6hUBkbIdRsT-I5mo,1267
+npcsh-1.0.35.data/data/npcsh/npc_team/kadiefa.npc,sha256=Yl5a4wrfe4F2f6Ndw_ukzlVVX7NE9g_mG-3QqJSkg_o,381
+npcsh-1.0.35.data/data/npcsh/npc_team/kadiefa.png,sha256=3CAwL8crKIwJko6o75Z6OYYEEM9Rk--yGzCJg7zoszg,3062528
+npcsh-1.0.35.data/data/npcsh/npc_team/npcsh.ctx,sha256=-jKYaPm2YbZHAGgWAXhyPIwhiNe1H1ZRFg1Zc7tHSxk,1049
+npcsh-1.0.35.data/data/npcsh/npc_team/npcsh_sibiji.png,sha256=9fUqgYMsSHmaH-kBTBQ7N5UCS5-eLZF94Log0O3mtFg,4544
+npcsh-1.0.35.data/data/npcsh/npc_team/plonk.npc,sha256=u1m2a1D512XGQ2kC3eWDAY8Y2IvpkNU73DI_CPE65UE,90
+npcsh-1.0.35.data/data/npcsh/npc_team/plonk.png,sha256=IU5ey-Dl4HEKlwnf75RSWNSHpF8rVqGmdbsa0deL4rQ,2727773
+npcsh-1.0.35.data/data/npcsh/npc_team/plonkjr.npc,sha256=It-i-BEuG0XddKk0d85onk2aJr9Pe5pLnJzNaCWaQIM,87
+npcsh-1.0.35.data/data/npcsh/npc_team/plonkjr.png,sha256=MqLEGwsyECUeODZIti0HQQrMMVxA6XERpW01R06NbpY,2606710
+npcsh-1.0.35.data/data/npcsh/npc_team/python_executor.jinx,sha256=vZz3pZaj1BnPFHMs_wpyjRc0b32JR4rLhZfulCMDF1s,398
+npcsh-1.0.35.data/data/npcsh/npc_team/screen_cap.jinx,sha256=-4DG0EiEe61N_kMhVqqHKhLVGOLuZQT9ax6IZk20NjI,960
+npcsh-1.0.35.data/data/npcsh/npc_team/sibiji.npc,sha256=Hb4wXKIObKKgibwnio5hLec9yd_9bKDCA87Nm2zijFA,216
+npcsh-1.0.35.data/data/npcsh/npc_team/sibiji.png,sha256=1dlZb7J3E62FcVo9CVOzLb8nu1bIUV7cr97nsFocHCM,35615
+npcsh-1.0.35.data/data/npcsh/npc_team/spool.png,sha256=LWTLkwDxXBfLuSUCX32_lo5yAmLYGsA67Xpsz-7MmWU,2876725
+npcsh-1.0.35.data/data/npcsh/npc_team/yap.png,sha256=_l7UbWnXJdsy4Mx-x5l9DT0R6ize3HTnkwQQnOFlI18,1548649
+npcsh-1.0.35.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
+npcsh-1.0.35.dist-info/METADATA,sha256=1EVu7WXtWuOl3yCQxrW9F8mtXpXlXVYxYe3XWZ4PJrM,25486
+npcsh-1.0.35.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+npcsh-1.0.35.dist-info/entry_points.txt,sha256=S5yIuGm8ZXQ4siHYgN5gs0J7bxgobSEULXf8L5HaW5o,206
+npcsh-1.0.35.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
+npcsh-1.0.35.dist-info/RECORD,,