npcsh 1.0.34__tar.gz → 1.0.36__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcsh-1.0.34 → npcsh-1.0.36}/PKG-INFO +1 -1
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/_state.py +214 -42
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/corca.py +373 -241
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh.egg-info/PKG-INFO +1 -1
- {npcsh-1.0.34 → npcsh-1.0.36}/setup.py +1 -1
- {npcsh-1.0.34 → npcsh-1.0.36}/LICENSE +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/README.md +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/__init__.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/alicanto.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/guac.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/mcp_helpers.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/mcp_server.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/jinxs/bash_executer.jinx +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/jinxs/edit_file.jinx +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/jinxs/image_generation.jinx +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/jinxs/internet_search.jinx +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/jinxs/python_executor.jinx +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/jinxs/screen_cap.jinx +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/npcsh.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/plonk.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/pti.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/routes.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/spool.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/wander.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh/yap.py +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh.egg-info/SOURCES.txt +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh.egg-info/dependency_links.txt +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh.egg-info/entry_points.txt +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh.egg-info/requires.txt +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/npcsh.egg-info/top_level.txt +0 -0
- {npcsh-1.0.34 → npcsh-1.0.36}/setup.cfg +0 -0
{npcsh-1.0.34 → npcsh-1.0.36}/npcsh/_state.py

@@ -2013,6 +2013,9 @@ def execute_slash_command(command: str,
 
     return state, colored(f"Unknown slash command, jinx, or NPC: {command_name}", "red")
 
+
+
+
 def process_pipeline_command(
     cmd_segment: str,
     stdin_input: Optional[str],
@@ -2454,24 +2457,115 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
 
 
 
+
+from npcpy.memory.memory_processor import MemoryApprovalQueue, MemoryItem, memory_approval_ui
+from npcpy.ft.memory_trainer import MemoryTrainer
+from npcpy.llm_funcs import get_facts
+
+_memory_queue = None
+
+def get_memory_queue(command_history):
+    global _memory_queue
+    if _memory_queue is None:
+        _memory_queue = MemoryApprovalQueue(command_history)
+        _memory_queue.start_background_processing()
+    return _memory_queue
+
+def format_memory_context(memory_examples):
+    if not memory_examples:
+        return ""
+
+    context_parts = []
+
+    approved_examples = memory_examples.get("approved", [])
+    rejected_examples = memory_examples.get("rejected", [])
+
+    if approved_examples:
+        context_parts.append("EXAMPLES OF GOOD MEMORIES:")
+        for ex in approved_examples[:5]:
+            final = ex.get("final_memory") or ex.get("initial_memory")
+            context_parts.append(f"- {final}")
+
+    if rejected_examples:
+        context_parts.append("\nEXAMPLES OF POOR MEMORIES TO AVOID:")
+        for ex in rejected_examples[:3]:
+            context_parts.append(f"- {ex.get('initial_memory')}")
+
+    if context_parts:
+        context_parts.append("\nLearn from these examples to generate similar high-quality memories.")
+        return "\n".join(context_parts)
+
+    return ""
+
+def process_memory_approvals(command_history, memory_queue):
+    pending_memories = memory_queue.get_approval_batch(max_items=5)
+
+    if not pending_memories:
+        return
+
+    print(f"\n🧠 Processing {len(pending_memories)} memories...")
+
+    try:
+        trainer = MemoryTrainer()
+        auto_processed = []
+        need_human_review = []
+
+        for memory in pending_memories:
+            result = trainer.auto_approve_memory(
+                memory['content'],
+                memory['context'],
+                confidence_threshold=0.85
+            )
+
+            if result['auto_processed']:
+                auto_processed.append((memory, result))
+            else:
+                need_human_review.append(memory)
+
+        for memory, result in auto_processed:
+            command_history.update_memory_status(
+                memory['memory_id'],
+                result['action']
+            )
+            print(f"  Auto-{result['action']}: {memory['content'][:50]}... (confidence: {result['confidence']:.2f})")
+
+        if need_human_review:
+            approvals = memory_approval_ui(need_human_review)
+
+            for approval in approvals:
+                command_history.update_memory_status(
+                    approval['memory_id'],
+                    approval['decision'],
+                    approval.get('final_memory')
+                )
+
+    except Exception as e:
+        print(f"Auto-approval failed: {e}")
+        approvals = memory_approval_ui(pending_memories)
+
+        for approval in approvals:
+            command_history.update_memory_status(
+                approval['memory_id'],
+                approval['decision'],
+                approval.get('final_memory')
+            )
+
 def process_result(
     user_input: str,
     result_state: ShellState,
     output: Any,
-    command_history: CommandHistory,
-
-
+    command_history: CommandHistory,
 ):
-
     team_name = result_state.team.name if result_state.team else "__none__"
     npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
 
-
     active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
         name="default",
         model=result_state.chat_model,
         provider=result_state.chat_provider,
-        db_conn=command_history.engine
+        db_conn=command_history.engine
+    )
+
     save_conversation_message(
         command_history,
         result_state.conversation_id,
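The memory helpers added above are plain functions, so their behaviour can be checked in isolation. The sketch below is not part of the diff: it calls format_memory_context with a hand-built examples dict shaped the way the function reads it ("approved" and "rejected" lists of records carrying initial_memory/final_memory keys); the trailing comments show the string it returns.

    # Illustrative sketch, not part of the package: exercising format_memory_context.
    examples = {
        "approved": [
            {"initial_memory": "User prefers tabs", "final_memory": "User prefers tabs over spaces"},
        ],
        "rejected": [
            {"initial_memory": "The weather was nice"},
        ],
    }
    print(format_memory_context(examples))
    # EXAMPLES OF GOOD MEMORIES:
    # - User prefers tabs over spaces
    #
    # EXAMPLES OF POOR MEMORIES TO AVOID:
    # - The weather was nice
    #
    # Learn from these examples to generate similar high-quality memories.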
@@ -2492,23 +2586,27 @@ def process_result(
     provider_for_stream = output.get('provider', active_npc.provider) if isinstance(output, dict) else active_npc.provider
 
     print('\n')
-    if user_input =='/help':
+    if user_input == '/help':
         render_markdown(output.get('output'))
     elif result_state.stream_output:
-
-
-
-
-
+        final_output_str = print_and_process_stream_with_markdown(
+            output_content,
+            model_for_stream,
+            provider_for_stream,
+            show=True
+        )
     elif output_content is not None:
         final_output_str = str(output_content)
         render_markdown(final_output_str)
 
     if final_output_str:
         if result_state.messages:
-            if result_state.messages[-1].get("role") != "assistant":
-                result_state.messages.append({
-
+            if not result_state.messages or result_state.messages[-1].get("role") != "assistant":
+                result_state.messages.append({
+                    "role": "assistant",
+                    "content": final_output_str
+                })
+
         save_conversation_message(
             command_history,
             result_state.conversation_id,
@@ -2524,80 +2622,154 @@ def process_result(
         conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
         engine = command_history.engine
 
+        memory_examples = command_history.get_memory_examples_for_context(
+            npc=npc_name,
+            team=team_name,
+            directory_path=result_state.current_path
+        )
+
+        memory_context = format_memory_context(memory_examples)
+
+        approved_facts = []
+        try:
+            facts = get_facts(
+                conversation_turn_text,
+                model=active_npc.model,
+                provider=active_npc.provider,
+                npc=active_npc,
+                context=memory_context
+            )
+
+            if facts:
+                memories_for_approval = []
+                for i, fact in enumerate(facts):
+                    memories_for_approval.append({
+                        "memory_id": f"temp_{i}",
+                        "content": fact['statement'],
+                        "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
+                        "npc": npc_name,
+                        "fact_data": fact
+                    })
+
+                approvals = memory_approval_ui(memories_for_approval)
+
+                for approval in approvals:
+                    fact_data = next(m['fact_data'] for m in memories_for_approval
+                                     if m['memory_id'] == approval['memory_id'])
+
+                    command_history.add_memory_to_database(
+                        message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
+                        conversation_id=result_state.conversation_id,
+                        npc=npc_name,
+                        team=team_name,
+                        directory_path=result_state.current_path,
+                        initial_memory=fact_data['statement'],
+                        status=approval['decision'],
+                        model=active_npc.model,
+                        provider=active_npc.provider,
+                        final_memory=approval.get('final_memory')
+                    )
+
+                    if approval['decision'] in ['human-approved', 'human-edited']:
+                        approved_fact = {
+                            'statement': approval.get('final_memory') or fact_data['statement'],
+                            'source_text': fact_data.get('source_text', ''),
+                            'type': fact_data.get('type', 'explicit'),
+                            'generation': 0
+                        }
+                        approved_facts.append(approved_fact)
+
+        except Exception as e:
+            print(colored(f"Memory generation error: {e}", "yellow"))
 
-        if result_state.build_kg:
-            import pdb
-            pdb.set_trace()
+        if result_state.build_kg and approved_facts:
             try:
                 if not should_skip_kg_processing(user_input, final_output_str):
                     npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
                     evolved_npc_kg, _ = kg_evolve_incremental(
                         existing_kg=npc_kg,
-
+                        new_facts=approved_facts,
                         model=active_npc.model,
                         provider=active_npc.provider,
+                        npc=active_npc,
                         get_concepts=True,
-                        link_concepts_facts
-                        link_concepts_concepts
-                        link_facts_facts
+                        link_concepts_facts=False,
+                        link_concepts_concepts=False,
+                        link_facts_facts=False,
+                    )
+                    save_kg_to_db(
+                        engine,
+                        evolved_npc_kg,
+                        team_name,
+                        npc_name,
+                        result_state.current_path
                     )
-                    save_kg_to_db(engine,
-                        evolved_npc_kg,
-                        team_name,
-                        npc_name,
-                        result_state.current_path)
             except Exception as e:
                 print(colored(f"Error during real-time KG evolution: {e}", "red"))
 
-
     result_state.turn_count += 1
 
-    if result_state.turn_count
+    if result_state.turn_count % 10 == 0:
         print(colored("\nChecking for potential team improvements...", "cyan"))
         try:
-            summary = breathe(messages=result_state.messages[-20:],
-                npc=active_npc)
+            summary = breathe(messages=result_state.messages[-20:], npc=active_npc)
             characterization = summary.get('output')
 
             if characterization and result_state.team:
-
-
+                team_ctx_path = get_team_ctx_path(result_state.team.team_path)
+                if not team_ctx_path:
+                    team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+
                 ctx_data = {}
                 if os.path.exists(team_ctx_path):
                     with open(team_ctx_path, 'r') as f:
                         ctx_data = yaml.safe_load(f) or {}
+
                 current_context = ctx_data.get('context', '')
 
                 prompt = f"""Based on this characterization: {characterization},
-
 suggest changes (additions, deletions, edits) to the team's context.
 Additions need not be fully formed sentences and can simply be equations, relationships, or other plain clear items.
 
 Current Context: "{current_context}".
 
-Respond with JSON: {{"suggestion": "Your sentence."
-
-                response = get_llm_response(
-
-
+Respond with JSON: {{"suggestion": "Your sentence."}}"""
+
+                response = get_llm_response(
+                    prompt,
+                    npc=active_npc,
+                    format="json"
+                )
                 suggestion = response.get("response", {}).get("suggestion")
 
                 if suggestion:
                     new_context = (current_context + " " + suggestion).strip()
-                    print(colored(f"{
+                    print(colored(f"{npc_name} suggests updating team context:", "yellow"))
                     print(f" - OLD: {current_context}\n + NEW: {new_context}")
-
+
+                    choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
+
+                    if choice == 'y':
                         ctx_data['context'] = new_context
                         with open(team_ctx_path, 'w') as f:
                             yaml.dump(ctx_data, f)
                         print(colored("Team context updated.", "green"))
+                    elif choice == 'e':
+                        edited_context = input(f"Edit context [{new_context}]: ").strip()
+                        if edited_context:
+                            ctx_data['context'] = edited_context
+                        else:
+                            ctx_data['context'] = new_context
+                        with open(team_ctx_path, 'w') as f:
+                            yaml.dump(ctx_data, f)
+                        print(colored("Team context updated with edits.", "green"))
                     else:
                         print("Suggestion declined.")
         except Exception as e:
             import traceback
             print(colored(f"Could not generate team suggestions: {e}", "yellow"))
             traceback.print_exc()
-
+
 initial_state = ShellState(
     conversation_id=start_new_conversation(),
     stream_output=NPCSH_STREAM_OUTPUT,
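The team-context update in the hunk above is a plain YAML round trip on a team.ctx file. The sketch below is not part of the diff; it shows the same read-append-write cycle in isolation, with the path and suggestion text invented for the example.

    # Illustrative sketch, not part of the package: the team.ctx read/append/write
    # cycle used by the suggestion flow above. Path and suggestion are made up.
    import os
    import yaml

    team_ctx_path = "team.ctx"  # hypothetical path
    ctx_data = {}
    if os.path.exists(team_ctx_path):
        with open(team_ctx_path, 'r') as f:
            ctx_data = yaml.safe_load(f) or {}

    current_context = ctx_data.get('context', '')
    suggestion = "Prefer incremental KG updates over full rebuilds."  # made-up suggestion
    ctx_data['context'] = (current_context + " " + suggestion).strip()

    with open(team_ctx_path, 'w') as f:
        yaml.dump(ctx_data, f)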
{npcsh-1.0.34 → npcsh-1.0.36}/npcsh/corca.py

@@ -7,7 +7,7 @@ from contextlib import AsyncExitStack
 from typing import Optional, Callable, Dict, Any, Tuple, List
 import shutil
 import traceback
-from litellm.exceptions import Timeout, ContextWindowExceededError, RateLimitError
+from litellm.exceptions import Timeout, ContextWindowExceededError, RateLimitError, BadRequestError
 
 try:
     from mcp import ClientSession, StdioServerParameters
@@ -248,102 +248,189 @@ def process_mcp_stream(stream_response, active_npc):
 
     return collected_content, tool_calls
 
-
-
-
-
-
-    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
-        all_available_mcp_tools = state.mcp_client.available_tools_llm
+def clean_orphaned_tool_calls(messages):
+    cleaned_messages = []
+    i = 0
+    while i < len(messages):
+        msg = messages[i]
 
-        if
-
-
-
-
-
+        if msg.get("role") == "tool":
+            # Check if there's a preceding assistant message with tool_calls
+            found_preceding_assistant = False
+            for j in range(i-1, -1, -1):
+                prev_msg = messages[j]
+                if prev_msg.get("role") == "assistant" and prev_msg.get("tool_calls"):
+                    # Check if this tool response matches any tool call
+                    tool_call_ids = {tc["id"] for tc in prev_msg["tool_calls"]}
+                    if msg.get("tool_call_id") in tool_call_ids:
+                        found_preceding_assistant = True
+                        break
+                elif prev_msg.get("role") in ["user", "assistant"]:
+                    break
+
+            if found_preceding_assistant:
+                cleaned_messages.append(msg)
+            # Skip orphaned tool responses
+
+        elif (msg.get("role") == "assistant" and msg.get("tool_calls")):
+            tool_call_ids = {tc["id"] for tc in msg["tool_calls"]}
+            j = i + 1
+            found_responses = set()
+
+            while j < len(messages):
+                next_msg = messages[j]
+                if next_msg.get("role") == "tool":
+                    if next_msg.get("tool_call_id") in tool_call_ids:
+                        found_responses.add(next_msg.get("tool_call_id"))
+                elif next_msg.get("role") in ["user", "assistant"]:
+                    break
+                j += 1
+
+            missing_responses = tool_call_ids - found_responses
+            if missing_responses:
+                assistant_msg = msg.copy()
+                assistant_msg["tool_calls"] = [
+                    tc for tc in msg["tool_calls"]
+                    if tc["id"] not in missing_responses
+                ]
+                if not assistant_msg["tool_calls"]:
+                    del assistant_msg["tool_calls"]
+                cleaned_messages.append(assistant_msg)
+            else:
+                cleaned_messages.append(msg)
         else:
-
-
-
-
-        active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
+            cleaned_messages.append(msg)
+        i += 1
+
+    return cleaned_messages
 
-    if not state.messages or not any("working directory" in msg.get("content", "").lower() for msg in state.messages):
-        context_message = {
-            "role": "system",
-            "content": f"You are currently operating in the directory: {state.current_path}. All file operations should be relative to this location unless explicitly specified otherwise."
-        }
-        state.messages.insert(0, context_message)
 
-
-
-
-
-
+def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, context=None):
+    """Unified LLM response with exception handling."""
+    messages = clean_orphaned_tool_calls(messages)
+
     try:
-
-        prompt=
-        npc=
-        messages=
-        tools=
+        return get_llm_response(
+            prompt=prompt,
+            npc=npc,
+            messages=messages,
+            tools=tools,
             auto_process_tool_calls=False,
-            stream=
-            team=
+            stream=stream,
+            team=team,
+            context=context
         )
     except Timeout:
-
-        prompt=
-        npc=
-        messages=
-        tools=
+        return get_llm_response(
+            prompt=prompt,
+            npc=npc,
+            messages=messages,
+            tools=tools,
             auto_process_tool_calls=False,
-            stream=
-            team=
+            stream=stream,
+            team=team
         )
     except ContextWindowExceededError:
-
-
-
-
-        prompt=
-        npc=
-        messages=
-        tools=
+        print('compressing..... ')
+        compressed_state = npc.compress_planning_state(messages)
+        compressed_messages = [{"role": "system", "content": compressed_state}]
+        return get_llm_response(
+            prompt=prompt,
+            npc=npc,
+            messages=compressed_messages,
+            tools=tools,
             auto_process_tool_calls=False,
-            stream=
-            team=
+            stream=stream,
+            team=team
         )
     except RateLimitError:
         import time
         print('rate limit hit... waiting 60 seconds')
         time.sleep(60)
         print('compressing..... ')
-        compressed_state =
-
-
-
-
-
-
-        tools=mcp_tools_for_llm,
+        compressed_state = npc.compress_planning_state(messages)
+        compressed_messages = [{"role": "system", "content": compressed_state}]
+        return get_llm_response(
+            prompt=prompt,
+            npc=npc,
+            messages=compressed_messages,
+            tools=tools,
             auto_process_tool_calls=False,
-            stream=
-            team=
+            stream=stream,
+            team=team
         )
+    except BadRequestError as e:
+        if "tool_call_id" in str(e).lower():
+            cleaned_messages = clean_orphaned_tool_calls(messages)
+            return get_llm_response(
+                prompt=prompt,
+                npc=npc,
+                messages=cleaned_messages,
+                tools=tools,
+                auto_process_tool_calls=False,
+                stream=stream,
+                team=team,
+                context=context
+            )
+        else:
+            raise e
+
+
+
+def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
+    mcp_tools_for_llm = []
+
+    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
+        all_available_mcp_tools = state.mcp_client.available_tools_llm
+
+        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
+            mcp_tools_for_llm = [
+                tool_def for tool_def in all_available_mcp_tools
+                if tool_def['function']['name'] in selected_mcp_tools_names
+            ]
+            if not mcp_tools_for_llm:
+                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
+        else:
+            mcp_tools_for_llm = all_available_mcp_tools
+    else:
+        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
+
+    if len(state.messages) > 20:
+        compressed_state = state.npc.compress_planning_state(messages)
+        state.messages = [{"role": "system", "content": state.npc.get_system_message() + f' Your current task: {compressed_state}'}]
+        print("Compressed messages during tool execution.")
+
+    response_dict = get_llm_response_with_handling(
+        prompt=command,
+        npc=state.npc,
+        messages=state.messages,
+        tools=mcp_tools_for_llm,
+        stream=state.stream_output,
+        team=state.team,
+        context=f' The users working directory is {state.current_path}'
+    )
 
     stream_response = response_dict.get('response')
     messages = response_dict.get('messages', state.messages)
+    tool_calls = response_dict.get('tool_calls', [])
+
+    collected_content, stream_tool_calls = process_mcp_stream(stream_response, state.npc)
 
-
+    if stream_tool_calls:
+        tool_calls = stream_tool_calls
 
     state.messages = messages
-
-
-
-
-
+
+    if tool_calls and hasattr(state, 'mcp_client') and state.mcp_client:
+        final_content, state.messages = execute_mcp_tool_calls(
+            tool_calls,
+            state.mcp_client,
+            state.messages,
+            state.npc,
+            state.stream_output
+        )
+        if final_content:
+            collected_content = final_content
 
     return state, {
         "output": collected_content,
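clean_orphaned_tool_calls is a pure function over message dicts, so its effect is easy to see on a toy history. The sketch below is not part of the diff; the message contents and ids are invented for illustration.

    # Illustrative sketch, not part of the package: clean_orphaned_tool_calls drops
    # a tool message whose tool_call_id has no matching assistant tool_calls entry,
    # and strips tool_calls that never received a response.
    messages = [
        {"role": "user", "content": "list the files"},
        {"role": "assistant", "tool_calls": [{"id": "call_1", "function": {"name": "ls", "arguments": "{}"}}]},
        {"role": "tool", "tool_call_id": "call_1", "name": "ls", "content": "README.md"},
        {"role": "tool", "tool_call_id": "call_9", "name": "ls", "content": "orphaned result"},
    ]

    cleaned = clean_orphaned_tool_calls(messages)
    # The call_1 pair survives; the call_9 tool message is dropped because no
    # preceding assistant message declares that tool_call_id.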
@@ -352,6 +439,129 @@ def execute_command_corca(command: str, state: ShellState, command_history, sele
|
|
|
352
439
|
}
|
|
353
440
|
|
|
354
441
|
|
|
442
|
+
def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output):
|
|
443
|
+
if not tool_calls or not mcp_client:
|
|
444
|
+
return None, messages
|
|
445
|
+
|
|
446
|
+
messages = clean_orphaned_tool_calls(messages)
|
|
447
|
+
|
|
448
|
+
print(colored("\n🔧 Executing MCP tools...", "cyan"))
|
|
449
|
+
|
|
450
|
+
while tool_calls:
|
|
451
|
+
tool_responses = []
|
|
452
|
+
|
|
453
|
+
if len(messages) > 20:
|
|
454
|
+
compressed_state = npc.compress_planning_state(messages)
|
|
455
|
+
messages = [{"role": "system", "content": npc.get_system_prompt() + f' Your current task: {compressed_state}'}]
|
|
456
|
+
print("Compressed messages during tool execution.")
|
|
457
|
+
|
|
458
|
+
|
|
459
|
+
for tool_call in tool_calls:
|
|
460
|
+
tool_name = tool_call['function']['name']
|
|
461
|
+
tool_args = tool_call['function']['arguments']
|
|
462
|
+
tool_call_id = tool_call['id']
|
|
463
|
+
|
|
464
|
+
if isinstance(tool_args, str):
|
|
465
|
+
try:
|
|
466
|
+
tool_args = json.loads(tool_args) if tool_args.strip() else {}
|
|
467
|
+
except json.JSONDecodeError:
|
|
468
|
+
tool_args = {}
|
|
469
|
+
|
|
470
|
+
try:
|
|
471
|
+
print(f" Calling MCP tool: {tool_name} with args: {tool_args}")
|
|
472
|
+
|
|
473
|
+
loop = asyncio.get_event_loop()
|
|
474
|
+
if loop.is_closed():
|
|
475
|
+
loop = asyncio.new_event_loop()
|
|
476
|
+
asyncio.set_event_loop(loop)
|
|
477
|
+
|
|
478
|
+
mcp_result = loop.run_until_complete(
|
|
479
|
+
mcp_client.session.call_tool(tool_name, tool_args)
|
|
480
|
+
)
|
|
481
|
+
|
|
482
|
+
tool_content = ""
|
|
483
|
+
if hasattr(mcp_result, 'content') and mcp_result.content:
|
|
484
|
+
for content_item in mcp_result.content:
|
|
485
|
+
if hasattr(content_item, 'text'):
|
|
486
|
+
tool_content += content_item.text
|
|
487
|
+
elif hasattr(content_item, 'data'):
|
|
488
|
+
tool_content += str(content_item.data)
|
|
489
|
+
else:
|
|
490
|
+
tool_content += str(content_item)
|
|
491
|
+
else:
|
|
492
|
+
tool_content = str(mcp_result)
|
|
493
|
+
|
|
494
|
+
tool_responses.append({
|
|
495
|
+
"role": "tool",
|
|
496
|
+
"tool_call_id": tool_call_id,
|
|
497
|
+
"name": tool_name,
|
|
498
|
+
"content": tool_content
|
|
499
|
+
})
|
|
500
|
+
|
|
501
|
+
print(colored(f" ✓ {tool_name} completed", "green"))
|
|
502
|
+
|
|
503
|
+
except KeyboardInterrupt:
|
|
504
|
+
print(colored(f"\n ⚠️ Tool execution interrupted", "yellow"))
|
|
505
|
+
return None, messages
|
|
506
|
+
except Exception as e:
|
|
507
|
+
print(colored(f" ✗ {tool_name} failed: {e}", "red"))
|
|
508
|
+
tool_responses.append({
|
|
509
|
+
"role": "tool",
|
|
510
|
+
"tool_call_id": tool_call_id,
|
|
511
|
+
"name": tool_name,
|
|
512
|
+
"content": f"Error: {str(e)}"
|
|
513
|
+
})
|
|
514
|
+
|
|
515
|
+
current_messages = messages + tool_responses
|
|
516
|
+
|
|
517
|
+
try:
|
|
518
|
+
follow_up_response = get_llm_response_with_handling(
|
|
519
|
+
prompt="",
|
|
520
|
+
npc=npc,
|
|
521
|
+
messages=current_messages,
|
|
522
|
+
tools=mcp_client.available_tools_llm,
|
|
523
|
+
stream=stream_output,
|
|
524
|
+
team=None
|
|
525
|
+
)
|
|
526
|
+
except KeyboardInterrupt:
|
|
527
|
+
print(colored(f"\n ⚠️ Follow-up response interrupted", "yellow"))
|
|
528
|
+
return None, messages
|
|
529
|
+
|
|
530
|
+
follow_up_messages = follow_up_response.get('messages', current_messages)
|
|
531
|
+
follow_up_content = follow_up_response.get('response', '')
|
|
532
|
+
follow_up_tool_calls = []
|
|
533
|
+
|
|
534
|
+
if stream_output:
|
|
535
|
+
if hasattr(follow_up_content, '__iter__'):
|
|
536
|
+
collected_content, follow_up_tool_calls = process_mcp_stream(follow_up_content, npc)
|
|
537
|
+
else:
|
|
538
|
+
collected_content = str(follow_up_content)
|
|
539
|
+
follow_up_content = collected_content
|
|
540
|
+
else:
|
|
541
|
+
if follow_up_messages:
|
|
542
|
+
last_message = follow_up_messages[-1]
|
|
543
|
+
if last_message.get("role") == "assistant" and "tool_calls" in last_message:
|
|
544
|
+
follow_up_tool_calls = last_message["tool_calls"]
|
|
545
|
+
|
|
546
|
+
messages = follow_up_messages
|
|
547
|
+
|
|
548
|
+
if not follow_up_tool_calls:
|
|
549
|
+
if not stream_output:
|
|
550
|
+
print('\n')
|
|
551
|
+
render_markdown(follow_up_content)
|
|
552
|
+
return follow_up_content, messages
|
|
553
|
+
else:
|
|
554
|
+
if follow_up_content or follow_up_tool_calls:
|
|
555
|
+
assistant_message = {"role": "assistant", "content": follow_up_content}
|
|
556
|
+
if follow_up_tool_calls:
|
|
557
|
+
assistant_message["tool_calls"] = follow_up_tool_calls
|
|
558
|
+
messages.append(assistant_message)
|
|
559
|
+
|
|
560
|
+
tool_calls = follow_up_tool_calls
|
|
561
|
+
print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
|
|
562
|
+
|
|
563
|
+
return None, messages
|
|
564
|
+
|
|
355
565
|
def _resolve_and_copy_mcp_server_path(
|
|
356
566
|
explicit_path: Optional[str],
|
|
357
567
|
current_path: Optional[str],
|
|
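execute_mcp_tool_calls keeps looping: it runs every pending call, appends one tool message per result, asks the model for a follow-up turn, and only returns once the model answers without new tool calls, compressing the planning state whenever the history grows past 20 messages. The sketch below is not part of the diff; it shows the message shapes the loop relies on, with the id, tool name, and contents invented, and clean_orphaned_tool_calls is what guarantees every tool message has a matching tool_call_id.

    # Illustrative sketch, not part of the package: the assistant tool_calls entry
    # and the tool response appended for it after the call runs. Values are made up.
    assistant_turn = {
        "role": "assistant",
        "content": "",
        "tool_calls": [{"id": "call_1", "function": {"name": "internet_search", "arguments": "{\"query\": \"npcsh\"}"}}],
    }
    tool_turn = {
        "role": "tool",
        "tool_call_id": "call_1",       # must match the assistant entry above
        "name": "internet_search",      # hypothetical MCP tool name
        "content": "Top result: ...",   # text pulled from the MCP result content
    }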
@@ -515,13 +725,17 @@ def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None
 
     return state
 
-
+
 def process_corca_result(
     user_input: str,
     result_state: ShellState,
     output: Any,
     command_history: CommandHistory,
 ):
+    from npcpy.llm_funcs import get_facts
+    from npcpy.memory.memory_processor import memory_approval_ui
+    from npcsh._state import format_memory_context
+
     team_name = result_state.team.name if result_state.team else "__none__"
     npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
 
@@ -529,7 +743,8 @@ def process_corca_result(
         name="default",
         model=result_state.chat_model,
         provider=result_state.chat_provider,
-        db_conn=command_history.engine
+        db_conn=command_history.engine
+    )
 
     save_conversation_message(
         command_history,
@@ -550,167 +765,20 @@ def process_corca_result(
     final_output_str = None
 
     if tool_calls and hasattr(result_state, 'mcp_client') and result_state.mcp_client:
-
-
-
-
-
-
-
-
-            try:
-                if isinstance(tool_args, str):
-                    tool_args = json.loads(tool_args) if tool_args.strip() else {}
-
-            except json.JSONDecodeError:
-                tool_args = {}
-
-            try:
-
-                loop = asyncio.get_event_loop()
-                if loop.is_closed():
-                    loop = asyncio.new_event_loop()
-                    asyncio.set_event_loop(loop)
-
-                mcp_result = loop.run_until_complete(
-                    result_state.mcp_client.session.call_tool(tool_name, tool_args)
-                )
-
-
-                tool_content = ""
-                if hasattr(mcp_result, 'content') and mcp_result.content:
-
-                    for i, content_item in enumerate(mcp_result.content):
-
-                        if hasattr(content_item, 'text'):
-                            tool_content += content_item.text
-                        else:
-                            tool_content += str(content_item)
-                else:
-                    tool_content = str(mcp_result)
-
-
-
-                tool_responses.append({
-                    "role": "tool",
-                    "tool_call_id": tool_call_id,
-                    "name": tool_name,
-                    "content": tool_content
-                })
-
-                print(colored(f"  ✓ {tool_name} completed", "green"))
-
-            except Exception as e:
-                print(colored(f"  ✗ {tool_name} failed: {e}", "red"))
-                tool_responses.append({
-                    "role": "tool",
-                    "tool_call_id": tool_call_id,
-                    "name": tool_name,
-                    "content": f"Error: {str(e)}"
-                })
-
-        result_state.messages.extend(tool_responses)
-
-        while True:
-            follow_up_response = get_llm_response(
-                prompt="",
-                model=active_npc.model,
-                provider=active_npc.provider,
-                npc=active_npc,
-                messages=result_state.messages,
-                tools=result_state.mcp_client.available_tools_llm,
-                auto_process_tool_calls=False,
-                stream=result_state.stream_output
-            )
-
-            follow_up_messages = follow_up_response.get('messages', [])
-            follow_up_content = follow_up_response.get('response', '')
-            follow_up_tool_calls = []
-
-            if result_state.stream_output:
-                if hasattr(follow_up_content, '__iter__'):
-                    collected_content, follow_up_tool_calls = process_mcp_stream(follow_up_content, active_npc)
-                else:
-                    collected_content = str(follow_up_content)
-                follow_up_content = collected_content
-            else:
-                if follow_up_messages:
-                    last_message = follow_up_messages[-1]
-                    if last_message.get("role") == "assistant" and "tool_calls" in last_message:
-                        follow_up_tool_calls = last_message["tool_calls"]
-
-            result_state.messages = follow_up_messages
-            if follow_up_content or follow_up_tool_calls:
-                assistant_message = {"role": "assistant", "content": follow_up_content}
-                if follow_up_tool_calls:
-                    assistant_message["tool_calls"] = follow_up_tool_calls
-                result_state.messages.append(assistant_message)
-
-            if not follow_up_tool_calls:
-                final_output_str = follow_up_content
-                if not result_state.stream_output:
-                    print('\n')
-                    render_markdown(final_output_str)
-                break
-
-            print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
-            for tool_call in follow_up_tool_calls:
-                tool_name = tool_call['function']['name']
-                tool_args = tool_call['function']['arguments']
-                tool_call_id = tool_call['id']
-
-                try:
-                    tool_args = json.loads(tool_args) if tool_args.strip() else {}
-                except json.JSONDecodeError:
-                    tool_args = {}
-
-                try:
-                    print(f"  Calling MCP tool: {tool_name} with args: {tool_args}")
-
-                    loop = asyncio.get_event_loop()
-                    if loop.is_closed():
-                        loop = asyncio.new_event_loop()
-                        asyncio.set_event_loop(loop)
-
-                    mcp_result = loop.run_until_complete(
-                        result_state.mcp_client.session.call_tool(tool_name, tool_args)
-                    )
-
-
-                    tool_content = ""
-                    if hasattr(mcp_result, 'content') and mcp_result.content:
-                        for i, content_item in enumerate(mcp_result.content):
-
-                            if hasattr(content_item, 'text') and content_item.text:
-                                tool_content += content_item.text
-                            elif hasattr(content_item, 'data'):
-                                tool_content += str(content_item.data)
-                            else:
-                                tool_content += str(content_item)
-                    result_state.messages.append({
-                        "role": "tool",
-                        "tool_call_id": tool_call_id,
-                        "name": tool_name,
-                        "content": tool_content
-                    })
-
-                    print(colored(f"  ✓ {tool_name} completed", "green"))
-
-                except Exception as e:
-                    print(colored(f"  ✗ {tool_name} failed: {e}", "red"))
-                    result_state.messages.append({
-                        "role": "tool",
-                        "tool_call_id": tool_call_id,
-                        "name": tool_name,
-                        "content": f"Error: {str(e)}"
-                    })
+        final_output_str, result_state.messages = execute_mcp_tool_calls(
+            tool_calls,
+            result_state.mcp_client,
+            result_state.messages,
+            result_state.npc,
+            result_state.stream_output
+        )
     else:
         print('\n')
         if result_state.stream_output:
             final_output_str = print_and_process_stream_with_markdown(
                 output_content,
-
-
+                result_state.npc.model,
+                result_state.npc.provider,
                 show=True
             )
         else:
@@ -736,25 +804,88 @@ def process_corca_result(
         conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
         engine = command_history.engine
 
-
+        memory_examples = command_history.get_memory_examples_for_context(
+            npc=npc_name,
+            team=team_name,
+            directory_path=result_state.current_path
+        )
+
+        memory_context = format_memory_context(memory_examples)
+
+        approved_facts = []
+        try:
+            facts = get_facts(
+                conversation_turn_text,
+                model=active_npc.model,
+                provider=active_npc.provider,
+                npc=active_npc,
+                context=memory_context
+            )
+
+            if facts:
+                memories_for_approval = []
+                for i, fact in enumerate(facts):
+                    memories_for_approval.append({
+                        "memory_id": f"temp_{i}",
+                        "content": fact['statement'],
+                        "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
+                        "npc": npc_name,
+                        "fact_data": fact
+                    })
+
+                approvals = memory_approval_ui(memories_for_approval)
+
+                for approval in approvals:
+                    fact_data = next(m['fact_data'] for m in memories_for_approval
+                                     if m['memory_id'] == approval['memory_id'])
+
+                    command_history.add_memory_to_database(
+                        message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
+                        conversation_id=result_state.conversation_id,
+                        npc=npc_name,
+                        team=team_name,
+                        directory_path=result_state.current_path,
+                        initial_memory=fact_data['statement'],
+                        status=approval['decision'],
+                        model=active_npc.model,
+                        provider=active_npc.provider,
+                        final_memory=approval.get('final_memory')
+                    )
+
+                    if approval['decision'] in ['human-approved', 'human-edited']:
+                        approved_fact = {
+                            'statement': approval.get('final_memory') or fact_data['statement'],
+                            'source_text': fact_data.get('source_text', ''),
+                            'type': fact_data.get('type', 'explicit'),
+                            'generation': 0
+                        }
+                        approved_facts.append(approved_fact)
+
+        except Exception as e:
+            print(colored(f"Memory generation error: {e}", "yellow"))
+
+        if result_state.build_kg and approved_facts:
             try:
                 if not should_skip_kg_processing(user_input, final_output_str):
                     npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
                     evolved_npc_kg, _ = kg_evolve_incremental(
                         existing_kg=npc_kg,
-
+                        new_facts=approved_facts,
                         model=active_npc.model,
                         provider=active_npc.provider,
+                        npc=active_npc,
                         get_concepts=True,
-                        link_concepts_facts
-                        link_concepts_concepts
-                        link_facts_facts
+                        link_concepts_facts=False,
+                        link_concepts_concepts=False,
+                        link_facts_facts=False,
+                    )
+                    save_kg_to_db(
+                        engine,
+                        evolved_npc_kg,
+                        team_name,
+                        npc_name,
+                        result_state.current_path
                     )
-                    save_kg_to_db(engine,
-                        evolved_npc_kg,
-                        team_name,
-                        npc_name,
-                        result_state.current_path)
             except Exception as e:
                 print(colored(f"Error during real-time KG evolution: {e}", "red"))
 
@@ -819,8 +950,9 @@ def process_corca_result(
         except Exception as e:
             import traceback
             print(colored(f"Could not generate team suggestions: {e}", "yellow"))
-            traceback.print_exc()
-
+            traceback.print_exc()
+
+
 def _read_npcsh_global_env() -> Dict[str, str]:
     global_env_file = Path(".npcsh_global")
     env_vars = {}
{npcsh-1.0.34 → npcsh-1.0.36}/setup.py

@@ -78,7 +78,7 @@ extra_files = package_files("npcsh/npc_team/")
 
 setup(
     name="npcsh",
-    version="1.0.34",
+    version="1.0.36",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements,  # Only install base requirements by default
     extras_require={