npcsh 1.1.2-py3-none-any.whl → 1.1.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +1 -30
- npcsh/alicanto.py +10 -5
- npcsh/build.py +291 -0
- npcsh/corca.py +263 -154
- npcsh/npc.py +127 -46
- npcsh/npcsh.py +1 -1
- npcsh/routes.py +229 -21
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/METADATA +10 -1
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/RECORD +41 -40
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/bash_executer.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/image_generation.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/internet_search.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/kg_search.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/memory_search.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/python_executor.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/screen_cap.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/WHEEL +0 -0
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/top_level.txt +0 -0
npcsh/corca.py
CHANGED
@@ -377,75 +377,87 @@ def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, c
 
 
 
-def
-
-
-    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
-        all_available_mcp_tools = state.mcp_client.available_tools_llm
-
-        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
-            mcp_tools_for_llm = [
-                tool_def for tool_def in all_available_mcp_tools
-                if tool_def['function']['name'] in selected_mcp_tools_names
-            ]
-            if not mcp_tools_for_llm:
-                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
-        else:
-            mcp_tools_for_llm = all_available_mcp_tools
-    else:
-        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
-
-    if len(state.messages) > 20:
-        compressed_state = state.npc.compress_planning_state(state.messages)
-        state.messages = [{"role": "system", "content": state.npc.get_system_prompt() + f' Your current task: {compressed_state}'}]
-        print("Compressed messages during tool execution.")
+def process_mcp_stream(stream_response, active_npc):
+    collected_content = ""
+    tool_calls = []
 
-
-
-
-        messages=state.messages,
-        tools=mcp_tools_for_llm,
-        stream=state.stream_output,
-        team=state.team,
-        context=f' The users working directory is {state.current_path}'
-    )
-
-    stream_response = response_dict.get('response')
-    messages = response_dict.get('messages', state.messages)
-    tool_calls = response_dict.get('tool_calls', [])
+    interrupted = False
+    sys.stdout.write('\033[s')
+    sys.stdout.flush()
 
-
+    try:
+        for chunk in stream_response:
+            if hasattr(active_npc, 'provider') and active_npc.provider == "ollama" and 'gpt-oss' not in active_npc.model:
+                if hasattr(chunk, 'message') and hasattr(chunk.message, 'tool_calls') and chunk.message.tool_calls:
+                    for tool_call in chunk.message.tool_calls:
+                        tool_call_data = {'id': getattr(tool_call, 'id', ''),
+                                          'type': 'function',
+                                          'function': {
+                                              'name': getattr(tool_call.function, 'name', '') if hasattr(tool_call, 'function') else '',
+                                              'arguments': getattr(tool_call.function, 'arguments', {}) if hasattr(tool_call, 'function') else {}
+                                          }
+                                          }
+                        if isinstance(tool_call_data['function']['arguments'], str):
+                            try:
+                                tool_call_data['function']['arguments'] = json.loads(tool_call_data['function']['arguments'])
+                            except json.JSONDecodeError:
+                                tool_call_data['function']['arguments'] = {'raw': tool_call_data['function']['arguments']}
+
+                        tool_calls.append(tool_call_data)
+                if hasattr(chunk, 'message') and hasattr(chunk.message, 'content') and chunk.message.content:
+                    collected_content += chunk.message.content
+                    print(chunk.message.content, end='', flush=True)
+
+            else:
+                if hasattr(chunk, 'choices') and chunk.choices:
+                    delta = chunk.choices[0].delta
+
+                    if hasattr(delta, 'content') and delta.content:
+                        collected_content += delta.content
+                        print(delta.content, end='', flush=True)
+
+                    if hasattr(delta, 'tool_calls') and delta.tool_calls:
+                        for tool_call_delta in delta.tool_calls:
+                            if hasattr(tool_call_delta, 'index'):
+                                idx = tool_call_delta.index
+
+                                while len(tool_calls) <= idx:
+                                    tool_calls.append({
+                                        'id': '',
+                                        'type': 'function',
+                                        'function': {'name': '', 'arguments': ''}
+                                    })
+
+                                if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
+                                    tool_calls[idx]['id'] = tool_call_delta.id
+                                if hasattr(tool_call_delta, 'function'):
+                                    if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
+                                        tool_calls[idx]['function']['name'] = tool_call_delta.function.name
+
+                                    if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
+                                        tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
+    except KeyboardInterrupt:
+        interrupted = True
+        print('\n⚠️ Stream interrupted by user')
 
-
-
-
-    state.messages = messages
+    sys.stdout.write('\033[u')
+    sys.stdout.write('\033[0J')
+    sys.stdout.flush()
 
-    if
-
-        tool_calls,
-        state.mcp_client,
-        state.messages,
-        state.npc,
-        state.stream_output
-    )
-    if final_content:
-        collected_content = final_content
+    if collected_content:
+        render_markdown(collected_content)
 
-    return
-        "output": collected_content,
-        "tool_calls": tool_calls,
-        "messages": state.messages
-    }
+    return collected_content, tool_calls, interrupted
 
 
 def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output):
     if not tool_calls or not mcp_client:
-        return None, messages
+        return None, messages, False
 
     messages = clean_orphaned_tool_calls(messages)
 
     print(colored("\n🔧 Executing MCP tools...", "cyan"))
+    user_interrupted = False
 
     while tool_calls:
         tool_responses = []
@@ -455,7 +467,6 @@ def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output)
             messages = [{"role": "system", "content": npc.get_system_prompt() + f' Your current task: {compressed_state}'}]
             print("Compressed messages during tool execution.")
 
-
        for tool_call in tool_calls:
             tool_name = tool_call['function']['name']
             tool_args = tool_call['function']['arguments']
@@ -501,8 +512,9 @@ def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output)
                 print(colored(f" ✓ {tool_name} completed", "green"))
 
             except KeyboardInterrupt:
-                print(colored(f"\n ⚠️ Tool execution interrupted", "yellow"))
-
+                print(colored(f"\n ⚠️ Tool execution interrupted by user", "yellow"))
+                user_interrupted = True
+                break
             except Exception as e:
                 print(colored(f" ✗ {tool_name} failed: {e}", "red"))
                 tool_responses.append({
@@ -512,6 +524,9 @@ def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output)
                     "content": f"Error: {str(e)}"
                 })
 
+        if user_interrupted:
+            return None, messages, True
+
         current_messages = messages + tool_responses
 
         try:
@@ -524,16 +539,17 @@ def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output)
                 team=None
             )
         except KeyboardInterrupt:
-            print(colored(f"\n ⚠️ Follow-up response interrupted", "yellow"))
-            return None, messages
+            print(colored(f"\n ⚠️ Follow-up response interrupted by user", "yellow"))
+            return None, messages, True
 
         follow_up_messages = follow_up_response.get('messages', current_messages)
         follow_up_content = follow_up_response.get('response', '')
         follow_up_tool_calls = []
+        follow_up_interrupted = False
 
         if stream_output:
            if hasattr(follow_up_content, '__iter__'):
-                collected_content, follow_up_tool_calls = process_mcp_stream(follow_up_content, npc)
+                collected_content, follow_up_tool_calls, follow_up_interrupted = process_mcp_stream(follow_up_content, npc)
            else:
                 collected_content = str(follow_up_content)
                 follow_up_content = collected_content
@@ -543,13 +559,16 @@ def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output)
            if last_message.get("role") == "assistant" and "tool_calls" in last_message:
                 follow_up_tool_calls = last_message["tool_calls"]
 
+        if follow_up_interrupted:
+            return follow_up_content, follow_up_messages, True
+
         messages = follow_up_messages
 
         if not follow_up_tool_calls:
            if not stream_output:
                 print('\n')
                 render_markdown(follow_up_content)
-            return follow_up_content, messages
+            return follow_up_content, messages, False
         else:
            if follow_up_content or follow_up_tool_calls:
                 assistant_message = {"role": "assistant", "content": follow_up_content}
@@ -560,7 +579,87 @@ def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output)
                 tool_calls = follow_up_tool_calls
                 print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
 
-    return None, messages
+    return None, messages, False
+
+
+def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
+    mcp_tools_for_llm = []
+
+    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
+        all_available_mcp_tools = state.mcp_client.available_tools_llm
+
+        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
+            mcp_tools_for_llm = [
+                tool_def for tool_def in all_available_mcp_tools
+                if tool_def['function']['name'] in selected_mcp_tools_names
+            ]
+            if not mcp_tools_for_llm:
+                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
+        else:
+            mcp_tools_for_llm = all_available_mcp_tools
+    else:
+        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
+
+    if len(state.messages) > 20:
+        compressed_state = state.npc.compress_planning_state(state.messages)
+        state.messages = [{"role": "system", "content": state.npc.get_system_prompt() + f' Your current task: {compressed_state}'}]
+        print("Compressed messages during tool execution.")
+
+    response_dict = get_llm_response_with_handling(
+        prompt=command,
+        npc=state.npc,
+        messages=state.messages,
+        tools=mcp_tools_for_llm,
+        stream=state.stream_output,
+        team=state.team,
+        context=f' The users working directory is {state.current_path}'
+    )
+
+    stream_response = response_dict.get('response')
+    messages = response_dict.get('messages', state.messages)
+    tool_calls = response_dict.get('tool_calls', [])
+
+    collected_content, stream_tool_calls, stream_interrupted = process_mcp_stream(stream_response, state.npc)
+
+    if stream_interrupted:
+        state.messages = messages
+        return state, {
+            "output": collected_content + "\n[Interrupted by user]",
+            "tool_calls": [],
+            "messages": state.messages,
+            "interrupted": True
+        }
+
+    if stream_tool_calls:
+        tool_calls = stream_tool_calls
+
+    state.messages = messages
+
+    if tool_calls and hasattr(state, 'mcp_client') and state.mcp_client:
+        final_content, state.messages, tools_interrupted = execute_mcp_tool_calls(
+            tool_calls,
+            state.mcp_client,
+            state.messages,
+            state.npc,
+            state.stream_output
+        )
+        if tools_interrupted:
+            return state, {
+                "output": (final_content or collected_content) + "\n[Interrupted by user]",
+                "tool_calls": tool_calls,
+                "messages": state.messages,
+                "interrupted": True
+            }
+        if final_content:
+            collected_content = final_content
+
+    return state, {
+        "output": collected_content,
+        "tool_calls": tool_calls,
+        "messages": state.messages,
+        "interrupted": False
+    }
+
 
 def _resolve_and_copy_mcp_server_path(
     explicit_path: Optional[str],
@@ -766,13 +865,15 @@ def process_corca_result(
     final_output_str = None
 
     if tool_calls and hasattr(result_state, 'mcp_client') and result_state.mcp_client:
-        final_output_str, result_state.messages = execute_mcp_tool_calls(
+        final_output_str, result_state.messages, tools_interrupted = execute_mcp_tool_calls(
            tool_calls,
            result_state.mcp_client,
            result_state.messages,
            result_state.npc,
            result_state.stream_output
        )
+        if tools_interrupted:
+            print(colored("\n⚠️ Tool execution interrupted", "yellow"))
    else:
        print('\n')
        if result_state.stream_output:
@@ -802,97 +903,97 @@ def process_corca_result(
        team=team_name,
    )
 
-
-    engine = command_history.engine
+    result_state.turn_count += 1
 
-
-
-
-
-
-
-
-
-    approved_facts = []
-    try:
-        facts = get_facts(
-            conversation_turn_text,
-            model=active_npc.model,
-            provider=active_npc.provider,
-            npc=active_npc,
-            context=memory_context
+    if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
+        conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
+        engine = command_history.engine
+
+        memory_examples = command_history.get_memory_examples_for_context(
+            npc=npc_name,
+            team=team_name,
+            directory_path=result_state.current_path
        )
 
-
-
-
-
-
-
-
-
-
-
-
-        approvals = memory_approval_ui(memories_for_approval)
+        memory_context = format_memory_context(memory_examples)
+
+        approved_facts = []
+        try:
+            facts = get_facts(
+                conversation_turn_text,
+                model=active_npc.model,
+                provider=active_npc.provider,
+                npc=active_npc,
+                context=memory_context
+            )
 
-
-
-
+            if facts:
+                memories_for_approval = []
+                for i, fact in enumerate(facts):
+                    memories_for_approval.append({
+                        "memory_id": f"temp_{i}",
+                        "content": fact['statement'],
+                        "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
+                        "npc": npc_name,
+                        "fact_data": fact
+                    })
 
-
-
-
-
-
-
-
-
-
-
-
-
+                approvals = memory_approval_ui(memories_for_approval)
+
+                for approval in approvals:
+                    fact_data = next(m['fact_data'] for m in memories_for_approval
+                                     if m['memory_id'] == approval['memory_id'])
+
+                    command_history.add_memory_to_database(
+                        message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
+                        conversation_id=result_state.conversation_id,
+                        npc=npc_name,
+                        team=team_name,
+                        directory_path=result_state.current_path,
+                        initial_memory=fact_data['statement'],
+                        status=approval['decision'],
+                        model=active_npc.model,
+                        provider=active_npc.provider,
+                        final_memory=approval.get('final_memory')
+                    )
+
+                    if approval['decision'] in ['human-approved', 'human-edited']:
+                        approved_fact = {
+                            'statement': approval.get('final_memory') or fact_data['statement'],
+                            'source_text': fact_data.get('source_text', ''),
+                            'type': fact_data.get('type', 'explicit'),
+                            'generation': 0
+                        }
+                        approved_facts.append(approved_fact)
 
-            if approval['decision'] in ['human-approved', 'human-edited']:
-                approved_fact = {
-                    'statement': approval.get('final_memory') or fact_data['statement'],
-                    'source_text': fact_data.get('source_text', ''),
-                    'type': fact_data.get('type', 'explicit'),
-                    'generation': 0
-                }
-                approved_facts.append(approved_fact)
-
-    except Exception as e:
-        print(colored(f"Memory generation error: {e}", "yellow"))
-
-    if result_state.build_kg and approved_facts:
-        try:
-            if not should_skip_kg_processing(user_input, final_output_str):
-                npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
-                evolved_npc_kg, _ = kg_evolve_incremental(
-                    existing_kg=npc_kg,
-                    new_facts=approved_facts,
-                    model=active_npc.model,
-                    provider=active_npc.provider,
-                    npc=active_npc,
-                    get_concepts=True,
-                    link_concepts_facts=False,
-                    link_concepts_concepts=False,
-                    link_facts_facts=False,
-                )
-                save_kg_to_db(
-                    engine,
-                    evolved_npc_kg,
-                    team_name,
-                    npc_name,
-                    result_state.current_path
-                )
        except Exception as e:
-            print(colored(f"
+            print(colored(f"Memory generation error: {e}", "yellow"))
 
-
+        if result_state.build_kg and approved_facts:
+            try:
+                if not should_skip_kg_processing(user_input, final_output_str):
+                    npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
+                    evolved_npc_kg, _ = kg_evolve_incremental(
+                        existing_kg=npc_kg,
+                        new_facts=approved_facts,
+                        model=active_npc.model,
+                        provider=active_npc.provider,
+                        npc=active_npc,
+                        get_concepts=True,
+                        link_concepts_facts=False,
+                        link_concepts_concepts=False,
+                        link_facts_facts=False,
+                    )
+                    save_kg_to_db(
+                        engine,
+                        evolved_npc_kg,
+                        team_name,
+                        npc_name,
+                        result_state.current_path
+                    )
+            except Exception as e:
+                print(colored(f"Error during real-time KG evolution: {e}", "red"))
 
-    if result_state.turn_count > 0 and result_state.turn_count % 10 == 0:
        print(colored("\nChecking for potential team improvements...", "cyan"))
        try:
            summary = breathe(messages=result_state.messages[-20:],
@@ -916,7 +1017,7 @@ def process_corca_result(
 
            Respond with JSON: """ + """
            {
-                "suggestion": "Your sentence.
+                "suggestion": "Your sentence."
            }
            """
            response = get_llm_response(prompt,
@@ -951,9 +1052,8 @@ def process_corca_result(
        except Exception as e:
            import traceback
            print(colored(f"Could not generate team suggestions: {e}", "yellow"))
-            traceback.print_exc()
-
-
+            traceback.print_exc()
+
 def _read_npcsh_global_env() -> Dict[str, str]:
    global_env_file = Path(".npcsh_global")
    env_vars = {}
@@ -1135,6 +1235,7 @@ def create_corca_state_and_mcp_client(conversation_id,
 
    return state
 
+
 def enter_corca_mode(command: str, **kwargs):
    state: ShellState = kwargs.get('shell_state')
    command_history: CommandHistory = kwargs.get('command_history')
@@ -1202,19 +1303,28 @@ def enter_corca_mode(command: str, **kwargs):
 
            if not user_input:
                continue
+
            try:
                state, output = execute_command_corca(user_input, state, command_history)
+
+                if isinstance(output, dict) and output.get('interrupted'):
+                    print(colored("\n⚠️ Command interrupted. MCP session maintained.", "yellow"))
+                    continue
 
                process_corca_result(user_input,
                                     state,
                                     output,
                                     command_history,
                                     )
+            except KeyboardInterrupt:
+                print(colored("\n⚠️ Interrupted. Type 'exit' to quit Corca mode.", "yellow"))
+                continue
            except Exception as e:
-                print(f'An Exception has occurred {e}')
+                print(colored(f'An Exception has occurred: {e}', "red"))
+                traceback.print_exc()
 
        except KeyboardInterrupt:
-            print()
+            print(colored("\n⚠️ Interrupted. Type 'exit' to quit Corca mode.", "yellow"))
            continue
        except EOFError:
            print("\nExiting Corca Mode.")
@@ -1226,7 +1336,6 @@ def enter_corca_mode(command: str, **kwargs):
 
    render_markdown("\n# Exiting Corca Mode")
    return {"output": "", "messages": state.messages}
-
def main():
    parser = argparse.ArgumentParser(description="Corca - An MCP-powered npcsh shell.")
    parser.add_argument("--mcp-server-path", type=str, help="Path to an MCP server script to connect to.")