npcsh 1.0.37__tar.gz → 1.1.1__tar.gz
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- {npcsh-1.0.37 → npcsh-1.1.1}/PKG-INFO +1 -1
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/_state.py +150 -6
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/corca.py +16 -15
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/corca.npc +1 -2
- npcsh-1.1.1/npcsh/npc_team/jinxs/kg_search.jinx +43 -0
- npcsh-1.1.1/npcsh/npc_team/jinxs/memory_search.jinx +36 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npcsh.py +14 -5
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh.egg-info/PKG-INFO +1 -1
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh.egg-info/SOURCES.txt +2 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/setup.py +1 -1
- {npcsh-1.0.37 → npcsh-1.1.1}/LICENSE +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/README.md +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/__init__.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/alicanto.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/guac.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/mcp_helpers.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/mcp_server.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/jinxs/bash_executer.jinx +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/jinxs/edit_file.jinx +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/jinxs/image_generation.jinx +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/jinxs/internet_search.jinx +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/jinxs/python_executor.jinx +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/jinxs/screen_cap.jinx +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/plonk.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/pti.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/routes.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/spool.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/wander.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh/yap.py +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh.egg-info/dependency_links.txt +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh.egg-info/entry_points.txt +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh.egg-info/requires.txt +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/npcsh.egg-info/top_level.txt +0 -0
- {npcsh-1.0.37 → npcsh-1.1.1}/setup.cfg +0 -0
--- npcsh-1.0.37/npcsh/_state.py
+++ npcsh-1.1.1/npcsh/_state.py
@@ -2193,16 +2193,51 @@ def execute_command(
     state: ShellState,
     review = True,
     router = None,
+    command_history = None,
 ) -> Tuple[ShellState, Any]:
 
     if not command.strip():
         return state, ""
+
     mode_change, state = check_mode_switch(command, state)
     if mode_change:
         return state, 'Mode changed.'
 
+    npc_name = state.npc.name if isinstance(state.npc, NPC) else "__none__"
+    team_name = state.team.name if state.team else "__none__"
+
+    if command_history:
+        relevant_memories = get_relevant_memories(
+            command_history=command_history,
+            npc_name=npc_name,
+            team_name=team_name,
+            path=state.current_path,
+            query=command,
+            max_memories=5,
+            state=state
+        )
+        print('Memory jogged...')
+        print(relevant_memories)
+
+        if relevant_memories:
+            memory_context = "\n".join([
+                f"- {m.get('final_memory', '')}"
+                for m in relevant_memories
+            ])
+            memory_msg = {
+                "role": "system",
+                "content": f"Relevant memories:\n{memory_context}"
+            }
+            if not state.messages or \
+               state.messages[0].get("role") != "system":
+                state.messages.insert(0, memory_msg)
+            else:
+                state.messages[0]["content"] += \
+                    f"\n\n{memory_msg['content']}"
+
     original_command_for_embedding = command
     commands = split_by_pipes(command)
+
     stdin_for_next = None
     final_output = None
     current_state = state
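The block added to execute_command above injects approved memories into the conversation as a system message, merging with an existing system prompt when one is already present. A minimal, self-contained sketch of that merge logic, with toy dictionaries standing in for npcsh's real memory records and message list:

# Illustrative sketch of the system-message merge in the hunk above.
# relevant_memories and messages are toy stand-ins, not real ShellState fields.
relevant_memories = [
    {"final_memory": "The user prefers concise replies."},
    {"final_memory": "Tests for this project are run with pytest."},
]
messages = [{"role": "user", "content": "summarize the last run"}]

memory_context = "\n".join(f"- {m.get('final_memory', '')}" for m in relevant_memories)
memory_msg = {"role": "system", "content": f"Relevant memories:\n{memory_context}"}

if not messages or messages[0].get("role") != "system":
    messages.insert(0, memory_msg)                            # no system prompt yet: prepend one
else:
    messages[0]["content"] += f"\n\n{memory_msg['content']}"  # extend the existing prompt

print(messages[0]["content"])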
@@ -2448,10 +2483,16 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
         team_name_from_ctx = team_ctx.get("name")
         if team_name_from_ctx:
             team.name = team_name_from_ctx
-        elif team_dir
-
+        elif team_dir:
+            normalized_dir = os.path.normpath(team_dir)
+            basename = os.path.basename(normalized_dir)
+            if basename and basename != 'npc_team':
+                team.name = basename
+            else:
+                team.name = "npcsh"
         else:
-            team.name = "
+            team.name = "npcsh"
+
 
     return command_history, team, forenpc_obj
 
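The new fallback in setup_shell derives the team name from the team directory: it takes the basename of the normalized path and only uses it when it is not the generic 'npc_team' folder, otherwise defaulting to "npcsh". A quick standalone illustration of that resolution (derive_team_name is a hypothetical helper wrapping the logic shown above, not an npcsh function):

import os
from typing import Optional

def derive_team_name(team_dir: Optional[str]) -> str:
    # Hypothetical helper mirroring the elif/else branch added above.
    if not team_dir:
        return "npcsh"
    basename = os.path.basename(os.path.normpath(team_dir))
    return basename if basename and basename != "npc_team" else "npcsh"

print(derive_team_name("/home/user/projects/myproj/npc_team"))  # -> npcsh
print(derive_team_name("/home/user/projects/myproj/"))          # -> myproj
print(derive_team_name(None))                                   # -> npcsh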
@@ -2462,6 +2503,109 @@ from npcpy.memory.memory_processor import memory_approval_ui
 from npcpy.ft.memory_trainer import MemoryTrainer
 from npcpy.llm_funcs import get_facts
 
+def get_relevant_memories(
+    command_history: CommandHistory,
+    npc_name: str,
+    team_name: str,
+    path: str,
+    query: Optional[str] = None,
+    max_memories: int = 10,
+    state: Optional[ShellState] = None
+) -> List[Dict]:
+
+    engine = command_history.engine
+
+    all_memories = command_history.get_memories_for_scope(
+        npc=npc_name,
+        team=team_name,
+        directory_path=path,
+        status='human-approved'
+    )
+
+    if not all_memories:
+        return []
+
+    if len(all_memories) <= max_memories and not query:
+        return all_memories
+
+    if query:
+        query_lower = query.lower()
+        keyword_matches = [
+            m for m in all_memories
+            if query_lower in (m.get('final_memory') or m.get('initial_memory') or '').lower()
+        ]
+
+        if keyword_matches:
+            return keyword_matches[:max_memories]
+
+    if state and state.embedding_model and state.embedding_provider:
+        try:
+            from npcpy.gen.embeddings import get_embeddings
+
+            search_text = query if query else "recent context"
+            query_embedding = get_embeddings(
+                [search_text],
+                state.embedding_model,
+                state.embedding_provider
+            )[0]
+
+            memory_texts = [
+                m.get('final_memory', '') for m in all_memories
+            ]
+            memory_embeddings = get_embeddings(
+                memory_texts,
+                state.embedding_model,
+                state.embedding_provider
+            )
+
+            import numpy as np
+            similarities = []
+            for mem_emb in memory_embeddings:
+                similarity = np.dot(query_embedding, mem_emb) / (
+                    np.linalg.norm(query_embedding) *
+                    np.linalg.norm(mem_emb)
+                )
+                similarities.append(similarity)
+
+            sorted_indices = np.argsort(similarities)[::-1]
+            return [all_memories[i] for i in sorted_indices[:max_memories]]
+
+        except Exception as e:
+            print(colored(
+                f"RAG search failed, using recent: {e}",
+                "yellow"
+            ))
+
+    return all_memories[-max_memories:]
+
+
+def search_kg_facts(
+    self,
+    npc: str,
+    team: str,
+    directory_path: str,
+    query: str
+) -> List[Dict]:
+
+    kg = load_kg_from_db(
+        self.engine,
+        team,
+        npc,
+        directory_path
+    )
+
+    if not kg or 'facts' not in kg:
+        return []
+
+    query_lower = query.lower()
+    matching_facts = []
+
+    for fact in kg['facts']:
+        statement = fact.get('statement', '').lower()
+        if query_lower in statement:
+            matching_facts.append(fact)
+
+    return matching_facts
 
 def format_memory_context(memory_examples):
     if not memory_examples:
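When keyword matching comes up empty, get_relevant_memories above falls back to embedding-based ranking: it embeds the query and every memory, scores each pair with cosine similarity, and returns the top max_memories. The per-memory loop can also be written as a single vectorized NumPy expression; a sketch with random vectors standing in for the real get_embeddings output:

import numpy as np

# Random vectors stand in for embeddings returned by npcpy's get_embeddings.
rng = np.random.default_rng(0)
query_embedding = rng.normal(size=384)            # embedding of the query text
memory_embeddings = rng.normal(size=(20, 384))    # one embedding per stored memory
max_memories = 5

# Cosine similarity of the query against every memory in one expression.
norms = np.linalg.norm(memory_embeddings, axis=1) * np.linalg.norm(query_embedding)
similarities = memory_embeddings @ query_embedding / norms

# Top-scoring memory indices, best first (same ordering the loop + argsort produces).
top_indices = np.argsort(similarities)[::-1][:max_memories]
print(top_indices, similarities[top_indices])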
@@ -2548,8 +2692,8 @@ def process_result(
     output: Any,
     command_history: CommandHistory,
 ):
-    team_name = result_state.team.name if result_state.team else "
-    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "
+    team_name = result_state.team.name if result_state.team else "npcsh"
+    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "npcsh"
 
     active_npc = result_state.npc if isinstance(result_state.npc, NPC) else NPC(
         name="default",
@@ -2629,7 +2773,7 @@ def process_result(
         model=active_npc.model,
         provider=active_npc.provider,
         npc=active_npc,
-        context=memory_context
+        context=memory_context + 'Memories should be fully self contained. They should not use vague pronouns or words like that or this or it. Do not generate more than 1-2 memories at a time.'
     )
 
     if facts:
--- npcsh-1.0.37/npcsh/corca.py
+++ npcsh-1.1.1/npcsh/corca.py
@@ -396,8 +396,8 @@ def execute_command_corca(command: str, state: ShellState, command_history, sele
         cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
 
     if len(state.messages) > 20:
-        compressed_state = state.npc.compress_planning_state(messages)
-        state.messages = [{"role": "system", "content": state.npc.
+        compressed_state = state.npc.compress_planning_state(state.messages)
+        state.messages = [{"role": "system", "content": state.npc.get_system_prompt() + f' Your current task: {compressed_state}'}]
         print("Compressed messages during tool execution.")
 
     response_dict = get_llm_response_with_handling(
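The corca fix above passes state.messages (the previous code referenced an undefined messages name) into compress_planning_state and collapses the history into a single system message carrying the compressed task state. The general pattern, sketched with placeholders: summarize() and base_prompt stand in for the npc.compress_planning_state() and npc.get_system_prompt() calls used in the hunk.

MAX_MESSAGES = 20

def summarize(messages):
    # Placeholder standing in for npc.compress_planning_state().
    return f"{len(messages)} earlier messages about the current task."

def maybe_compress(messages, base_prompt):
    # Collapse a long history into one system message, as the hunk above does.
    if len(messages) > MAX_MESSAGES:
        compressed = summarize(messages)
        return [{"role": "system",
                 "content": base_prompt + f" Your current task: {compressed}"}]
    return messages

history = [{"role": "user", "content": f"step {i}"} for i in range(25)]
history = maybe_compress(history, "You are a helpful shell assistant.")
print(history[0]["content"])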
@@ -640,27 +640,28 @@ def _resolve_and_copy_mcp_server_path(
 
     cprint("No MCP server script found in any expected location.", "yellow")
     return None
-
-
 def print_corca_welcome_message():
     turq = "\033[38;2;64;224;208m"
     chrome = "\033[38;2;211;211;211m"
+    orange = "\033[38;2;255;165;0m"
     reset = "\033[0m"
 
     print(
         f"""
-
-{turq}
-{turq}
-{
-{
-{
-{
-
-
+{turq} ██████ ██████ ██████ ██████ ██████{reset}
+{turq}██ ██ ██ ██ ██ ██ ██ ██ ██🦌🦌██{reset}
+{turq}██ ██ ██ ██ ██ ██ ██🦌🦌██{reset}
+{chrome}██ ██ ██ ████████ ██ ████████{reset}
+{chrome}██ ██ ██ ██ ███ ██ ██ ██{reset}
+{chrome}██ ██ ██ ██ ██ ███ ██ ██ ██ ██{reset}
+{orange} ██████ ██████ ██ ███ ███████ ██ ██{reset}
+
+{chrome} 🦌 C O R C A 🦌{reset}
+
+{turq}MCP-powered shell for agentic workflows{reset}
 """
-    )
-
+    )
+
 def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None, team=None,
                                       current_path=None, mcp_server_path_from_request: Optional[str] = None):
     from npcsh._state import ShellState
--- /dev/null
+++ npcsh-1.1.1/npcsh/npc_team/jinxs/kg_search.jinx
@@ -0,0 +1,43 @@
+jinx_name: search_kg
+description: Search knowledge graph for relevant facts
+inputs:
+  - query
+steps:
+  - name: retrieve_facts
+    engine: python
+    code: |
+      from npcpy.memory.command_history import load_kg_from_db
+      import os
+
+      kg = load_kg_from_db(
+          command_history.engine,
+          team.name if team else '__none__',
+          npc.name if hasattr(npc, 'name') else '__none__',
+          os.getcwd()
+      )
+
+      query_lower = '{{ query }}'.lower()
+      matching_facts = []
+
+      if kg and 'facts' in kg:
+          for fact in kg['facts']:
+              statement = fact.get('statement', '').lower()
+              if query_lower in statement:
+                  matching_facts.append(fact)
+
+      output = []
+      for i, fact in enumerate(matching_facts[:10], 1):
+          statement = fact.get('statement', '')
+          fact_type = fact.get('type', 'unknown')
+          output.append(f"{i}. [{fact_type}] {statement}")
+
+      output = "\n".join(output) if output else "No facts found"
+
+  - name: analyze_facts
+    engine: natural
+    code: |
+      Knowledge graph facts for query "{{ query }}":
+
+      {{ retrieve_facts }}
+
+      Analyze how these facts relate to the query.
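The retrieve_facts step above is a plain case-insensitive substring match over the knowledge graph's fact statements, formatted as a numbered list of at most ten hits. Run against a toy kg dict (the real one comes from load_kg_from_db), the core of that step looks like this:

# Toy knowledge graph; in the jinx the dict comes from load_kg_from_db().
kg = {
    "facts": [
        {"statement": "Jinxs are YAML-defined tool pipelines", "type": "definition"},
        {"statement": "Corca connects to an MCP server for its tools", "type": "observation"},
        {"statement": "Approved memories are scoped by npc, team, and directory", "type": "observation"},
    ]
}
query = "mcp"

query_lower = query.lower()
matching_facts = [
    f for f in kg.get("facts", [])
    if query_lower in f.get("statement", "").lower()
]

lines = [
    f"{i}. [{f.get('type', 'unknown')}] {f.get('statement', '')}"
    for i, f in enumerate(matching_facts[:10], 1)
]
print("\n".join(lines) if lines else "No facts found")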
--- /dev/null
+++ npcsh-1.1.1/npcsh/npc_team/jinxs/memory_search.jinx
@@ -0,0 +1,36 @@
+jinx_name: search_memories
+description: Search through approved memories for relevant context
+inputs:
+  - query
+steps:
+  - name: retrieve_memories
+    engine: python
+    code: |
+      from npcsh._state import get_relevant_memories
+      import os
+
+      memories = get_relevant_memories(
+          command_history=command_history,
+          npc_name=npc.name if hasattr(npc, 'name') else '__none__',
+          team_name=team.name if team else '__none__',
+          path=os.getcwd(),
+          query='{{ query }}',
+          max_memories=10,
+          state=state
+      )
+
+      output = []
+      for i, mem in enumerate(memories, 1):
+          content = mem.get('final_memory', mem.get('initial_memory', ''))
+          output.append(f"{i}. {content}")
+
+      output = "\n".join(output) if output else "No memories found"
+
+  - name: format_results
+    engine: natural
+    code: |
+      Found memories for query "{{ query }}":
+
+      {{ retrieve_memories }}
+
+      Summarize the key points from these memories.
--- npcsh-1.0.37/npcsh/npcsh.py
+++ npcsh-1.1.1/npcsh/npcsh.py
@@ -52,15 +52,22 @@ def print_welcome_message():
 
     print(
         """
+___________________________________________
+___________________________________________
+___________________________________________
+
 Welcome to \033[1;94mnpc\033[0m\033[1;38;5;202msh\033[0m!
 \033[1;94m \033[0m\033[1;38;5;202m _ \\\\
 \033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
-\033[1;94m| '_ \\ | '
+\033[1;94m| '_ \\ | '_ \\ / __|\033[0m\033[1;38;5;202m / __/ | |_ _| \\\\
 \033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m \\_ \\ | | | | //
 \033[1;94m|_| |_|| .__/ \\___|\033[0m\033[1;38;5;202m |___/ |_| |_| //
-\033[1;94m
-\033[1;94m
-\033[1;94m
+\033[1;94m|🤖| \033[0m\033[1;38;5;202m //
+\033[1;94m|🤖|
+\033[1;94m|🤖|
+___________________________________________
+___________________________________________
+___________________________________________
 
 Begin by asking a question, issuing a bash command, or typing '/help' for more information.
 
@@ -197,7 +204,9 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
             state, output = execute_command(user_input,
                                             state,
                                             review = True,
-                                            router=router
+                                            router=router,
+                                            command_history=command_history)
+
             process_result(user_input,
                            state,
                            output,
--- npcsh-1.0.37/npcsh.egg-info/SOURCES.txt
+++ npcsh-1.1.1/npcsh.egg-info/SOURCES.txt
@@ -46,5 +46,7 @@ npcsh/npc_team/jinxs/bash_executer.jinx
 npcsh/npc_team/jinxs/edit_file.jinx
 npcsh/npc_team/jinxs/image_generation.jinx
 npcsh/npc_team/jinxs/internet_search.jinx
+npcsh/npc_team/jinxs/kg_search.jinx
+npcsh/npc_team/jinxs/memory_search.jinx
 npcsh/npc_team/jinxs/python_executor.jinx
 npcsh/npc_team/jinxs/screen_cap.jinx
--- npcsh-1.0.37/setup.py
+++ npcsh-1.1.1/setup.py
@@ -78,7 +78,7 @@ extra_files = package_files("npcsh/npc_team/")
 
 setup(
     name="npcsh",
-    version="1.
+    version="1.1.1",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements,  # Only install base requirements by default
     extras_require={