npcsh 1.1.9__tar.gz → 1.1.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcsh-1.1.9 → npcsh-1.1.11}/PKG-INFO +1 -1
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/_state.py +45 -12
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/guac.py +6 -4
- npcsh-1.1.11/npcsh/npc_team/jinxs/code/sh.jinx +38 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/code/sql.jinx +2 -2
- npcsh-1.1.11/npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
- npcsh-1.1.11/npcsh/npc_team/jinxs/utils/chat.jinx +17 -0
- npcsh-1.1.11/npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
- npcsh-1.1.11/npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/serve.jinx +0 -3
- npcsh-1.1.11/npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
- npcsh-1.1.11/npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npcsh.py +2 -1
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/routes.py +0 -1
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh.egg-info/PKG-INFO +1 -1
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh.egg-info/SOURCES.txt +5 -4
- {npcsh-1.1.9 → npcsh-1.1.11}/setup.py +1 -1
- npcsh-1.1.9/npcsh/npc_team/jinxs/code/sh.jinx +0 -19
- npcsh-1.1.9/npcsh/npc_team/jinxs/utils/breathe.jinx +0 -20
- npcsh-1.1.9/npcsh/npc_team/jinxs/utils/flush.jinx +0 -39
- npcsh-1.1.9/npcsh/npc_team/jinxs/utils/plan.jinx +0 -33
- npcsh-1.1.9/npcsh/npc_team/jinxs/utils/trigger.jinx +0 -36
- npcsh-1.1.9/npcsh/npc_team/jinxs/utils/vixynt.jinx +0 -117
- {npcsh-1.1.9 → npcsh-1.1.11}/LICENSE +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/README.md +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/__init__.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/alicanto.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/build.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/corca.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/mcp_helpers.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/mcp_server.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/code/python.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/modes/alicanto.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/modes/corca.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/modes/guac.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/modes/plonk.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/modes/pti.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/modes/spool.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/modes/wander.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/modes/yap.jinx +0 -0
- {npcsh-1.1.9/npcsh/npc_team/jinxs/utils → npcsh-1.1.11/npcsh/npc_team/jinxs/npc_studio}/npc-studio.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/core/build.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/core/compile.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/core/help.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/core/init.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/core/jinxs.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/core/set.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/edit_file.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/ots.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/roll.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/sample.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/search.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/sleep.jinx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/plonk.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/pti.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/spool.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/wander.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh/yap.py +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh.egg-info/dependency_links.txt +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh.egg-info/entry_points.txt +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh.egg-info/requires.txt +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/npcsh.egg-info/top_level.txt +0 -0
- {npcsh-1.1.9 → npcsh-1.1.11}/setup.cfg +0 -0
{npcsh-1.1.9 → npcsh-1.1.11}/npcsh/_state.py

```diff
@@ -111,6 +111,7 @@ except importlib.metadata.PackageNotFoundError:
     VERSION = "unknown"


+from litellm import RateLimitError


 NPCSH_CHAT_MODEL = os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b")
@@ -2263,7 +2264,7 @@ def process_pipeline_command(
             images=state.attachments,
             stream=stream_final,
             context=info,
-            extra_globals=application_globals_for_jinx
+            extra_globals=application_globals_for_jinx
         )
         if not review:
             if isinstance(llm_result, dict):
@@ -2474,7 +2475,9 @@ def execute_command(
                 review=review,
                 router=router
             )
-
+            if isinstance(output, dict) and 'output' in output:
+                output = output['output']
+
             if is_last_command:
                 print(colored("✅ Pipeline complete", "green"))
                 return current_state, output
@@ -2513,7 +2516,23 @@ def execute_command(
                     f" → Passing to stage {stage_num + 1}",
                     "blue"
                 ))
-
+            except RateLimitError:
+                print(colored('Rate Limit Exceeded'))
+                # wait 30 seconds then truncate messages/condense context with breathing mechanism
+                # for now just limit to first plus last 10
+                messages = current_state.messages[0:1] + current_state.messages[-2:]
+                current_state.messages = messages
+                #retry
+                import time
+                print('sleeping...')
+                print(current_state)
+                print(current_state.messages)
+                time.sleep(30)
+
+
+                return execute_command(command, current_state, review=review, router=router,)
+
+
         except Exception as pipeline_error:
             import traceback
             traceback.print_exc()
```
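The new `RateLimitError` branch shrinks the conversation and retries. Note that while the inline comment mentions keeping the first plus last ten messages, the slice `[0:1] + [-2:]` keeps the first plus the last two. A minimal standalone sketch of the same truncate-and-retry pattern, with illustrative names not taken from the package:

```python
import time

from litellm import RateLimitError  # same import the hunk adds


def call_with_truncated_retry(messages, run, max_retries=3):
    """Retry `run` after rate limits, shrinking context each attempt.

    Mirrors the hunk: keep the first message (typically the system
    prompt) plus the most recent ones, sleep, then try again.
    """
    for _ in range(max_retries):
        try:
            return run(messages)
        except RateLimitError:
            # As in the hunk, only the first and last two messages survive.
            messages = messages[:1] + messages[-2:]
            time.sleep(30)
    raise RuntimeError("still rate limited after retries")
```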
{npcsh-1.1.9 → npcsh-1.1.11}/npcsh/_state.py (continued)

```diff
@@ -2665,10 +2684,12 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
         if use_jinxs == "c":
             global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
             if os.path.exists(global_jinxs_dir):
-
+                # Create the 'jinxs' subfolder within the new team's directory
+                destination_jinxs_dir = os.path.join(team_dir, "jinxs")
+                os.makedirs(destination_jinxs_dir, exist_ok=True)
+                shutil.copytree(global_jinxs_dir, destination_jinxs_dir, dirs_exist_ok=True)
         else:
             team_ctx_data["use_global_jinxs"] = True
-
         with open(ctx_path, "w") as f:
             yaml.dump(team_ctx_data, f)
     else:
@@ -2802,7 +2823,6 @@ def process_memory_approvals(command_history, memory_queue):
             approval['decision'],
             approval.get('final_memory')
         )
-
 def process_result(
     user_input: str,
     result_state: ShellState,
@@ -2835,10 +2855,17 @@ def process_result(
 
     final_output_str = None
 
+    # FIX: Handle dict output properly
     if isinstance(output, dict):
         output_content = output.get('output')
         model_for_stream = output.get('model', active_npc.model)
         provider_for_stream = output.get('provider', active_npc.provider)
+
+        # If output_content is still a dict or None, convert to string
+        if isinstance(output_content, dict):
+            output_content = str(output_content)
+        elif output_content is None:
+            output_content = "Command completed with no output"
     else:
         output_content = output
         model_for_stream = active_npc.model
@@ -2851,15 +2878,21 @@
         else:
             render_markdown(str(output_content))
     elif result_state.stream_output:
-
-
-
-
-
-
+        # FIX: Only stream if output_content is a generator, not a string
+        if isinstance(output_content, str):
+            final_output_str = output_content
+            render_markdown(final_output_str)
+        else:
+            final_output_str = print_and_process_stream_with_markdown(
+                output_content,
+                model_for_stream,
+                provider_for_stream,
+                show=True
+            )
     elif output_content is not None:
         final_output_str = str(output_content)
         render_markdown(final_output_str)
+
 
     if final_output_str:
         if result_state.messages:
```
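One detail behind the streaming guard above: iterating a plain Python string yields characters, so a string handed to a chunk-consuming stream printer gets "streamed" one character at a time instead of rendered as markdown. A tiny illustration (the printer here is a stand-in, not the package's function):

```python
def print_stream(chunks):
    # A stream printer consumes an iterable of chunks.
    for chunk in chunks:
        print(chunk, end="", flush=True)

print_stream("hello")               # iterates "h", "e", "l", "l", "o"
print_stream(iter(["hel", "lo"]))   # what an actual token stream looks like
```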
{npcsh-1.1.9 → npcsh-1.1.11}/npcsh/guac.py

```diff
@@ -1376,10 +1376,12 @@ def get_guac_prompt_char(command_count: int, guac_refresh_period = 100) -> str:
 def execute_guac_command(command: str, state: ShellState, locals_dict: Dict[str, Any], project_name: str, src_dir: Path, router) -> Tuple[ShellState, Any]:
     stripped_command = command.strip()
     output = None
-
-
-
-
+    try:
+        cmd_parts = shlex.split(stripped_command)
+        if cmd_parts and cmd_parts[0] in ["cd", "ls", "pwd"]:
+            return execute_command(stripped_command, state, review=False, router=router)
+    except Exception as e:
+        pass
     npc_team_dir = Path(state.team.team_path) if state.team and hasattr(state.team, 'team_path') else Path.cwd() / "npc_team"
 
     if not stripped_command:
```
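The hunk routes `cd`, `ls`, and `pwd` straight to `execute_command` after tokenizing with `shlex`. A standalone sketch of that passthrough check (names are illustrative); note that `shlex.split` raises `ValueError` on unbalanced quotes, which the hunk swallows with its broad `except`:

```python
import shlex

SHELL_PASSTHROUGH = {"cd", "ls", "pwd"}

def is_shell_passthrough(command: str) -> bool:
    """True when the first token is a command handled by the shell layer."""
    try:
        parts = shlex.split(command.strip())
    except ValueError:
        # e.g. an unterminated quote; fall back to normal handling
        return False
    return bool(parts) and parts[0] in SHELL_PASSTHROUGH
```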
npcsh-1.1.11/npcsh/npc_team/jinxs/code/sh.jinx (new file)

```diff
@@ -0,0 +1,38 @@
+jinx_name: sh
+description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
+inputs:
+  - bash_command
+steps:
+  - name: execute_bash
+    engine: python
+    code: |
+      import subprocess
+      import sys # Import sys to explicitly write to stderr for visibility
+
+      # Force a simple print to see if anything comes out
+      print("--- Jinx 'sh' code started ---", file=sys.stderr)
+
+      cmd = '{{ bash_command }}'
+
+      # Initialize output to an empty string to ensure it always exists
+      output = ""
+
+
+      process = subprocess.Popen(
+          cmd,
+          shell=True,
+          stdout=subprocess.PIPE,
+          stderr=subprocess.PIPE
+      )
+      stdout, stderr = process.communicate()
+
+      # Print raw stdout/stderr to sys.stderr for better visibility in some environments
+      print(f"Jinx 'sh' raw stdout: {stdout.decode('utf-8', errors='ignore')}", file=sys.stderr)
+      print(f"Jinx 'sh' raw stderr: {stderr.decode('utf-8', errors='ignore')}", file=sys.stderr)
+
+      if stderr:
+          output = f"Error: {stderr.decode('utf-8')}"
+      else:
+          output = stdout.decode('utf-8')
+
+
```
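For comparison, a hedged sketch of the same capture using `subprocess.run`. One behavioral caveat in the jinx above: any stderr output is treated as failure, even when the command exits 0 (many tools print warnings to stderr on success); keying off the exit code avoids that:

```python
import subprocess

def run_bash(cmd: str) -> str:
    """Capture a shell command's output, deciding success by exit code."""
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        return f"Error: {result.stderr}"
    return result.stdout
```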
{npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/code/sql.jinx

```diff
@@ -1,4 +1,4 @@
-jinx_name:
+jinx_name: sql
 description: Execute queries on the ~/npcsh_history.db to pull data. The database
   contains only information about conversations and other user-provided data. It does
   not store any information about individual files. Avoid using percent signs unless absolutely necessary.
@@ -13,4 +13,4 @@ steps:
           df = pd.read_sql_query(query, npc.db_conn)
       except Exception as e:
           df = pd.DataFrame({'Error': [str(e)]})
-      output = df.to_string()
+      output = df.to_string()
```
npcsh-1.1.11/npcsh/npc_team/jinxs/utils/agent.jinx (new file)

```diff
@@ -0,0 +1,17 @@
+jinx_name: agent
+description: Provides an LLM response with tool use enabled.
+inputs:
+  - query
+  - auto_process_tool_calls: True
+  - use_core_tools: True
+steps:
+  - name: get_agent_response
+    engine: python
+    code: |
+      response = npc.get_llm_response(
+          request=query,
+          messages=context.get('messages', []),
+          auto_process_tool_calls={{ auto_process_tool_calls | default(True) }},
+          use_core_tools={{ use_core_tools | default(True) }}
+      )
+      output = response.get('response', '')
```
npcsh-1.1.11/npcsh/npc_team/jinxs/utils/chat.jinx (new file)

```diff
@@ -0,0 +1,17 @@
+jinx_name: chat
+description: Provides a direct LLM response without tool use.
+inputs:
+  - query
+  - auto_process_tool_calls: False
+  - use_core_tools: False
+steps:
+  - name: get_chat_response
+    engine: python
+    code: |
+      response = npc.get_llm_response(
+          request=query,
+          messages=context.get('messages', []),
+          auto_process_tool_calls={{ auto_process_tool_calls | default(False) }},
+          use_core_tools={{ use_core_tools | default(False) }}
+      )
+      output = response.get('response', '')
```
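Both new jinxs lean on Jinja's `default` filter, so an unset input falls back at template-render time rather than in Python. A quick jinja2 illustration:

```python
from jinja2 import Template

tmpl = Template("use_core_tools={{ use_core_tools | default(False) }}")
print(tmpl.render())                      # use_core_tools=False
print(tmpl.render(use_core_tools=True))   # use_core_tools=True
```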
npcsh-1.1.11/npcsh/npc_team/jinxs/utils/compress.jinx (new file)

```diff
@@ -0,0 +1,140 @@
+jinx_name: "compress"
+description: "Manages conversation and knowledge context. Defaults to compacting context. Use flags for other operations."
+inputs:
+  - flush: "" # The number of recent messages to flush.
+  - sleep: False # If true, evolves the knowledge graph.
+  - dream: False # Used with --sleep. Runs creative synthesis.
+  - ops: "" # Used with --sleep. Comma-separated list of KG operations.
+  - model: "" # Used with --sleep. LLM model for KG evolution.
+  - provider: "" # Used with --sleep. LLM provider for KG evolution.
+steps:
+  - name: "manage_context_and_memory"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.llm_funcs import breathe
+      from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db
+      from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process
+
+      # --- Get all inputs from context ---
+      flush_n_str = context.get('flush')
+      is_sleeping = context.get('sleep')
+      is_dreaming = context.get('dream')
+      operations_str = context.get('ops')
+      llm_model = context.get('model')
+      llm_provider = context.get('provider')
+      output_messages = context.get('messages', [])
+
+      USAGE = """Usage:
+        /compress (Compacts conversation context)
+        /compress --flush <number> (Removes the last N messages)
+        /compress --sleep [...] (Evolves the knowledge graph)
+            --dream (With --sleep: enables creative synthesis)
+            --ops "op1,op2" (With --sleep: specifies KG operations)
+            --model <name> (With --sleep: specifies LLM model)
+            --provider <name> (With --sleep: specifies LLM provider)"""
+
+      # --- Argument Validation: Ensure mutual exclusivity ---
+      is_flushing = flush_n_str is not None and flush_n_str.strip() != ''
+      if is_sleeping and is_flushing:
+          context['output'] = f"Error: --sleep and --flush are mutually exclusive.\n{USAGE}"
+          context['messages'] = output_messages
+          exit()
+
+      # --- Dispatcher: Route to the correct functionality ---
+
+      # 1. SLEEP: Evolve the Knowledge Graph
+      if is_sleeping:
+          current_npc = context.get('npc')
+          current_team = context.get('team')
+
+          # Parameter setup for KG process
+          operations_config = [op.strip() for op in operations_str.split(',')] if operations_str else None
+          if not llm_model and current_npc: llm_model = current_npc.model
+          if not llm_provider and current_npc: llm_provider = current_npc.provider
+          if not llm_model: llm_model = "gemini-1.5-pro"
+          if not llm_provider: llm_provider = "gemini"
+
+          team_name = current_team.name if current_team else "__none__"
+          npc_name = current_npc.name if current_npc else "__none__"
+          current_path = os.getcwd()
+          scope_str = f"Team: '{team_name}', NPC: '{npc_name}', Path: '{current_path}'"
+
+          command_history = None
+          try:
+              db_path = os.getenv("NPCSH_DB_PATH", os.path.expanduser("~/npcsh_history.db"))
+              command_history = CommandHistory(db_path)
+              engine = command_history.engine
+              current_kg = load_kg_from_db(engine, team_name, npc_name, current_path)
+
+              if not current_kg or not current_kg.get('facts'):
+                  context['output'] = f"Knowledge graph for the current scope is empty. Nothing to process.\n- Scope: {scope_str}"
+                  exit()
+
+              original_facts = len(current_kg.get('facts', []))
+              original_concepts = len(current_kg.get('concepts', []))
+
+              evolved_kg, _ = kg_sleep_process(existing_kg=current_kg, model=llm_model, provider=llm_provider, npc=current_npc, operations_config=operations_config)
+              process_type = "Sleep"
+
+              if is_dreaming:
+                  evolved_kg, _ = kg_dream_process(existing_kg=evolved_kg, model=llm_model, provider=llm_provider, npc=current_npc)
+                  process_type += " & Dream"
+
+              save_kg_to_db(engine, evolved_kg, team_name, npc_name, current_path)
+
+              new_facts = len(evolved_kg.get('facts', []))
+              new_concepts = len(evolved_kg.get('concepts', []))
+
+              context['output'] = (f"{process_type} process complete.\n"
+                                   f"- Facts: {original_facts} -> {new_facts} ({new_facts - original_facts:+})\n"
+                                   f"- Concepts: {original_concepts} -> {new_concepts} ({new_concepts - original_concepts:+})")
+          except Exception as e:
+              traceback.print_exc()
+              context['output'] = f"Error during KG evolution: {e}"
+          finally:
+              if command_history: command_history.close()
+              context['messages'] = output_messages
+
+      # 2. FLUSH: Remove messages from context
+      elif is_flushing:
+          try:
+              n = int(flush_n_str)
+              if n <= 0:
+                  context['output'] = "Error: Number of messages to flush must be positive."
+                  exit()
+          except ValueError:
+              context['output'] = f"Error: Invalid number '{flush_n_str}'. {USAGE}"
+              exit()
+
+          messages_list = list(output_messages)
+          original_len = len(messages_list)
+          final_messages = []
+
+          if messages_list and messages_list[0].get("role") == "system":
+              system_message = messages_list.pop(0)
+              num_to_remove = min(n, len(messages_list))
+              final_messages = [system_message] + messages_list[:-num_to_remove]
+          else:
+              num_to_remove = min(n, original_len)
+              final_messages = messages_list[:-num_to_remove]
+
+          removed_count = original_len - len(final_messages)
+          context['output'] = f"Flushed {removed_count} message(s). Context is now {len(final_messages)} messages."
+          context['messages'] = final_messages
+
+      # 3. DEFAULT: Compact conversation context
+      else:
+          try:
+              result = breathe(**context)
+              if isinstance(result, dict):
+                  context['output'] = result.get('output', 'Context compressed.')
+                  context['messages'] = result.get('messages', output_messages)
+              else:
+                  context['output'] = "Context compression process initiated."
+                  context['messages'] = output_messages
+          except Exception as e:
+              traceback.print_exc()
+              context['output'] = f"Error during context compression: {e}"
+              context['messages'] = output_messages
```
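A worked example of the flush slicing above, using a hypothetical message list, to show how the system prompt is preserved:

```python
msgs = [{"role": "system"},
        {"role": "user"}, {"role": "assistant"},
        {"role": "user"}, {"role": "assistant"}]
n = 2  # as in: /compress --flush 2

# Same steps as the jinx's system-message branch.
system_message = msgs[0]
rest = msgs[1:]
num_to_remove = min(n, len(rest))
final = [system_message] + rest[:-num_to_remove]
# final keeps the system prompt plus the first user/assistant pair;
# the most recent two messages are flushed.
```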
npcsh-1.1.11/npcsh/npc_team/jinxs/utils/load_file.jinx (new file)

```diff
@@ -0,0 +1,35 @@
+jinx_name: load_file
+description: Loads and returns the contents of a file using npcpy's file loaders
+inputs:
+  - file_path
+steps:
+  - name: "load_file"
+    engine: "python"
+    code: |
+      import os
+      from npcpy.data.load import load_file_contents
+
+      # Expand user path and get absolute path
+      file_path = os.path.expanduser("{{ file_path }}")
+
+      # Check if file exists
+      if not os.path.exists(file_path):
+          output = f"Error: File not found at {file_path}"
+      else:
+          try:
+              # Load file contents using npcpy's loader
+              # Returns chunks by default with chunk_size=250
+              chunks = load_file_contents(file_path)
+
+              # Join chunks back together for full content
+              if isinstance(chunks, list):
+                  if chunks and chunks[0].startswith("Error"):
+                      output = chunks[0]
+                  else:
+                      file_content = "\n".join(chunks)
+                      output = f"File: {file_path}\n\n{file_content}"
+              else:
+                  output = f"File: {file_path}\n\n{chunks}"
+
+          except Exception as e:
+              output = f"Error reading file {file_path}: {str(e)}"
```
{npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npc_team/jinxs/utils/serve.jinx

```diff
@@ -17,9 +17,6 @@ steps:
       if cors_str and cors_str.strip():
           cors_origins = [origin.strip() for origin in cors_str.split(",")]
 
-      # start_flask_server blocks, so this will hold the Jinx until the server is stopped.
-      # In a real-world scenario, you might want to run this in a separate process
-      # or have a non-blocking server start.
       start_flask_server(
           port=int(port), # Ensure port is an integer
           cors_origins=cors_origins,
```
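The deleted comments flagged that `start_flask_server` blocks until the server stops. A minimal sketch of the non-blocking alternative they alluded to, assuming `start_flask_server` is importable with the same keyword arguments the jinx passes:

```python
import multiprocessing

def start_server_nonblocking(port, cors_origins):
    # Run the blocking server in a child process so the caller returns
    # immediately; proc.terminate() can stop it later.
    proc = multiprocessing.Process(
        target=start_flask_server,  # the same function the jinx calls
        kwargs={"port": int(port), "cors_origins": cors_origins},
        daemon=True,
    )
    proc.start()
    return proc
```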
npcsh-1.1.11/npcsh/npc_team/jinxs/utils/trigger.jinx (new file)

```diff
@@ -0,0 +1,61 @@
+jinx_name: "trigger"
+description: "Creates a persistent listener (--listen) or a scheduled task (--cron)."
+inputs:
+  - listen: "" # The description for a persistent, event-driven listener.
+  - cron: "" # The description for a scheduled, time-based task.
+steps:
+  - name: "execute_command"
+    engine: "python"
+    code: |
+      import traceback
+      from npcpy.work.trigger import execute_trigger_command # For --listen
+      from npcpy.work.plan import execute_plan_command # For --cron
+
+      listen_description = context.get('listen')
+      cron_description = context.get('cron')
+      output_messages = context.get('messages', [])
+
+      USAGE = 'Usage: /trigger --listen "<description>" OR /trigger --cron "<description>"'
+
+      # Determine which command was used and set the appropriate variables
+      subcommand = None
+      description = None
+      executor_func = None
+
+      # --- Argument Validation ---
+      # Ensure mutual exclusivity
+      if listen_description and cron_description:
+          context['output'] = f"Error: --listen and --cron are mutually exclusive. {USAGE}"
+          context['messages'] = output_messages
+          exit()
+
+      # --- Command Dispatch ---
+      if listen_description:
+          subcommand = 'listen'
+          description = listen_description
+          executor_func = execute_trigger_command
+      elif cron_description:
+          subcommand = 'cron'
+          description = cron_description
+          executor_func = execute_plan_command
+      else:
+          # Handle case where no arguments were provided
+          context['output'] = f"Error: You must provide either --listen or --cron. {USAGE}"
+          context['messages'] = output_messages
+          exit()
+
+      # --- Execution ---
+      try:
+          result = executor_func(command=description, **context)
+
+          if isinstance(result, dict):
+              output_key = 'Listener' if subcommand == 'listen' else 'Cron job'
+              context['output'] = result.get('output', f'{output_key} created successfully.')
+              context['messages'] = result.get('messages', output_messages)
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error creating {subcommand}: {e}"
+          context['messages'] = output_messages
```
npcsh-1.1.11/npcsh/npc_team/jinxs/utils/vixynt.jinx (new file)

```diff
@@ -0,0 +1,144 @@
+jinx_name: "vixynt"
+description: "Generates images from text descriptions or edits existing ones."
+inputs:
+  - prompt
+  - model: null
+  - provider: null
+  - output_name: null
+  - attachments: null
+  - n_images: null
+  - height: null
+  - width: null
+steps:
+  - name: "generate_or_edit_image"
+    engine: "python"
+    code: |
+      import os
+      import base64
+      from io import BytesIO
+      from datetime import datetime
+      from PIL import Image
+      from npcpy.llm_funcs import gen_image
+
+      # Extract inputs from context with proper type conversion
+      image_prompt = str(context.get('prompt', '')).strip()
+      output_name = context.get('output_name')
+      attachments_str = context.get('attachments')
+
+      # Handle integer inputs - they may come as strings or ints
+      try:
+          n_images = int(context.get('n_images', 1))
+      except (ValueError, TypeError):
+          n_images = 1
+
+      try:
+          height = int(context.get('height', 1024))
+      except (ValueError, TypeError):
+          height = 1024
+
+      try:
+          width = int(context.get('width', 1024))
+      except (ValueError, TypeError):
+          width = 1024
+
+      # Get model and provider, prioritizing context, then NPC, then environment variables
+      model = context.get('model')
+      provider = context.get('provider')
+
+      # Use NPC's model/provider as fallback
+      if not model and npc and hasattr(npc, 'model') and npc.model:
+          model = npc.model
+      if not provider and npc and hasattr(npc, 'provider') and npc.provider:
+          provider = npc.provider
+
+      # Fallback to environment variables
+      if not model:
+          model = os.getenv('NPCSH_IMAGE_GEN_MODEL')
+      if not provider:
+          provider = os.getenv('NPCSH_IMAGE_GEN_PROVIDER')
+
+      # Final hardcoded fallbacks if nothing else is set
+      if not model:
+          model = "runwayml/stable-diffusion-v1-5"
+      if not provider:
+          provider = "diffusers"
+
+      # Parse attachments
+      input_images = []
+      if attachments_str and str(attachments_str).strip():
+          input_images = [p.strip() for p in str(attachments_str).split(',')]
+
+      output_messages = context.get('messages', [])
+
+      if not image_prompt:
+          output = "Error: No prompt provided for image generation."
+      else:
+          try:
+              # Generate image(s)
+              result = gen_image(
+                  prompt=image_prompt,
+                  model=model,
+                  provider=provider,
+                  npc=npc,
+                  height=height,
+                  width=width,
+                  n_images=n_images,
+                  input_images=input_images if input_images else None
+              )
+
+              # Ensure we have a list of images
+              if not isinstance(result, list):
+                  images_list = [result] if result is not None else []
+              else:
+                  images_list = result
+
+              saved_files = []
+              html_image_tags = [] # This list will store the raw HTML <img> tags
+
+              for i, image in enumerate(images_list):
+                  if image is None:
+                      continue
+
+                  # Determine output filename
+                  if output_name and str(output_name).strip():
+                      base_name, ext = os.path.splitext(os.path.expanduser(str(output_name)))
+                      if not ext:
+                          ext = ".png"
+                      current_output_file = f"{base_name}_{i}{ext}" if len(images_list) > 1 else f"{base_name}{ext}"
+                  else:
+                      os.makedirs(os.path.expanduser("~/.npcsh/images/"), exist_ok=True)
+                      current_output_file = (
+                          os.path.expanduser("~/.npcsh/images/")
+                          + f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.png"
+                      )
+
+                  # Save image to file
+                  image.save(current_output_file)
+                  saved_files.append(current_output_file)
+
+                  # Convert image to base64 and create an HTML <img> tag
+                  with open(current_output_file, 'rb') as f:
+                      img_data = base64.b64encode(f.read()).decode()
+                  # Using raw HTML <img> tag with data URI
+                  html_image_tags.append(f'<img src="data:image/png;base64,{img_data}" alt="Generated Image {i+1}" style="max-width: 100%; display: block; margin-top: 10px;">')
+
+              if saved_files:
+                  output_text_message = f"Image(s) generated and saved to: {', '.join(saved_files)}"
+                  if input_images:
+                      output_text_message = f"Image(s) edited and saved to: {', '.join(saved_files)}"
+
+                  output = output_text_message # Keep the text message clean
+                  output += f"\n\nThe image files have been saved and are ready to view."
+                  output += "\n\n" + "\n".join(html_image_tags) # Append all HTML <img> tags to the output
+              else:
+                  output = "No images were generated."
+
+          except Exception as e:
+              import traceback
+              traceback.print_exc()
+              output = f"Error {'editing' if input_images else 'generating'} image: {str(e)}"
+
+      context['output'] = output
+      context['messages'] = output_messages
+      context['model'] = model
+      context['provider'] = provider
```
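The model/provider selection above walks a four-step fallback chain. A compact helper expressing the same precedence (the helper itself is illustrative, not part of the package):

```python
import os

def resolve(explicit, npc_attr, env_var, default):
    """Precedence used by vixynt: explicit input, then the NPC's
    attribute, then an environment variable, then a hardcoded default."""
    return explicit or npc_attr or os.getenv(env_var) or default

# e.g., inside the jinx:
# model = resolve(context.get('model'), getattr(npc, 'model', None),
#                 'NPCSH_IMAGE_GEN_MODEL', 'runwayml/stable-diffusion-v1-5')
```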
{npcsh-1.1.9 → npcsh-1.1.11}/npcsh/npcsh.py

```diff
@@ -78,7 +78,8 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState, router)
     render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd, or /chat to switch to other modes')
     render_markdown(f'- To switch to a different NPC, type /npc <npc_name> or /n <npc_name> to switch to that NPC.')
     render_markdown('\n- Here are the current NPCs available in your team: ' + ', '.join([npc_name for npc_name in state.team.npcs.keys()]))
-
+    render_markdown('\n- Here are the currently available Jinxs: ' + ', '.join([jinx_name for jinx_name in state.team.jinxs_dict.keys()]))
+
     is_windows = platform.system().lower().startswith("win")
     try:
         completer = make_completer(state, router)
```
{npcsh-1.1.9 → npcsh-1.1.11}/npcsh.egg-info/SOURCES.txt

```diff
@@ -55,12 +55,13 @@ npcsh/npc_team/jinxs/modes/pti.jinx
 npcsh/npc_team/jinxs/modes/spool.jinx
 npcsh/npc_team/jinxs/modes/wander.jinx
 npcsh/npc_team/jinxs/modes/yap.jinx
-npcsh/npc_team/jinxs/
+npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx
+npcsh/npc_team/jinxs/utils/agent.jinx
+npcsh/npc_team/jinxs/utils/chat.jinx
+npcsh/npc_team/jinxs/utils/compress.jinx
 npcsh/npc_team/jinxs/utils/edit_file.jinx
-npcsh/npc_team/jinxs/utils/
-npcsh/npc_team/jinxs/utils/npc-studio.jinx
+npcsh/npc_team/jinxs/utils/load_file.jinx
 npcsh/npc_team/jinxs/utils/ots.jinx
-npcsh/npc_team/jinxs/utils/plan.jinx
 npcsh/npc_team/jinxs/utils/roll.jinx
 npcsh/npc_team/jinxs/utils/sample.jinx
 npcsh/npc_team/jinxs/utils/search.jinx
```
{npcsh-1.1.9 → npcsh-1.1.11}/setup.py

```diff
@@ -78,7 +78,7 @@ extra_files = package_files("npcsh/npc_team/")
 
 setup(
     name="npcsh",
-    version="1.1.9",
+    version="1.1.11",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements, # Only install base requirements by default
     extras_require={
```
npcsh-1.1.9/npcsh/npc_team/jinxs/code/sh.jinx (deleted)

```diff
@@ -1,19 +0,0 @@
-jinx_name: sh
-description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
-inputs:
-  - bash_command
-steps:
-  - engine: python
-    code: |
-      import subprocess
-      import os
-      cmd = '{{bash_command}}'
-      def run_command(cmd):
-          process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-          stdout, stderr = process.communicate()
-          if stderr:
-              print(f"Error: {stderr.decode('utf-8')}")
-              return stderr
-          return stdout
-      result = run_command(cmd)
-      output = result.decode('utf-8')
```
npcsh-1.1.9/npcsh/npc_team/jinxs/utils/breathe.jinx (deleted)

```diff
@@ -1,20 +0,0 @@
-jinx_name: "breathe"
-description: "Condense context on a regular cadence"
-inputs: [] # The breathe command takes all relevant context from the NPC's environment
-steps:
-  - name: "condense_context"
-    engine: "python"
-    code: |
-      from npcpy.llm_funcs import breathe
-
-      output_messages = context.get('messages', [])
-
-      # Pass all current context as kwargs to breathe
-      result = breathe(**context)
-
-      if isinstance(result, dict):
-          context['output'] = result.get('output', 'Context condensed.')
-          context['messages'] = result.get('messages', output_messages)
-      else:
-          context['output'] = "Context condensation process initiated."
-          context['messages'] = output_messages
```
npcsh-1.1.9/npcsh/npc_team/jinxs/utils/flush.jinx (deleted)

```diff
@@ -1,39 +0,0 @@
-jinx_name: "flush"
-description: "Flush the last N messages from the conversation history."
-inputs:
-  - n: 1 # The number of messages to flush (default to 1).
-steps:
-  - name: "flush_messages"
-    engine: "python"
-    code: |
-      n = int(context.get('n', 1))
-      output_messages = context.get('messages', [])
-
-      if n <= 0:
-          context['output'] = "Error: Number of messages must be positive."
-          context['messages'] = output_messages
-          exit()
-
-      new_messages = list(output_messages)
-      original_len = len(new_messages)
-      removed_count = 0
-
-      if new_messages and new_messages[0].get("role") == "system":
-          system_message = new_messages[0]
-          working_messages = new_messages[1:]
-          num_to_remove = min(n, len(working_messages))
-          if num_to_remove > 0:
-              final_messages = [system_message] + working_messages[:-num_to_remove]
-              removed_count = num_to_remove
-          else:
-              final_messages = [system_message]
-      else:
-          num_to_remove = min(n, original_len)
-          if num_to_remove > 0:
-              final_messages = new_messages[:-num_to_remove]
-              removed_count = num_to_remove
-          else:
-              final_messages = []
-
-      context['output'] = f"Flushed {removed_count} message(s). Context is now {len(final_messages)} messages."
-      context['messages'] = final_messages
```
npcsh-1.1.9/npcsh/npc_team/jinxs/utils/plan.jinx (deleted)

```diff
@@ -1,33 +0,0 @@
-jinx_name: "plan"
-description: "Execute a plan command"
-inputs:
-  - plan_description: "" # Description of the plan to execute.
-steps:
-  - name: "execute_plan"
-    engine: "python"
-    code: |
-      import traceback
-      from npcpy.work.plan import execute_plan_command
-
-      plan_description = context.get('plan_description')
-      output_messages = context.get('messages', [])
-
-      if not plan_description or not plan_description.strip():
-          context['output'] = "Usage: /plan <description_of_plan>"
-          context['messages'] = output_messages
-          exit()
-
-      try:
-          # Pass all current context as kwargs to execute_plan_command
-          result = execute_plan_command(command=plan_description, **context)
-
-          if isinstance(result, dict):
-              context['output'] = result.get('output', 'Plan executed.')
-              context['messages'] = result.get('messages', output_messages)
-          else:
-              context['output'] = str(result)
-              context['messages'] = output_messages
-      except Exception as e:
-          traceback.print_exc()
-          context['output'] = f"Error executing plan: {e}"
-          context['messages'] = output_messages
```
npcsh-1.1.9/npcsh/npc_team/jinxs/utils/trigger.jinx (deleted)

```diff
@@ -1,36 +0,0 @@
-jinx_name: "trigger"
-description: "Execute a trigger command"
-inputs:
-  - trigger_description: "" # Required description of the trigger to execute.
-steps:
-  - name: "execute_trigger"
-    engine: "python"
-    code: |
-      import traceback
-      from npcpy.work.trigger import execute_trigger_command
-
-      trigger_description = context.get('trigger_description')
-      output_messages = context.get('messages', [])
-
-      if not trigger_description or not trigger_description.strip():
-          context['output'] = "Usage: /trigger <trigger_description>"
-          context['messages'] = output_messages
-          exit()
-
-      try:
-          # Pass all current context as kwargs to execute_trigger_command
-          result = execute_trigger_command(command=trigger_description, **context)
-
-          if isinstance(result, dict):
-              context['output'] = result.get('output', 'Trigger executed.')
-              context['messages'] = result.get('messages', output_messages)
-          else:
-              context['output'] = str(result)
-              context['messages'] = output_messages
-      except NameError:
-          context['output'] = "Trigger function (execute_trigger_command) not available."
-          context['messages'] = output_messages
-      except Exception as e:
-          traceback.print_exc()
-          context['output'] = f"Error executing trigger: {e}"
-          context['messages'] = output_messages
```
npcsh-1.1.9/npcsh/npc_team/jinxs/utils/vixynt.jinx (deleted)

```diff
@@ -1,117 +0,0 @@
-jinx_name: "vixynt"
-description: "Generates images from text descriptions or edits existing ones."
-inputs:
-  - prompt
-  - model: ""
-  - provider: ""
-  - output_name: ""
-  - attachments: ""
-  - n_images: 1
-  - height: 1024
-  - width: 1024
-steps:
-  - name: "generate_or_edit_image"
-    engine: "python"
-    code: |
-      import os
-      import base64
-      from io import BytesIO
-      from datetime import datetime
-      from PIL import Image
-      from npcpy.llm_funcs import gen_image
-
-      # Extract inputs from context
-      image_prompt = context.get('prompt', '').strip()
-      output_name = context.get('output_name')
-      attachments_str = context.get('attachments')
-      n_images = int(context.get('n_images', 1))
-      height = int(context.get('height', 1024))
-      width = int(context.get('width', 1024))
-      model = context.get('model')
-      provider = context.get('provider')
-
-      input_images = []
-      if attachments_str and attachments_str.strip():
-          input_images = [p.strip() for p in attachments_str.split(',')]
-
-      # Use NPC's model/provider as fallback
-      if not model and npc and npc.model:
-          model = npc.model
-      if not provider and npc and npc.provider:
-          provider = npc.provider
-
-      # Final fallbacks
-      if not model:
-          model = "runwayml/stable-diffusion-v1-5"
-      if not provider:
-          provider = "diffusers"
-
-      output_messages = context.get('messages', [])
-
-      if not image_prompt:
-          context['output'] = "Error: No prompt provided for image generation."
-          context['messages'] = output_messages
-          exit()
-
-      try:
-          # Generate image(s)
-          result = gen_image(
-              prompt=image_prompt,
-              model=model,
-              provider=provider,
-              npc=npc,
-              height=height,
-              width=width,
-              n_images=n_images,
-              input_images=input_images if input_images else None
-          )
-
-          # Ensure we have a list of images
-          if not isinstance(result, list):
-              images_list = [result] if result is not None else []
-          else:
-              images_list = result
-
-          saved_files = []
-
-          for i, image in enumerate(images_list):
-              if image is None:
-                  continue
-
-              # Determine output filename
-              if output_name and output_name.strip():
-                  base_name, ext = os.path.splitext(os.path.expanduser(output_name))
-                  if not ext:
-                      ext = ".png"
-                  current_output_file = f"{base_name}_{i}{ext}" if len(images_list) > 1 else f"{base_name}{ext}"
-              else:
-                  os.makedirs(os.path.expanduser("~/.npcsh/images/"), exist_ok=True)
-                  current_output_file = (
-                      os.path.expanduser("~/.npcsh/images/")
-                      + f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.png"
-                  )
-
-              # Save image to file
-              image.save(current_output_file)
-              saved_files.append(current_output_file)
-
-          if saved_files:
-              if input_images:
-                  output = f"Image(s) edited and saved to: {', '.join(saved_files)}"
-              else:
-                  output = f"Image(s) generated and saved to: {', '.join(saved_files)}"
-
-              # DO NOT include base64 data - just reference the file paths
-              output += f"\n\nThe image files have been saved and are ready to view."
-          else:
-              output = "No images were generated."
-
-      except Exception as e:
-          import traceback
-          traceback.print_exc()
-          output = f"Error {'editing' if input_images else 'generating'} image: {str(e)}"
-
-      context['output'] = output
-      context['messages'] = output_messages
-      context['model'] = model
-      context['provider'] = provider
```
All remaining files listed above with +0 -0 are unchanged between the two versions.