npcsh 1.1.10__py3-none-any.whl → 1.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +409 -362
- npcsh/corca.py +28 -2
- npcsh/guac.py +4 -1
- npcsh/npc_team/jinxs/code/sh.jinx +32 -13
- npcsh/npc_team/jinxs/code/sql.jinx +2 -2
- npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
- npcsh/npc_team/jinxs/utils/chat.jinx +17 -0
- npcsh/npc_team/jinxs/utils/vixynt.jinx +104 -77
- npcsh-1.1.12.data/data/npcsh/npc_team/agent.jinx +17 -0
- npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +17 -0
- npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +38 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sql.jinx +2 -2
- npcsh-1.1.12.data/data/npcsh/npc_team/vixynt.jinx +144 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/METADATA +1 -1
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/RECORD +66 -62
- npcsh-1.1.10.data/data/npcsh/npc_team/sh.jinx +0 -19
- npcsh-1.1.10.data/data/npcsh/npc_team/vixynt.jinx +0 -117
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/alicanto.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/build.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/compress.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/corca.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/guac.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/load_file.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonk.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/pti.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/search.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/serve.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sleep.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/spool.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/trigger.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/wander.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/yap.jinx +0 -0
- {npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/WHEEL +0 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.10.dist-info → npcsh-1.1.12.dist-info}/top_level.txt +0 -0
npcsh/corca.py
CHANGED
@@ -18,6 +18,7 @@ except ImportError:
 
 from termcolor import colored, cprint
 import json
+import readline
 from npcpy.llm_funcs import get_llm_response, breathe
 from npcpy.npc_compiler import NPC
 from npcpy.npc_sysenv import render_markdown, print_and_process_stream_with_markdown
@@ -34,8 +35,11 @@ from npcsh._state import (
     should_skip_kg_processing,
     NPCSH_CHAT_PROVIDER,
     NPCSH_CHAT_MODEL,
-    get_team_ctx_path
+    get_team_ctx_path,
+    make_completer,
+    execute_slash_command,
 )
+from npcsh.routes import router
 import yaml
 from pathlib import Path
 
@@ -1281,6 +1285,14 @@ def corca_session(
     state.command_history = command_history
 
     print_corca_welcome_message()
+    try:
+        readline.set_completer(make_completer(state, router))
+    except Exception:
+        pass
+
+    display_model = state.npc.model if state.npc and state.npc.model else state.chat_model
+    display_provider = state.npc.provider if state.npc and state.npc.provider else state.chat_provider
+    print(f"Current LLM model: {display_model} ({display_provider})")
 
     # Resolve MCP server path
     auto_copy_bypass = os.getenv("NPCSH_CORCA_AUTO_COPY_MCP_SERVER", "false").lower() == "true"
@@ -1324,10 +1336,24 @@ def corca_session(
     while True:
         try:
             prompt_npc_name = state.npc.name if state.npc else "npc"
-
+            current_model = state.npc.model if state.npc and state.npc.model else state.chat_model
+            current_provider = state.npc.provider if state.npc and state.npc.provider else state.chat_provider
+            model_segment = f"{current_model}@{current_provider}"
+            prompt_str = f"{colored(os.path.basename(state.current_path), 'blue')}:{prompt_npc_name}:{model_segment}🦌> "
             prompt = readline_safe_prompt(prompt_str)
             user_input = get_multiline_input(prompt).strip()
 
+            if user_input.startswith('/'):
+                state, slash_result = execute_slash_command(
+                    user_input,
+                    None,
+                    state,
+                    state.stream_output,
+                    router
+                )
+                process_result(user_input, state, slash_result, command_history)
+                continue
+
             if user_input.lower() in ["exit", "quit", "done"]:
                 break
 
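
Note: the corca prompt now renders the resolved model and provider as a model@provider segment. A minimal standalone sketch of that formatting, with placeholder values standing in for state (the names below are illustrative, not npcsh API):

    # Sketch: how the new corca prompt string is assembled (placeholder values).
    import os
    from termcolor import colored

    current_path = os.getcwd()              # stands in for state.current_path
    npc_name = "sibiji"                     # stands in for state.npc.name
    model, provider = "llama3.2", "ollama"  # stand in for the resolved model/provider

    model_segment = f"{model}@{provider}"
    prompt_str = f"{colored(os.path.basename(current_path), 'blue')}:{npc_name}:{model_segment}🦌> "
    print(prompt_str)  # e.g. myproject:sibiji:llama3.2@ollama🦌>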
npcsh/guac.py
CHANGED
@@ -1781,12 +1781,15 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
         display_model = state.chat_model
         if isinstance(state.npc, NPC) and state.npc.model:
             display_model = state.npc.model
+        display_provider = state.chat_provider
+        if isinstance(state.npc, NPC) and state.npc.provider:
+            display_provider = state.npc.provider
 
         cwd_colored = colored(os.path.basename(state.current_path), "blue")
         npc_name = state.npc.name if state.npc and state.npc.name else "guac"
         prompt_char = get_guac_prompt_char(command_count)
 
-        prompt_str = f"{cwd_colored}:{npc_name}:{display_model}{prompt_char}> "
+        prompt_str = f"{cwd_colored}:{npc_name}:{display_model}@{display_provider}{prompt_char}> "
         prompt = readline_safe_prompt(prompt_str)
 
         user_input = get_multiline_input(prompt).strip()
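
Note: guac resolves the displayed provider the same way it already resolved the model: an NPC-level value overrides the shell default. A minimal sketch of that precedence using a stub in place of the real NPC class:

    # Sketch: NPC-level provider wins over the shell default.
    class NPCStub:  # stand-in for npcpy.npc_compiler.NPC
        model = "gpt-4o-mini"
        provider = "openai"

    chat_model, chat_provider = "llama3.2", "ollama"  # shell defaults
    npc = NPCStub()

    display_model = npc.model if npc and npc.model else chat_model
    display_provider = npc.provider if npc and npc.provider else chat_provider
    print(f"{display_model}@{display_provider}")  # -> gpt-4o-mini@openai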
npcsh/npc_team/jinxs/code/sh.jinx
CHANGED

@@ -1,19 +1,38 @@
 jinx_name: sh
 description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
 inputs:
-  - bash_command
+  - bash_command
 steps:
-  -
+  - name: execute_bash
+    engine: python
     code: |
       import subprocess
-      import
-
-
-
-
-
-
-
-
-
+      import sys  # Import sys to explicitly write to stderr for visibility
+
+      # Force a simple print to see if anything comes out
+      print("--- Jinx 'sh' code started ---", file=sys.stderr)
+
+      cmd = '{{ bash_command }}'
+
+      # Initialize output to an empty string to ensure it always exists
+      output = ""
+
+
+      process = subprocess.Popen(
+          cmd,
+          shell=True,
+          stdout=subprocess.PIPE,
+          stderr=subprocess.PIPE
+      )
+      stdout, stderr = process.communicate()
+
+      # Print raw stdout/stderr to sys.stderr for better visibility in some environments
+      print(f"Jinx 'sh' raw stdout: {stdout.decode('utf-8', errors='ignore')}", file=sys.stderr)
+      print(f"Jinx 'sh' raw stderr: {stderr.decode('utf-8', errors='ignore')}", file=sys.stderr)
+
+      if stderr:
+          output = f"Error: {stderr.decode('utf-8')}"
+      else:
+          output = stdout.decode('utf-8')
+
+
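
Note: the step body above can be exercised outside the jinx runner. A standalone sketch with a concrete command substituted for the '{{ bash_command }}' template (POSIX shell assumed; the command is hardcoded here because the template splices into single quotes, which a command containing a single quote would break):

    # Sketch: the sh step body with a concrete command.
    import subprocess

    cmd = "ls -la"  # what '{{ bash_command }}' would render to
    process = subprocess.Popen(cmd, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    output = f"Error: {stderr.decode('utf-8')}" if stderr else stdout.decode('utf-8')
    print(output)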
npcsh/npc_team/jinxs/code/sql.jinx
CHANGED

@@ -1,4 +1,4 @@
-jinx_name:
+jinx_name: sql
 description: Execute queries on the ~/npcsh_history.db to pull data. The database
   contains only information about conversations and other user-provided data. It does
   not store any information about individual files. Avoid using percent signs unless absolutely necessary.
@@ -13,4 +13,4 @@ steps:
         df = pd.read_sql_query(query, npc.db_conn)
       except Exception as e:
         df = pd.DataFrame({'Error': [str(e)]})
-      output = df.to_string()
+      output = df.to_string()
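
Note: the sql step runs pandas.read_sql_query against the npc's database connection. A standalone sketch against a throwaway in-memory SQLite database (the table and rows here are invented for illustration):

    # Sketch: the sql step body against a disposable connection.
    import sqlite3
    import pandas as pd

    conn = sqlite3.connect(":memory:")  # stands in for npc.db_conn (~/npcsh_history.db)
    conn.execute("CREATE TABLE conversations (id INTEGER, content TEXT)")
    conn.execute("INSERT INTO conversations VALUES (1, 'hello')")

    query = "SELECT * FROM conversations"
    try:
        df = pd.read_sql_query(query, conn)
    except Exception as e:
        df = pd.DataFrame({'Error': [str(e)]})
    print(df.to_string())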
npcsh/npc_team/jinxs/utils/agent.jinx
ADDED

@@ -0,0 +1,17 @@
+jinx_name: agent
+description: Provides an LLM response with tool use enabled.
+inputs:
+  - query
+  - auto_process_tool_calls: True
+  - use_core_tools: True
+steps:
+  - name: get_agent_response
+    engine: python
+    code: |
+      response = npc.get_llm_response(
+          request=query,
+          messages=context.get('messages', []),
+          auto_process_tool_calls={{ auto_process_tool_calls | default(True) }},
+          use_core_tools={{ use_core_tools | default(True) }}
+      )
+      output = response.get('response', '')
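
Note: the {{ auto_process_tool_calls | default(True) }} expressions in agent.jinx (and chat.jinx below) use the Jinja-style default filter: the literal default applies only when the input is not supplied. A minimal sketch, assuming standard Jinja2 semantics for jinx templating:

    # Sketch: how the default() filter resolves the jinx inputs.
    from jinja2 import Template

    tmpl = Template("auto_process_tool_calls={{ auto_process_tool_calls | default(True) }}")
    print(tmpl.render())                               # auto_process_tool_calls=True
    print(tmpl.render(auto_process_tool_calls=False))  # auto_process_tool_calls=False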
npcsh/npc_team/jinxs/utils/chat.jinx
ADDED

@@ -0,0 +1,17 @@
+jinx_name: chat
+description: Provides a direct LLM response without tool use.
+inputs:
+  - query
+  - auto_process_tool_calls: False
+  - use_core_tools: False
+steps:
+  - name: get_chat_response
+    engine: python
+    code: |
+      response = npc.get_llm_response(
+          request=query,
+          messages=context.get('messages', []),
+          auto_process_tool_calls={{ auto_process_tool_calls | default(False) }},
+          use_core_tools={{ use_core_tools | default(False) }}
+      )
+      output = response.get('response', '')
npcsh/npc_team/jinxs/utils/vixynt.jinx
CHANGED

@@ -2,13 +2,13 @@ jinx_name: "vixynt"
 description: "Generates images from text descriptions or edits existing ones."
 inputs:
   - prompt
-  - model:
-  - provider:
-  - output_name:
-  - attachments:
-  - n_images:
-  - height:
-  - width:
+  - model: null
+  - provider: null
+  - output_name: null
+  - attachments: null
+  - n_images: null
+  - height: null
+  - width: null
 steps:
   - name: "generate_or_edit_image"
     engine: "python"
@@ -20,98 +20,125 @@ steps:
       from PIL import Image
       from npcpy.llm_funcs import gen_image
 
-      # Extract inputs from context
-      image_prompt = context.get('prompt', '').strip()
+      # Extract inputs from context with proper type conversion
+      image_prompt = str(context.get('prompt', '')).strip()
       output_name = context.get('output_name')
       attachments_str = context.get('attachments')
-
-
-
+
+      # Handle integer inputs - they may come as strings or ints
+      try:
+          n_images = int(context.get('n_images', 1))
+      except (ValueError, TypeError):
+          n_images = 1
+
+      try:
+          height = int(context.get('height', 1024))
+      except (ValueError, TypeError):
+          height = 1024
+
+      try:
+          width = int(context.get('width', 1024))
+      except (ValueError, TypeError):
+          width = 1024
+
+      # Get model and provider, prioritizing context, then NPC, then environment variables
       model = context.get('model')
       provider = context.get('provider')
-
-      input_images = []
-      if attachments_str and attachments_str.strip():
-          input_images = [p.strip() for p in attachments_str.split(',')]
-
+
       # Use NPC's model/provider as fallback
-      if not model and npc and npc.model:
+      if not model and npc and hasattr(npc, 'model') and npc.model:
           model = npc.model
-      if not provider and npc and npc.provider:
+      if not provider and npc and hasattr(npc, 'provider') and npc.provider:
          provider = npc.provider
 
-      #
+      # Fallback to environment variables
+      if not model:
+          model = os.getenv('NPCSH_IMAGE_GEN_MODEL')
+      if not provider:
+          provider = os.getenv('NPCSH_IMAGE_GEN_PROVIDER')
+
+      # Final hardcoded fallbacks if nothing else is set
       if not model:
           model = "runwayml/stable-diffusion-v1-5"
       if not provider:
          provider = "diffusers"
 
+      # Parse attachments
+      input_images = []
+      if attachments_str and str(attachments_str).strip():
+          input_images = [p.strip() for p in str(attachments_str).split(',')]
+
       output_messages = context.get('messages', [])
 
       if not image_prompt:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-          input_images=input_images if input_images else None
-      )
-
-      # Ensure we have a list of images
-      if not isinstance(result, list):
-          images_list = [result] if result is not None else []
-      else:
-          images_list = result
-
-      saved_files = []
-
-      for i, image in enumerate(images_list):
-          if image is None:
-              continue
+          output = "Error: No prompt provided for image generation."
+      else:
+          try:
+              # Generate image(s)
+              result = gen_image(
+                  prompt=image_prompt,
+                  model=model,
+                  provider=provider,
+                  npc=npc,
+                  height=height,
+                  width=width,
+                  n_images=n_images,
+                  input_images=input_images if input_images else None
+              )
 
-          #
-          if
-
-          if not ext:
-              ext = ".png"
-          current_output_file = f"{base_name}_{i}{ext}" if len(images_list) > 1 else f"{base_name}{ext}"
+              # Ensure we have a list of images
+              if not isinstance(result, list):
+                  images_list = [result] if result is not None else []
               else:
-
-          current_output_file = (
-              os.path.expanduser("~/.npcsh/images/")
-              + f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.png"
-          )
+                  images_list = result
 
-
-
-          saved_files.append(current_output_file)
-
-      if saved_files:
-          if input_images:
-              output = f"Image(s) edited and saved to: {', '.join(saved_files)}"
-          else:
-              output = f"Image(s) generated and saved to: {', '.join(saved_files)}"
+              saved_files = []
+              html_image_tags = []  # This list will store the raw HTML <img> tags
 
-
-
-
-
+              for i, image in enumerate(images_list):
+                  if image is None:
+                      continue
+
+                  # Determine output filename
+                  if output_name and str(output_name).strip():
+                      base_name, ext = os.path.splitext(os.path.expanduser(str(output_name)))
+                      if not ext:
+                          ext = ".png"
+                      current_output_file = f"{base_name}_{i}{ext}" if len(images_list) > 1 else f"{base_name}{ext}"
+                  else:
+                      os.makedirs(os.path.expanduser("~/.npcsh/images/"), exist_ok=True)
+                      current_output_file = (
+                          os.path.expanduser("~/.npcsh/images/")
+                          + f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.png"
+                      )
+
+                  # Save image to file
+                  image.save(current_output_file)
+                  saved_files.append(current_output_file)
+
+                  # Convert image to base64 and create an HTML <img> tag
+                  with open(current_output_file, 'rb') as f:
+                      img_data = base64.b64encode(f.read()).decode()
+                  # Using raw HTML <img> tag with data URI
+                  html_image_tags.append(f'<img src="data:image/png;base64,{img_data}" alt="Generated Image {i+1}" style="max-width: 100%; display: block; margin-top: 10px;">')
 
-
-
-
-
+              if saved_files:
+                  output_text_message = f"Image(s) generated and saved to: {', '.join(saved_files)}"
+                  if input_images:
+                      output_text_message = f"Image(s) edited and saved to: {', '.join(saved_files)}"
+
+                  output = output_text_message  # Keep the text message clean
+                  output += f"\n\nThe image files have been saved and are ready to view."
+                  output += "\n\n" + "\n".join(html_image_tags)  # Append all HTML <img> tags to the output
+              else:
+                  output = "No images were generated."
+
+          except Exception as e:
+              import traceback
+              traceback.print_exc()
+              output = f"Error {'editing' if input_images else 'generating'} image: {str(e)}"
 
       context['output'] = output
       context['messages'] = output_messages
       context['model'] = model
-      context['provider'] = provider
+      context['provider'] = provider
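
Note: the new try/except coercion lets n_images, height, and width arrive as strings or ints and fall back to defaults on anything else. The same guard, factored out as a standalone sketch:

    # Sketch: the int-coercion guard used for n_images/height/width.
    def to_int(value, default):
        try:
            return int(value)
        except (ValueError, TypeError):
            return default

    print(to_int("2", 1))     # 2  (string input parses)
    print(to_int(None, 1))    # 1  (missing input falls back)
    print(to_int("wide", 1))  # 1  (junk input falls back)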
npcsh-1.1.12.data/data/npcsh/npc_team/agent.jinx
ADDED

@@ -0,0 +1,17 @@
+jinx_name: agent
+description: Provides an LLM response with tool use enabled.
+inputs:
+  - query
+  - auto_process_tool_calls: True
+  - use_core_tools: True
+steps:
+  - name: get_agent_response
+    engine: python
+    code: |
+      response = npc.get_llm_response(
+          request=query,
+          messages=context.get('messages', []),
+          auto_process_tool_calls={{ auto_process_tool_calls | default(True) }},
+          use_core_tools={{ use_core_tools | default(True) }}
+      )
+      output = response.get('response', '')
npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx
ADDED

@@ -0,0 +1,17 @@
+jinx_name: chat
+description: Provides a direct LLM response without tool use.
+inputs:
+  - query
+  - auto_process_tool_calls: False
+  - use_core_tools: False
+steps:
+  - name: get_chat_response
+    engine: python
+    code: |
+      response = npc.get_llm_response(
+          request=query,
+          messages=context.get('messages', []),
+          auto_process_tool_calls={{ auto_process_tool_calls | default(False) }},
+          use_core_tools={{ use_core_tools | default(False) }}
+      )
+      output = response.get('response', '')
npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx
ADDED

@@ -0,0 +1,38 @@
+jinx_name: sh
+description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
+inputs:
+  - bash_command
+steps:
+  - name: execute_bash
+    engine: python
+    code: |
+      import subprocess
+      import sys  # Import sys to explicitly write to stderr for visibility
+
+      # Force a simple print to see if anything comes out
+      print("--- Jinx 'sh' code started ---", file=sys.stderr)
+
+      cmd = '{{ bash_command }}'
+
+      # Initialize output to an empty string to ensure it always exists
+      output = ""
+
+
+      process = subprocess.Popen(
+          cmd,
+          shell=True,
+          stdout=subprocess.PIPE,
+          stderr=subprocess.PIPE
+      )
+      stdout, stderr = process.communicate()
+
+      # Print raw stdout/stderr to sys.stderr for better visibility in some environments
+      print(f"Jinx 'sh' raw stdout: {stdout.decode('utf-8', errors='ignore')}", file=sys.stderr)
+      print(f"Jinx 'sh' raw stderr: {stderr.decode('utf-8', errors='ignore')}", file=sys.stderr)
+
+      if stderr:
+          output = f"Error: {stderr.decode('utf-8')}"
+      else:
+          output = stdout.decode('utf-8')
+
+
{npcsh-1.1.10.data → npcsh-1.1.12.data}/data/npcsh/npc_team/sql.jinx
CHANGED

@@ -1,4 +1,4 @@
-jinx_name:
+jinx_name: sql
 description: Execute queries on the ~/npcsh_history.db to pull data. The database
   contains only information about conversations and other user-provided data. It does
   not store any information about individual files. Avoid using percent signs unless absolutely necessary.
@@ -13,4 +13,4 @@ steps:
         df = pd.read_sql_query(query, npc.db_conn)
       except Exception as e:
         df = pd.DataFrame({'Error': [str(e)]})
-      output = df.to_string()
+      output = df.to_string()
npcsh-1.1.12.data/data/npcsh/npc_team/vixynt.jinx
ADDED

@@ -0,0 +1,144 @@
+jinx_name: "vixynt"
+description: "Generates images from text descriptions or edits existing ones."
+inputs:
+  - prompt
+  - model: null
+  - provider: null
+  - output_name: null
+  - attachments: null
+  - n_images: null
+  - height: null
+  - width: null
+steps:
+  - name: "generate_or_edit_image"
+    engine: "python"
+    code: |
+      import os
+      import base64
+      from io import BytesIO
+      from datetime import datetime
+      from PIL import Image
+      from npcpy.llm_funcs import gen_image
+
+      # Extract inputs from context with proper type conversion
+      image_prompt = str(context.get('prompt', '')).strip()
+      output_name = context.get('output_name')
+      attachments_str = context.get('attachments')
+
+      # Handle integer inputs - they may come as strings or ints
+      try:
+          n_images = int(context.get('n_images', 1))
+      except (ValueError, TypeError):
+          n_images = 1
+
+      try:
+          height = int(context.get('height', 1024))
+      except (ValueError, TypeError):
+          height = 1024
+
+      try:
+          width = int(context.get('width', 1024))
+      except (ValueError, TypeError):
+          width = 1024
+
+      # Get model and provider, prioritizing context, then NPC, then environment variables
+      model = context.get('model')
+      provider = context.get('provider')
+
+      # Use NPC's model/provider as fallback
+      if not model and npc and hasattr(npc, 'model') and npc.model:
+          model = npc.model
+      if not provider and npc and hasattr(npc, 'provider') and npc.provider:
+          provider = npc.provider
+
+      # Fallback to environment variables
+      if not model:
+          model = os.getenv('NPCSH_IMAGE_GEN_MODEL')
+      if not provider:
+          provider = os.getenv('NPCSH_IMAGE_GEN_PROVIDER')
+
+      # Final hardcoded fallbacks if nothing else is set
+      if not model:
+          model = "runwayml/stable-diffusion-v1-5"
+      if not provider:
+          provider = "diffusers"
+
+      # Parse attachments
+      input_images = []
+      if attachments_str and str(attachments_str).strip():
+          input_images = [p.strip() for p in str(attachments_str).split(',')]
+
+      output_messages = context.get('messages', [])
+
+      if not image_prompt:
+          output = "Error: No prompt provided for image generation."
+      else:
+          try:
+              # Generate image(s)
+              result = gen_image(
+                  prompt=image_prompt,
+                  model=model,
+                  provider=provider,
+                  npc=npc,
+                  height=height,
+                  width=width,
+                  n_images=n_images,
+                  input_images=input_images if input_images else None
+              )
+
+              # Ensure we have a list of images
+              if not isinstance(result, list):
+                  images_list = [result] if result is not None else []
+              else:
+                  images_list = result
+
+              saved_files = []
+              html_image_tags = []  # This list will store the raw HTML <img> tags
+
+              for i, image in enumerate(images_list):
+                  if image is None:
+                      continue
+
+                  # Determine output filename
+                  if output_name and str(output_name).strip():
+                      base_name, ext = os.path.splitext(os.path.expanduser(str(output_name)))
+                      if not ext:
+                          ext = ".png"
+                      current_output_file = f"{base_name}_{i}{ext}" if len(images_list) > 1 else f"{base_name}{ext}"
+                  else:
+                      os.makedirs(os.path.expanduser("~/.npcsh/images/"), exist_ok=True)
+                      current_output_file = (
+                          os.path.expanduser("~/.npcsh/images/")
+                          + f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.png"
+                      )
+
+                  # Save image to file
+                  image.save(current_output_file)
+                  saved_files.append(current_output_file)
+
+                  # Convert image to base64 and create an HTML <img> tag
+                  with open(current_output_file, 'rb') as f:
+                      img_data = base64.b64encode(f.read()).decode()
+                  # Using raw HTML <img> tag with data URI
+                  html_image_tags.append(f'<img src="data:image/png;base64,{img_data}" alt="Generated Image {i+1}" style="max-width: 100%; display: block; margin-top: 10px;">')
+
+              if saved_files:
+                  output_text_message = f"Image(s) generated and saved to: {', '.join(saved_files)}"
+                  if input_images:
+                      output_text_message = f"Image(s) edited and saved to: {', '.join(saved_files)}"
+
+                  output = output_text_message  # Keep the text message clean
+                  output += f"\n\nThe image files have been saved and are ready to view."
+                  output += "\n\n" + "\n".join(html_image_tags)  # Append all HTML <img> tags to the output
+              else:
+                  output = "No images were generated."
+
+          except Exception as e:
+              import traceback
+              traceback.print_exc()
+              output = f"Error {'editing' if input_images else 'generating'} image: {str(e)}"
+
+      context['output'] = output
+      context['messages'] = output_messages
+      context['model'] = model
+      context['provider'] = provider
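
Note: the jinx output embeds each saved image as a base64 data URI inside a raw HTML <img> tag. A self-contained sketch of that embedding using a 1x1 Pillow image in place of a generated file:

    # Sketch: base64 data-URI <img> tag, as built in the loop above.
    import base64
    from io import BytesIO
    from PIL import Image

    img = Image.new("RGB", (1, 1), "red")
    buf = BytesIO()
    img.save(buf, format="PNG")
    img_data = base64.b64encode(buf.getvalue()).decode()
    print(f'<img src="data:image/png;base64,{img_data}" alt="Generated Image 1" '
          f'style="max-width: 100%; display: block; margin-top: 10px;">')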