npcsh 1.1.4-py3-none-any.whl → 1.1.6-py3-none-any.whl
This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- npcsh/_state.py +470 -367
- npcsh/npc_team/corca_example.png +0 -0
- npcsh/npc_team/jinxs/{python_executor.jinx → code/python.jinx} +1 -1
- npcsh/npc_team/jinxs/{bash_executer.jinx → code/sh.jinx} +1 -2
- npcsh/npc_team/jinxs/code/sql.jinx +16 -0
- npcsh/npc_team/jinxs/modes/alicanto.jinx +88 -0
- npcsh/npc_team/jinxs/modes/corca.jinx +28 -0
- npcsh/npc_team/jinxs/modes/guac.jinx +46 -0
- npcsh/npc_team/jinxs/modes/plonk.jinx +57 -0
- npcsh/npc_team/jinxs/modes/pti.jinx +28 -0
- npcsh/npc_team/jinxs/modes/spool.jinx +40 -0
- npcsh/npc_team/jinxs/modes/wander.jinx +81 -0
- npcsh/npc_team/jinxs/modes/yap.jinx +25 -0
- npcsh/npc_team/jinxs/utils/breathe.jinx +20 -0
- npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
- npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
- npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
- npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
- npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
- npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
- npcsh/npc_team/jinxs/{edit_file.jinx → utils/edit_file.jinx} +1 -1
- npcsh/npc_team/jinxs/utils/flush.jinx +39 -0
- npcsh/npc_team/jinxs/utils/npc-studio.jinx +77 -0
- npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
- npcsh/npc_team/jinxs/utils/plan.jinx +33 -0
- npcsh/npc_team/jinxs/utils/roll.jinx +66 -0
- npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
- npcsh/npc_team/jinxs/utils/search.jinx +130 -0
- npcsh/npc_team/jinxs/utils/serve.jinx +29 -0
- npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
- npcsh/npc_team/jinxs/utils/trigger.jinx +36 -0
- npcsh/npc_team/jinxs/utils/vixynt.jinx +117 -0
- npcsh/npcsh.py +13 -11
- npcsh/routes.py +97 -1419
- npcsh-1.1.6.data/data/npcsh/npc_team/alicanto.jinx +88 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/breathe.jinx +20 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/build.jinx +65 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/compile.jinx +50 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/corca.jinx +28 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/edit_file.jinx +1 -1
- npcsh-1.1.6.data/data/npcsh/npc_team/flush.jinx +39 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/guac.jinx +46 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/help.jinx +52 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/init.jinx +41 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/jinxs.jinx +32 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/ots.jinx +61 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/plan.jinx +33 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/plonk.jinx +57 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/pti.jinx +28 -0
- npcsh-1.1.4.data/data/npcsh/npc_team/python_executor.jinx → npcsh-1.1.6.data/data/npcsh/npc_team/python.jinx +1 -1
- npcsh-1.1.6.data/data/npcsh/npc_team/roll.jinx +66 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/sample.jinx +56 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/search.jinx +130 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/serve.jinx +29 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/set.jinx +40 -0
- npcsh-1.1.4.data/data/npcsh/npc_team/bash_executer.jinx → npcsh-1.1.6.data/data/npcsh/npc_team/sh.jinx +1 -2
- npcsh-1.1.6.data/data/npcsh/npc_team/sleep.jinx +116 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/spool.jinx +40 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/sql.jinx +16 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/trigger.jinx +36 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/vixynt.jinx +117 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/wander.jinx +81 -0
- npcsh-1.1.6.data/data/npcsh/npc_team/yap.jinx +25 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/METADATA +1 -10
- npcsh-1.1.6.dist-info/RECORD +124 -0
- npcsh/npc_team/jinxs/image_generation.jinx +0 -29
- npcsh/npc_team/jinxs/internet_search.jinx +0 -31
- npcsh/npc_team/jinxs/kg_search.jinx +0 -43
- npcsh/npc_team/jinxs/memory_search.jinx +0 -36
- npcsh/npc_team/jinxs/screen_cap.jinx +0 -25
- npcsh-1.1.4.data/data/npcsh/npc_team/image_generation.jinx +0 -29
- npcsh-1.1.4.data/data/npcsh/npc_team/internet_search.jinx +0 -31
- npcsh-1.1.4.data/data/npcsh/npc_team/kg_search.jinx +0 -43
- npcsh-1.1.4.data/data/npcsh/npc_team/memory_search.jinx +0 -36
- npcsh-1.1.4.data/data/npcsh/npc_team/screen_cap.jinx +0 -25
- npcsh-1.1.4.dist-info/RECORD +0 -78
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/WHEEL +0 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/top_level.txt +0 -0

npcsh/npc_team/jinxs/utils/core/init.jinx
@@ -0,0 +1,41 @@
+jinx_name: "init"
+description: "Initialize NPC project"
+inputs:
+  - directory: "." # The directory where the NPC project should be initialized.
+  - templates: "" # Optional templates to use for initialization.
+  - context: "" # Optional context for project initialization.
+  - model: "" # Optional LLM model to set as default for the project.
+  - provider: "" # Optional LLM provider to set as default for the project.
+steps:
+  - name: "initialize_project"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.npc_compiler import initialize_npc_project
+
+      directory = context.get('directory')
+      templates = context.get('templates')
+      context_param = context.get('context') # Renamed to avoid conflict with Jinx context
+      model = context.get('model')
+      provider = context.get('provider')
+      output_messages = context.get('messages', [])
+
+      output_result = ""
+      try:
+          initialize_npc_project(
+              directory=directory,
+              templates=templates,
+              context=context_param, # Use the renamed context parameter
+              model=model,
+              provider=provider
+          )
+          output_result = f"NPC project initialized in {os.path.abspath(directory)}."
+      except NameError:
+          output_result = "Init function (initialize_npc_project) not available."
+      except Exception as e:
+          traceback.print_exc()
+          output_result = f"Error initializing project: {e}"
+
+      context['output'] = output_result
+      context['messages'] = output_messages
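
All of the jinx files added in this release share the shape shown above: YAML metadata (jinx_name, description, inputs) followed by steps whose engine: "python" code blocks read their inputs from a shared mutable context dict and write results back to context['output'] and context['messages']. Several steps also call exit(), raising SystemExit to end the step early, which the runner presumably handles. As a rough sketch only (the real runner lives in npcpy.npc_compiler and its API is not shown in this diff), such a step could be driven like this:

import yaml  # PyYAML

def run_python_steps(jinx_source: str, context: dict) -> dict:
    # Hypothetical driver for illustration; not npcpy's actual implementation.
    jinx = yaml.safe_load(jinx_source)
    for step in jinx.get("steps", []):
        if step.get("engine") == "python":
            try:
                # Each step mutates `context` in place, setting
                # context['output'] and context['messages'] as above.
                exec(step["code"], {"context": context})
            except SystemExit:
                break  # a step called exit() to bail out early
    return context

context = {"messages": [], "directory": "."}
# run_python_steps(open("init.jinx").read(), context)
# print(context.get("output"))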

npcsh/npc_team/jinxs/utils/core/jinxs.jinx
@@ -0,0 +1,32 @@
+jinx_name: jinxs
+description: Show available jinxs for the current NPC/Team
+inputs: []
+steps:
+  - name: list_jinxs
+    engine: python
+    code: |
+      output_lines = ["Available Jinxs:\n"]
+      jinxs_listed = set()
+
+      if hasattr(npc, 'team') and npc.team:
+          team = npc.team
+
+          if hasattr(team, 'jinxs_dict') and team.jinxs_dict:
+              output_lines.append(f"\n--- Team Jinxs ---\n")
+              for name, jinx_obj in sorted(team.jinxs_dict.items()):
+                  desc = getattr(jinx_obj, 'description', 'No description available.')
+                  output_lines.append(f"- /{name}: {desc}\n")
+                  jinxs_listed.add(name)
+
+      if hasattr(npc, 'jinxs_dict') and npc.jinxs_dict:
+          output_lines.append(f"\n--- NPC Jinxs for {npc.name} ---\n")
+          for name, jinx_obj in sorted(npc.jinxs_dict.items()):
+              if name not in jinxs_listed:
+                  desc = getattr(jinx_obj, 'description', 'No description available.')
+                  output_lines.append(f"- /{name}: {desc}\n")
+                  jinxs_listed.add(name)
+
+      if not jinxs_listed:
+          output = "No jinxs available for the current context."
+      else:
+          output = "".join(output_lines)

npcsh/npc_team/jinxs/utils/core/set.jinx
@@ -0,0 +1,40 @@
+jinx_name: "set"
+description: "Set configuration values"
+inputs:
+  - key: "" # The configuration key to set.
+  - value: "" # The value to set for the configuration key.
+steps:
+  - name: "set_config_value"
+    engine: "python"
+    code: |
+      import traceback
+      # Assuming set_npcsh_config_value is accessible
+      try:
+          from npcsh._state import set_npcsh_config_value
+      except ImportError:
+          def set_npcsh_config_value(key, value):
+              print(f"Mock: Setting config '{key}' to '{value}'")
+              # In a real scenario, this might write to a config file or global state
+              pass
+
+      key = context.get('key')
+      value = context.get('value')
+      output_messages = context.get('messages', [])
+
+      output_result = ""
+      if not key or not value:
+          context['output'] = "Usage: /set <key>=<value>"
+          context['messages'] = output_messages
+          exit()
+
+      try:
+          set_npcsh_config_value(key, value)
+          output_result = f"Configuration value '{key}' set."
+      except NameError:
+          output_result = "Set function (set_npcsh_config_value) not available."
+      except Exception as e:
+          traceback.print_exc()
+          output_result = f"Error setting configuration '{key}': {e}"
+
+      context['output'] = output_result
+      context['messages'] = output_messages

npcsh/npc_team/jinxs/utils/flush.jinx
@@ -0,0 +1,39 @@
+jinx_name: "flush"
+description: "Flush the last N messages from the conversation history."
+inputs:
+  - n: 1 # The number of messages to flush (default to 1).
+steps:
+  - name: "flush_messages"
+    engine: "python"
+    code: |
+      n = int(context.get('n', 1))
+      output_messages = context.get('messages', [])
+
+      if n <= 0:
+          context['output'] = "Error: Number of messages must be positive."
+          context['messages'] = output_messages
+          exit()
+
+      new_messages = list(output_messages)
+      original_len = len(new_messages)
+      removed_count = 0
+
+      if new_messages and new_messages[0].get("role") == "system":
+          system_message = new_messages[0]
+          working_messages = new_messages[1:]
+          num_to_remove = min(n, len(working_messages))
+          if num_to_remove > 0:
+              final_messages = [system_message] + working_messages[:-num_to_remove]
+              removed_count = num_to_remove
+          else:
+              final_messages = [system_message]
+      else:
+          num_to_remove = min(n, original_len)
+          if num_to_remove > 0:
+              final_messages = new_messages[:-num_to_remove]
+              removed_count = num_to_remove
+          else:
+              final_messages = []
+
+      context['output'] = f"Flushed {removed_count} message(s). Context is now {len(final_messages)} messages."
+      context['messages'] = final_messages
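
For illustration, the trimming rule above preserves a leading system message and drops the last n non-system messages; here it is replayed standalone on made-up data (the messages are hypothetical, not from the package):

messages = [
    {"role": "system", "content": "system prompt"},
    {"role": "user", "content": "first question"},
    {"role": "assistant", "content": "first answer"},
    {"role": "user", "content": "second question"},
]
n = 2
system_message, working = messages[0], messages[1:]
num_to_remove = min(n, len(working))
if num_to_remove > 0:
    final_messages = [system_message] + working[:-num_to_remove]
else:
    final_messages = [system_message]
print(f"Flushed {num_to_remove} message(s). Context is now {len(final_messages)} messages.")
# Flushed 2 message(s). Context is now 2 messages.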

npcsh/npc_team/jinxs/utils/npc-studio.jinx
@@ -0,0 +1,77 @@
+jinx_name: "npc-studio"
+description: "Start npc studio"
+inputs:
+  - user_command: ""
+steps:
+  - name: "launch_npc_studio"
+    engine: "python"
+    code: |
+      import os
+      import subprocess
+      import sys
+      from pathlib import Path
+      import traceback
+
+      NPC_STUDIO_DIR = Path.home() / ".npcsh" / "npc-studio"
+
+      user_command = context.get('user_command')
+      output_messages = context.get('messages', [])
+      output_result = ""
+
+      try:
+          if not NPC_STUDIO_DIR.exists():
+              os.makedirs(NPC_STUDIO_DIR.parent, exist_ok=True)
+              subprocess.check_call([
+                  "git", "clone",
+                  "https://github.com/npc-worldwide/npc-studio.git",
+                  str(NPC_STUDIO_DIR)
+              ])
+          else:
+              subprocess.check_call(
+                  ["git", "pull"],
+                  cwd=NPC_STUDIO_DIR
+              )
+
+          subprocess.check_call(
+              ["npm", "install"],
+              cwd=NPC_STUDIO_DIR
+          )
+
+          req_file = NPC_STUDIO_DIR / "requirements.txt"
+          if req_file.exists():
+              subprocess.check_call([
+                  sys.executable,
+                  "-m",
+                  "pip",
+                  "install",
+                  "-r",
+                  str(req_file)
+              ])
+
+          backend = subprocess.Popen(
+              [sys.executable, "npc_studio_serve.py"],
+              cwd=NPC_STUDIO_DIR
+          )
+
+          dev_server = subprocess.Popen(
+              ["npm", "run", "dev"],
+              cwd=NPC_STUDIO_DIR
+          )
+
+          frontend = subprocess.Popen(
+              ["npm", "start"],
+              cwd=NPC_STUDIO_DIR
+          )
+
+          output_result = (
+              f"NPC Studio started!\n"
+              f"Backend PID={backend.pid}, "
+              f"Dev Server PID={dev_server.pid}, "
+              f"Frontend PID={frontend.pid}"
+          )
+      except Exception as e:
+          traceback.print_exc()
+          output_result = f"Failed to start NPC Studio: {e}"
+
+      context['output'] = output_result
+      context['messages'] = output_messages

npcsh/npc_team/jinxs/utils/ots.jinx
@@ -0,0 +1,61 @@
+jinx_name: "ots"
+description: "Take screenshot and analyze with vision model. Usage: /ots <prompt>"
+inputs:
+  - prompt
+  - image_paths_args: ""
+  - vmodel: ""
+  - vprovider: ""
+steps:
+  - name: "analyze_screenshot_or_image"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.data.image import capture_screenshot
+
+      user_prompt = context.get('prompt') or ""
+      image_paths_args_str = context.get('image_paths_args') or ""
+      vision_model = context.get('vmodel') or ""
+      vision_provider = context.get('vprovider') or ""
+      stream_output = context.get('stream') or False
+      api_url = context.get('api_url') or ""
+      api_key = context.get('api_key') or ""
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+
+      image_paths = []
+      if image_paths_args_str.strip():
+          for img_path_arg in image_paths_args_str.split(','):
+              full_path = os.path.abspath(os.path.expanduser(img_path_arg.strip()))
+              if os.path.exists(full_path):
+                  image_paths.append(full_path)
+
+      if not image_paths:
+          screenshot_info = capture_screenshot(full=False)
+          if screenshot_info and "file_path" in screenshot_info:
+              image_paths.append(screenshot_info["file_path"])
+              print(f"📸 Screenshot captured: {screenshot_info.get('filename', os.path.basename(screenshot_info['file_path']))}")
+
+      if not vision_model:
+          vision_model = getattr(current_npc, 'model', 'gpt-4o-mini')
+
+      if not vision_provider:
+          vision_provider = getattr(current_npc, 'provider', 'openai')
+
+      response_data = get_llm_response(
+          prompt=user_prompt,
+          model=vision_model,
+          provider=vision_provider,
+          messages=output_messages,
+          images=image_paths,
+          stream=stream_output,
+          npc=current_npc,
+          api_url=api_url or None,
+          api_key=api_key or None
+      )
+
+      context['output'] = response_data.get('response', 'No response received')
+      context['messages'] = response_data.get('messages', output_messages)
+      context['model'] = vision_model
+      context['provider'] = vision_provider

npcsh/npc_team/jinxs/utils/plan.jinx
@@ -0,0 +1,33 @@
+jinx_name: "plan"
+description: "Execute a plan command"
+inputs:
+  - plan_description: "" # Description of the plan to execute.
+steps:
+  - name: "execute_plan"
+    engine: "python"
+    code: |
+      import traceback
+      from npcpy.work.plan import execute_plan_command
+
+      plan_description = context.get('plan_description')
+      output_messages = context.get('messages', [])
+
+      if not plan_description or not plan_description.strip():
+          context['output'] = "Usage: /plan <description_of_plan>"
+          context['messages'] = output_messages
+          exit()
+
+      try:
+          # Pass all current context as kwargs to execute_plan_command
+          result = execute_plan_command(command=plan_description, **context)
+
+          if isinstance(result, dict):
+              context['output'] = result.get('output', 'Plan executed.')
+              context['messages'] = result.get('messages', output_messages)
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error executing plan: {e}"
+          context['messages'] = output_messages

npcsh/npc_team/jinxs/utils/roll.jinx
@@ -0,0 +1,66 @@
+jinx_name: "roll"
+description: "Generate a video from a text prompt."
+inputs:
+  - prompt: "" # Required text prompt for video generation.
+  - num_frames: 125 # Number of frames for the video.
+  - width: 256 # Width of the video.
+  - height: 256 # Height of the video.
+  - output_path: "output.mp4" # Output file path for the video.
+  - vgmodel: "" # Video generation model to use. Defaults to NPCSH_VIDEO_GEN_MODEL or NPC's model.
+  - vgprovider: "" # Video generation provider to use. Defaults to NPCSH_VIDEO_GEN_PROVIDER or NPC's provider.
+steps:
+  - name: "generate_video"
+    engine: "python"
+    code: |
+      import traceback
+      from npcpy.llm_funcs import gen_video
+      # Assuming NPCSH_VIDEO_GEN_MODEL and NPCSH_VIDEO_GEN_PROVIDER are accessible
+
+      prompt = context.get('prompt')
+      num_frames = int(context.get('num_frames', 125)) # Ensure int type
+      width = int(context.get('width', 256)) # Ensure int type
+      height = int(context.get('height', 256)) # Ensure int type
+      output_path = context.get('output_path')
+      video_gen_model = context.get('vgmodel')
+      video_gen_provider = context.get('vgprovider')
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+
+      if not prompt or not prompt.strip():
+          context['output'] = "Usage: /roll <your prompt>"
+          context['messages'] = output_messages
+          exit()
+
+      # Fallback for model/provider if not explicitly set in Jinx inputs
+      if not video_gen_model and current_npc and current_npc.model:
+          video_gen_model = current_npc.model
+      if not video_gen_provider and current_npc and current_npc.provider:
+          video_gen_provider = current_npc.provider
+
+      # Final fallbacks (these would ideally come from npcsh._state config)
+      if not video_gen_model: video_gen_model = "stable-video-diffusion" # Example default
+      if not video_gen_provider: video_gen_provider = "diffusers" # Example default
+
+      try:
+          result = gen_video(
+              prompt=prompt,
+              model=video_gen_model,
+              provider=video_gen_provider,
+              npc=current_npc,
+              num_frames=num_frames,
+              width=width,
+              height=height,
+              output_path=output_path,
+              **context.get('api_kwargs', {}) # Assuming api_kwargs might be passed
+          )
+
+          if isinstance(result, dict):
+              context['output'] = result.get('output', 'Video generated.')
+              context['messages'] = result.get('messages', output_messages)
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error generating video: {e}"
+          context['messages'] = output_messages

npcsh/npc_team/jinxs/utils/sample.jinx
@@ -0,0 +1,56 @@
+jinx_name: "sample"
+description: "Send a prompt directly to the LLM."
+inputs:
+  - prompt: "" # Required text prompt to send to the LLM.
+  - model: "" # LLM model to use. Defaults to NPC's model.
+  - provider: "" # LLM provider to use. Defaults to NPC's provider.
+steps:
+  - name: "send_prompt_to_llm"
+    engine: "python"
+    code: |
+      import traceback
+      from npcpy.llm_funcs import get_llm_response
+
+      prompt = context.get('prompt')
+      llm_model = context.get('model')
+      llm_provider = context.get('provider')
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+
+      if not prompt or not prompt.strip():
+          context['output'] = "Usage: /sample <your prompt> [-m --model] model [-p --provider] provider"
+          context['messages'] = output_messages
+          exit()
+
+      # Fallback for model/provider if not explicitly set in Jinx inputs
+      if not llm_model and current_npc and current_npc.model:
+          llm_model = current_npc.model
+      if not llm_provider and current_npc and current_npc.provider:
+          llm_provider = current_npc.provider
+
+      # Final fallbacks (these would ideally come from npcsh._state config)
+      if not llm_model: llm_model = "gemini-1.5-pro" # Example default
+      if not llm_provider: llm_provider = "gemini" # Example default
+
+      try:
+          result = get_llm_response(
+              prompt=prompt,
+              model=llm_model,
+              provider=llm_provider,
+              npc=current_npc,
+              **{k:v for k,v in context.items() if k not in ['messages', 'prompt', 'model', 'provider']} # Pass other context
+          )
+
+          if isinstance(result, dict):
+              context['output'] = result.get('response')
+              context['messages'] = result.get('messages', output_messages)
+              context['model'] = llm_model
+              context['provider'] = llm_provider
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error sampling LLM: {e}"
+          context['messages'] = output_messages

npcsh/npc_team/jinxs/utils/search.jinx
@@ -0,0 +1,130 @@
+jinx_name: "search"
+description: >
+  Executes a search across various sources.
+  Usage:
+  /search <query> (Default: Web Search)
+  /search --memory <query> (Search approved memories)
+  /search --kg <query> (Search the knowledge graph)
+  /search --rag [-f <paths>] <query> (Execute a RAG search)
+  /search --brainblast <query> (Advanced history search)
+inputs:
+  - query: ""
+  - memory: false
+  - kg: false
+  - rag: false
+  - brainblast: false
+  - file_paths: ""
+  - history_db_path: "~/npcsh_history.db"
+  - vector_db_path: "~/npcsh_chroma.db"
+  - sprovider: ""
+  - emodel: ""
+  - eprovider: ""
+steps:
+  - name: "execute_unified_search"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+
+      # Access query from context
+      query = context.get('query')
+      if not query or not query.strip():
+          context['output'] = "Usage: /search [--memory|--kg|--rag|--brainblast] <query>"
+      else:
+          # state is available as a GLOBAL variable (from extra_globals)
+          # Access it directly, not from context
+          try:
+              current_state = state # This should work now
+          except NameError:
+              context['output'] = "Error: Shell state not available in jinx context"
+              raise
+
+          current_npc = current_state.npc
+          current_team = current_state.team
+
+          npc_name = getattr(current_npc, 'name', '__none__') if current_npc else '__none__'
+          team_name = getattr(current_team, 'name', '__none__') if current_team else '__none__'
+          current_path = os.getcwd()
+          db_path = os.path.expanduser(context.get("history_db_path"))
+
+          try:
+              cmd_history = CommandHistory(db_path)
+
+              if context.get('memory'):
+                  memories = get_relevant_memories(
+                      command_history=cmd_history,
+                      npc_name=npc_name,
+                      team_name=team_name,
+                      path=current_path,
+                      query=query,
+                      max_memories=10,
+                      state=current_state # Pass the state object
+                  )
+                  print(memories)
+
+                  if not memories:
+                      output = f"No memories found for query: '{query}'"
+                  else:
+                      output = f"Found {len(memories)} memories:\n\n" + "\n".join(
+                          f"{i}. [{mem.get('timestamp', 'unknown')}] {mem.get('final_memory') or mem.get('initial_memory')}"
+                          for i, mem in enumerate(memories, 1)
+                      )
+
+              elif context.get('kg'):
+                  facts = search_kg_facts(
+                      cmd_history,
+                      npc_name,
+                      team_name,
+                      current_path,
+                      query
+                  )
+                  print(facts)
+
+                  if not facts:
+                      output = f"No KG facts found for query: '{query}'"
+                  else:
+                      output = f"Found {len(facts)} KG facts:\n\n" + "\n".join(
+                          f"{i}. {fact.get('statement')}" for i, fact in enumerate(facts, 1)
+                      )
+
+              elif context.get('rag'):
+                  file_paths_str = context.get('file_paths', '')
+                  file_paths = [os.path.abspath(os.path.expanduser(p.strip())) for p in file_paths_str.split(',') if p.strip()]
+                  emodel = context.get('emodel') or current_state.embedding_model
+                  eprovider = context.get('eprovider') or current_state.embedding_provider
+
+                  file_contents = []
+                  for path in file_paths:
+                      chunks = load_file_contents(path)
+                      basename = os.path.basename(path)
+                      file_contents.extend([f"{basename}: {chunk}" for chunk in chunks])
+
+                  result = execute_rag_command(
+                      command=query,
+                      vector_db_path=os.path.expanduser(context.get('vector_db_path')),
+                      embedding_model=emodel,
+                      embedding_provider=eprovider,
+                      file_contents=file_contents or None
+                  )
+                  print(result)
+                  output = result.get('response', 'No response from RAG.')
+
+              elif context.get('brainblast'):
+                  result = execute_brainblast_command(
+                      command=query,
+                      command_history=cmd_history,
+                      **context
+                  )
+                  print(result)
+                  output = result.get('output', 'Brainblast search executed.')
+
+              else:
+                  # Default to web search
+                  provider = context.get('sprovider') or current_state.search_provider
+                  results = search_web(query, provider=provider)
+                  output = "\n".join([f"- {res}" for res in results]) if results else "No web results found."
+
+          except Exception as e:
+              output = f"An error occurred in the search jinx: {e}\n{traceback.format_exc()}"
+
+          context['output'] = output
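
Note that CommandHistory, get_relevant_memories, search_kg_facts, execute_rag_command, execute_brainblast_command, load_file_contents, and search_web are used above without imports. Like state (see the in-code comment about extra_globals), they are presumably injected into the step's globals by the shell before the code runs.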

npcsh/npc_team/jinxs/utils/serve.jinx
@@ -0,0 +1,29 @@
+jinx_name: "serve"
+description: "Serve an NPC Team"
+inputs:
+  - port: 5337 # The port to run the Flask server on.
+  - cors: "" # Comma-separated CORS origins.
+steps:
+  - name: "start_flask_server"
+    engine: "python"
+    code: |
+      from npcpy.serve import start_flask_server
+
+      port = context.get('port')
+      cors_str = context.get('cors')
+      output_messages = context.get('messages', [])
+
+      cors_origins = None
+      if cors_str and cors_str.strip():
+          cors_origins = [origin.strip() for origin in cors_str.split(",")]
+
+      # start_flask_server blocks, so this will hold the Jinx until the server is stopped.
+      # In a real-world scenario, you might want to run this in a separate process
+      # or have a non-blocking server start.
+      start_flask_server(
+          port=int(port), # Ensure port is an integer
+          cors_origins=cors_origins,
+      )
+
+      context['output'] = "NPC Team server started. Execution of this jinx will pause until the server is stopped."
+      context['messages'] = output_messages