npcsh 0.1.2__py3-none-any.whl → 1.1.13__py3-none-any.whl
This diff compares the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
- npcsh/_state.py +3508 -0
- npcsh/alicanto.py +65 -0
- npcsh/build.py +291 -0
- npcsh/completion.py +206 -0
- npcsh/config.py +163 -0
- npcsh/corca.py +50 -0
- npcsh/execution.py +185 -0
- npcsh/guac.py +46 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_server.py +299 -0
- npcsh/npc.py +323 -0
- npcsh/npc_team/alicanto.npc +2 -0
- npcsh/npc_team/alicanto.png +0 -0
- npcsh/npc_team/corca.npc +12 -0
- npcsh/npc_team/corca.png +0 -0
- npcsh/npc_team/corca_example.png +0 -0
- npcsh/npc_team/foreman.npc +7 -0
- npcsh/npc_team/frederic.npc +6 -0
- npcsh/npc_team/frederic4.png +0 -0
- npcsh/npc_team/guac.png +0 -0
- npcsh/npc_team/jinxs/code/python.jinx +11 -0
- npcsh/npc_team/jinxs/code/sh.jinx +34 -0
- npcsh/npc_team/jinxs/code/sql.jinx +16 -0
- npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
- npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
- npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
- npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
- npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
- npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
- npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
- npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
- npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
- npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
- npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
- npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
- npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
- npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
- npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
- npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
- npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
- npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
- npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
- npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
- npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
- npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
- npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
- npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
- npcsh/npc_team/jinxs/utils/search.jinx +130 -0
- npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
- npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
- npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
- npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
- npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
- npcsh/npc_team/kadiefa.npc +3 -0
- npcsh/npc_team/kadiefa.png +0 -0
- npcsh/npc_team/npcsh.ctx +18 -0
- npcsh/npc_team/npcsh_sibiji.png +0 -0
- npcsh/npc_team/plonk.npc +2 -0
- npcsh/npc_team/plonk.png +0 -0
- npcsh/npc_team/plonkjr.npc +2 -0
- npcsh/npc_team/plonkjr.png +0 -0
- npcsh/npc_team/sibiji.npc +3 -0
- npcsh/npc_team/sibiji.png +0 -0
- npcsh/npc_team/spool.png +0 -0
- npcsh/npc_team/yap.png +0 -0
- npcsh/npcsh.py +296 -112
- npcsh/parsing.py +118 -0
- npcsh/plonk.py +54 -0
- npcsh/pti.py +54 -0
- npcsh/routes.py +139 -0
- npcsh/spool.py +48 -0
- npcsh/ui.py +199 -0
- npcsh/wander.py +62 -0
- npcsh/yap.py +50 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
- npcsh-1.1.13.dist-info/METADATA +522 -0
- npcsh-1.1.13.dist-info/RECORD +135 -0
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
- npcsh-1.1.13.dist-info/entry_points.txt +9 -0
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
- npcsh/command_history.py +0 -81
- npcsh/helpers.py +0 -36
- npcsh/llm_funcs.py +0 -295
- npcsh/main.py +0 -5
- npcsh/modes.py +0 -343
- npcsh/npc_compiler.py +0 -124
- npcsh-0.1.2.dist-info/METADATA +0 -99
- npcsh-0.1.2.dist-info/RECORD +0 -14
- npcsh-0.1.2.dist-info/entry_points.txt +0 -2
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,26 @@
+jinx_name: "serve"
+description: "Serve an NPC Team"
+inputs:
+  - port: 5337 # The port to run the Flask server on.
+  - cors: "" # Comma-separated CORS origins.
+steps:
+  - name: "start_flask_server"
+    engine: "python"
+    code: |
+      from npcpy.serve import start_flask_server
+
+      port = context.get('port')
+      cors_str = context.get('cors')
+      output_messages = context.get('messages', [])
+
+      cors_origins = None
+      if cors_str and cors_str.strip():
+          cors_origins = [origin.strip() for origin in cors_str.split(",")]
+
+      start_flask_server(
+          port=int(port), # Ensure port is an integer
+          cors_origins=cors_origins,
+      )
+
+      context['output'] = "NPC Team server started. Execution of this jinx will pause until the server is stopped."
+      context['messages'] = output_messages
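
For reference, the CORS handling above reduces to this standalone sketch; the origin values are illustrative, not npcsh defaults:

    cors_str = "http://localhost:3000, http://localhost:5173"
    cors_origins = None
    if cors_str and cors_str.strip():
        cors_origins = [origin.strip() for origin in cors_str.split(",")]
    # -> ['http://localhost:3000', 'http://localhost:5173']; an empty string
    # leaves cors_origins as None, which start_flask_server presumably treats
    # as its default policy.
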
@@ -0,0 +1,40 @@
+jinx_name: "set"
+description: "Set configuration values"
+inputs:
+  - key: "" # The configuration key to set.
+  - value: "" # The value to set for the configuration key.
+steps:
+  - name: "set_config_value"
+    engine: "python"
+    code: |
+      import traceback
+      # Assuming set_npcsh_config_value is accessible
+      try:
+          from npcsh._state import set_npcsh_config_value
+      except ImportError:
+          def set_npcsh_config_value(key, value):
+              print(f"Mock: Setting config '{key}' to '{value}'")
+              # In a real scenario, this might write to a config file or global state
+              pass
+
+      key = context.get('key')
+      value = context.get('value')
+      output_messages = context.get('messages', [])
+
+      output_result = ""
+      if not key or not value:
+          context['output'] = "Usage: /set <key>=<value>"
+          context['messages'] = output_messages
+          exit()
+
+      try:
+          set_npcsh_config_value(key, value)
+          output_result = f"Configuration value '{key}' set."
+      except NameError:
+          output_result = "Set function (set_npcsh_config_value) not available."
+      except Exception as e:
+          traceback.print_exc()
+          output_result = f"Error setting configuration '{key}': {e}"
+
+      context['output'] = output_result
+      context['messages'] = output_messages
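
The usage string above implies the shell splits a single key=value token into this jinx's two inputs; a minimal sketch of that split, with an illustrative key (the real npcsh parser may differ):

    raw = "NPCSH_CHAT_MODEL=llama3.2"
    key, _, value = raw.partition("=")
    # key -> 'NPCSH_CHAT_MODEL', value -> 'llama3.2'; if either is empty,
    # the jinx prints the usage string and exits.
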
@@ -0,0 +1,34 @@
+jinx_name: sh
+description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
+inputs:
+  - bash_command
+steps:
+  - name: execute_bash
+    engine: python
+    code: |
+      import subprocess
+      import os
+
+      cmd = '{{ bash_command }}'
+      output = ""
+
+      process = subprocess.Popen(
+          cmd,
+          shell=True,
+          stdout=subprocess.PIPE,
+          stderr=subprocess.PIPE
+      )
+      stdout, stderr = process.communicate()
+
+      # Only show debug output if NPCSH_DEBUG is set
+      if os.environ.get("NPCSH_DEBUG") == "1":
+          import sys
+          print(f"[sh] cmd: {cmd}", file=sys.stderr)
+          print(f"[sh] stdout: {stdout.decode('utf-8', errors='ignore')[:200]}", file=sys.stderr)
+
+      if stderr:
+          output = f"Error: {stderr.decode('utf-8')}"
+      else:
+          output = stdout.decode('utf-8')
+
+
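
Note that the jinx above reports an error whenever the command writes anything to stderr, even if it exits 0 (e.g., grep emitting permission warnings). A variant keyed off the exit code instead, shown only for contrast with the shipped behavior:

    import subprocess

    proc = subprocess.run("ls /nonexistent", shell=True, capture_output=True, text=True)
    if proc.returncode != 0:
        output = f"Error: {proc.stderr}"
    else:
        output = proc.stdout  # stderr may still carry non-fatal warnings
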
Binary file
@@ -0,0 +1,116 @@
+jinx_name: "sleep"
+description: "Evolve knowledge graph. Use --dream to also run creative synthesis."
+inputs:
+  - dream: False # Boolean flag to also run creative synthesis (dream process).
+  - ops: "" # Comma-separated list of operations to configure KG sleep process.
+  - model: "" # LLM model to use for KG evolution. Defaults to NPC's model.
+  - provider: "" # LLM provider to use for KG evolution. Defaults to NPC's provider.
+steps:
+  - name: "evolve_knowledge_graph"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db
+      from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process
+      # Assuming render_markdown is available if needed for logging progress
+
+      is_dreaming = context.get('dream')
+      operations_str = context.get('ops')
+      llm_model = context.get('model')
+      llm_provider = context.get('provider')
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+      current_team = context.get('team')
+
+      operations_config = None
+      if operations_str and isinstance(operations_str, str):
+          operations_config = [op.strip() for op in operations_str.split(',')]
+
+      # Fallback for model/provider if not explicitly set in Jinx inputs
+      if not llm_model and current_npc and current_npc.model:
+          llm_model = current_npc.model
+      if not llm_provider and current_npc and current_npc.provider:
+          llm_provider = current_npc.provider
+
+      # Final fallbacks (these would ideally come from npcsh._state config)
+      if not llm_model: llm_model = "gemini-1.5-pro" # Example default
+      if not llm_provider: llm_provider = "gemini" # Example default
+
+      team_name = current_team.name if current_team else "__none__"
+      npc_name = current_npc.name if isinstance(current_npc, type(None).__class__) else "__none__"
+      current_path = os.getcwd()
+      scope_str = f"Team: '{team_name}', NPC: '{npc_name}', Path: '{current_path}'"
+
+      # Assume render_markdown exists
+      # render_markdown(f"- Checking knowledge graph for scope: {scope_str}")
+
+      command_history = None
+      try:
+          db_path = os.getenv("NPCSH_DB_PATH", os.path.expanduser("~/npcsh_history.db"))
+          command_history = CommandHistory(db_path)
+          engine = command_history.engine
+      except Exception as e:
+          context['output'] = f"Error connecting to history database for KG access: {e}"
+          context['messages'] = output_messages
+          exit()
+
+      output_result = ""
+      try:
+          current_kg = load_kg_from_db(engine, team_name, npc_name, current_path)
+
+          if not current_kg or not current_kg.get('facts'):
+              output_msg = f"Knowledge graph for the current scope is empty. Nothing to process.\n"
+              output_msg += f" - Scope Checked: {scope_str}\n\n"
+              output_msg += "**Hint:** Have a conversation or run some commands first to build up knowledge in this specific context. The KG is unique to each combination of Team, NPC, and directory."
+              context['output'] = output_msg
+              context['messages'] = output_messages
+              exit()
+
+          original_facts = len(current_kg.get('facts', []))
+          original_concepts = len(current_kg.get('concepts', []))
+
+          process_type = "Sleep"
+          ops_display = f"with operations: {operations_config}" if operations_config else "with random operations"
+          # render_markdown(f"- Initiating sleep process {ops_display}")
+
+          evolved_kg, _ = kg_sleep_process(
+              existing_kg=current_kg,
+              model=llm_model,
+              provider=llm_provider,
+              npc=current_npc,
+              operations_config=operations_config
+          )
+
+          if is_dreaming:
+              process_type += " & Dream"
+              # render_markdown(f"- Initiating dream process on the evolved KG...")
+              evolved_kg, _ = kg_dream_process(
+                  existing_kg=evolved_kg,
+                  model=llm_model,
+                  provider=llm_provider,
+                  npc=current_npc
+              )
+
+          save_kg_to_db(engine, evolved_kg, team_name, npc_name, current_path) # Changed conn to engine
+
+          new_facts = len(evolved_kg.get('facts', []))
+          new_concepts = len(evolved_kg.get('concepts', []))
+
+          output_result = f"{process_type} process complete.\n"
+          output_result += f"- Facts: {original_facts} -> {new_facts} ({new_facts - original_facts:+})\n"
+          output_result += f"- Concepts: {original_concepts} -> {new_concepts} ({new_concepts - original_concepts:+})"
+
+          print('Evolved facts:', evolved_kg.get('facts'))
+          print('Evolved concepts:', evolved_kg.get('concepts'))
+
+          context['output'] = output_result
+          context['messages'] = output_messages
+
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error during KG evolution process: {e}"
+          context['messages'] = output_messages
+      finally:
+          if command_history: # Check if it was successfully initialized
+              command_history.close()
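
One detail worth flagging in the code above: type(None).__class__ is type, so the npc_name guard tests isinstance(current_npc, type), which is false for ordinary NPC instances and sends npc_name to "__none__":

    type(None).__class__        # <class 'type'>
    isinstance(object(), type)  # False -> npc_name resolves to "__none__"
    # A plain null check would read: current_npc.name if current_npc else "__none__"
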
@@ -0,0 +1,161 @@
+jinx_name: spool
+description: Interactive chat mode - simple conversational interface with an NPC
+inputs:
+  - model: null
+  - provider: null
+  - attachments: null
+  - stream: true
+
+steps:
+  - name: spool_repl
+    engine: python
+    code: |
+      import os
+      import sys
+      from termcolor import colored
+
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.npc_sysenv import get_system_message, render_markdown
+      from npcpy.data.load import load_file_contents
+      from npcpy.data.text import rag_search
+
+      npc = context.get('npc')
+      team = context.get('team')
+      messages = context.get('messages', [])
+      stream = context.get('stream', True)
+      attachments = context.get('attachments')
+
+      # Use NPC's model/provider or fallback
+      model = context.get('model') or (npc.model if npc else None)
+      provider = context.get('provider') or (npc.provider if npc else None)
+
+      # ASCII art
+      print("""
+       _____ ____ ____ ____ _
+      / ___/| _ \ / __ \ / __ \| |
+      \___ \| |_) | | | | | | | |
+       ___) | __/| | | | | | | |___
+      |____/|_| \____/ \____/|_____|
+      """)
+
+      npc_name = npc.name if npc else "chat"
+      print(f"Entering spool mode (NPC: {npc_name}). Type '/sq' to exit.")
+
+      # Load attachments if provided
+      loaded_chunks = {}
+      if attachments:
+          if isinstance(attachments, str):
+              attachments = [f.strip() for f in attachments.split(',')]
+          for file_path in attachments:
+              file_path = os.path.expanduser(file_path)
+              if os.path.exists(file_path):
+                  try:
+                      chunks = load_file_contents(file_path)
+                      loaded_chunks[file_path] = chunks
+                      print(colored(f"Loaded {len(chunks)} chunks from: {file_path}", "green"))
+                  except Exception as e:
+                      print(colored(f"Error loading {file_path}: {e}", "red"))
+
+      # Ensure system message
+      if not messages or messages[0].get("role") != "system":
+          sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
+          messages.insert(0, {"role": "system", "content": sys_msg})
+
+      # REPL loop
+      while True:
+          try:
+              prompt_str = f"{npc_name}> "
+              user_input = input(prompt_str).strip()
+
+              if not user_input:
+                  continue
+
+              if user_input.lower() == "/sq":
+                  print("Exiting spool mode.")
+                  break
+
+              # Handle /ots for screenshots inline
+              if user_input.startswith("/ots"):
+                  from npcpy.data.image import capture_screenshot
+                  parts = user_input.split()
+                  image_paths = []
+                  if len(parts) > 1:
+                      for p in parts[1:]:
+                          fp = os.path.expanduser(p)
+                          if os.path.exists(fp):
+                              image_paths.append(fp)
+                  else:
+                      ss = capture_screenshot()
+                      if ss and "file_path" in ss:
+                          image_paths.append(ss["file_path"])
+                          print(colored(f"Screenshot: {ss['filename']}", "green"))
+
+                  if image_paths:
+                      vision_prompt = input("Prompt for image(s): ").strip() or "Describe these images."
+                      resp = get_llm_response(
+                          vision_prompt,
+                          model=npc.vision_model if hasattr(npc, 'vision_model') else model,
+                          provider=npc.vision_provider if hasattr(npc, 'vision_provider') else provider,
+                          messages=messages,
+                          images=image_paths,
+                          stream=stream,
+                          npc=npc
+                      )
+                      messages = resp.get('messages', messages)
+                      render_markdown(str(resp.get('response', '')))
+                  continue
+
+              # Add RAG context if files loaded
+              current_prompt = user_input
+              if loaded_chunks:
+                  context_content = ""
+                  for filename, chunks in loaded_chunks.items():
+                      full_text = "\n".join(chunks)
+                      retrieved = rag_search(user_input, full_text, similarity_threshold=0.3)
+                      if retrieved:
+                          context_content += f"\n\nContext from {filename}:\n{retrieved}\n"
+                  if context_content:
+                      current_prompt += f"\n\n--- Relevant context ---{context_content}"
+
+              # Get response
+              resp = get_llm_response(
+                  current_prompt,
+                  model=model,
+                  provider=provider,
+                  messages=messages,
+                  stream=stream,
+                  npc=npc
+              )
+
+              messages = resp.get('messages', messages)
+              response_text = resp.get('response', '')
+
+              # Handle streaming vs non-streaming
+              if hasattr(response_text, '__iter__') and not isinstance(response_text, str):
+                  full_response = ""
+                  for chunk in response_text:
+                      if hasattr(chunk, 'choices') and chunk.choices:
+                          delta = chunk.choices[0].delta
+                          if hasattr(delta, 'content') and delta.content:
+                              print(delta.content, end='', flush=True)
+                              full_response += delta.content
+                  print()
+              else:
+                  render_markdown(str(response_text))
+
+              # Track usage if available
+              if 'usage' in resp and npc and hasattr(npc, 'shared_context'):
+                  usage = resp['usage']
+                  npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+                  npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+                  npc.shared_context['turn_count'] += 1
+
+          except KeyboardInterrupt:
+              print("\nUse '/sq' to exit or continue.")
+              continue
+          except EOFError:
+              print("\nExiting spool mode.")
+              break
+
+      context['output'] = "Exited spool mode."
+      context['messages'] = messages
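
The stream check above, isolated; note that plain lists satisfy it too, so a non-streaming list response would also be walked chunk by chunk:

    def looks_like_stream(resp):
        return hasattr(resp, '__iter__') and not isinstance(resp, str)

    looks_like_stream("hello")           # False
    looks_like_stream(iter(["a", "b"]))  # True
    looks_like_stream(["a", "b"])        # True
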
Binary file
@@ -0,0 +1,16 @@
+jinx_name: sql
+description: Execute queries on the ~/npcsh_history.db to pull data. The database
+  contains only information about conversations and other user-provided data. It does
+  not store any information about individual files. Avoid using percent signs unless absolutely necessary.
+inputs:
+  - sql_query
+steps:
+  - engine: python
+    code: |
+      import pandas as pd
+      query = "{{ sql_query }}"
+      try:
+          df = pd.read_sql_query(query, npc.db_conn)
+      except Exception as e:
+          df = pd.DataFrame({'Error': [str(e)]})
+      output = df.to_string()
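
The warning about percent signs likely stems from DB-API paramstyles that treat % as a placeholder marker in raw SQL. A bound-parameter query sidesteps that entirely; the table and column names below are illustrative, not the actual npcsh schema:

    import os
    import pandas as pd
    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite:///" + os.path.expanduser("~/npcsh_history.db"))
    df = pd.read_sql_query(
        text("SELECT * FROM history WHERE command LIKE :pat"),
        engine,
        params={"pat": "%weather%"},  # the wildcard stays out of the SQL string
    )
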
@@ -0,0 +1,61 @@
+jinx_name: "trigger"
+description: "Creates a persistent listener (--listen) or a scheduled task (--cron)."
+inputs:
+  - listen: "" # The description for a persistent, event-driven listener.
+  - cron: "" # The description for a scheduled, time-based task.
+steps:
+  - name: "execute_command"
+    engine: "python"
+    code: |
+      import traceback
+      from npcpy.work.trigger import execute_trigger_command # For --listen
+      from npcpy.work.plan import execute_plan_command # For --cron
+
+      listen_description = context.get('listen')
+      cron_description = context.get('cron')
+      output_messages = context.get('messages', [])
+
+      USAGE = 'Usage: /trigger --listen "<description>" OR /trigger --cron "<description>"'
+
+      # Determine which command was used and set the appropriate variables
+      subcommand = None
+      description = None
+      executor_func = None
+
+      # --- Argument Validation ---
+      # Ensure mutual exclusivity
+      if listen_description and cron_description:
+          context['output'] = f"Error: --listen and --cron are mutually exclusive. {USAGE}"
+          context['messages'] = output_messages
+          exit()
+
+      # --- Command Dispatch ---
+      if listen_description:
+          subcommand = 'listen'
+          description = listen_description
+          executor_func = execute_trigger_command
+      elif cron_description:
+          subcommand = 'cron'
+          description = cron_description
+          executor_func = execute_plan_command
+      else:
+          # Handle case where no arguments were provided
+          context['output'] = f"Error: You must provide either --listen or --cron. {USAGE}"
+          context['messages'] = output_messages
+          exit()
+
+      # --- Execution ---
+      try:
+          result = executor_func(command=description, **context)
+
+          if isinstance(result, dict):
+              output_key = 'Listener' if subcommand == 'listen' else 'Cron job'
+              context['output'] = result.get('output', f'{output_key} created successfully.')
+              context['messages'] = result.get('messages', output_messages)
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error creating {subcommand}: {e}"
+          context['messages'] = output_messages
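
Per the USAGE string above, an invocation supplies exactly one of the two flags; the descriptions here are made up:

    /trigger --listen "watch ~/Downloads and summarize any new pdf"
    /trigger --cron "every weekday at 9am, check for stale branches"
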
@@ -0,0 +1,33 @@
+jinx_name: usage
+description: Display current session token usage and cost
+inputs: []
+steps:
+  - name: show_usage
+    engine: python
+    code: |
+      state = context.get('state')
+      if not state:
+          output = "No state available"
+      else:
+          inp = getattr(state, 'session_input_tokens', 0)
+          out = getattr(state, 'session_output_tokens', 0)
+          cost = getattr(state, 'session_cost_usd', 0.0)
+          turns = getattr(state, 'turn_count', 0)
+          total = inp + out
+
+          def fmt(n):
+              return f"{n/1000:.1f}k" if n >= 1000 else str(n)
+
+          def fmt_cost(c):
+              if c == 0:
+                  return "free (local)"
+              elif c < 0.01:
+                  return f"${c:.4f}"
+              else:
+                  return f"${c:.2f}"
+
+          output = f"Session Usage\n"
+          output += f"Tokens: {fmt(inp)} in / {fmt(out)} out ({fmt(total)} total)\n"
+          output += f"Cost: {fmt_cost(cost)}\n"
+          output += f"Turns: {turns}"
+      context['output'] = output
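
The two formatters above, exercised on sample values:

    fmt(847)     # '847'
    fmt(1500)    # '1.5k'
    fmt(12340)   # '12.3k'
    # fmt_cost: 0 -> 'free (local)', 0.004 -> '$0.0040', 1.5 -> '$1.50'
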
@@ -0,0 +1,144 @@
+jinx_name: "vixynt"
+description: "Generates images from text descriptions or edits existing ones."
+inputs:
+  - prompt
+  - model: null
+  - provider: null
+  - output_name: null
+  - attachments: null
+  - n_images: null
+  - height: null
+  - width: null
+steps:
+  - name: "generate_or_edit_image"
+    engine: "python"
+    code: |
+      import os
+      import base64
+      from io import BytesIO
+      from datetime import datetime
+      from PIL import Image
+      from npcpy.llm_funcs import gen_image
+
+      # Extract inputs from context with proper type conversion
+      image_prompt = str(context.get('prompt', '')).strip()
+      output_name = context.get('output_name')
+      attachments_str = context.get('attachments')
+
+      # Handle integer inputs - they may come as strings or ints
+      try:
+          n_images = int(context.get('n_images', 1))
+      except (ValueError, TypeError):
+          n_images = 1
+
+      try:
+          height = int(context.get('height', 1024))
+      except (ValueError, TypeError):
+          height = 1024
+
+      try:
+          width = int(context.get('width', 1024))
+      except (ValueError, TypeError):
+          width = 1024
+
+      # Get model and provider, prioritizing context, then NPC, then environment variables
+      model = context.get('model')
+      provider = context.get('provider')
+
+      # Use NPC's model/provider as fallback
+      if not model and npc and hasattr(npc, 'model') and npc.model:
+          model = npc.model
+      if not provider and npc and hasattr(npc, 'provider') and npc.provider:
+          provider = npc.provider
+
+      # Fallback to environment variables
+      if not model:
+          model = os.getenv('NPCSH_IMAGE_GEN_MODEL')
+      if not provider:
+          provider = os.getenv('NPCSH_IMAGE_GEN_PROVIDER')
+
+      # Final hardcoded fallbacks if nothing else is set
+      if not model:
+          model = "runwayml/stable-diffusion-v1-5"
+      if not provider:
+          provider = "diffusers"
+
+      # Parse attachments
+      input_images = []
+      if attachments_str and str(attachments_str).strip():
+          input_images = [p.strip() for p in str(attachments_str).split(',')]
+
+      output_messages = context.get('messages', [])
+
+      if not image_prompt:
+          output = "Error: No prompt provided for image generation."
+      else:
+          try:
+              # Generate image(s)
+              result = gen_image(
+                  prompt=image_prompt,
+                  model=model,
+                  provider=provider,
+                  npc=npc,
+                  height=height,
+                  width=width,
+                  n_images=n_images,
+                  input_images=input_images if input_images else None
+              )
+
+              # Ensure we have a list of images
+              if not isinstance(result, list):
+                  images_list = [result] if result is not None else []
+              else:
+                  images_list = result
+
+              saved_files = []
+              html_image_tags = [] # This list will store the raw HTML <img> tags
+
+              for i, image in enumerate(images_list):
+                  if image is None:
+                      continue
+
+                  # Determine output filename
+                  if output_name and str(output_name).strip():
+                      base_name, ext = os.path.splitext(os.path.expanduser(str(output_name)))
+                      if not ext:
+                          ext = ".png"
+                      current_output_file = f"{base_name}_{i}{ext}" if len(images_list) > 1 else f"{base_name}{ext}"
+                  else:
+                      os.makedirs(os.path.expanduser("~/.npcsh/images/"), exist_ok=True)
+                      current_output_file = (
+                          os.path.expanduser("~/.npcsh/images/")
+                          + f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.png"
+                      )
+
+                  # Save image to file
+                  image.save(current_output_file)
+                  saved_files.append(current_output_file)
+
+                  # Convert image to base64 and create an HTML <img> tag
+                  with open(current_output_file, 'rb') as f:
+                      img_data = base64.b64encode(f.read()).decode()
+                  # Using raw HTML <img> tag with data URI
+                  html_image_tags.append(f'<img src="data:image/png;base64,{img_data}" alt="Generated Image {i+1}" style="max-width: 100%; display: block; margin-top: 10px;">')
+
+              if saved_files:
+                  output_text_message = f"Image(s) generated and saved to: {', '.join(saved_files)}"
+                  if input_images:
+                      output_text_message = f"Image(s) edited and saved to: {', '.join(saved_files)}"
+
+                  output = output_text_message # Keep the text message clean
+                  output += f"\n\nThe image files have been saved and are ready to view."
+                  output += "\n\n" + "\n".join(html_image_tags) # Append all HTML <img> tags to the output
+              else:
+                  output = "No images were generated."
+
+          except Exception as e:
+              import traceback
+              traceback.print_exc()
+              output = f"Error {'editing' if input_images else 'generating'} image: {str(e)}"
+
+      context['output'] = output
+      context['messages'] = output_messages
+      context['model'] = model
+      context['provider'] = provider
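
Generated images come back inline as base64 data URIs appended to the text output. A standalone sketch of that step (the filename is illustrative):

    import base64

    with open("out.png", "rb") as f:
        img_data = base64.b64encode(f.read()).decode()
    tag = f'<img src="data:image/png;base64,{img_data}" alt="Generated Image 1">'
    # base64 inflates the payload by roughly a third, so a 1024x1024 PNG can
    # push the jinx's output past a megabyte per image.
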