npcsh 0.1.2__py3-none-any.whl → 1.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- npcsh/_state.py +3508 -0
- npcsh/alicanto.py +65 -0
- npcsh/build.py +291 -0
- npcsh/completion.py +206 -0
- npcsh/config.py +163 -0
- npcsh/corca.py +50 -0
- npcsh/execution.py +185 -0
- npcsh/guac.py +46 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_server.py +299 -0
- npcsh/npc.py +323 -0
- npcsh/npc_team/alicanto.npc +2 -0
- npcsh/npc_team/alicanto.png +0 -0
- npcsh/npc_team/corca.npc +12 -0
- npcsh/npc_team/corca.png +0 -0
- npcsh/npc_team/corca_example.png +0 -0
- npcsh/npc_team/foreman.npc +7 -0
- npcsh/npc_team/frederic.npc +6 -0
- npcsh/npc_team/frederic4.png +0 -0
- npcsh/npc_team/guac.png +0 -0
- npcsh/npc_team/jinxs/code/python.jinx +11 -0
- npcsh/npc_team/jinxs/code/sh.jinx +34 -0
- npcsh/npc_team/jinxs/code/sql.jinx +16 -0
- npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
- npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
- npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
- npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
- npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
- npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
- npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
- npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
- npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
- npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
- npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
- npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
- npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
- npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
- npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
- npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
- npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
- npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
- npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
- npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
- npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
- npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
- npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
- npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
- npcsh/npc_team/jinxs/utils/search.jinx +130 -0
- npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
- npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
- npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
- npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
- npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
- npcsh/npc_team/kadiefa.npc +3 -0
- npcsh/npc_team/kadiefa.png +0 -0
- npcsh/npc_team/npcsh.ctx +18 -0
- npcsh/npc_team/npcsh_sibiji.png +0 -0
- npcsh/npc_team/plonk.npc +2 -0
- npcsh/npc_team/plonk.png +0 -0
- npcsh/npc_team/plonkjr.npc +2 -0
- npcsh/npc_team/plonkjr.png +0 -0
- npcsh/npc_team/sibiji.npc +3 -0
- npcsh/npc_team/sibiji.png +0 -0
- npcsh/npc_team/spool.png +0 -0
- npcsh/npc_team/yap.png +0 -0
- npcsh/npcsh.py +296 -112
- npcsh/parsing.py +118 -0
- npcsh/plonk.py +54 -0
- npcsh/pti.py +54 -0
- npcsh/routes.py +139 -0
- npcsh/spool.py +48 -0
- npcsh/ui.py +199 -0
- npcsh/wander.py +62 -0
- npcsh/yap.py +50 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
- npcsh-1.1.13.dist-info/METADATA +522 -0
- npcsh-1.1.13.dist-info/RECORD +135 -0
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
- npcsh-1.1.13.dist-info/entry_points.txt +9 -0
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
- npcsh/command_history.py +0 -81
- npcsh/helpers.py +0 -36
- npcsh/llm_funcs.py +0 -295
- npcsh/main.py +0 -5
- npcsh/modes.py +0 -343
- npcsh/npc_compiler.py +0 -124
- npcsh-0.1.2.dist-info/METADATA +0 -99
- npcsh-0.1.2.dist-info/RECORD +0 -14
- npcsh-0.1.2.dist-info/entry_points.txt +0 -2
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
npcsh/npc.py
ADDED
@@ -0,0 +1,323 @@
+import argparse
+import sys
+import os
+import traceback
+from typing import Optional
+
+from npcsh._state import (
+    NPCSH_CHAT_MODEL,
+    NPCSH_CHAT_PROVIDER,
+    NPCSH_API_URL,
+    NPCSH_DB_PATH,
+    NPCSH_STREAM_OUTPUT,
+    initial_state,
+)
+from npcpy.npc_sysenv import (
+    print_and_process_stream_with_markdown,
+    render_markdown,
+)
+from npcpy.npc_compiler import NPC, Team
+from npcsh.routes import router
+from npcpy.llm_funcs import check_llm_command
+from sqlalchemy import create_engine
+
+from npcsh._state import (
+    setup_shell,
+    execute_slash_command,
+    execute_command,
+)
+
+def load_npc_by_name(npc_name: str = "sibiji", db_path: str = NPCSH_DB_PATH) -> Optional[NPC]:
+    """Load NPC by name, with fallback logic matching npcsh"""
+    if not npc_name:
+        npc_name = "sibiji"
+
+    project_npc_path = os.path.abspath(f"./npc_team/{npc_name}.npc")
+    global_npc_path = os.path.expanduser(f"~/.npcsh/npc_team/{npc_name}.npc")
+
+    chosen_path = None
+    if os.path.exists(project_npc_path):
+        chosen_path = project_npc_path
+    elif os.path.exists(global_npc_path):
+        chosen_path = global_npc_path
+    elif os.path.exists(f"npcs/{npc_name}.npc"):
+        chosen_path = f"npcs/{npc_name}.npc"
+
+    if chosen_path:
+        try:
+            db_conn = create_engine(f'sqlite:///{NPCSH_DB_PATH}')
+            npc = NPC(file=chosen_path, db_conn=db_conn)
+            return npc
+        except Exception as e:
+            print(f"Warning: Failed to load NPC '{npc_name}' from {chosen_path}: {e}", file=sys.stderr)
+            return None
+    else:
+        print(f"Warning: NPC file for '{npc_name}' not found in project or global paths.", file=sys.stderr)
+        if npc_name != "sibiji":
+            return load_npc_by_name("sibiji", db_path)
+        return None
+def main():
+    from npcsh.routes import router
+
+    parser = argparse.ArgumentParser(
+        description=(
+            "NPC Command Line Utilities. "
+            "Call a command or provide a prompt for the default NPC."
+        ),
+        usage=(
+            "npc <command> [command_args...] | "
+            "<prompt> [--npc NAME] [--model MODEL] [--provider PROV]"
+        )
+    )
+    parser.add_argument(
+        "--model",
+        "-m",
+        help="LLM model to use (overrides NPC/defaults)",
+        type=str,
+        default=None
+    )
+    parser.add_argument(
+        "--provider",
+        "-pr",
+        help="LLM provider to use (overrides NPC/defaults)",
+        type=str,
+        default=None
+    )
+    parser.add_argument(
+        "-n",
+        "--npc",
+        help="Name of the NPC to use (default: sibiji)",
+        type=str,
+        default="sibiji"
+    )
+
+    args, all_args = parser.parse_known_args()
+    global_model = args.model
+    global_provider = args.provider
+
+    is_valid_command = False
+    command_name = None
+
+    if all_args:
+        first_arg = all_args[0]
+        if first_arg.startswith('/'):
+            is_valid_command = True
+            command_name = first_arg
+            all_args = all_args[1:]
+        elif first_arg in router.get_commands():
+            is_valid_command = True
+            command_name = '/' + first_arg
+            all_args = all_args[1:]
+
+    if is_valid_command:
+        subparsers = parser.add_subparsers(
+            dest="command",
+            title="Available Commands",
+            help="Run 'npc <command> --help' for command-specific help"
+        )
+
+        for cmd_name, help_text in router.help_info.items():
+            cmd_parser = subparsers.add_parser(
+                cmd_name,
+                help=help_text,
+                add_help=False
+            )
+            cmd_parser.add_argument(
+                'command_args',
+                nargs=argparse.REMAINDER,
+                help='Arguments passed directly to the command handler'
+            )
+
+        args = parser.parse_args([command_name.lstrip('/')] + all_args)
+        command_args = (
+            args.command_args
+            if hasattr(args, 'command_args')
+            else []
+        )
+        unknown_args = []
+    else:
+        args.command = None
+        command_args = []
+        unknown_args = all_args
+
+    if args.model is None:
+        args.model = global_model
+    if args.provider is None:
+        args.provider = global_provider
+
+    try:
+        command_history, team, forenpc_obj = setup_shell()
+    except Exception as e:
+        print(
+            f"Warning: Could not set up full npcsh environment: {e}",
+            file=sys.stderr
+        )
+        print("Falling back to basic NPC loading...", file=sys.stderr)
+        team = None
+        forenpc_obj = load_npc_by_name(args.npc, NPCSH_DB_PATH)
+
+    npc_instance = None
+    if team and args.npc in team.npcs:
+        npc_instance = team.npcs[args.npc]
+    elif (
+        team
+        and args.npc == team.forenpc.name
+        if team.forenpc
+        else False
+    ):
+        npc_instance = team.forenpc
+    else:
+        npc_instance = load_npc_by_name(args.npc, NPCSH_DB_PATH)
+
+    if not npc_instance:
+        print(f"Error: Could not load NPC '{args.npc}'", file=sys.stderr)
+        sys.exit(1)
+
+    if not is_valid_command and all_args:
+        first_arg = all_args[0]
+
+        jinx_found = False
+        if team and first_arg in team.jinxs_dict:
+            jinx_found = True
+        elif (
+            isinstance(npc_instance, NPC)
+            and hasattr(npc_instance, 'jinxs_dict')
+            and first_arg in npc_instance.jinxs_dict
+        ):
+            jinx_found = True
+
+        if jinx_found:
+            is_valid_command = True
+            command_name = '/' + first_arg
+            all_args = all_args[1:]
+            unknown_args = all_args
+
+    shell_state = initial_state
+    shell_state.npc = npc_instance
+    shell_state.team = team
+    shell_state.current_path = os.getcwd()
+    shell_state.stream_output = NPCSH_STREAM_OUTPUT
+
+    effective_model = (
+        args.model
+        or (
+            npc_instance.model
+            if npc_instance.model
+            else NPCSH_CHAT_MODEL
+        )
+    )
+    effective_provider = (
+        args.provider
+        or (
+            npc_instance.provider
+            if npc_instance.provider
+            else NPCSH_CHAT_PROVIDER
+        )
+    )
+
+    if args.model:
+        npc_instance.model = effective_model
+    if args.provider:
+        npc_instance.provider = effective_provider
+
+    try:
+        if is_valid_command:
+            full_command_str = command_name
+            if command_args:
+                full_command_str += " " + " ".join(command_args)
+
+            print(f"Executing command: {full_command_str}")
+
+            updated_state, result = execute_slash_command(
+                full_command_str,
+                stdin_input=None,
+                state=shell_state,
+                stream=NPCSH_STREAM_OUTPUT,
+                router=router
+            )
+
+            if isinstance(result, dict):
+                output = result.get("output") or result.get("response")
+                model_for_stream = result.get('model', effective_model)
+                provider_for_stream = result.get(
+                    'provider',
+                    effective_provider
+                )
+
+                if (
+                    NPCSH_STREAM_OUTPUT
+                    and not isinstance(output, str)
+                ):
+                    print_and_process_stream_with_markdown(
+                        output,
+                        model_for_stream,
+                        provider_for_stream
+                    )
+                elif output is not None:
+                    render_markdown(str(output))
+            elif result is not None:
+                render_markdown(str(result))
+            else:
+                print(f"Command '{command_name}' executed.")
+
+        else:
+            prompt = " ".join(unknown_args)
+
+            if not prompt:
+                parser.print_help()
+                sys.exit(1)
+
+            print(
+                f"Processing prompt: '{prompt}' with NPC: '{args.npc}'..."
+            )
+
+            shell_state.current_mode = 'chat'
+            updated_state, result = execute_command(
+                prompt,
+                shell_state,
+                router=router,
+                command_history=command_history
+            )
+
+            if isinstance(result, dict):
+                output = result.get("output")
+                model_for_stream = result.get('model', effective_model)
+                provider_for_stream = result.get(
+                    'provider',
+                    effective_provider
+                )
+
+                if (
+                    hasattr(output, '__iter__')
+                    and not isinstance(output, (str, bytes, dict, list))
+                ):
+                    final_output = print_and_process_stream_with_markdown(
+                        output,
+                        model_for_stream,
+                        provider_for_stream,
+                        show=True
+                    )
+                elif output is not None:
+                    render_markdown(str(output))
+            elif (
+                hasattr(result, '__iter__')
+                and not isinstance(result, (str, bytes, dict, list))
+            ):
+                final_output = print_and_process_stream_with_markdown(
+                    result,
+                    effective_model,
+                    effective_provider,
+                    show=True
+                )
+            elif result is not None:
+                render_markdown(str(result))
+
+
+    except Exception as e:
+        print(f"Error executing command: {e}", file=sys.stderr)
+        traceback.print_exc()
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
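A note on the dispatch above: main() resolves the effective model and provider by precedence, with an explicit CLI flag winning over the NPC file's own setting, which in turn wins over the NPCSH_CHAT_MODEL/NPCSH_CHAT_PROVIDER globals. A minimal sketch of that resolution, using hypothetical values (the helper name resolve is ours, not the package's):

def resolve(flag_value, npc_value, global_default):
    # First truthy value wins, mirroring the `or` chains in main().
    return flag_value or npc_value or global_default

# Hypothetical model names, for illustration only.
assert resolve(None, "llama3.2", "gpt-4o-mini") == "llama3.2"
assert resolve("gpt-4o", "llama3.2", "gpt-4o-mini") == "gpt-4o"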
npcsh/npc_team/alicanto.png
ADDED
Binary file
npcsh/npc_team/corca.npc
ADDED
@@ -0,0 +1,12 @@
+name: corca
+primary_directive: |
+  You are corca, a distinguished member of the NPC team.
+  Your expertise is in the area of software development and
+  you have a knack for thinking through problems carefully.
+  You favor solutions that prioritize simplicity and clarity and
+  ought to always consider how some suggestion may increase rather than reduce tech debt
+  unnecessarily. Now, the key is in this last term, "unnecessarily".
+  You must distinguish carefully and when in doubt, opt to ask for further
+  information or clarification with concrete clear options that make it
+  easy for a user to choose.
+
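The .npc files in this diff are YAML profiles consumed by npcpy.npc_compiler.NPC, the same constructor load_npc_by_name calls in npc.py above. A minimal loading sketch (the database path is illustrative, and it assumes npcpy is installed with a project-local npc_team/ directory):

from sqlalchemy import create_engine
from npcpy.npc_compiler import NPC

# Same call shape as load_npc_by_name in npc.py; db path is illustrative.
db_conn = create_engine("sqlite:////tmp/npcsh_history.db")
corca = NPC(file="./npc_team/corca.npc", db_conn=db_conn)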
npcsh/npc_team/corca.png
ADDED
Binary file
npcsh/npc_team/corca_example.png
ADDED
Binary file
npcsh/npc_team/foreman.npc
ADDED
@@ -0,0 +1,7 @@
+name: foreman
+primary_directive: You are the foreman of an NPC team. It is your duty
+  to delegate tasks to your team members or to other specialized teams
+  in order to complete the project. You are responsible for the
+  completion of the project and the safety of your team members.
+model: gpt-4o-mini
+provider: openai
npcsh/npc_team/frederic.npc
ADDED
@@ -0,0 +1,6 @@
+name: frederic
+primary_directive: |
+  You are frederic the polar bear. Your job is help users think through problems and
+  to provide straightforward ways forward on problems. Cut through the ice
+  to get to what matters and keep things simple. You are to respond in a
+  witty tone like richard feynman but with the romantic tambor of Frederic Chopin.
npcsh/npc_team/frederic4.png
ADDED
Binary file
npcsh/npc_team/guac.png
ADDED
Binary file
npcsh/npc_team/jinxs/code/python.jinx
ADDED
@@ -0,0 +1,11 @@
+jinx_name: python
+description: Execute scripts with python. You must set the ultimate result as the "output"
+  variable. It MUST be a string.
+  Do not add unnecessary print statements.
+  This jinx is intended for executing code snippets that are not
+  accomplished by other jinxes. Use it only when the others are insufficient.
+inputs:
+  - code
+steps:
+  - code: '{{code}}'
+    engine: python
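The description above states the contract for this jinx: whatever code is templated into {{code}} must leave a string bound to output. A minimal payload obeying that contract might look like the following (illustrative only; it assumes the runner executes the snippet and reads output back, as the description says):

# Compute something, then expose it as the required string `output`.
result = sum(range(10))
output = str(result)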
npcsh/npc_team/jinxs/code/sh.jinx
ADDED
@@ -0,0 +1,34 @@
+jinx_name: sh
+description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
+inputs:
+  - bash_command
+steps:
+  - name: execute_bash
+    engine: python
+    code: |
+      import subprocess
+      import os
+
+      cmd = '{{ bash_command }}'
+      output = ""
+
+      process = subprocess.Popen(
+          cmd,
+          shell=True,
+          stdout=subprocess.PIPE,
+          stderr=subprocess.PIPE
+      )
+      stdout, stderr = process.communicate()
+
+      # Only show debug output if NPCSH_DEBUG is set
+      if os.environ.get("NPCSH_DEBUG") == "1":
+          import sys
+          print(f"[sh] cmd: {cmd}", file=sys.stderr)
+          print(f"[sh] stdout: {stdout.decode('utf-8', errors='ignore')[:200]}", file=sys.stderr)
+
+      if stderr:
+          output = f"Error: {stderr.decode('utf-8')}"
+      else:
+          output = stdout.decode('utf-8')
+
+
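For reference, the Popen/communicate pattern in the step above is behaviorally close to the one-call subprocess.run form; a sketch, not what the jinx ships:

import subprocess

proc = subprocess.run("echo hello", shell=True, capture_output=True)
# Mirror the jinx's rule: any stderr output wins and is reported as an error.
output = (
    f"Error: {proc.stderr.decode('utf-8')}"
    if proc.stderr
    else proc.stdout.decode('utf-8')
)

Note also that the template wraps {{ bash_command }} in single quotes, so a command containing a single quote would break the generated snippet.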
npcsh/npc_team/jinxs/code/sql.jinx
ADDED
@@ -0,0 +1,16 @@
+jinx_name: sql
+description: Execute queries on the ~/npcsh_history.db to pull data. The database
+  contains only information about conversations and other user-provided data. It does
+  not store any information about individual files. Avoid using percent signs unless absolutely necessary.
+inputs:
+  - sql_query
+steps:
+  - engine: python
+    code: |
+      import pandas as pd
+      query = "{{ sql_query }}"
+      try:
+          df = pd.read_sql_query(query, npc.db_conn)
+      except Exception as e:
+          df = pd.DataFrame({'Error': [str(e)]})
+      output = df.to_string()
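The step above reads straight into a DataFrame via pandas.read_sql_query on the NPC's db_conn. A self-contained sketch of that read path against a throwaway in-memory database (the conversation_history table name is hypothetical, not confirmed by this diff):

import sqlite3

import pandas as pd

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE conversation_history (role TEXT, content TEXT)")
conn.execute("INSERT INTO conversation_history VALUES ('user', 'hello')")

df = pd.read_sql_query("SELECT role, content FROM conversation_history", conn)
output = df.to_string()  # same final binding the jinx produces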
npcsh/npc_team/jinxs/modes/alicanto.jinx
ADDED
@@ -0,0 +1,194 @@
+jinx_name: alicanto
+description: Deep research mode - multi-perspective exploration with gold insights and cliff warnings
+npc: forenpc
+inputs:
+  - query: null
+  - num_npcs: 5
+  - depth: 3
+  - model: null
+  - provider: null
+  - max_steps: 20
+  - skip_research: true
+  - exploration: 0.3
+  - creativity: 0.5
+  - format: report
+
+steps:
+  - name: alicanto_research
+    engine: python
+    code: |
+      import os
+      from termcolor import colored
+
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.data.web import search_web
+      from npcpy.npc_compiler import NPC
+
+      npc = context.get('npc')
+      team = context.get('team')
+      messages = context.get('messages', [])
+
+      query = context.get('query')
+      num_npcs = int(context.get('num_npcs', 5))
+      depth = int(context.get('depth', 3))
+      max_steps = int(context.get('max_steps', 20))
+      skip_research = context.get('skip_research', True)
+      exploration = float(context.get('exploration', 0.3))
+      creativity = float(context.get('creativity', 0.5))
+      output_format = context.get('format', 'report')
+
+      model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
+      provider = context.get('provider') or (npc.provider if npc else 'gemini')
+
+      if not query:
+          context['output'] = """Usage: /alicanto <research query>
+
+      Options:
+        --num-npcs N      Number of research perspectives (default: 5)
+        --depth N         Research depth (default: 3)
+        --max-steps N     Maximum research steps (default: 20)
+        --exploration F   Exploration factor 0-1 (default: 0.3)
+        --creativity F    Creativity factor 0-1 (default: 0.5)
+        --format FORMAT   Output: report|summary|full (default: report)
+
+      Example: /alicanto What are the latest advances in quantum computing?"""
+          context['messages'] = messages
+          exit()
+
+      print(f"""
+      █████╗ ██╗ ██╗ ██████╗ █████╗ ███╗ ██╗████████╗ ██████╗
+      ██╔══██╗██║ ██║██╔════╝██╔══██╗████╗ ██║╚══██╔══╝██╔═══██╗
+      ███████║██║ ██║██║ ███████║██╔██╗ ██║ ██║ ██║ ██║
+      ██╔══██║██║ ██║██║ ██╔══██║██║╚██╗██║ ██║ ██║ ██║
+      ██║ ██║███████╗██║╚██████╗██║ ██║██║ ╚████║ ██║ ╚██████╔╝
+      ╚═╝ ╚═╝╚══════╝╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═════╝
+
+      Deep Research Mode
+      Query: {query}
+      Perspectives: {num_npcs} | Depth: {depth} | Max Steps: {max_steps}
+      """)
+
+      # Generate research perspectives
+      perspectives_prompt = f"""Generate {num_npcs} distinct research perspectives for investigating: "{query}"
+
+      For each perspective, provide:
+      1. Name (a descriptive title)
+      2. Approach (how this perspective would investigate)
+      3. Key questions to explore
+
+      Return as a numbered list."""
+
+      print(colored("Generating research perspectives...", "cyan"))
+      resp = get_llm_response(
+          perspectives_prompt,
+          model=model,
+          provider=provider,
+          npc=npc
+      )
+      perspectives = str(resp.get('response', ''))
+      print(perspectives)
+
+      # Conduct web research if not skipped
+      research_findings = ""
+      if not skip_research:
+          print(colored("\nConducting web research...", "cyan"))
+          try:
+              search_results = search_web(query, n_results=5)
+              if search_results:
+                  research_findings = "\n\nWeb Research Findings:\n"
+                  for i, result in enumerate(search_results[:5], 1):
+                      title = result.get('title', 'No title')
+                      snippet = result.get('snippet', result.get('body', ''))[:200]
+                      research_findings += f"\n{i}. {title}\n   {snippet}...\n"
+                  print(colored(f"Found {len(search_results)} sources", "green"))
+          except Exception as e:
+              print(colored(f"Web search error: {e}", "yellow"))
+
+      # Multi-step exploration from each perspective
+      all_insights = []
+      gold_insights = []  # Key valuable findings
+      cliff_warnings = []  # Potential pitfalls or caveats
+
+      for step in range(min(depth, max_steps)):
+          print(colored(f"\n--- Research Depth {step + 1}/{depth} ---", "cyan"))
+
+          explore_prompt = f"""Research query: "{query}"
+
+      Perspectives generated:
+      {perspectives}
+
+      {research_findings}
+
+      Previous insights: {all_insights[-3:] if all_insights else 'None yet'}
+
+      For depth level {step + 1}:
+      1. Explore deeper implications from each perspective
+      2. Identify GOLD insights (valuable, non-obvious findings) - mark with [GOLD]
+      3. Identify CLIFF warnings (pitfalls, caveats, risks) - mark with [CLIFF]
+      4. Connect insights across perspectives
+
+      Exploration factor: {exploration} (higher = more diverse exploration)
+      Creativity factor: {creativity} (higher = more novel connections)"""
+
+          resp = get_llm_response(
+              explore_prompt,
+              model=model,
+              provider=provider,
+              temperature=creativity,
+              npc=npc
+          )
+
+          step_insights = str(resp.get('response', ''))
+          print(step_insights)
+
+          # Extract gold and cliff markers
+          if '[GOLD]' in step_insights:
+              gold_insights.extend([line.strip() for line in step_insights.split('\n') if '[GOLD]' in line])
+          if '[CLIFF]' in step_insights:
+              cliff_warnings.extend([line.strip() for line in step_insights.split('\n') if '[CLIFF]' in line])
+
+          all_insights.append(step_insights)
+
+      # Generate final synthesis
+      print(colored("\n--- Synthesizing Research ---", "cyan"))
+
+      synthesis_prompt = f"""Synthesize research on: "{query}"
+
+      All insights gathered:
+      {chr(10).join(all_insights)}
+
+      Gold insights identified:
+      {chr(10).join(gold_insights) if gold_insights else 'None explicitly marked'}
+
+      Cliff warnings identified:
+      {chr(10).join(cliff_warnings) if cliff_warnings else 'None explicitly marked'}
+
+      Generate a {output_format} that:
+      1. Summarizes key findings
+      2. Highlights the most valuable insights (gold)
+      3. Notes important caveats and risks (cliffs)
+      4. Provides actionable conclusions"""
+
+      resp = get_llm_response(
+          synthesis_prompt,
+          model=model,
+          provider=provider,
+          npc=npc
+      )
+
+      final_report = str(resp.get('response', ''))
+      print("\n" + "="*60)
+      print(colored("ALICANTO RESEARCH REPORT", "green", attrs=['bold']))
+      print("="*60)
+      print(final_report)
+
+      context['output'] = final_report
+      context['messages'] = messages
+      context['alicanto_result'] = {
+          'query': query,
+          'perspectives': perspectives,
+          'insights': all_insights,
+          'gold': gold_insights,
+          'cliffs': cliff_warnings,
+          'report': final_report
+      }
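The marker-extraction step in the jinx above filters response lines by the literal [GOLD] and [CLIFF] tags. Restated as a standalone helper (the function name is ours, for illustration):

def extract_marked_lines(text: str, tag: str) -> list:
    # Keep any line carrying the tag, trimmed, in original order.
    return [line.strip() for line in text.split('\n') if tag in line]

sample = "finding\n[GOLD] caching wins here\n[CLIFF] stale reads possible"
assert extract_marked_lines(sample, '[GOLD]') == ['[GOLD] caching wins here']
assert extract_marked_lines(sample, '[CLIFF]') == ['[CLIFF] stale reads possible']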