npcsh 1.1.2__py3-none-any.whl → 1.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +1 -30
- npcsh/alicanto.py +10 -5
- npcsh/build.py +291 -0
- npcsh/corca.py +263 -154
- npcsh/npc.py +127 -46
- npcsh/npcsh.py +1 -1
- npcsh/routes.py +229 -21
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/METADATA +10 -1
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/RECORD +41 -40
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/bash_executer.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/image_generation.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/internet_search.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/kg_search.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/memory_search.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/python_executor.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/screen_cap.jinx +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.2.data → npcsh-1.1.4.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/WHEEL +0 -0
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/top_level.txt +0 -0
npcsh/npc.py
CHANGED

@@ -56,25 +56,41 @@ def load_npc_by_name(npc_name: str = "sibiji", db_path: str = NPCSH_DB_PATH) ->
         if npc_name != "sibiji":
             return load_npc_by_name("sibiji", db_path)
         return None
-
 def main():
     from npcsh.routes import router
 
     parser = argparse.ArgumentParser(
-        description=
-
+        description=(
+            "NPC Command Line Utilities. "
+            "Call a command or provide a prompt for the default NPC."
+        ),
+        usage=(
+            "npc <command> [command_args...] | "
+            "<prompt> [--npc NAME] [--model MODEL] [--provider PROV]"
+        )
     )
     parser.add_argument(
-        "--model",
+        "--model",
+        "-m",
+        help="LLM model to use (overrides NPC/defaults)",
+        type=str,
+        default=None
     )
     parser.add_argument(
-        "--provider",
+        "--provider",
+        "-pr",
+        help="LLM provider to use (overrides NPC/defaults)",
+        type=str,
+        default=None
     )
     parser.add_argument(
-        "-n",
+        "-n",
+        "--npc",
+        help="Name of the NPC to use (default: sibiji)",
+        type=str,
+        default="sibiji"
     )
 
-
     args, all_args = parser.parse_known_args()
     global_model = args.model
     global_provider = args.provider
@@ -93,23 +109,33 @@ def main():
         command_name = '/' + first_arg
         all_args = all_args[1:]
 
-
-
     if is_valid_command:
-        subparsers = parser.add_subparsers(
-
+        subparsers = parser.add_subparsers(
+            dest="command",
+            title="Available Commands",
+            help="Run 'npc <command> --help' for command-specific help"
+        )
 
         for cmd_name, help_text in router.help_info.items():
-            cmd_parser = subparsers.add_parser(
-
-
+            cmd_parser = subparsers.add_parser(
+                cmd_name,
+                help=help_text,
+                add_help=False
+            )
+            cmd_parser.add_argument(
+                'command_args',
+                nargs=argparse.REMAINDER,
+                help='Arguments passed directly to the command handler'
+            )
 
-
         args = parser.parse_args([command_name.lstrip('/')] + all_args)
-        command_args =
+        command_args = (
+            args.command_args
+            if hasattr(args, 'command_args')
+            else []
+        )
         unknown_args = []
     else:
-
         args.command = None
         command_args = []
         unknown_args = all_args
@@ -119,20 +145,26 @@ def main():
     if args.provider is None:
         args.provider = global_provider
 
-
     try:
         command_history, team, forenpc_obj = setup_shell()
     except Exception as e:
-        print(
+        print(
+            f"Warning: Could not set up full npcsh environment: {e}",
+            file=sys.stderr
+        )
         print("Falling back to basic NPC loading...", file=sys.stderr)
         team = None
         forenpc_obj = load_npc_by_name(args.npc, NPCSH_DB_PATH)
 
-
     npc_instance = None
     if team and args.npc in team.npcs:
         npc_instance = team.npcs[args.npc]
-    elif
+    elif (
+        team
+        and args.npc == team.forenpc.name
+        if team.forenpc
+        else False
+    ):
         npc_instance = team.forenpc
     else:
         npc_instance = load_npc_by_name(args.npc, NPCSH_DB_PATH)
@@ -141,41 +173,55 @@ def main():
         print(f"Error: Could not load NPC '{args.npc}'", file=sys.stderr)
         sys.exit(1)
 
-
     if not is_valid_command and all_args:
         first_arg = all_args[0]
 
-
         jinx_found = False
         if team and first_arg in team.jinxs_dict:
             jinx_found = True
-        elif
+        elif (
+            isinstance(npc_instance, NPC)
+            and hasattr(npc_instance, 'jinxs_dict')
+            and first_arg in npc_instance.jinxs_dict
+        ):
             jinx_found = True
 
         if jinx_found:
             is_valid_command = True
             command_name = '/' + first_arg
             all_args = all_args[1:]
+            unknown_args = all_args
 
-
     shell_state = initial_state
     shell_state.npc = npc_instance
     shell_state.team = team
     shell_state.current_path = os.getcwd()
     shell_state.stream_output = NPCSH_STREAM_OUTPUT
 
-
-
-
+    effective_model = (
+        args.model
+        or (
+            npc_instance.model
+            if npc_instance.model
+            else NPCSH_CHAT_MODEL
+        )
+    )
+    effective_provider = (
+        args.provider
+        or (
+            npc_instance.provider
+            if npc_instance.provider
+            else NPCSH_CHAT_PROVIDER
+        )
+    )
 
-
     if args.model:
         npc_instance.model = effective_model
     if args.provider:
         npc_instance.provider = effective_provider
+
     try:
         if is_valid_command:
-
             full_command_str = command_name
             if command_args:
                 full_command_str += " " + " ".join(command_args)
@@ -187,56 +233,91 @@ def main():
                 stdin_input=None,
                 state=shell_state,
                 stream=NPCSH_STREAM_OUTPUT,
-                router
+                router=router
             )
 
-
             if isinstance(result, dict):
                 output = result.get("output") or result.get("response")
                 model_for_stream = result.get('model', effective_model)
-                provider_for_stream = result.get(
+                provider_for_stream = result.get(
+                    'provider',
+                    effective_provider
+                )
 
-                if
-
+                if (
+                    NPCSH_STREAM_OUTPUT
+                    and not isinstance(output, str)
+                ):
+                    print_and_process_stream_with_markdown(
+                        output,
+                        model_for_stream,
+                        provider_for_stream
+                    )
                 elif output is not None:
-
+                    render_markdown(str(output))
             elif result is not None:
                 render_markdown(str(result))
             else:
                 print(f"Command '{command_name}' executed.")
 
         else:
-
             prompt = " ".join(unknown_args)
 
             if not prompt:
-
                 parser.print_help()
                 sys.exit(1)
 
-            print(
+            print(
+                f"Processing prompt: '{prompt}' with NPC: '{args.npc}'..."
+            )
 
-
             shell_state.current_mode = 'chat'
-            updated_state, result = execute_command(
+            updated_state, result = execute_command(
+                prompt,
+                shell_state,
+                router=router,
+                command_history=command_history
+            )
 
-
             if isinstance(result, dict):
                 output = result.get("output")
                 model_for_stream = result.get('model', effective_model)
-                provider_for_stream = result.get(
+                provider_for_stream = result.get(
+                    'provider',
+                    effective_provider
+                )
 
-                if
-
+                if (
+                    hasattr(output, '__iter__')
+                    and not isinstance(output, (str, bytes, dict, list))
+                ):
+                    final_output = print_and_process_stream_with_markdown(
+                        output,
+                        model_for_stream,
+                        provider_for_stream,
+                        show=True
+                    )
                 elif output is not None:
                     render_markdown(str(output))
+                elif (
+                    hasattr(result, '__iter__')
+                    and not isinstance(result, (str, bytes, dict, list))
+                ):
+                    final_output = print_and_process_stream_with_markdown(
+                        result,
+                        effective_model,
+                        effective_provider,
+                        show=True
+                    )
                 elif result is not None:
-
+                    render_markdown(str(result))
+
 
     except Exception as e:
         print(f"Error executing command: {e}", file=sys.stderr)
         traceback.print_exc()
         sys.exit(1)
 
+
 if __name__ == "__main__":
     main()
npcsh/npcsh.py
CHANGED
npcsh/routes.py
CHANGED

@@ -56,7 +56,9 @@ from npcsh._state import (
     NPCSH_SEARCH_PROVIDER,
     CANONICAL_ARGS,
     normalize_and_expand_flags,
-    get_argument_help
+    get_argument_help,
+    get_relevant_memories
+
 )
 from npcsh.corca import enter_corca_mode
 from npcsh.guac import enter_guac_mode
@@ -190,6 +192,41 @@ def get_help_text():
 def safe_get(kwargs, key, default=None):
     return kwargs.get(key, default)
 
+
+@router.route("build", "Build deployment artifacts for NPC team")
+def build_handler(command: str, **kwargs):
+    parts = shlex.split(command)
+
+    target = safe_get(kwargs, 'target', 'flask')
+    output_dir = safe_get(kwargs, 'output', './build')
+    team_path = safe_get(kwargs, 'team', './npc_team')
+
+    if len(parts) > 1:
+        target = parts[1]
+
+    build_config = {
+        'team_path': os.path.abspath(team_path),
+        'output_dir': os.path.abspath(output_dir),
+        'target': target,
+        'port': safe_get(kwargs, 'port', 5337),
+        'cors_origins': safe_get(kwargs, 'cors', None),
+    }
+
+    builders = {
+        'flask': build_flask_server,
+        'docker': build_docker_compose,
+        'cli': build_cli_executable,
+        'static': build_static_site,
+    }
+
+    if target not in builders:
+        return {
+            "output": f"Unknown target: {target}. Available: {list(builders.keys())}",
+            "messages": kwargs.get('messages', [])
+        }
+
+    return builders[target](build_config, **kwargs)
+
 @router.route("breathe", "Condense context on a regular cadence")
 def breathe_handler(command: str, **kwargs):
 
@@ -730,40 +767,211 @@ def sample_handler(command: str, **kwargs):
     except Exception as e:
         traceback.print_exc()
         return {"output": f"Error sampling LLM: {e}", "messages": messages}
-
+
+
+
+@router.route("search", "Execute web search or memory/KG search")
 def search_handler(command: str, **kwargs):
-    """
-    Executes a search command.
-
-
-
-
-
-    """
     messages = safe_get(kwargs, "messages", [])
 
-
     positional_args = safe_get(kwargs, 'positional_args', [])
-
+
+    search_type = None
+    query_parts = []
+
+    i = 0
+    while i < len(positional_args):
+        arg = positional_args[i]
+        if arg in ['-m', '-mem', '--memory']:
+            search_type = 'memory'
+            i += 1
+        elif arg in ['-kg', '--knowledge-graph']:
+            search_type = 'kg'
+            i += 1
+        else:
+            query_parts.append(arg)
+            i += 1
+
+    query = " ".join(query_parts)
 
     if not query:
-        return {
-
-
-
+        return {
+            "output": (
+                "Usage:\n"
+                " /search <query> - Web search\n"
+                " /search -m <query> - Memory search\n"
+                " /search -kg <query> - Knowledge graph search"
+            ),
+            "messages": messages
+        }
+
+    if search_type == 'memory':
+        return search_memories(query, kwargs, messages)
+    elif search_type == 'kg':
+        return search_knowledge_graph(query, kwargs, messages)
+    else:
+        return search_web_default(query, kwargs, messages)
 
+def search_memories(query: str, kwargs: dict, messages: list):
+    command_history = kwargs.get('command_history')
+
+    if not command_history:
+        db_path = safe_get(
+            kwargs,
+            "history_db_path",
+            os.path.expanduser('~/npcsh_history.db')
+        )
+        try:
+            command_history = CommandHistory(db_path)
+        except Exception as e:
+            return {
+                "output": f"Error connecting to history: {e}",
+                "messages": messages
+            }
+
+    state = kwargs.get('state')
+    npc = safe_get(kwargs, 'npc')
+    team = safe_get(kwargs, 'team')
+
+    npc_name = npc.name if isinstance(npc, NPC) else "__none__"
+    team_name = team.name if team else "__none__"
+    current_path = safe_get(kwargs, 'current_path', os.getcwd())
+
+    try:
+        memories = get_relevant_memories(
+            command_history=command_history,
+            npc_name=npc_name,
+            team_name=team_name,
+            path=current_path,
+            query=query,
+            max_memories=10,
+            state=state
+        )
+
+        if not memories:
+            output = f"No memories found for query: '{query}'"
+        else:
+            output = f"Found {len(memories)} memories:\n\n"
+            for i, mem in enumerate(memories, 1):
+                final_mem = (
+                    mem.get('final_memory') or
+                    mem.get('initial_memory')
+                )
+                timestamp = mem.get('timestamp', 'unknown')
+                output += f"{i}. [{timestamp}] {final_mem}\n"
+
+        return {"output": output, "messages": messages}
+
+    except Exception as e:
+        import traceback
+        traceback.print_exc()
+        return {
+            "output": f"Error searching memories: {e}",
+            "messages": messages
+        }
 
+def search_knowledge_graph(query: str, kwargs: dict, messages: list):
+    command_history = kwargs.get('command_history')
+
+    if not command_history:
+        db_path = safe_get(
+            kwargs,
+            "history_db_path",
+            os.path.expanduser('~/npcsh_history.db')
+        )
+        try:
+            command_history = CommandHistory(db_path)
+        except Exception as e:
+            return {
+                "output": f"Error connecting to history: {e}",
+                "messages": messages
+            }
+
+    npc = safe_get(kwargs, 'npc')
+    team = safe_get(kwargs, 'team')
+
+    npc_name = npc.name if isinstance(npc, NPC) else "__none__"
+    team_name = team.name if team else "__none__"
+    current_path = safe_get(kwargs, 'current_path', os.getcwd())
+
+    try:
+        engine = command_history.engine
+        kg = load_kg_from_db(
+            engine,
+            team_name,
+            npc_name,
+            current_path
+        )
+
+        if not kg or not kg.get('facts'):
+            return {
+                "output": (
+                    f"No knowledge graph found for current scope.\n"
+                    f"Scope: Team='{team_name}', "
+                    f"NPC='{npc_name}', Path='{current_path}'"
+                ),
+                "messages": messages
+            }
+
+        query_lower = query.lower()
+        matching_facts = []
+        matching_concepts = []
+
+        for fact in kg.get('facts', []):
+            statement = fact.get('statement', '').lower()
+            if query_lower in statement:
+                matching_facts.append(fact)
+
+        for concept in kg.get('concepts', []):
+            name = concept.get('name', '').lower()
+            desc = concept.get('description', '').lower()
+            if query_lower in name or query_lower in desc:
+                matching_concepts.append(concept)
+
+        output = f"Knowledge Graph Search Results for '{query}':\n\n"
+
+        if matching_facts:
+            output += f"## Facts ({len(matching_facts)}):\n"
+            for i, fact in enumerate(matching_facts, 1):
+                output += f"{i}. {fact.get('statement')}\n"
+            output += "\n"
+
+        if matching_concepts:
+            output += f"## Concepts ({len(matching_concepts)}):\n"
+            for i, concept in enumerate(matching_concepts, 1):
+                name = concept.get('name')
+                desc = concept.get('description', '')
+                output += f"{i}. {name}: {desc}\n"
+
+        if not matching_facts and not matching_concepts:
+            output += "No matching facts or concepts found."
+
+        return {"output": output, "messages": messages}
+
+    except Exception as e:
+        import traceback
+        traceback.print_exc()
+        return {
+            "output": f"Error searching KG: {e}",
+            "messages": messages
+        }
+
+def search_web_default(query: str, kwargs: dict, messages: list):
+    search_provider = safe_get(kwargs, 'sprovider', NPCSH_SEARCH_PROVIDER)
+    render_markdown(f'- Searching {search_provider} for "{query}"')
 
-    if not query:
-        return {"output": "Usage: /search <query>", "messages": messages}
-    search_provider = safe_get(kwargs, 'search_provider', NPCSH_SEARCH_PROVIDER)
     try:
         search_results = search_web(query, provider=search_provider)
-        output =
+        output = (
+            "\n".join([f"- {res}" for res in search_results])
+            if search_results
+            else "No results found."
+        )
     except Exception as e:
+        import traceback
         traceback.print_exc()
         output = f"Error during web search: {e}"
+
     return {"output": output, "messages": messages}
 
 
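The rewritten `search_handler` above dispatches on flags parsed from its positional arguments: `-m`/`-mem`/`--memory` routes to memory search, `-kg`/`--knowledge-graph` routes to knowledge-graph search, and everything else falls through to the web search. The invocation forms, taken from the handler's own usage text (queries are illustrative):

``` bash
/search <query>        # web search via NPCSH_SEARCH_PROVIDER
/search -m <query>     # memory search
/search -kg <query>    # knowledge graph search
```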
{npcsh-1.1.2.dist-info → npcsh-1.1.4.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.1.2
+Version: 1.1.4
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
@@ -194,6 +194,14 @@ and you will enter the NPC shell. Additionally, the pip installation includes th
 /corca --mcp-server-path /path.to.server.py
 ```
 
+- **Build an NPC Team**:
+
+``` bash
+npc build flask --output ./dist --port 5337
+npc build docker --output ./deploy
+npc build cli --output ./bin
+npc build static --api_url https://api.example.com
+```
 
 # NPC Data Layer
 
@@ -217,6 +225,7 @@ Importantly, users can switch easily between the NPCs they are chatting with by
 - activated by invoking `/<command> ...` in `npcsh`, macros can be called in bash or through the `npc` CLI. In our examples, we provide both `npcsh` calls as well as bash calls with the `npc` cli where relevant. For converting any `/<command>` in `npcsh` to a bash version, replace the `/` with `npc ` and the macro command will be invoked as a positional argument. Some, like breathe, flush,
 
 - `/alicanto` - Conduct deep research with multiple perspectives, identifying gold insights and cliff warnings. Usage: `/alicanto 'query to be researched' --num-npcs <int> --depth <int>`
+- `/build` - Builds the current npc team to an executable format. Usage: `/build <output[flask,docker,cli,static]> --options`
 - `/brainblast` - Execute an advanced chunked search on command history. Usage: `/brainblast 'query' --top_k 10`
 - `/breathe` - Condense context on a regular cadence. Usage: `/breathe -p <provider: NPCSH_CHAT_PROVIDER> -m <model: NPCSH_CHAT_MODEL>`
 - `/compile` - Compile NPC profiles. Usage: `/compile <path_to_npc> `
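Per the macro note in the README excerpt above, any `/<command>` in `npcsh` has a bash equivalent obtained by replacing the leading `/` with `npc `. For the new build macro that pairing would look like this (output paths mirror the README example):

``` bash
# inside the npcsh shell
/build flask --output ./dist --port 5337

# equivalent call through the npc CLI from bash
npc build flask --output ./dist --port 5337
```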
|