npcsh 0.3.29.tar.gz → 0.3.31.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcsh-0.3.29/npcsh.egg-info → npcsh-0.3.31}/PKG-INFO +12 -21
- {npcsh-0.3.29 → npcsh-0.3.31}/README.md +11 -20
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/cli.py +30 -10
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/image.py +0 -2
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/llm_funcs.py +32 -23
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_compiler.py +35 -4
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/npcsh.ctx +8 -2
- npcsh-0.3.31/npcsh/npc_team/tools/bash_executer.tool +32 -0
- npcsh-0.3.31/npcsh/npc_team/tools/code_executor.tool +16 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/tools/generic_search.tool +9 -1
- npcsh-0.3.31/npcsh/npc_team/tools/npcsh_executor.tool +9 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/tools/sql_executor.tool +2 -2
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/search.py +15 -8
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/shell.py +103 -89
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/shell_helpers.py +75 -52
- {npcsh-0.3.29 → npcsh-0.3.31/npcsh.egg-info}/PKG-INFO +12 -21
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh.egg-info/SOURCES.txt +3 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/setup.py +1 -1
- {npcsh-0.3.29 → npcsh-0.3.31}/LICENSE +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/MANIFEST.in +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/__init__.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/audio.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/command_history.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/conversation.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/data_models.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/dataframes.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/embeddings.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/helpers.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/image_gen.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/knowledge_graph.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/load_data.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/main.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/model_runner.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_sysenv.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/assembly_lines/test_pipeline.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/corca.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/templates/analytics/celona.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/templates/hr_support/raone.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/templates/humanities/eriane.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/templates/it_support/lineru.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/templates/marketing/slean.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/templates/sales/turnic.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/templates/software/welxor.npc +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/tools/calculator.tool +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/tools/image_generation.tool +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/tools/local_search.tool +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/npc_team/tools/screen_cap.tool +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/plonk.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/response.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/serve.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/stream.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh/video.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh.egg-info/dependency_links.txt +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh.egg-info/entry_points.txt +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh.egg-info/requires.txt +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/npcsh.egg-info/top_level.txt +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/setup.cfg +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_chromadb.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_embedding_check.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_embedding_methods.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_helpers.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_knowledge_graph_rag.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_llm_funcs.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_networkx_vis.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_npc_compiler.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_npcsh.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_npcteam.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_shell_helpers.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_tars.py +0 -0
- {npcsh-0.3.29 → npcsh-0.3.31}/tests/test_tool_use.py +0 -0
**PKG-INFO**

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 0.3.29
+Version: 0.3.31
 Summary: npcsh is a command line tool for integrating LLMs into everyday workflows and for orchestrating teams of NPCs.
 Home-page: https://github.com/cagostino/npcsh
 Author: Christopher Agostino
@@ -112,22 +112,23 @@ Interested to stay in the loop and to hear the latest and greatest about `npcsh`
 ## TLDR Cheat Sheet
 Users can take advantage of `npcsh` through its custom shell or through a command-line interface (CLI) tool. Below is a cheat sheet that shows how to use `npcsh` commands in both the shell and the CLI. For the npcsh commands to work, one must activate `npcsh` by typing it in a shell.
 
+
+
+
 | Task | npc CLI | npcsh |
 |----------|----------|----------|
 | Ask a generic question | npc 'prompt' | 'prompt' |
 | Compile an NPC | npc compile /path/to/npc.npc | /compile /path/to/npc.npc |
 | Computer use | npc plonk -n 'npc_name' -sp 'task for plonk to carry out '| /plonk -n 'npc_name' -sp 'task for plonk to carry out ' |
 | Conjure an NPC team from context and templates | npc init -t 'template1, template2' -ctx 'context' | /conjure -t 'template1, 'template2' -ctx 'context' |
-| Enter a chat with an NPC (NPC needs to be compiled first) | npc npc_name | /npc_name |
+| Enter a chat with an NPC (NPC needs to be compiled first) | npc chat -n npc_name | /spool npc=<npc_name> |
 | Generate image | npc vixynt 'prompt' | /vixynt prompt |
 | Get a sample LLM response | npc sample 'prompt' | /sample prompt for llm |
-| Invoke a tool | npc tool {tool_name} -args --flags | /tool_name -args --flags |
-| Search locally | npc tool local_search -args --flags | /local_search -args --flags |
 | Search for a term in the npcsh_db only in conversations with a specific npc | npc rag -n 'npc_name' -f 'filename' -q 'query' | /rag -n 'npc_name' -f 'filename' -q 'query' |
-| Search the web | npc search -
+| Search the web | npc search -q "cal golden bears football schedule" -sp perplexity | /search -p perplexity 'cal bears football schedule' |
 | Serve an NPC team | npc serve --port 5337 --cors='http://localhost:5137/' | /serve --port 5337 --cors='http://localhost:5137/' |
 | Screenshot analysis | npc ots | /ots |
-| Voice Chat | npc whisper 'npc_name' | /whisper |
+| Voice Chat | npc whisper -n 'npc_name' | /whisper |
 
 
 ## Python Examples
@@ -725,19 +726,6 @@ The code in the visible section of your VS Code window appears to be a script fo
 In summary, this code automates the process of capturing a screenshot, saving it with a unique filename, and analyzing that image for specific content or patterns.
 
 
-```
-
-```npcsh
-npcsh> What is the biggest file in my current folder?
-
-LLM suggests the following bash command: ls -S | head -n 1
-
-Running command: ls -S | head -n 1
-
-Command executed with output: image_20241111_000033.png
-
-I ran the command ls -S | head -n 1 in your current folder. This command sorts all files by size in descending order and then selects the first entry, which represents the largest file. The result of this operation shows that the biggest file in your current folder is image_20241111_000033.png.
-
 ```
 
 ```npcsh
@@ -869,7 +857,7 @@ and then the associated image :
 An important facet that makes `npcsh` so powerful is the ability to pipe outputs from one tool call to another. This allows for the chaining of commands and the creation of complex workflows. For example, you can use the output of a search to generate an image, or you can use the output of an image analysis to generate a report. Here is an example of how this might look in practice:
 ```npcsh
 npcsh> what is the gdp of russia in 2024? | /vixynt 'generate an image that contains {0}'
-
+```
 ### Executing Bash Commands
 You can execute bash commands directly within npcsh. The LLM can also generate and execute bash commands based on your natural language requests.
 For example:
@@ -1167,7 +1155,10 @@ Search can be accomplished through the `/search` macro. You can specify the prov
 you must set a perplexity api key as an environment variable as described above. The default provider is duckduckgo.
 
 NOTE: while google is an available search engine, they recently implemented changes (early 2025) that make the python google search package no longer as reliable.
-
+Duckduckgo's search toool also givies rate limit errors often, so until a more robust
+solution is implemented for it, Perplexity's will be the most reliable.
+
+
 
 
 ```npcsh
````
**README.md**

````diff
@@ -28,22 +28,23 @@ Interested to stay in the loop and to hear the latest and greatest about `npcsh`
 ## TLDR Cheat Sheet
 Users can take advantage of `npcsh` through its custom shell or through a command-line interface (CLI) tool. Below is a cheat sheet that shows how to use `npcsh` commands in both the shell and the CLI. For the npcsh commands to work, one must activate `npcsh` by typing it in a shell.
 
+
+
+
 | Task | npc CLI | npcsh |
 |----------|----------|----------|
 | Ask a generic question | npc 'prompt' | 'prompt' |
 | Compile an NPC | npc compile /path/to/npc.npc | /compile /path/to/npc.npc |
 | Computer use | npc plonk -n 'npc_name' -sp 'task for plonk to carry out '| /plonk -n 'npc_name' -sp 'task for plonk to carry out ' |
 | Conjure an NPC team from context and templates | npc init -t 'template1, template2' -ctx 'context' | /conjure -t 'template1, 'template2' -ctx 'context' |
-| Enter a chat with an NPC (NPC needs to be compiled first) | npc npc_name | /npc_name |
+| Enter a chat with an NPC (NPC needs to be compiled first) | npc chat -n npc_name | /spool npc=<npc_name> |
 | Generate image | npc vixynt 'prompt' | /vixynt prompt |
 | Get a sample LLM response | npc sample 'prompt' | /sample prompt for llm |
-| Invoke a tool | npc tool {tool_name} -args --flags | /tool_name -args --flags |
-| Search locally | npc tool local_search -args --flags | /local_search -args --flags |
 | Search for a term in the npcsh_db only in conversations with a specific npc | npc rag -n 'npc_name' -f 'filename' -q 'query' | /rag -n 'npc_name' -f 'filename' -q 'query' |
-| Search the web | npc search -
+| Search the web | npc search -q "cal golden bears football schedule" -sp perplexity | /search -p perplexity 'cal bears football schedule' |
 | Serve an NPC team | npc serve --port 5337 --cors='http://localhost:5137/' | /serve --port 5337 --cors='http://localhost:5137/' |
 | Screenshot analysis | npc ots | /ots |
-| Voice Chat | npc whisper 'npc_name' | /whisper |
+| Voice Chat | npc whisper -n 'npc_name' | /whisper |
 
 
 ## Python Examples
@@ -641,19 +642,6 @@ The code in the visible section of your VS Code window appears to be a script fo
 In summary, this code automates the process of capturing a screenshot, saving it with a unique filename, and analyzing that image for specific content or patterns.
 
 
-```
-
-```npcsh
-npcsh> What is the biggest file in my current folder?
-
-LLM suggests the following bash command: ls -S | head -n 1
-
-Running command: ls -S | head -n 1
-
-Command executed with output: image_20241111_000033.png
-
-I ran the command ls -S | head -n 1 in your current folder. This command sorts all files by size in descending order and then selects the first entry, which represents the largest file. The result of this operation shows that the biggest file in your current folder is image_20241111_000033.png.
-
 ```
 
 ```npcsh
@@ -785,7 +773,7 @@ and then the associated image :
 An important facet that makes `npcsh` so powerful is the ability to pipe outputs from one tool call to another. This allows for the chaining of commands and the creation of complex workflows. For example, you can use the output of a search to generate an image, or you can use the output of an image analysis to generate a report. Here is an example of how this might look in practice:
 ```npcsh
 npcsh> what is the gdp of russia in 2024? | /vixynt 'generate an image that contains {0}'
-
+```
 ### Executing Bash Commands
 You can execute bash commands directly within npcsh. The LLM can also generate and execute bash commands based on your natural language requests.
 For example:
@@ -1083,7 +1071,10 @@ Search can be accomplished through the `/search` macro. You can specify the prov
 you must set a perplexity api key as an environment variable as described above. The default provider is duckduckgo.
 
 NOTE: while google is an available search engine, they recently implemented changes (early 2025) that make the python google search package no longer as reliable.
-
+Duckduckgo's search toool also givies rate limit errors often, so until a more robust
+solution is implemented for it, Perplexity's will be the most reliable.
+
+
 
 
 ```npcsh
````
**npcsh/cli.py**

````diff
@@ -33,6 +33,7 @@ from .llm_funcs import (
     get_stream,
     get_conversation,
 )
+from .plonk import plonk, action_space
 from .search import search_web
 from .shell_helpers import *
 import os
@@ -54,10 +55,12 @@ def main():
         "assemble",
         "build",
         "compile",
+        "chat",
         "init",
         "new",
         "plonk",
         "sample",
+        "search",
         "select",
         "serve",
         "spool",
@@ -165,6 +168,10 @@ def main():
         "directory", nargs="?", default=".", help="Directory to build project in"
     )
 
+    # chat
+    chat_parser = subparsers.add_parser("chat", help="chat with an NPC")
+    chat_parser.add_argument("-n", "--npc_name", help="name of npc")
+
     # Compile command
     compile_parser = subparsers.add_parser("compile", help="Compile an NPC")
     compile_parser.add_argument("path", help="Path to NPC file")
@@ -300,7 +307,7 @@ def main():
 
     # Web search
     search_parser = subparsers.add_parser("search", help="search the web")
-    search_parser.add_argument("query", help="search query")
+    search_parser.add_argument("--query", "-q", help="search query")
    search_parser.add_argument(
        "--search_provider",
        "-sp",
@@ -317,7 +324,7 @@ def main():
 
     # Voice chat
     whisper_parser = subparsers.add_parser("whisper", help="start voice chat")
-    whisper_parser.add_argument("npc_name", help="name of the NPC to chat with")
+    whisper_parser.add_argument("-n", "--npc_name", help="name of the NPC to chat with")
 
     args = parser.parse_args()
 
@@ -367,6 +374,13 @@ def main():
             port=args.port if args.port else 5337,
             cors_origins=cors_origins,
         )
+    elif args.command == "chat":
+        npc_name = args.npc_name
+        npc_path = get_npc_path(npc_name, NPCSH_DB_PATH)
+        current_npc = load_npc_from_file(npc_path, sqlite3.connect(NPCSH_DB_PATH))
+        return enter_spool_mode(
+            model=args.model, provider=args.provider, npc=current_npc
+        )
 
     elif args.command == "init":
         if args.templates:
@@ -395,16 +409,18 @@ def main():
         )
 
     elif args.command == "compile":
-
+        npc_compiler = NPCCompiler(npc_directory, NPCSH_DB_PATH)
+        compiled = npc_compiler.compile(args.path)
+        print("NPC compiled to:", compiled)
 
     elif args.command == "plonk":
         task = args.task or args.spell
         npc_name = args.name
-
-        task
-
-        model=args.model or
-        provider=args.provider or
+        plonk(
+            task,
+            action_space,
+            model=args.model or NPCSH_CHAT_MODEL,
+            provider=args.provider or NPCSH_CHAT_PROVIDER,
         )
 
     elif args.command == "sample":
@@ -443,10 +459,14 @@ def main():
             model=args.model,
             provider=args.provider,
         )
-        print(result)
+        print(result["output"])
 
     elif args.command == "whisper":
-
+        npc_name = args.npc_name
+        npc_path = get_npc_path(npc_name, NPCSH_DB_PATH)
+        current_npc = load_npc_from_file(npc_path, sqlite3.connect(NPCSH_DB_PATH))
+
+        enter_whisper_mode(npc=current_npc)
 
     elif args.command == "tool":
         result = invoke_tool(
````
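The `search` and `whisper` subcommands switch from positional arguments to flags, and a new `chat` subcommand is registered. A minimal, self-contained sketch of the new parsing behavior (plain argparse, no npcsh imports; the sample invocation is illustrative):

```python
import argparse

parser = argparse.ArgumentParser(prog="npc")
subparsers = parser.add_subparsers(dest="command")

# New in 0.3.31: a dedicated chat subcommand.
chat_parser = subparsers.add_parser("chat", help="chat with an NPC")
chat_parser.add_argument("-n", "--npc_name", help="name of npc")

# query is now a flag (-q/--query) instead of a positional argument.
search_parser = subparsers.add_parser("search", help="search the web")
search_parser.add_argument("--query", "-q", help="search query")
search_parser.add_argument("--search_provider", "-sp", help="search provider")

# npc_name is now a flag (-n/--npc_name) as well.
whisper_parser = subparsers.add_parser("whisper", help="start voice chat")
whisper_parser.add_argument("-n", "--npc_name", help="name of the NPC to chat with")

args = parser.parse_args(["search", "-q", "cal bears football schedule", "-sp", "perplexity"])
print(args.query, args.search_provider)  # -> cal bears football schedule perplexity
```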
**npcsh/llm_funcs.py**

````diff
@@ -802,15 +802,20 @@ ReAct choices then will enter reasoning flow
 
     prompt = f"""
     A user submitted this query: {command}
+
     Determine the nature of the user's request:
-
-
-
+
+    1. Should a tool be invoked to fulfill the request?
+
+    2. Is it a general question that requires an informative answer or a highly specific question that
     requires inforrmation on the web?
-
-
-
-
+
+    3. Would this question be best answered by an alternative NPC?
+
+    4. Is it a complex request that actually requires more than one
+    tool to be called, perhaps in a sequence?
+
+    5. is there a need for the user to provide additional input to fulfill the request?
 
 
 
@@ -877,8 +882,12 @@ ReAct choices then will enter reasoning flow
 
     prompt += f"""
     In considering how to answer this, consider:
-
-    - Whether more context from the user is required to adequately answer the question.
+
+    - Whether more context from the user is required to adequately answer the question.
+    e.g. if a user asks for a joke about their favorite city but they don't include the city ,
+    it would be helpful to ask for that information. Similarly, if a user asks to open a browser
+    and to check the weather in a city, it would be helpful to ask for the city and which website
+    or source to use.
     - Whether a tool should be used.
 
 
@@ -887,14 +896,17 @@ ReAct choices then will enter reasoning flow
     extra tools or agent passes.
     Only use tools or pass to other NPCs
     when it is obvious that the answer needs to be as up-to-date as possible. For example,
-
+    a question about where mount everest is does not necessarily need to be answered by a tool call or an agent pass.
     Similarly, if a user asks to explain the plot of the aeneid, this can be answered without a tool call or agent pass.
-
+
+    If a user were to ask for the current weather in tokyo or the current price of bitcoin or who the mayor of a city is,
+    then a tool call or agent pass may be appropriate.
+
     Tools are valuable but their use should be limited and purposeful to
     ensure the best user experience.
 
     Respond with a JSON object containing:
-    - "action": one of ["
+    - "action": one of ["invoke_tool", "answer_question", "pass_to_npc", "execute_sequence", "request_input"]
     - "tool_name": : if action is "invoke_tool": the name of the tool to use.
     else if action is "execute_sequence", a list of tool names to use.
     - "explanation": a brief explanation of why you chose this action.
@@ -907,7 +919,7 @@ ReAct choices then will enter reasoning flow
 
     The format of the JSON object is:
     {{
-    "action": "
+    "action": "invoke_tool" | "answer_question" | "pass_to_npc" | "execute_sequence" | "request_input",
     "tool_name": "<tool_name(s)_if_applicable>",
     "explanation": "<your_explanation>",
     "npc_name": "<npc_name(s)_if_applicable>"
@@ -915,7 +927,9 @@ ReAct choices then will enter reasoning flow
 
     If you execute a sequence, ensure that you have a specified NPC for each tool use.
 
-    Remember, do not include ANY ADDITIONAL MARKDOWN FORMATTING.
+    Remember, do not include ANY ADDITIONAL MARKDOWN FORMATTING.
+    There should be no leading ```json.
+
     """
 
     if docs_context:
@@ -932,11 +946,6 @@ ReAct choices then will enter reasoning flow
     {context}
 
     """
-
-    # print(prompt)
-
-    # For action determination, we don't need to pass the conversation messages to avoid confusion
-    # print(npc, model, provider)
     action_response = get_llm_response(
         prompt,
         model=model,
@@ -965,12 +974,11 @@ ReAct choices then will enter reasoning flow
     else:
         response_content_parsed = response_content
 
-    # Proceed according to the action specified
     action = response_content_parsed.get("action")
     explanation = response_content["explanation"]
-    # Include the user's command in the conversation messages
     print(f"action chosen: {action}")
     print(f"explanation given: {explanation}")
+
     if response_content_parsed.get("tool_name"):
         print(f"tool name: {response_content_parsed.get('tool_name')}")
 
@@ -1316,8 +1324,9 @@ def handle_tool_call(
             stream=stream,
             messages=messages,
         )
-        if
-
+        if not stream:
+            if "Error" in tool_output:
+                raise Exception(tool_output)
     except Exception as e:
         # diagnose_problem = get_llm_response(
         ## f"""a problem has occurred.
````
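The prompt changes above pin the ReAct action vocabulary to five values and forbid markdown fencing around the JSON reply. A small sketch of the consumer side those constraints imply (the sample response string is illustrative):

```python
import json

VALID_ACTIONS = {
    "invoke_tool",
    "answer_question",
    "pass_to_npc",
    "execute_sequence",
    "request_input",
}

# Example model reply; with markdown fencing forbidden, this parses directly.
raw = '{"action": "invoke_tool", "tool_name": "internet_search", "explanation": "needs timely info", "npc_name": "sibiji"}'

parsed = json.loads(raw)  # would raise if the model wrapped the JSON in a fenced block
assert parsed["action"] in VALID_ACTIONS
print(parsed["action"], parsed.get("tool_name"))
```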
**npcsh/npc_compiler.py**

````diff
@@ -501,6 +501,18 @@ class SilentUndefined(Undefined):
         return ""
 
 
+class Context:
+    def __init__(self, context=None, mcp_servers=None, databases=None, files=None):
+        self.context = context
+        self.mcp_servers = mcp_servers
+        self.databases = databases
+        self.files = files
+
+    def load_context_file(self, path):
+        with open(path, "r") as f:
+            self.context = yaml.safe_load(f)
+
+
 class Tool:
     def __init__(self, tool_data: dict):
         if not tool_data or not isinstance(tool_data, dict):
@@ -551,6 +563,7 @@ class Tool:
 
         # Process Steps
         for i, step in enumerate(self.steps):
+
             context = self.execute_step(
                 step,
                 context,
@@ -564,6 +577,7 @@ class Tool:
             # if i is the last step and the user has reuqested a streaming output
             # then we should return the stream
             if i == len(self.steps) - 1 and stream:  # this was causing the big issue X:
+                print("tool successful, passing output to stream")
                 return context
             # Return the final output
             if context.get("output") is not None:
@@ -592,8 +606,14 @@ class Tool:
         except Exception as e:
             print(f"Error rendering template: {e}")
             rendered_code = code
-
-
+        # render engine if necessary
+        try:
+            template = jinja_env.from_string(engine)
+            rendered_engine = template.render(**context)
+        except:
+            print("error rendering engine")
+            rendered_engine = engine
+        if rendered_engine == "natural":
             if len(rendered_code.strip()) > 0:
                 # print(f"Executing natural language step: {rendered_code}")
                 if stream:
@@ -610,7 +630,7 @@ class Tool:
                 context["llm_response"] = response_text
                 context["results"] = response_text
 
-        elif
+        elif rendered_engine == "python":
             exec_globals = {
                 "__builtins__": __builtins__,
                 "npc": npc,
@@ -639,12 +659,23 @@ class Tool:
             exec_env = context.copy()
             try:
                 exec(rendered_code, exec_globals, new_locals)
+                exec_env.update(new_locals)
+
+                context.update(exec_env)
+
                 exec_env.update(new_locals)
                 context.update(exec_env)
-
+
+                # Add this line to explicitly copy the output
+                if "output" in new_locals:
+                    context["output"] = new_locals["output"]
+
+                # Then your existing code
                 if "output" in exec_env:
                     if exec_env["output"] is not None:
                         context["results"] = exec_env["output"]
+                        print("result from code execution: ", exec_env["output"])
+
             except NameError as e:
                 tb_lines = traceback.format_exc().splitlines()
                 limited_tb = (
````
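The new `Context` class added above is a small holder for the fields that the updated `npcsh.ctx` file now carries (context text, MCP servers, databases, files). A minimal usage sketch; the import path follows the file the diff touches, and the `.ctx` path is a placeholder:

```python
from npcsh.npc_compiler import Context  # assumed import path; the class is defined in npc_compiler.py

# Construct directly, mirroring the fields in the updated npcsh.ctx:
ctx = Context(
    databases=["~/npcsh_history.db"],
    mcp_servers=["/path/to/mcp/server.py"],  # placeholder, as in the ctx file itself
)

# Or load a whole YAML document from a .ctx file into ctx.context:
ctx.load_context_file("npc_team/my_team.ctx")  # hypothetical path
print(ctx.context, ctx.databases)
```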
**npcsh/npc_team/npcsh.ctx**

````diff
@@ -1,5 +1,11 @@
-
+context: |
   The npcsh NPC team is devoted to providing a safe and helpful
   environment for users where they can work and be as successful as possible.
   npcsh is a command-line tool that makes it easy for users to harness
-  the power of LLMs from a command line shell.
+  the power of LLMs from a command line shell.
+databases:
+  - ~/npcsh_history.db
+mcp_servers:
+  - /path/to/mcp/server.py
+  - @npm for server
+
````
**npcsh/npc_team/tools/bash_executer.tool** (new file)

````diff
@@ -0,0 +1,32 @@
+tool_name: bash_executor
+description: Execute bash queries.
+inputs:
+  - bash_command
+  - user_request
+steps:
+  - engine: python
+    code: |
+      import subprocess
+      import os
+      cmd = '{{bash_command}}'  # Properly quote the command input
+      def run_command(cmd):
+          process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+          stdout, stderr = process.communicate()
+          if stderr:
+              print(f"Error: {stderr.decode('utf-8')}")
+              return stderr
+          return stdout
+      result = run_command(cmd)
+      output = result.decode('utf-8')
+
+  - engine: natural
+    code: |
+
+      Here is the result of the bash command:
+      ```
+      {{ output }}
+      ```
+      This was the original user request: {{ user_request }}
+
+      Please provide a response accordingly.
+
````
**npcsh/npc_team/tools/code_executor.tool** (new file)

````diff
@@ -0,0 +1,16 @@
+tool_name: code_executor
+description: Execute scripts with a specified language. choose from python, bash, R, or javascript. Set the ultimate result as the "output" variable. It must be a string. Do not add unnecessary print statements.
+inputs:
+  - code
+  - language
+steps:
+  - engine: '{{ language }}'
+    code: |
+      {{code}}
+  - engine: natural
+    code: |
+      Here is the result of the code execution that an agent ran.
+      ```
+      {{ output }}
+      ```
+      please provide a response accordingly.
````
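code_executor.tool only works because of the `npc_compiler.py` change above that renders the step's `engine` field through Jinja before dispatching on it. A standalone sketch of that rendering step:

```python
from jinja2 import Environment

jinja_env = Environment()

# code_executor.tool declares `engine: '{{ language }}'`; the compiler now
# renders the engine string against the step context before deciding how
# to execute the step.
engine = "{{ language }}"
context = {"language": "python", "code": "output = 'hi'"}

try:
    rendered_engine = jinja_env.from_string(engine).render(**context)
except Exception:
    print("error rendering engine")
    rendered_engine = engine  # fall back to the literal engine string

print(rendered_engine)  # -> python, so the step runs through the python branch
```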
**npcsh/npc_team/tools/generic_search.tool**

````diff
@@ -2,13 +2,21 @@ tool_name: "internet_search"
 description: Searches the web for information based on a query in order to verify timiely details (e.g. current events) or to corroborate information in uncertain situations. Should be mainly only used when users specifically request a search, otherwise an LLMs basic knowledge should be sufficient.
 inputs:
   - query
+  - provider: ''
 steps:
   - engine: "python"
     code: |
       from npcsh.search import search_web
+      from npcsh.npc_sysenv import NPCSH_SEARCH_PROVIDER
       query = "{{ query }}"
+      provider = '{{ provider }}'
+      if provider.strip() != '':
+          results = search_web(query, num_results=5, provider = provider)
+      else:
+          results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
+
       print('QUERY in tool', query)
-      results = search_web(query, num_results=5)
+      results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
       print('RESULTS in tool', results)
   - engine: "natural"
     code: |
````
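The tool now accepts an optional `provider` input and falls back to `NPCSH_SEARCH_PROVIDER` when it is empty. A sketch of that selection logic in isolation (note that, as committed, the line after the `if`/`else` re-runs the search with `NPCSH_SEARCH_PROVIDER` regardless; the sketch shows the selection itself, with the environment-backed default stubbed out):

```python
from npcsh.search import search_web  # signature as used in the tool

NPCSH_SEARCH_PROVIDER = "duckduckgo"  # stand-in for npcsh.npc_sysenv.NPCSH_SEARCH_PROVIDER

def run_search(query: str, provider: str = "") -> list:
    # An empty provider string means "use the configured default".
    chosen = provider.strip() or NPCSH_SEARCH_PROVIDER
    return search_web(query, num_results=5, provider=chosen)

# e.g. run_search("cal bears football schedule", provider="perplexity")
```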
**npcsh/npc_team/tools/sql_executor.tool**

````diff
@@ -1,5 +1,5 @@
-tool_name:
-description: Execute
+tool_name: data_pull
+description: Execute queries on the ~/npcsh_history.db to pull data. The database contains only information about conversations and other user-provided data. It does not store any information about individual files.
 inputs:
   - sql_query
   - interpret: false  # Note that this is not a boolean, but a string
````
**npcsh/search.py**

````diff
@@ -5,6 +5,7 @@ import os
 
 from bs4 import BeautifulSoup
 from duckduckgo_search import DDGS
+from duckduckgo_search.exceptions import DuckDuckGoSearchException
 
 try:
     from googlesearch import search
@@ -41,7 +42,6 @@ def search_perplexity(
         "max_tokens": max_tokens,
         "temperature": temperature,
         "top_p": top_p,
-        "search_domain_filter": ["perplexity.ai"],
         "return_images": False,
         "return_related_questions": False,
         "search_recency_filter": "month",
@@ -58,6 +58,7 @@ def search_perplexity(
     # Make the POST request to the API
     response = requests.post(url, json=payload, headers=headers)
     response = json.loads(response.text)
+    print(response)
     return [response["choices"][0]["message"]["content"], response["citations"]]
 
 
@@ -89,13 +90,19 @@ def search_web(
 
     if provider == "duckduckgo":
         ddgs = DDGS()
-
-
-
-
-
-
-
+        try:
+            search_results = ddgs.text(query, max_results=num_results)
+            print(search_results, type(search_results))
+            urls = [r["href"] for r in search_results]
+            results = [
+                {"title": r["title"], "link": r["href"], "content": r["body"]}
+                for r in search_results
+            ]
+        except DuckDuckGoSearchException as e:
+            print("DuckDuckGo search failed: ", e)
+            urls = []
+            results = []
+
     else:  # google
         urls = list(search(query, num_results=num_results))
         # google shit doesnt seem to be working anymore, apparently a lbock they made on browsers without js?
````