npcsh 1.0.21__tar.gz → 1.0.23__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcsh-1.0.21 → npcsh-1.0.23}/PKG-INFO +8 -3
- {npcsh-1.0.21 → npcsh-1.0.23}/README.md +7 -2
- npcsh-1.0.23/npcsh/npc_team/alicanto.npc +2 -0
- npcsh-1.0.23/npcsh/npc_team/alicanto.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/corca.npc +13 -0
- npcsh-1.0.23/npcsh/npc_team/corca.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/foreman.npc +7 -0
- npcsh-1.0.23/npcsh/npc_team/frederic.npc +6 -0
- npcsh-1.0.23/npcsh/npc_team/frederic4.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/guac.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/jinxs/bash_executer.jinx +20 -0
- npcsh-1.0.23/npcsh/npc_team/jinxs/edit_file.jinx +94 -0
- npcsh-1.0.23/npcsh/npc_team/jinxs/image_generation.jinx +29 -0
- npcsh-1.0.23/npcsh/npc_team/jinxs/internet_search.jinx +31 -0
- npcsh-1.0.23/npcsh/npc_team/jinxs/python_executor.jinx +11 -0
- npcsh-1.0.23/npcsh/npc_team/jinxs/screen_cap.jinx +25 -0
- npcsh-1.0.23/npcsh/npc_team/kadiefa.npc +3 -0
- npcsh-1.0.23/npcsh/npc_team/kadiefa.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/npcsh.ctx +18 -0
- npcsh-1.0.23/npcsh/npc_team/npcsh_sibiji.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/plonk.npc +2 -0
- npcsh-1.0.23/npcsh/npc_team/plonk.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/plonkjr.npc +2 -0
- npcsh-1.0.23/npcsh/npc_team/plonkjr.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/sibiji.npc +3 -0
- npcsh-1.0.23/npcsh/npc_team/sibiji.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/spool.png +0 -0
- npcsh-1.0.23/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/routes.py +1 -1
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh.egg-info/PKG-INFO +8 -3
- npcsh-1.0.23/npcsh.egg-info/SOURCES.txt +50 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/setup.py +2 -2
- npcsh-1.0.21/npcsh.egg-info/SOURCES.txt +0 -24
- {npcsh-1.0.21 → npcsh-1.0.23}/LICENSE +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/__init__.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/_state.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/alicanto.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/corca.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/guac.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/mcp_helpers.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/mcp_server.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/npc.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/npcsh.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/plonk.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/pti.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/spool.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/wander.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh/yap.py +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh.egg-info/dependency_links.txt +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh.egg-info/entry_points.txt +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh.egg-info/requires.txt +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/npcsh.egg-info/top_level.txt +0 -0
- {npcsh-1.0.21 → npcsh-1.0.23}/setup.cfg +0 -0
{npcsh-1.0.21 → npcsh-1.0.23}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.21
+Version: 1.0.23
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
@@ -105,7 +105,12 @@ Dynamic: summary
 
 # NPC Shell
 
-The NPC shell is the toolkit for tomorrow, providing a suite of programs to make use of multi-modal LLMs and agents in novel interactive modes. `npcsh` is
+The NPC shell is the toolkit for tomorrow, providing a suite of programs to make use of multi-modal LLMs and agents in novel interactive modes. `npcsh` is based in the command line, and so can be used wherever you work.
+
+- It is developed to work reliably with small models and performs excellently with the state-of-the-art models from major model providers.
+- Fundamentally, the core program of npcsh extends the familiar bash environment with an intelligent layer that lets users seamlessly ask agents questions, run pre-built or custom macros or agents, all without breaking the flow of command-line work.
+- Switching between agents is a breeze in `npcsh`, letting you quickly and easily take advantage of a variety of agents (e.g. coding agents versus tool-calling agents versus prompt-based ReACT Flow agents) and personas (e.g. Data scientist, mapmaker with ennui, etc.).
+- Project variables and context can be stored in team `.ctx` files. Personas (`.npc`) and Jinja execution templates (`.jinx`) are likewise stored in `yaml` within the global `npcsh` team or your project-specific one, letting you focus on adjusting and engineering context and system prompts iteratively so you can constantly improve your agent team's performance.
 
 To get started:
 ```bash
@@ -126,7 +131,7 @@ and you will enter the NPC shell. Additionally, the pip installation includes ma
 # Usage
 - Get help with a task:
 ```bash
-npcsh
+npcsh>can you help me identify what process is listening on port 5337?
 ```
 <p align="center">
 <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/test_data/port5337.png" alt="example of running npcsh to check what processes are listening on port 5337", width=600>
{npcsh-1.0.21 → npcsh-1.0.23}/README.md

@@ -5,7 +5,12 @@
 
 # NPC Shell
 
-The NPC shell is the toolkit for tomorrow, providing a suite of programs to make use of multi-modal LLMs and agents in novel interactive modes. `npcsh` is
+The NPC shell is the toolkit for tomorrow, providing a suite of programs to make use of multi-modal LLMs and agents in novel interactive modes. `npcsh` is based in the command line, and so can be used wherever you work.
+
+- It is developed to work reliably with small models and performs excellently with the state-of-the-art models from major model providers.
+- Fundamentally, the core program of npcsh extends the familiar bash environment with an intelligent layer that lets users seamlessly ask agents questions, run pre-built or custom macros or agents, all without breaking the flow of command-line work.
+- Switching between agents is a breeze in `npcsh`, letting you quickly and easily take advantage of a variety of agents (e.g. coding agents versus tool-calling agents versus prompt-based ReACT Flow agents) and personas (e.g. Data scientist, mapmaker with ennui, etc.).
+- Project variables and context can be stored in team `.ctx` files. Personas (`.npc`) and Jinja execution templates (`.jinx`) are likewise stored in `yaml` within the global `npcsh` team or your project-specific one, letting you focus on adjusting and engineering context and system prompts iteratively so you can constantly improve your agent team's performance.
 
 To get started:
 ```bash
@@ -26,7 +31,7 @@ and you will enter the NPC shell. Additionally, the pip installation includes ma
 # Usage
 - Get help with a task:
 ```bash
-npcsh
+npcsh>can you help me identify what process is listening on port 5337?
 ```
 <p align="center">
 <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/test_data/port5337.png" alt="example of running npcsh to check what processes are listening on port 5337", width=600>
npcsh-1.0.23/npcsh/npc_team/alicanto.png: Binary file
npcsh-1.0.23/npcsh/npc_team/corca.npc

@@ -0,0 +1,13 @@
+name: corca
+primary_directive: |
+  You are corca, a distinguished member of the NPC team.
+  Your expertise is in the area of software development and
+  you have a knack for thinking through problems carefully.
+  You favor solutions that prioritize simplicity and clarity and
+  ought to always consider how some suggestion may increase rather than reduce tech debt
+  unnecessarily. Now, the key is in this last term, "unnecessarily".
+  You must distinguish carefully and when in doubt, opt to ask for further
+  information or clarification with concrete clear options that make it
+  easy for a user to choose.
+model: gpt-4o-mini
+provider: openai
npcsh-1.0.23/npcsh/npc_team/corca.png: Binary file
npcsh-1.0.23/npcsh/npc_team/foreman.npc

@@ -0,0 +1,7 @@
+name: foreman
+primary_directive: You are the foreman of an NPC team. It is your duty
+  to delegate tasks to your team members or to other specialized teams
+  in order to complete the project. You are responsible for the
+  completion of the project and the safety of your team members.
+model: gpt-4o-mini
+provider: openai
npcsh-1.0.23/npcsh/npc_team/frederic.npc

@@ -0,0 +1,6 @@
+name: frederic
+primary_directive: |
+  You are frederic the polar bear. Your job is help users think through problems and
+  to provide straightforward ways forward on problems. Cut through the ice
+  to get to what matters and keep things simple. You are to respond in a
+  witty tone like richard feynman but with the romantic tambor of Frederic Chopin.
npcsh-1.0.23/npcsh/npc_team/frederic4.png: Binary file

npcsh-1.0.23/npcsh/npc_team/guac.png: Binary file
npcsh-1.0.23/npcsh/npc_team/jinxs/bash_executer.jinx

@@ -0,0 +1,20 @@
+jinx_name: bash_executor
+description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
+inputs:
+  - bash_command
+  - user_request
+steps:
+  - engine: python
+    code: |
+      import subprocess
+      import os
+      cmd = '{{bash_command}}' # Properly quote the command input
+      def run_command(cmd):
+          process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+          stdout, stderr = process.communicate()
+          if stderr:
+              print(f"Error: {stderr.decode('utf-8')}")
+              return stderr
+          return stdout
+      result = run_command(cmd)
+      output = result.decode('utf-8')
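For context on how a step like this runs: `{{bash_command}}` is a Jinja placeholder that is substituted before the python engine executes the step, so a rendered step reduces to ordinary subprocess code. A minimal sketch of one hypothetical rendering (the grep command is illustrative, not from the package):

import subprocess

# Hypothetical rendering of the step above with bash_command = "grep -rn TODO src/"
cmd = 'grep -rn TODO src/'
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
# Mirrors the jinx's error-first handling: stderr wins if the command wrote to it
output = (stderr if stderr else stdout).decode('utf-8')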
npcsh-1.0.23/npcsh/npc_team/jinxs/edit_file.jinx

@@ -0,0 +1,94 @@
+jinx_name: file_editor
+description: Examines a file, determines what changes are needed, and applies those
+  changes.
+inputs:
+  - file_path
+  - edit_instructions
+  - backup: true
+steps:
+  - name: "edit_file"
+    engine: "python"
+    code: |
+      import os
+      from npcpy.llm_funcs import get_llm_response
+
+      # Get inputs
+      file_path = os.path.expanduser("{{ file_path }}")
+      edit_instructions = "{{ edit_instructions }}"
+      backup_str = "{{ backup }}"
+      create_backup = backup_str.lower() not in ('false', 'no', '0', '')
+
+      # Read file content
+      with open(file_path, 'r') as f:
+          original_content = f.read()
+
+      # Create backup if requested
+      if create_backup:
+          backup_path = file_path + ".bak"
+          with open(backup_path, 'w') as f:
+              f.write(original_content)
+
+      # Make the prompt for the LLM
+      prompt = """You are a code editing assistant. Analyze this file and make the requested changes.
+
+      File content:
+      """ + original_content + """
+
+      Edit instructions: """ + edit_instructions + """
+
+      Return a JSON object with these fields:
+      1. "modifications": An array of modification objects, where each object has:
+         - "type": One of "replace", "insert_after", "insert_before", or "delete"
+         - "target": For "insert_after" and "insert_before", the text to insert after/before
+           For "delete", the text to delete
+         - "original": For "replace", the text to be replaced
+         - "replacement": For "replace", the text to replace with
+         - "insertion": For "insert_after" and "insert_before", the text to insert
+      2. "explanation": Brief explanation of the changes made
+      """
+      # Get the LLM response with JSON formatting
+      response = get_llm_response(prompt, model=npc.model, provider=npc.provider, npc=npc, format="json")
+
+      result = response.get("response", {})
+      modifications = result.get("modifications", [])
+      explanation = result.get("explanation", "No explanation provided")
+
+      # Apply modifications
+      updated_content = original_content
+      changes_applied = 0
+
+      for mod in modifications:
+          print(mod)
+          mod_type = mod.get("type")
+
+          if mod_type == "replace":
+              original = mod.get("original")
+              replacement = mod.get("replacement")
+              if original in updated_content:
+                  updated_content = updated_content.replace(original, replacement)
+                  changes_applied += 1
+
+          elif mod_type == "insert_after":
+              target = mod.get("target")
+              insertion = mod.get("insertion")
+              if target in updated_content:
+                  updated_content = updated_content.replace(target, target + insertion)
+                  changes_applied += 1
+
+          elif mod_type == "insert_before":
+              target = mod.get("target")
+              insertion = mod.get("insertion")
+              if target in updated_content:
+                  updated_content = updated_content.replace(target, insertion + target)
+                  changes_applied += 1
+
+          elif mod_type == "delete":
+              target = mod.get("target")
+              if target in updated_content:
+                  updated_content = updated_content.replace(target, "")
+                  changes_applied += 1
+
+      with open(file_path, 'w') as f:
+          f.write(updated_content)
+
+      output = "Applied " + str(changes_applied) + " changes to " + file_path + "\n\n" + explanation
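To make the expected LLM response format concrete, here is a hypothetical payload in the JSON shape the prompt above requests, together with the effect of the replace branch applied exactly as the jinx applies it (the file text and values are illustrative only):

# Hypothetical response payload in the format the file_editor prompt asks for
result = {
    "modifications": [
        {"type": "replace", "original": "DEBUG = True", "replacement": "DEBUG = False"},
        {"type": "insert_after", "target": "import os", "insertion": "\nimport sys"},
    ],
    "explanation": "Disable debug mode and add a missing import.",
}

# Applying the replace branch the same way the jinx does
updated_content = "import os\nDEBUG = True\n"
mod = result["modifications"][0]
if mod["original"] in updated_content:
    updated_content = updated_content.replace(mod["original"], mod["replacement"])
assert updated_content == "import os\nDEBUG = False\n"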
npcsh-1.0.23/npcsh/npc_team/jinxs/image_generation.jinx

@@ -0,0 +1,29 @@
+jinx_name: image_generation_jinx
+description: 'Generates images based on a text prompt. Prompt must be specified to facilitate the users goal and should NOT be a verbatim replication of their request unless asked. The output name must be a path relative to the current directory. do NOT use placeholders. specify ./descriptive_image_name.png. Model must be specified if the user indicates a specific model or provider. For example, if they want gemini-2.5-flash you should specify gemini-2.5-flash as the model and gemini as the provider . if they specify gpt-image-1 you should specify gpt-image-1 as the model and openai as the provider. Only leave it the default if the user does not indicate the model preference.'
+inputs:
+  - prompt
+  - output_name
+  - model: runwayml/stable-diffusion-v1-5
+  - provider: diffusers
+steps:
+  - engine: "python"
+    code: |
+      image_prompt = '{{prompt}}'.strip()
+      from npcpy.llm_funcs import gen_image
+      # Generate the image
+      pil_image = gen_image(
+          image_prompt,
+          npc=npc,
+          model='{{model}}', # You can adjust the model as needed
+          provider='{{provider}}'
+      )
+      if pil_image:
+          image_generated = True
+      else:
+          image_generated = False
+      # save the image
+      output_name = '{{output_name}}'
+      pil_image.save(f'{output_name}.png')
+      # open the image to display it
+      #pil_image.show()
+      output = output_name
npcsh-1.0.23/npcsh/npc_team/jinxs/internet_search.jinx

@@ -0,0 +1,31 @@
+jinx_name: internet_search
+description: Searches the web for information based on a query in order to verify
+  timiely details (e.g. current events) or to corroborate information in uncertain
+  situations. Should be mainly only used when users specifically request a search,
+  otherwise an LLMs basic knowledge should be sufficient. The Query must be written specifically
+  as a search query, it should not be a direct parroting of the user's input text. Returns a LLM summary so no post-summary is required
+inputs:
+  - query
+  - provider: ''
+steps:
+  - engine: "python"
+    code: |
+      from npcpy.data.web import search_web
+      from npcsh._state import NPCSH_SEARCH_PROVIDER
+      query = "{{ query }}"
+      provider = '{{ provider }}'
+      if provider.strip() != '':
+          results = search_web(query, num_results=5, provider = provider)
+      else:
+          results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
+
+      print('QUERY in jinx', query)
+      results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
+      print('RESULTS in jinx', results)
+  - engine: "natural"
+    code: |
+      Using the following information extracted from the web:
+
+      {{ results }}
+
+      Answer the users question: {{ query }}
npcsh-1.0.23/npcsh/npc_team/jinxs/python_executor.jinx

@@ -0,0 +1,11 @@
+jinx_name: python_executor
+description: Execute scripts with python. You must set the ultimate result as the "output"
+  variable. It MUST be a string.
+  Do not add unnecessary print statements.
+  This jinx is intended for executing code snippets that are not
+  accomplished by other jinxes. Use it only when the others are insufficient.
+inputs:
+  - code
+steps:
+  - code: '{{code}}'
+    engine: python
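Because the single step simply renders `{{code}}` into the python engine, a hypothetical invocation reduces to running the snippet directly, with the final result bound to `output` (the snippet below is illustrative, not from the package):

# Hypothetical rendering with code = "import platform; output = platform.platform()"
import platform
output = platform.platform()  # the jinx contract: the final result goes in `output`, as a string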
npcsh-1.0.23/npcsh/npc_team/jinxs/screen_cap.jinx

@@ -0,0 +1,25 @@
+jinx_name: screen_cap
+description: Captures the whole screen and sends the image for analysis
+inputs:
+  - prompt
+steps:
+  - engine: "python"
+    code: |
+      import os
+      from npcpy.data.image import capture_screenshot
+      out = capture_screenshot(full=True)
+      prompt = "{{prompt}}"
+      # Now properly use get_llm_response to analyze the image
+      # Create a prompt that includes the user's request and instructions
+      analysis_prompt = prompt + "\n\nAttached is a screenshot of my screen currently. Please use this to evaluate the situation. If the user asked for you to explain what's on their screen or something similar, they are referring to the details contained within the attached image."
+      llm_response = get_llm_response(
+          prompt=analysis_prompt,
+          model=npc.model if npc else None,
+          provider=npc.provider if npc else None,
+          api_url=npc.api_url if npc else None,
+          api_key=npc.api_key if npc else None,
+          npc=npc,
+          images=[out['file_path']],
+      )
+      output = llm_response['response']
+
npcsh-1.0.23/npcsh/npc_team/kadiefa.npc

@@ -0,0 +1,3 @@
+name: kadiefa
+primary_directive: |
+  You are kadiefa, the exploratory snow leopard. You love to find new paths and to explore hidden gems. You go into caverns no cat has ventured into before. You climb peaks that others call crazy. You are at the height of your power. Your role is to lead the way for users to explore complex research questions and to think outside of the box.
npcsh-1.0.23/npcsh/npc_team/kadiefa.png: Binary file
npcsh-1.0.23/npcsh/npc_team/npcsh.ctx

@@ -0,0 +1,18 @@
+context: |
+  The npcsh NPC team is devoted to providing a safe and helpful
+  environment for users where they can work and be as successful as possible.
+  npcsh is a command-line tool that makes it easy for users to harness
+  the power of LLMs from a command line shell. npcsh is a command line toolkit consisting of several programs.
+databases:
+  - ~/npcsh_history.db
+mcp_servers:
+  - ~/.npcsh/mcp_server.py
+use_global_jinxs: true
+forenpc: sibiji
+preferences:
+  - If you come up with an idea, it is critical that you also provide a way to validate the idea.
+  - Never change function names unless requested. keep things idempotent.
+  - If plots are requested for python code, prefer to use matplotlib. Do not ever use seaborn.
+  - Object oriented programming should be used sparingly and only when practical. Otherwise, opt for functional implementations.
+  - Never write unit tests unless explicitly requested.
+  - If we want you to write tests, we mean we want you to write example use cases that show how the code works.
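The `.ctx` file is ordinary YAML, so it can be inspected with any YAML parser. A quick sketch, assuming PyYAML is available and using the path from the file list above:

import yaml

# Load the global team context shipped with the package
with open("npcsh/npc_team/npcsh.ctx") as f:
    ctx = yaml.safe_load(f)

print(ctx["forenpc"])           # "sibiji"
print(len(ctx["preferences"]))  # 6 team-wide preferences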
npcsh-1.0.23/npcsh/npc_team/npcsh_sibiji.png: Binary file

npcsh-1.0.23/npcsh/npc_team/plonk.png: Binary file

npcsh-1.0.23/npcsh/npc_team/plonkjr.png: Binary file

npcsh-1.0.23/npcsh/npc_team/sibiji.png: Binary file

npcsh-1.0.23/npcsh/npc_team/spool.png: Binary file

npcsh-1.0.23/npcsh/npc_team/yap.png: Binary file
{npcsh-1.0.21 → npcsh-1.0.23}/npcsh.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcsh
-Version: 1.0.21
+Version: 1.0.23
 Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
 Home-page: https://github.com/NPC-Worldwide/npcsh
 Author: Christopher Agostino
@@ -105,7 +105,12 @@ Dynamic: summary
 
 # NPC Shell
 
-The NPC shell is the toolkit for tomorrow, providing a suite of programs to make use of multi-modal LLMs and agents in novel interactive modes. `npcsh` is
+The NPC shell is the toolkit for tomorrow, providing a suite of programs to make use of multi-modal LLMs and agents in novel interactive modes. `npcsh` is based in the command line, and so can be used wherever you work.
+
+- It is developed to work reliably with small models and performs excellently with the state-of-the-art models from major model providers.
+- Fundamentally, the core program of npcsh extends the familiar bash environment with an intelligent layer that lets users seamlessly ask agents questions, run pre-built or custom macros or agents, all without breaking the flow of command-line work.
+- Switching between agents is a breeze in `npcsh`, letting you quickly and easily take advantage of a variety of agents (e.g. coding agents versus tool-calling agents versus prompt-based ReACT Flow agents) and personas (e.g. Data scientist, mapmaker with ennui, etc.).
+- Project variables and context can be stored in team `.ctx` files. Personas (`.npc`) and Jinja execution templates (`.jinx`) are likewise stored in `yaml` within the global `npcsh` team or your project-specific one, letting you focus on adjusting and engineering context and system prompts iteratively so you can constantly improve your agent team's performance.
 
 To get started:
 ```bash
@@ -126,7 +131,7 @@ and you will enter the NPC shell. Additionally, the pip installation includes ma
 # Usage
 - Get help with a task:
 ```bash
-npcsh
+npcsh>can you help me identify what process is listening on port 5337?
 ```
 <p align="center">
 <img src="https://raw.githubusercontent.com/npc-worldwide/npcsh/main/test_data/port5337.png" alt="example of running npcsh to check what processes are listening on port 5337", width=600>
npcsh-1.0.23/npcsh.egg-info/SOURCES.txt

@@ -0,0 +1,50 @@
+LICENSE
+README.md
+setup.py
+npcsh/__init__.py
+npcsh/_state.py
+npcsh/alicanto.py
+npcsh/corca.py
+npcsh/guac.py
+npcsh/mcp_helpers.py
+npcsh/mcp_server.py
+npcsh/npc.py
+npcsh/npcsh.py
+npcsh/plonk.py
+npcsh/pti.py
+npcsh/routes.py
+npcsh/spool.py
+npcsh/wander.py
+npcsh/yap.py
+npcsh.egg-info/PKG-INFO
+npcsh.egg-info/SOURCES.txt
+npcsh.egg-info/dependency_links.txt
+npcsh.egg-info/entry_points.txt
+npcsh.egg-info/requires.txt
+npcsh.egg-info/top_level.txt
+npcsh/npc_team/alicanto.npc
+npcsh/npc_team/alicanto.png
+npcsh/npc_team/corca.npc
+npcsh/npc_team/corca.png
+npcsh/npc_team/foreman.npc
+npcsh/npc_team/frederic.npc
+npcsh/npc_team/frederic4.png
+npcsh/npc_team/guac.png
+npcsh/npc_team/kadiefa.npc
+npcsh/npc_team/kadiefa.png
+npcsh/npc_team/npcsh.ctx
+npcsh/npc_team/npcsh_sibiji.png
+npcsh/npc_team/plonk.npc
+npcsh/npc_team/plonk.png
+npcsh/npc_team/plonkjr.npc
+npcsh/npc_team/plonkjr.png
+npcsh/npc_team/sibiji.npc
+npcsh/npc_team/sibiji.png
+npcsh/npc_team/spool.png
+npcsh/npc_team/yap.png
+npcsh/npc_team/jinxs/bash_executer.jinx
+npcsh/npc_team/jinxs/edit_file.jinx
+npcsh/npc_team/jinxs/image_generation.jinx
+npcsh/npc_team/jinxs/internet_search.jinx
+npcsh/npc_team/jinxs/python_executor.jinx
+npcsh/npc_team/jinxs/screen_cap.jinx
{npcsh-1.0.21 → npcsh-1.0.23}/setup.py

@@ -74,11 +74,11 @@ voice_requirements = [
     "pyttsx3",
 ]
 
-extra_files = package_files("
+extra_files = package_files("npcsh/npc_team/")
 
 setup(
     name="npcsh",
-    version="1.0.21",
+    version="1.0.23",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements,  # Only install base requirements by default
     extras_require={
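The body of `package_files` is not shown in this diff; a common implementation of this setuptools data-files pattern (an assumption, not the package's confirmed source) walks the directory and collects every file path so the npc_team assets ship with the wheel:

import os

def package_files(directory):
    # Assumed implementation: recursively collect data-file paths under `directory`
    paths = []
    for root, _dirs, filenames in os.walk(directory):
        for filename in filenames:
            paths.append(os.path.join(root, filename))
    return paths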
npcsh-1.0.21/npcsh.egg-info/SOURCES.txt

@@ -1,24 +0,0 @@
-LICENSE
-README.md
-setup.py
-npcsh/__init__.py
-npcsh/_state.py
-npcsh/alicanto.py
-npcsh/corca.py
-npcsh/guac.py
-npcsh/mcp_helpers.py
-npcsh/mcp_server.py
-npcsh/npc.py
-npcsh/npcsh.py
-npcsh/plonk.py
-npcsh/pti.py
-npcsh/routes.py
-npcsh/spool.py
-npcsh/wander.py
-npcsh/yap.py
-npcsh.egg-info/PKG-INFO
-npcsh.egg-info/SOURCES.txt
-npcsh.egg-info/dependency_links.txt
-npcsh.egg-info/entry_points.txt
-npcsh.egg-info/requires.txt
-npcsh.egg-info/top_level.txt
File without changes: the 20 files marked +0 -0 in the list above.