npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- npcsh/_state.py +942 -0
- npcsh/alicanto.py +1074 -0
- npcsh/guac.py +785 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_npcsh.py +822 -0
- npcsh/mcp_server.py +184 -0
- npcsh/npc.py +218 -0
- npcsh/npcsh.py +1161 -0
- npcsh/plonk.py +387 -269
- npcsh/pti.py +234 -0
- npcsh/routes.py +958 -0
- npcsh/spool.py +315 -0
- npcsh/wander.py +550 -0
- npcsh/yap.py +573 -0
- npcsh-1.0.0.dist-info/METADATA +596 -0
- npcsh-1.0.0.dist-info/RECORD +21 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
- npcsh-1.0.0.dist-info/entry_points.txt +9 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
- npcsh/audio.py +0 -210
- npcsh/cli.py +0 -545
- npcsh/command_history.py +0 -566
- npcsh/conversation.py +0 -291
- npcsh/data_models.py +0 -46
- npcsh/dataframes.py +0 -163
- npcsh/embeddings.py +0 -168
- npcsh/helpers.py +0 -641
- npcsh/image.py +0 -298
- npcsh/image_gen.py +0 -79
- npcsh/knowledge_graph.py +0 -1006
- npcsh/llm_funcs.py +0 -2027
- npcsh/load_data.py +0 -83
- npcsh/main.py +0 -5
- npcsh/model_runner.py +0 -189
- npcsh/npc_compiler.py +0 -2870
- npcsh/npc_sysenv.py +0 -383
- npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
- npcsh/npc_team/corca.npc +0 -13
- npcsh/npc_team/foreman.npc +0 -7
- npcsh/npc_team/npcsh.ctx +0 -11
- npcsh/npc_team/sibiji.npc +0 -4
- npcsh/npc_team/templates/analytics/celona.npc +0 -0
- npcsh/npc_team/templates/hr_support/raone.npc +0 -0
- npcsh/npc_team/templates/humanities/eriane.npc +0 -4
- npcsh/npc_team/templates/it_support/lineru.npc +0 -0
- npcsh/npc_team/templates/marketing/slean.npc +0 -4
- npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
- npcsh/npc_team/templates/sales/turnic.npc +0 -4
- npcsh/npc_team/templates/software/welxor.npc +0 -0
- npcsh/npc_team/tools/bash_executer.tool +0 -32
- npcsh/npc_team/tools/calculator.tool +0 -8
- npcsh/npc_team/tools/code_executor.tool +0 -16
- npcsh/npc_team/tools/generic_search.tool +0 -27
- npcsh/npc_team/tools/image_generation.tool +0 -25
- npcsh/npc_team/tools/local_search.tool +0 -149
- npcsh/npc_team/tools/npcsh_executor.tool +0 -9
- npcsh/npc_team/tools/screen_cap.tool +0 -27
- npcsh/npc_team/tools/sql_executor.tool +0 -26
- npcsh/response.py +0 -623
- npcsh/search.py +0 -248
- npcsh/serve.py +0 -1460
- npcsh/shell.py +0 -538
- npcsh/shell_helpers.py +0 -3529
- npcsh/stream.py +0 -700
- npcsh/video.py +0 -49
- npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
- npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
- npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
- npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
- npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
- npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
- npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
- npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
- npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
- npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
- npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
- npcsh-0.3.31.dist-info/METADATA +0 -1853
- npcsh-0.3.31.dist-info/RECORD +0 -76
- npcsh-0.3.31.dist-info/entry_points.txt +0 -3
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/video.py
DELETED
@@ -1,49 +0,0 @@
-# video.py
-
-def process_video(file_path, table_name):
-    embeddings = []
-    texts = []
-    try:
-        video = cv2.VideoCapture(file_path)
-        fps = video.get(cv2.CAP_PROP_FPS)
-        frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
-
-        for i in range(frame_count):
-            ret, frame = video.read()
-            if not ret:
-                break
-
-            # Process every nth frame (adjust n as needed for performance)
-            n = 10  # Process every 10th frame
-            if i % n == 0:
-                # Image Embeddings
-                _, buffer = cv2.imencode(".jpg", frame)  # Encode frame as JPG
-                base64_image = base64.b64encode(buffer).decode("utf-8")
-                image_info = {
-                    "filename": f"frame_{i}.jpg",
-                    "file_path": f"data:image/jpeg;base64,{base64_image}",
-                }  # Use data URL for OpenAI
-                image_embedding_response = get_llm_response(
-                    "Describe this image.",
-                    image=image_info,
-                    model="gpt-4",
-                    provider="openai",
-                )  # Replace with your image embedding model
-                if (
-                    isinstance(image_embedding_response, dict)
-                    and "error" in image_embedding_response
-                ):
-                    print(
-                        f"Error generating image embedding: {image_embedding_response['error']}"
-                    )
-                else:
-                    # Assuming your image embedding model returns a textual description
-                    embeddings.append(image_embedding_response)
-                    texts.append(f"Frame {i}: {image_embedding_response}")
-
-        video.release()
-        return embeddings, texts
-
-    except Exception as e:
-        print(f"Error processing video: {e}")
-        return [], []  # Return empty lists in case of error
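As deleted, process_video references cv2, base64, and get_llm_response without importing them. A plausible header sketch, assuming get_llm_response lived in npcsh's llm_funcs module (which 0.3.31 shipped and 1.0.0 also removes; the exact import path is not shown in this diff):

```python
import base64

import cv2  # OpenCV: VideoCapture, CAP_PROP_* constants, imencode

# Assumption: get_llm_response came from npcsh.llm_funcs; the diff does not
# show where the deleted module actually imported it from.
from npcsh.llm_funcs import get_llm_response
```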
@@ -1,32 +0,0 @@
-tool_name: bash_executor
-description: Execute bash queries.
-inputs:
-  - bash_command
-  - user_request
-steps:
-  - engine: python
-    code: |
-      import subprocess
-      import os
-      cmd = '{{bash_command}}'  # Properly quote the command input
-      def run_command(cmd):
-          process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-          stdout, stderr = process.communicate()
-          if stderr:
-              print(f"Error: {stderr.decode('utf-8')}")
-              return stderr
-          return stdout
-      result = run_command(cmd)
-      output = result.decode('utf-8')
-
-  - engine: natural
-    code: |
-
-      Here is the result of the bash command:
-      ```
-      {{ output }}
-      ```
-      This was the original user request: {{ user_request }}
-
-      Please provide a response accordingly.
-
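The deleted bash_executor step shells out via Popen and decodes bytes by hand. A minimal standalone sketch of the same behavior with subprocess.run, offered as an equivalent pattern rather than what npcsh 1.0.0 actually does:

```python
import subprocess


def run_command(cmd: str) -> str:
    """Run a shell command; return stdout, or stderr if anything was written
    to it, mirroring the deleted tool step."""
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if proc.stderr:
        print(f"Error: {proc.stderr}")
        return proc.stderr
    return proc.stdout


print(run_command("echo hello"))
```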
File without changes
@@ -1,16 +0,0 @@
-tool_name: code_executor
-description: Execute scripts with a specified language. Choose from python, bash, R, or javascript. Set the ultimate result as the "output" variable. It must be a string. Do not add unnecessary print statements.
-inputs:
-  - code
-  - language
-steps:
-  - engine: '{{ language }}'
-    code: |
-      {{code}}
-  - engine: natural
-    code: |
-      Here is the result of the code execution that an agent ran.
-      ```
-      {{ output }}
-      ```
-      Please provide a response accordingly.
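The code_executor tool above delegates execution to whatever engine the language field names. A hypothetical sketch of such a dispatch; the interpreter map and the execute helper are illustrative assumptions, not npcsh's implementation:

```python
import subprocess

# Hypothetical interpreter map; npcsh's real engine dispatch is not shown here.
INTERPRETERS = {
    "python": ["python3", "-c"],
    "bash": ["bash", "-c"],
    "R": ["Rscript", "-e"],
    "javascript": ["node", "-e"],
}


def execute(code: str, language: str) -> str:
    """Run a snippet under the named interpreter and return stdout as a string."""
    cmd = INTERPRETERS[language] + [code]
    return subprocess.run(cmd, capture_output=True, text=True).stdout


print(execute("print(1 + 1)", "python"))  # prints: 2
```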
@@ -1,13 +0,0 @@
-name: corca
-primary_directive: |
-  You are corca, a distinguished member of the NPC team.
-  Your expertise is in the area of software development and
-  you have a knack for thinking through problems carefully.
-  You favor solutions that prioritize simplicity and clarity and
-  ought to always consider how some suggestion may increase rather than reduce tech debt
-  unnecessarily. Now, the key is in this last term, "unnecessarily".
-  You must distinguish carefully and when in doubt, opt to ask for further
-  information or clarification with concrete clear options that make it
-  easy for a user to choose.
-model: gpt-4o-mini
-provider: openai
@@ -1,4 +0,0 @@
-name: eriane
-primary_directive: you are an expert in the humanities and you must draw from your vast knowledge of history, literature, art, and philosophy to aid users in their requests, pulling real, useful examples that help users better understand results.
-model: gpt-4o-mini
-provider: openai
@@ -1,7 +0,0 @@
-name: foreman
-primary_directive: You are the foreman of an NPC team. It is your duty
-  to delegate tasks to your team members or to other specialized teams
-  in order to complete the project. You are responsible for the
-  completion of the project and the safety of your team members.
-model: gpt-4o-mini
-provider: openai
@@ -1,27 +0,0 @@
-tool_name: "internet_search"
-description: Searches the web for information based on a query in order to verify timely details (e.g. current events) or to corroborate information in uncertain situations. Should mainly be used only when users specifically request a search; otherwise an LLM's basic knowledge should be sufficient.
-inputs:
-  - query
-  - provider: ''
-steps:
-  - engine: "python"
-    code: |
-      from npcsh.search import search_web
-      from npcsh.npc_sysenv import NPCSH_SEARCH_PROVIDER
-      query = "{{ query }}"
-      provider = '{{ provider }}'
-      if provider.strip() != '':
-          results = search_web(query, num_results=5, provider = provider)
-      else:
-          results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
-
-      print('QUERY in tool', query)
-      results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
-      print('RESULTS in tool', results)
-  - engine: "natural"
-    code: |
-      Using the following information extracted from the web:
-
-      {{ results }}
-
-      Answer the user's question: {{ query }}
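Note that the deleted step calls search_web a second time with the default provider, overwriting the provider-aware result it computed just above. A sketch of the selection logic without the overwrite, reusing the exact imports the deleted tool used:

```python
from npcsh.search import search_web
from npcsh.npc_sysenv import NPCSH_SEARCH_PROVIDER


def run_search(query: str, provider: str = "") -> list:
    """Fall back to the configured provider only when none is given."""
    chosen = provider.strip() or NPCSH_SEARCH_PROVIDER
    return search_web(query, num_results=5, provider=chosen)
```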
@@ -1,25 +0,0 @@
-tool_name: "image_generation_tool"
-description: |
-  Generates images based on a text prompt.
-inputs:
-  - prompt
-  - model: 'runwayml/stable-diffusion-v1-5'
-  - provider: 'diffusers'
-
-steps:
-  - engine: "python"
-    code: |
-      image_prompt = '{{prompt}}'.strip()
-
-      # Generate the image
-      filename = generate_image(
-          image_prompt,
-          npc=npc,
-          model='{{model}}',  # You can adjust the model as needed
-          provider='{{provider}}'
-      )
-      if filename:
-          image_generated = True
-      else:
-          image_generated = False
-

File without changes
@@ -1,149 +0,0 @@
-tool_name: local_search
-description: |
-  Searches files in current and downstream directories to find items related to the user's query using fuzzy matching.
-  Returns only relevant snippets (10 lines around matches) to avoid including too much irrelevant content.
-  Intended for fuzzy searches, not for understanding file sizes.
-inputs:
-  - query
-  - summarize: false # Optional - set to true to summarize the results
-  - file_filter: 'none' # Optional - can be filename patterns or folder names
-  - depth: 2 # Optional - search depth for nested directories
-  - fuzzy_threshold: 70 # Optional - minimum fuzzy match score (0-100)
-steps:
-  - engine: python
-    code: |
-      # Search parameters are directly available
-      query = "{{ query }}"
-      file_filter = "{{ file_filter | default('None') }}"
-      if isinstance(file_filter, str) and file_filter.lower() == 'none':
-          file_filter = None
-      max_depth = {{ depth | default(2) }}
-      fuzzy_threshold = {{ fuzzy_threshold | default(70) }}
-
-      import os
-      import fnmatch
-      from pathlib import Path
-      from thefuzz import fuzz  # Fuzzy string matching library
-
-      def find_files(file_filter=None, max_depth=2):
-          default_extensions = ['.py', '.txt', '.md',
-                                '.json', '.yml', '.yaml',
-                                '.log', '.csv', '.html',
-                                '.js', '.css']
-          matches = []
-          root_path = Path('.').resolve()  # Resolve to absolute path
-
-          # First, check files in the current directory
-          for path in root_path.iterdir():
-              if path.is_file():
-                  # Skip hidden files
-                  if path.name.startswith('.'):
-                      continue
-
-                  # If no filter specified, include files with default extensions
-                  if file_filter is None:
-                      if path.suffix in default_extensions:
-                          matches.append(str(path))
-                  else:
-                      # If filter specified, check if file matches the filter
-                      filters = [file_filter] if isinstance(file_filter, str) else file_filter
-                      for f in filters:
-                          if (fnmatch.fnmatch(path.name, f) or
-                              fnmatch.fnmatch(str(path), f'*{f}*')):
-                              matches.append(str(path))
-                              break
-
-          # Then, check subdirectories with depth control
-          for path in root_path.rglob('*'):
-              # Skip hidden folders and common directories to ignore
-              if '/.' in str(path) or '__pycache__' in str(path) or '.git' in str(path) or 'node_modules' in str(path) or 'venv' in str(path):
-                  continue
-
-              # Skip if we've gone too deep
-              relative_depth = len(path.relative_to(root_path).parts)
-              if relative_depth > max_depth:
-                  continue
-
-              if path.is_file():
-                  # If no filter specified, include files with default extensions
-                  if file_filter is None:
-                      if path.suffix in default_extensions:
-                          matches.append(str(path))
-                  else:
-                      # If filter specified, check if file matches the filter
-                      filters = [file_filter] if isinstance(file_filter, str) else file_filter
-                      for f in filters:
-                          if (fnmatch.fnmatch(path.name, f) or
-                              fnmatch.fnmatch(str(path), f'*{f}*')):
-                              matches.append(str(path))
-                              break
-
-          return matches
-
-      # Find and load files
-      files = find_files(file_filter, max_depth)
-
-      # Process documents
-      relevant_chunks = []
-      for file_path in files:
-          with open(file_path, 'r', encoding='utf-8') as f:
-              lines = f.readlines()  # Read file as lines
-          if lines:
-              # Join lines into a single string for fuzzy matching
-              content = ''.join(lines)
-              match_score = fuzz.partial_ratio(query.lower(), content.lower())
-              if match_score >= fuzzy_threshold:
-                  # Find the best matching line
-                  best_line_index = -1
-                  best_line_score = 0
-                  for i, line in enumerate(lines):
-                      line_score = fuzz.partial_ratio(query.lower(), line.lower())
-                      if line_score > best_line_score:
-                          best_line_score = line_score
-                          best_line_index = i
-
-                  # Extract 10 lines around the best matching line
-                  if best_line_index != -1:
-                      start = max(0, best_line_index - 5)  # 5 lines before
-                      end = min(len(lines), best_line_index + 6)  # 5 lines after
-                      snippet = ''.join(lines[start:end])
-                      relevant_chunks.append({
-                          'path': file_path,
-                          'snippet': snippet,
-                          'ext': Path(file_path).suffix.lower(),
-                          'score': match_score
-                      })
-
-      # Sort results by match score (highest first)
-      relevant_chunks.sort(key=lambda x: x['score'], reverse=True)
-
-      # Format results
-      if relevant_chunks:
-          context_text = "Here are the most relevant code sections:\n\n"
-          for chunk in relevant_chunks:
-              file_path = chunk['path'].replace('./', '')
-              context_text += f"File: {file_path} (match score: {chunk['score']})\n"
-              context_text += f"```{chunk['ext'][1:] if chunk['ext'] else ''}\n"
-              context_text += f"{chunk['snippet'].strip()}\n"
-              context_text += "```\n\n"
-      else:
-          context_text = "No relevant code sections found.\n"
-
-      output = context_text
-
-  - engine: natural
-    code: |
-      {% if summarize %}
-      You are a helpful coding assistant.
-      Please help with this query:
-
-      `{{ query }}`
-
-      The user is attempting to carry out a local search. This search returned the following results:
-
-      `{{ results }}`
-
-      Please analyze the code sections above and provide a clear, helpful response that directly addresses the query.
-      If you reference specific files or code sections in your response, indicate which file they came from.
-      Make sure to explain your reasoning and how the provided code relates to the query.
-      {% endif %}
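The ranking in the deleted local_search tool hinges on thefuzz's partial_ratio, which scores the best-aligned substring match from 0 to 100. A tiny standalone demonstration (requires the thefuzz package):

```python
from thefuzz import fuzz

query = "process video"
lines = [
    "def process_video(file_path, table_name):",
    "    video.release()",
    "    return embeddings, texts",
]
# partial_ratio scores the best-aligned substring match from 0 to 100;
# local_search kept files and lines whose score cleared fuzzy_threshold.
scores = [(fuzz.partial_ratio(query.lower(), line.lower()), line) for line in lines]
print(max(scores))  # highest-scoring line, as local_search would rank it
```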
File without changes
@@ -1,11 +0,0 @@
-context: |
-  The npcsh NPC team is devoted to providing a safe and helpful
-  environment for users where they can work and be as successful as possible.
-  npcsh is a command-line tool that makes it easy for users to harness
-  the power of LLMs from a command line shell.
-databases:
-  - ~/npcsh_history.db
-mcp_servers:
-  - /path/to/mcp/server.py
-  - @npm for server
-

File without changes
@@ -1,27 +0,0 @@
-tool_name: "screen_capture_analysis_tool"
-description: Captures the whole screen and sends the image for analysis
-inputs:
-  - "prompt"
-steps:
-  - engine: "python"
-    code: |
-      # Capture the screen
-      import pyautogui
-      import datetime
-      import os
-      from PIL import Image
-      import time
-      from npcsh.image import analyze_image_base, capture_screenshot
-
-      out = capture_screenshot(npc = npc, full = True)
-
-      llm_response = analyze_image_base( '{{prompt}}' + "\n\nAttached is a screenshot of my screen currently. Please use this to evaluate the situation. If the user asked for you to explain what's on their screen or something similar, they are referring to the details contained within the attached image. You do not need to actually view their screen. You do not need to mention that you cannot view or interpret images directly. You only need to answer the user's request based on the attached screenshot!",
-                                        out['file_path'],
-                                        out['filename'],
-                                        npc=npc,
-                                        **out['model_kwargs'])
-      # To this:
-      if isinstance(llm_response, dict):
-          llm_response = llm_response.get('response', 'No response from image analysis')
-      else:
-          llm_response = 'No response from image analysis'
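capture_screenshot itself lived in the also-deleted npcsh/image.py and is not shown in this diff. A minimal sketch of a full-screen capture returning the file_path/filename shape the tool consumed; the save directory and helper body are assumptions, and the real helper also returned model_kwargs, which this sketch omits:

```python
import datetime
import os

import pyautogui


def capture_screenshot(directory: str = "~/.npcsh/screenshots") -> dict:
    """Grab the full screen and return the dict shape the deleted tool used.
    Directory default and return keys are illustrative assumptions."""
    directory = os.path.expanduser(directory)
    os.makedirs(directory, exist_ok=True)
    filename = f"screenshot_{datetime.datetime.now():%Y%m%d_%H%M%S}.png"
    path = os.path.join(directory, filename)
    pyautogui.screenshot().save(path)  # captures the whole screen as a PNG
    return {"file_path": path, "filename": filename}
```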
@@ -1,4 +0,0 @@
-name: slean
-primary_directive: Assist with marketing issues, challenges and questions. When responding, be careful to always think through the problems as if you are a marketing wiz who has launched and hyper-scaled companies through effective marketing by always thinking outside the box.
-model: gpt-4o-mini
-provider: openai
@@ -1,26 +0,0 @@
-tool_name: data_pull
-description: Execute queries on the ~/npcsh_history.db to pull data. The database contains only information about conversations and other user-provided data. It does not store any information about individual files.
-inputs:
-  - sql_query
-  - interpret: false # Note that this is not a boolean, but a string
-
-steps:
-  - engine: python
-    code: |
-      import pandas as pd
-      try:
-          df = pd.read_sql_query('{{sql_query}}', npc.db_conn)
-      except pandas.errors.DatabaseError as e:
-          df = pd.DataFrame({'Error': [str(e)]})
-
-
-      output = df.to_string()
-
-  - engine: natural
-    code: |
-      {% if interpret %}
-      Here is the result of the SQL query:
-      ```
-      {{ df.to_string() }} # Convert DataFrame to string for a nicer display
-      ```
-      {% endif %}
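The except clause above names pandas.errors.DatabaseError even though the module was imported only as pd, so the handler itself would raise NameError when triggered. A corrected sketch of the same step; db_conn stands in for the npc.db_conn the tool assumed in scope:

```python
import pandas as pd


def data_pull(sql_query: str, db_conn) -> str:
    """Run a query against npcsh_history.db and return a printable table."""
    try:
        df = pd.read_sql_query(sql_query, db_conn)
    except pd.errors.DatabaseError as e:  # deleted code wrote pandas.errors.*
        df = pd.DataFrame({"Error": [str(e)]})
    return df.to_string()
```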
@@ -1,181 +0,0 @@
-import pandas as pd
-from sqlalchemy import create_engine
-import os
-
-# Sample market events data
-market_events_data = {
-    "datetime": [
-        "2023-10-15 09:00:00",
-        "2023-10-16 10:30:00",
-        "2023-10-17 11:45:00",
-        "2023-10-18 13:15:00",
-        "2023-10-19 14:30:00",
-    ],
-    "headline": [
-        "Stock Market Rallies Amid Positive Economic Data",
-        "Tech Giant Announces New Product Line",
-        "Federal Reserve Hints at Interest Rate Pause",
-        "Oil Prices Surge Following Supply Concerns",
-        "Retail Sector Reports Record Q3 Earnings",
-    ],
-}
-
-# Create a DataFrame
-market_events_df = pd.DataFrame(market_events_data)
-
-# Define database path relative to user's home directory
-db_path = os.path.expanduser("~/npcsh_history.db")
-
-# Create a connection to the SQLite database
-engine = create_engine(f"sqlite:///{db_path}")
-with engine.connect() as connection:
-    # Write the data to a new table 'market_events', replacing existing data
-    market_events_df.to_sql(
-        "market_events", con=connection, if_exists="replace", index=False
-    )
-
-print("Market events have been added to the database.")
-
-email_data = {
-    "datetime": [
-        "2023-10-10 10:00:00",
-        "2023-10-11 11:00:00",
-        "2023-10-12 12:00:00",
-        "2023-10-13 13:00:00",
-        "2023-10-14 14:00:00",
-    ],
-    "subject": [
-        "Meeting Reminder",
-        "Project Update",
-        "Invoice Attached",
-        "Weekly Report",
-        "Holiday Notice",
-    ],
-    "sender": [
-        "alice@example.com",
-        "bob@example.com",
-        "carol@example.com",
-        "dave@example.com",
-        "eve@example.com",
-    ],
-    "recipient": [
-        "bob@example.com",
-        "carol@example.com",
-        "dave@example.com",
-        "eve@example.com",
-        "alice@example.com",
-    ],
-    "body": [
-        "Don't forget the meeting tomorrow at 10 AM.",
-        "The project is progressing well, see attached update.",
-        "Please find your invoice attached.",
-        "Here is the weekly report.",
-        "The office will be closed on holidays, have a great time!",
-    ],
-}
-
-# Create a DataFrame
-emails_df = pd.DataFrame(email_data)
-
-# Define database path relative to user's home directory
-db_path = os.path.expanduser("~/npcsh_history.db")
-
-# Create a connection to the SQLite database
-engine = create_engine(f"sqlite:///{db_path}")
-with engine.connect() as connection:
-    # Write the data to a new table 'emails', replacing existing data
-    emails_df.to_sql("emails", con=connection, if_exists="replace", index=False)
-
-print("Sample emails have been added to the database.")
-
-
-from npcsh.npc_compiler import PipelineRunner
-import os
-
-pipeline_runner = PipelineRunner(
-    pipeline_file="morning_routine.pipe",
-    npc_root_dir=os.path.abspath("."),  # Use absolute path to parent directory
-    db_path="~/npcsh_history.db",
-)
-pipeline_runner.execute_pipeline()
-
-
-import pandas as pd
-from sqlalchemy import create_engine
-import os
-
-# Sample data generation for news articles
-news_articles_data = {
-    "news_article_id": list(range(1, 21)),
-    "headline": [
-        "Economy sees unexpected growth in Q4",
-        "New tech gadget takes the world by storm",
-        "Political debate heats up over new policy",
-        "Health concerns rise amid new disease outbreak",
-        "Sports team secures victory in last minute",
-        "New economic policy introduced by government",
-        "Breakthrough in AI technology announced",
-        "Political leader delivers speech on reforms",
-        "Healthcare systems pushed to limits",
-        "Celebrated athlete breaks world record",
-        "Controversial economic measures spark debate",
-        "Innovative tech startup gains traction",
-        "Political scandal shakes administration",
-        "Healthcare workers protest for better pay",
-        "Major sports event postponed due to weather",
-        "Trade tensions impact global economy",
-        "Tech company accused of data breach",
-        "Election results lead to political upheaval",
-        "Vaccine developments offer hope amid pandemic",
-        "Sports league announces return to action",
-    ],
-    "content": ["Article content here..." for _ in range(20)],
-    "publication_date": pd.date_range(start="1/1/2023", periods=20, freq="D"),
-}
-
-# Create a DataFrame
-news_df = pd.DataFrame(news_articles_data)
-
-# Define the database path
-db_path = os.path.expanduser("~/npcsh_history.db")
-
-# Create a connection to the SQLite database
-engine = create_engine(f"sqlite:///{db_path}")
-with engine.connect() as connection:
-    # Write the data to a new table 'news_articles', replacing existing data
-    news_df.to_sql("news_articles", con=connection, if_exists="replace", index=False)
-
-print("News articles have been added to the database.")
-
-from npcsh.npc_compiler import PipelineRunner
-import os
-
-runner = PipelineRunner(
-    "./news_analysis.pipe",
-    db_path=os.path.expanduser("~/npcsh_history.db"),
-    npc_root_dir=os.path.abspath("."),
-)
-results = runner.execute_pipeline()
-
-print("\nResults:")
-print("\nClassifications (processed row by row):")
-print(results["classify_news"])
-print("\nAnalysis (processed in batch):")
-print(results["analyze_news"])
-
-
-from npcsh.npc_compiler import PipelineRunner
-import os
-
-runner = PipelineRunner(
-    "./news_analysis_mixa.pipe",
-    db_path=os.path.expanduser("~/npcsh_history.db"),
-    npc_root_dir=os.path.abspath("."),
-)
-results = runner.execute_pipeline()
-
-print("\nResults:")
-print("\nClassifications (processed row by row):")
-print(results["classify_news"])
-print("\nAnalysis (processed in batch):")
-print(results["analyze_news"])
@@ -1,4 +0,0 @@
-name: turnic
-primary_directive: Assist with sales challenges and questions. When responding, keep in mind that sales professionals tend to be interested in achieving results quickly, so you must ensure that you opt for simpler and more straightforward solutions and explanations without much fanfare.
-model: gpt-4o-mini
-provider: openai

File without changes