npcsh 0.3.32__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +942 -0
- npcsh/alicanto.py +1074 -0
- npcsh/guac.py +785 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_npcsh.py +822 -0
- npcsh/mcp_server.py +184 -0
- npcsh/npc.py +218 -0
- npcsh/npcsh.py +1161 -0
- npcsh/plonk.py +387 -269
- npcsh/pti.py +234 -0
- npcsh/routes.py +958 -0
- npcsh/spool.py +315 -0
- npcsh/wander.py +550 -0
- npcsh/yap.py +573 -0
- npcsh-1.0.1.dist-info/METADATA +596 -0
- npcsh-1.0.1.dist-info/RECORD +21 -0
- {npcsh-0.3.32.dist-info → npcsh-1.0.1.dist-info}/WHEEL +1 -1
- npcsh-1.0.1.dist-info/entry_points.txt +9 -0
- {npcsh-0.3.32.dist-info → npcsh-1.0.1.dist-info}/licenses/LICENSE +1 -1
- npcsh/audio.py +0 -569
- npcsh/audio_gen.py +0 -1
- npcsh/cli.py +0 -543
- npcsh/command_history.py +0 -566
- npcsh/conversation.py +0 -54
- npcsh/data_models.py +0 -46
- npcsh/dataframes.py +0 -171
- npcsh/embeddings.py +0 -168
- npcsh/helpers.py +0 -646
- npcsh/image.py +0 -298
- npcsh/image_gen.py +0 -79
- npcsh/knowledge_graph.py +0 -1006
- npcsh/llm_funcs.py +0 -2195
- npcsh/load_data.py +0 -83
- npcsh/main.py +0 -5
- npcsh/model_runner.py +0 -189
- npcsh/npc_compiler.py +0 -2879
- npcsh/npc_sysenv.py +0 -388
- npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
- npcsh/npc_team/corca.npc +0 -13
- npcsh/npc_team/foreman.npc +0 -7
- npcsh/npc_team/npcsh.ctx +0 -11
- npcsh/npc_team/sibiji.npc +0 -4
- npcsh/npc_team/templates/analytics/celona.npc +0 -0
- npcsh/npc_team/templates/hr_support/raone.npc +0 -0
- npcsh/npc_team/templates/humanities/eriane.npc +0 -4
- npcsh/npc_team/templates/it_support/lineru.npc +0 -0
- npcsh/npc_team/templates/marketing/slean.npc +0 -4
- npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
- npcsh/npc_team/templates/sales/turnic.npc +0 -4
- npcsh/npc_team/templates/software/welxor.npc +0 -0
- npcsh/npc_team/tools/bash_executer.tool +0 -32
- npcsh/npc_team/tools/calculator.tool +0 -8
- npcsh/npc_team/tools/code_executor.tool +0 -16
- npcsh/npc_team/tools/generic_search.tool +0 -27
- npcsh/npc_team/tools/image_generation.tool +0 -25
- npcsh/npc_team/tools/local_search.tool +0 -149
- npcsh/npc_team/tools/npcsh_executor.tool +0 -9
- npcsh/npc_team/tools/screen_cap.tool +0 -27
- npcsh/npc_team/tools/sql_executor.tool +0 -26
- npcsh/response.py +0 -272
- npcsh/search.py +0 -252
- npcsh/serve.py +0 -1467
- npcsh/shell.py +0 -524
- npcsh/shell_helpers.py +0 -3919
- npcsh/stream.py +0 -233
- npcsh/video.py +0 -52
- npcsh/video_gen.py +0 -69
- npcsh-0.3.32.data/data/npcsh/npc_team/bash_executer.tool +0 -32
- npcsh-0.3.32.data/data/npcsh/npc_team/calculator.tool +0 -8
- npcsh-0.3.32.data/data/npcsh/npc_team/celona.npc +0 -0
- npcsh-0.3.32.data/data/npcsh/npc_team/code_executor.tool +0 -16
- npcsh-0.3.32.data/data/npcsh/npc_team/corca.npc +0 -13
- npcsh-0.3.32.data/data/npcsh/npc_team/eriane.npc +0 -4
- npcsh-0.3.32.data/data/npcsh/npc_team/foreman.npc +0 -7
- npcsh-0.3.32.data/data/npcsh/npc_team/generic_search.tool +0 -27
- npcsh-0.3.32.data/data/npcsh/npc_team/image_generation.tool +0 -25
- npcsh-0.3.32.data/data/npcsh/npc_team/lineru.npc +0 -0
- npcsh-0.3.32.data/data/npcsh/npc_team/local_search.tool +0 -149
- npcsh-0.3.32.data/data/npcsh/npc_team/maurawa.npc +0 -0
- npcsh-0.3.32.data/data/npcsh/npc_team/npcsh.ctx +0 -11
- npcsh-0.3.32.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
- npcsh-0.3.32.data/data/npcsh/npc_team/raone.npc +0 -0
- npcsh-0.3.32.data/data/npcsh/npc_team/screen_cap.tool +0 -27
- npcsh-0.3.32.data/data/npcsh/npc_team/sibiji.npc +0 -4
- npcsh-0.3.32.data/data/npcsh/npc_team/slean.npc +0 -4
- npcsh-0.3.32.data/data/npcsh/npc_team/sql_executor.tool +0 -26
- npcsh-0.3.32.data/data/npcsh/npc_team/test_pipeline.py +0 -181
- npcsh-0.3.32.data/data/npcsh/npc_team/turnic.npc +0 -4
- npcsh-0.3.32.data/data/npcsh/npc_team/welxor.npc +0 -0
- npcsh-0.3.32.dist-info/METADATA +0 -779
- npcsh-0.3.32.dist-info/RECORD +0 -78
- npcsh-0.3.32.dist-info/entry_points.txt +0 -3
- {npcsh-0.3.32.dist-info → npcsh-1.0.1.dist-info}/top_level.txt +0 -0
npcsh/stream.py
DELETED
@@ -1,233 +0,0 @@

########
########
########
########
######## STREAM
########
########

from typing import Any, Dict, Generator, List, Union
import os
import base64
import json
import requests
from PIL import Image

from pydantic import BaseModel
from npcsh.npc_sysenv import (
    get_system_message,
    compress_image,
    available_chat_models,
    available_reasoning_models,
)

from litellm import completion


def get_litellm_stream(
    messages: List[Dict[str, str]],
    model: str,
    provider: str = None,
    npc: Any = None,
    tools: list = None,
    images: List[Dict[str, str]] = None,
    api_key: str = None,
    api_url: str = None,
    tool_choice: Dict = None,
    **kwargs,
) -> Generator:
    """Streams responses via litellm, supporting images and tools, yielding raw chunks."""
    system_message = get_system_message(npc) if npc else "You are a helpful assistant."

    if not messages:
        messages = [{"role": "system", "content": system_message}]

    # Add images if provided
    if images:
        last_user_message = (
            messages[-1]
            if messages and messages[-1]["role"] == "user"
            else {"role": "user", "content": []}
        )

        if isinstance(last_user_message["content"], str):
            last_user_message["content"] = [
                {"type": "text", "text": last_user_message["content"]}
            ]

        for image in images:
            with open(image["file_path"], "rb") as image_file:
                image_data = base64.b64encode(image_file.read()).decode("utf-8")
                last_user_message["content"].append(
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                    }
                )

        if last_user_message not in messages:
            messages.append(last_user_message)

    # Prepare API call parameters
    if provider is not None:
        model_str = f"{provider}/{model}"
    else:
        model_str = model

    api_params = {
        "model": model_str,
        "messages": messages,
        "stream": True,
    }

    if api_key is not None and provider == "openai-like":
        api_params["api_key"] = api_key

    if api_url is not None and provider == "openai-like":
        api_params["api_url"] = api_url

    # Add tools if provided
    if tools:
        api_params["tools"] = tools

    # Add tool choice if specified
    if tool_choice:
        api_params["tool_choice"] = tool_choice

    # Pass through only the kwargs that litellm's completion accepts
    if kwargs:
        for key, value in kwargs.items():
            if key in [
                "stream",
                "stop",
                "temperature",
                "top_p",
                "max_tokens",
                "max_completion_tokens",
                "tools",
                "tool_choice",
                "extra_headers",
                "parallel_tool_calls",
                "response_format",
                "user",
            ]:
                api_params[key] = value

    stream = completion(**api_params)

    for chunk in stream:
        yield chunk


def process_litellm_tool_stream(stream, tool_map: Dict[str, callable]) -> List[Dict]:
    """
    Process the litellm tool-use stream, accumulating partial tool calls
    across chunks and then executing each completed call.
    """
    final_tool_calls = {}
    tool_results = []

    for chunk in stream:
        delta = chunk.choices[0].delta

        # Process tool calls if present
        if delta.tool_calls:
            for tool_call in delta.tool_calls:
                index = tool_call.index

                # Initialize tool call if new
                if index not in final_tool_calls:
                    final_tool_calls[index] = {
                        "id": tool_call.id,
                        "name": tool_call.function.name if tool_call.function else None,
                        "arguments": (
                            tool_call.function.arguments if tool_call.function else ""
                        ),
                    }
                # Append arguments if continuing
                elif tool_call.function and tool_call.function.arguments:
                    final_tool_calls[index]["arguments"] += tool_call.function.arguments

    # Process all complete tool calls
    for tool_call in final_tool_calls.values():
        try:
            # Parse the arguments
            tool_input = (
                json.loads(tool_call["arguments"])
                if tool_call["arguments"].strip()
                else {}
            )

            # Execute the tool
            tool_func = tool_map.get(tool_call["name"])
            if tool_func:
                result = tool_func(tool_input)
                tool_results.append(
                    {
                        "tool_name": tool_call["name"],
                        "tool_input": tool_input,
                        "tool_result": result,
                    }
                )
            else:
                tool_results.append(
                    {
                        "tool_name": tool_call["name"],
                        "tool_input": tool_input,
                        "error": f"Tool {tool_call['name']} not found",
                    }
                )

        except Exception as e:
            tool_results.append(
                {
                    "tool_name": tool_call["name"],
                    "tool_input": tool_call["arguments"],
                    "error": str(e),
                }
            )

    return tool_results


def generate_tool_schema(
    name: str,
    description: str,
    parameters: Dict[str, Any],
    required: List[str] = None,
) -> Dict[str, Any]:
    """
    Generate an OpenAI-style function/tool schema from common parameters.

    Args:
        name: Name of the function
        description: Description of what the function does
        parameters: Dict of parameter names and their properties
        required: List of required parameter names
    """
    if required is None:
        required = []

    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": parameters,
                "required": required,
                "additionalProperties": False,
            },
            "strict": True,
        },
    }
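For orientation, a minimal sketch of how the three deleted helpers composed. The model name, the tool function, and an API key in the environment are illustrative assumptions, not part of the package:

# Hypothetical usage: stream a completion and execute any tool calls it makes.
from npcsh.stream import (
    generate_tool_schema,
    get_litellm_stream,
    process_litellm_tool_stream,
)

def get_weather(args: dict) -> str:
    # Stand-in for a real tool implementation.
    return f"Sunny in {args.get('city', 'somewhere')}"

schema = generate_tool_schema(
    name="get_weather",
    description="Look up the current weather for a city.",
    parameters={"city": {"type": "string"}},
    required=["city"],
)

stream = get_litellm_stream(
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    model="gpt-4o-mini",  # assumed model; any litellm-supported model would do
    provider="openai",
    tools=[schema],
)
print(process_litellm_tool_stream(stream, {"get_weather": get_weather}))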
npcsh/video.py
DELETED
@@ -1,52 +0,0 @@

# video.py

import base64

import cv2

from npcsh.llm_funcs import get_llm_response


def process_video(file_path, table_name):
    # implement with moondream

    embeddings = []
    texts = []
    try:
        video = cv2.VideoCapture(file_path)
        fps = video.get(cv2.CAP_PROP_FPS)
        frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

        for i in range(frame_count):
            ret, frame = video.read()
            if not ret:
                break

            # Process every nth frame (adjust n as needed for performance)
            n = 10  # Process every 10th frame
            if i % n == 0:
                # Image embeddings
                _, buffer = cv2.imencode(".jpg", frame)  # Encode frame as JPG
                base64_image = base64.b64encode(buffer).decode("utf-8")
                image_info = {
                    "filename": f"frame_{i}.jpg",
                    "file_path": f"data:image/jpeg;base64,{base64_image}",
                }  # Use data URL for OpenAI
                image_embedding_response = get_llm_response(
                    "Describe this image.",
                    image=image_info,
                    model="gpt-4",
                    provider="openai",
                )  # Replace with your image embedding model
                if (
                    isinstance(image_embedding_response, dict)
                    and "error" in image_embedding_response
                ):
                    print(
                        f"Error generating image embedding: {image_embedding_response['error']}"
                    )
                else:
                    # Assuming the model returns a textual description
                    embeddings.append(image_embedding_response)
                    texts.append(f"Frame {i}: {image_embedding_response}")

        video.release()
        return embeddings, texts

    except Exception as e:
        print(f"Error processing video: {e}")
        return [], []  # Return empty lists in case of error
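The core of process_video is its every-nth-frame sampling loop; as a self-contained sketch using only OpenCV (the function name and stride are illustrative):

import cv2

def sample_frames(file_path, stride=10):
    """Yield (index, jpeg_bytes) for every `stride`-th frame of a video."""
    video = cv2.VideoCapture(file_path)
    i = 0
    while True:
        ret, frame = video.read()
        if not ret:
            break
        if i % stride == 0:
            ok, buffer = cv2.imencode(".jpg", frame)
            if ok:
                yield i, buffer.tobytes()
        i += 1
    video.release()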
npcsh/video_gen.py
DELETED
@@ -1,69 +0,0 @@

import os


def generate_video_diffusers(
    prompt,
    model,
    npc=None,
    device="cpu",
    output_path="",
    num_inference_steps=10,
    num_frames=125,
    height=256,
    width=256,
):
    import torch
    from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
    import numpy as np
    import cv2

    # Load pipeline
    pipe = DiffusionPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float32
    ).to(device)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

    output = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        num_frames=num_frames,
        height=height,
        width=width,
    )

    def save_frames_to_video(frames, output_path, fps=8):
        """Handle the specific 5D array format (1, num_frames, H, W, 3) with proper type conversion."""
        # Verify input format
        if not (
            isinstance(frames, np.ndarray)
            and frames.ndim == 5
            and frames.shape[-1] == 3
        ):
            raise ValueError(
                f"Unexpected frame format. Expected 5D RGB array, got {frames.shape}"
            )

        # Remove batch dimension and convert to 0-255 uint8
        frames = (frames[0] * 255).astype(np.uint8)  # Shape: (num_frames, H, W, 3)

        # Get video dimensions
        height, width = frames.shape[1:3]

        # Create video writer
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        if not video_writer.isOpened():
            raise IOError(f"Could not open video writer for {output_path}")

        # Write frames (convert RGB to BGR for OpenCV)
        for frame in frames:
            video_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

        video_writer.release()
        print(f"Successfully saved {frames.shape[0]} frames to {output_path}")

    videos_dir = os.path.expanduser("~/.npcsh/videos/")
    os.makedirs(videos_dir, exist_ok=True)
    if output_path == "":
        output_path = videos_dir + prompt[0:8] + ".mp4"
    save_frames_to_video(output.frames, output_path)
    return output_path
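A hedged usage sketch for the function above (the prompt and frame count are illustrative; note the checkpoint is hardcoded inside the function, so the model argument is effectively unused):

from npcsh.video_gen import generate_video_diffusers

# Even short clips are slow to generate on CPU; keep frame counts small for testing.
path = generate_video_diffusers(
    prompt="a red fox running through snow",
    model="damo-vilab/text-to-video-ms-1.7b",
    num_frames=24,
    num_inference_steps=10,
)
print(f"video written to {path}")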
npcsh-0.3.32.data/data/npcsh/npc_team/bash_executer.tool
DELETED
@@ -1,32 +0,0 @@

tool_name: bash_executor
description: Execute bash queries.
inputs:
  - bash_command
  - user_request
steps:
  - engine: python
    code: |
      import subprocess
      import os
      cmd = '{{bash_command}}'  # Properly quote the command input
      def run_command(cmd):
          process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
          stdout, stderr = process.communicate()
          if stderr:
              print(f"Error: {stderr.decode('utf-8')}")
              return stderr
          return stdout
      result = run_command(cmd)
      output = result.decode('utf-8')

  - engine: natural
    code: |
      Here is the result of the bash command:
      ```
      {{ output }}
      ```
      This was the original user request: {{ user_request }}
      Please provide a response accordingly.
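The {{ ... }} placeholders in tool steps are Jinja-style template slots that the tool runner fills in before the python engine executes the step. A rough sketch of that substitution; the renderer shown here is an assumption about the mechanism, not npcsh's actual implementation:

from jinja2 import Template

# Hypothetical: render a tool step's code with its inputs before execution.
step_code = "import subprocess\ncmd = '{{ bash_command }}'\n"
rendered = Template(step_code).render(bash_command="ls -la")
print(rendered)  # the python engine would then exec() code like this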
npcsh-0.3.32.data/data/npcsh/npc_team/celona.npc
File without changes
npcsh-0.3.32.data/data/npcsh/npc_team/code_executor.tool
DELETED
@@ -1,16 +0,0 @@

tool_name: code_executor
description: Execute scripts with a specified language. Choose from python, bash, R, or javascript. Set the ultimate result as the "output" variable. It must be a string. Do not add unnecessary print statements.
inputs:
  - code
  - language
steps:
  - engine: '{{ language }}'
    code: |
      {{code}}
  - engine: natural
    code: |
      Here is the result of the code execution that an agent ran.
      ```
      {{ output }}
      ```
      Please provide a response accordingly.
npcsh-0.3.32.data/data/npcsh/npc_team/corca.npc
DELETED
@@ -1,13 +0,0 @@

name: corca
primary_directive: |
  You are corca, a distinguished member of the NPC team.
  Your expertise is in the area of software development and
  you have a knack for thinking through problems carefully.
  You favor solutions that prioritize simplicity and clarity, and
  you ought always to consider how a suggestion may increase rather than reduce tech debt
  unnecessarily. Now, the key is in this last term, "unnecessarily".
  You must distinguish carefully, and when in doubt, opt to ask for further
  information or clarification with concrete, clear options that make it
  easy for a user to choose.
model: gpt-4o-mini
provider: openai
npcsh-0.3.32.data/data/npcsh/npc_team/eriane.npc
DELETED
@@ -1,4 +0,0 @@

name: eriane
primary_directive: You are an expert in the humanities, and you must draw from your vast knowledge of history, literature, art, and philosophy to aid users in their requests, pulling real, useful examples that help users better understand results.
model: gpt-4o-mini
provider: openai
npcsh-0.3.32.data/data/npcsh/npc_team/foreman.npc
DELETED
@@ -1,7 +0,0 @@

name: foreman
primary_directive: You are the foreman of an NPC team. It is your duty
  to delegate tasks to your team members or to other specialized teams
  in order to complete the project. You are responsible for the
  completion of the project and the safety of your team members.
model: gpt-4o-mini
provider: openai
npcsh-0.3.32.data/data/npcsh/npc_team/generic_search.tool
DELETED
@@ -1,27 +0,0 @@

tool_name: "internet_search"
description: Searches the web for information based on a query in order to verify timely details (e.g. current events) or to corroborate information in uncertain situations. Should mainly be used when users specifically request a search; otherwise an LLM's basic knowledge should be sufficient.
inputs:
  - query
  - provider: ''
steps:
  - engine: "python"
    code: |
      from npcsh.search import search_web
      from npcsh.npc_sysenv import NPCSH_SEARCH_PROVIDER
      query = "{{ query }}"
      provider = '{{ provider }}'
      if provider.strip() != '':
          results = search_web(query, num_results=5, provider=provider)
      else:
          results = search_web(query, num_results=5, provider=NPCSH_SEARCH_PROVIDER)
  - engine: "natural"
    code: |
      Using the following information extracted from the web:

      {{ results }}

      Answer the user's question: {{ query }}
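The python step above reduces to a provider-fallback call; a condensed sketch using the same imports (search_web's signature is taken from the tool body):

from npcsh.search import search_web
from npcsh.npc_sysenv import NPCSH_SEARCH_PROVIDER

def run_search(query, provider=""):
    # Fall back to the configured default when no provider is given.
    chosen = provider.strip() or NPCSH_SEARCH_PROVIDER
    return search_web(query, num_results=5, provider=chosen)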
npcsh-0.3.32.data/data/npcsh/npc_team/image_generation.tool
DELETED
@@ -1,25 +0,0 @@

tool_name: "image_generation_tool"
description: |
  Generates images based on a text prompt.
inputs:
  - prompt
  - model: 'runwayml/stable-diffusion-v1-5'
  - provider: 'diffusers'

steps:
  - engine: "python"
    code: |
      image_prompt = '{{prompt}}'.strip()

      # Generate the image
      filename = generate_image(
          image_prompt,
          npc=npc,
          model='{{model}}',  # You can adjust the model as needed
          provider='{{provider}}'
      )
      if filename:
          image_generated = True
      else:
          image_generated = False
npcsh-0.3.32.data/data/npcsh/npc_team/lineru.npc
File without changes
npcsh-0.3.32.data/data/npcsh/npc_team/local_search.tool
DELETED
@@ -1,149 +0,0 @@

tool_name: local_search
description: |
  Searches files in current and downstream directories to find items related to the user's query using fuzzy matching.
  Returns only relevant snippets (10 lines around matches) to avoid including too much irrelevant content.
  Intended for fuzzy searches, not for understanding file sizes.
inputs:
  - query
  - summarize: false # Optional - set to true to summarize the results
  - file_filter: 'none' # Optional - can be filename patterns or folder names
  - depth: 2 # Optional - search depth for nested directories
  - fuzzy_threshold: 70 # Optional - minimum fuzzy match score (0-100)
steps:
  - engine: python
    code: |
      # Search parameters are directly available
      query = "{{ query }}"
      file_filter = "{{ file_filter | default('None') }}"
      if isinstance(file_filter, str) and file_filter.lower() == 'none':
          file_filter = None
      max_depth = {{ depth | default(2) }}
      fuzzy_threshold = {{ fuzzy_threshold | default(70) }}

      import os
      import fnmatch
      from pathlib import Path
      from thefuzz import fuzz  # Fuzzy string matching library

      def find_files(file_filter=None, max_depth=2):
          default_extensions = ['.py', '.txt', '.md',
                                '.json', '.yml', '.yaml',
                                '.log', '.csv', '.html',
                                '.js', '.css']
          matches = []
          root_path = Path('.').resolve()  # Resolve to absolute path

          # First, check files in the current directory
          for path in root_path.iterdir():
              if path.is_file():
                  # Skip hidden files
                  if path.name.startswith('.'):
                      continue

                  # If no filter specified, include files with default extensions
                  if file_filter is None:
                      if path.suffix in default_extensions:
                          matches.append(str(path))
                  else:
                      # If filter specified, check if file matches the filter
                      filters = [file_filter] if isinstance(file_filter, str) else file_filter
                      for f in filters:
                          if (fnmatch.fnmatch(path.name, f) or
                                  fnmatch.fnmatch(str(path), f'*{f}*')):
                              matches.append(str(path))
                              break

          # Then, check subdirectories with depth control
          for path in root_path.rglob('*'):
              # Skip hidden folders and common directories to ignore
              if '/.' in str(path) or '__pycache__' in str(path) or '.git' in str(path) or 'node_modules' in str(path) or 'venv' in str(path):
                  continue

              # Skip if we've gone too deep
              relative_depth = len(path.relative_to(root_path).parts)
              if relative_depth > max_depth:
                  continue

              if path.is_file():
                  # If no filter specified, include files with default extensions
                  if file_filter is None:
                      if path.suffix in default_extensions:
                          matches.append(str(path))
                  else:
                      # If filter specified, check if file matches the filter
                      filters = [file_filter] if isinstance(file_filter, str) else file_filter
                      for f in filters:
                          if (fnmatch.fnmatch(path.name, f) or
                                  fnmatch.fnmatch(str(path), f'*{f}*')):
                              matches.append(str(path))
                              break

          return matches

      # Find and load files
      files = find_files(file_filter, max_depth)

      # Process documents
      relevant_chunks = []
      for file_path in files:
          with open(file_path, 'r', encoding='utf-8') as f:
              lines = f.readlines()  # Read file as lines
          if lines:
              # Join lines into a single string for fuzzy matching
              content = ''.join(lines)
              match_score = fuzz.partial_ratio(query.lower(), content.lower())
              if match_score >= fuzzy_threshold:
                  # Find the best matching line
                  best_line_index = -1
                  best_line_score = 0
                  for i, line in enumerate(lines):
                      line_score = fuzz.partial_ratio(query.lower(), line.lower())
                      if line_score > best_line_score:
                          best_line_score = line_score
                          best_line_index = i

                  # Extract 10 lines around the best matching line
                  if best_line_index != -1:
                      start = max(0, best_line_index - 5)  # 5 lines before
                      end = min(len(lines), best_line_index + 6)  # 5 lines after
                      snippet = ''.join(lines[start:end])
                      relevant_chunks.append({
                          'path': file_path,
                          'snippet': snippet,
                          'ext': Path(file_path).suffix.lower(),
                          'score': match_score
                      })

      # Sort results by match score (highest first)
      relevant_chunks.sort(key=lambda x: x['score'], reverse=True)

      # Format results
      if relevant_chunks:
          context_text = "Here are the most relevant code sections:\n\n"
          for chunk in relevant_chunks:
              file_path = chunk['path'].replace('./', '')
              context_text += f"File: {file_path} (match score: {chunk['score']})\n"
              context_text += f"```{chunk['ext'][1:] if chunk['ext'] else ''}\n"
              context_text += f"{chunk['snippet'].strip()}\n"
              context_text += "```\n\n"
      else:
          context_text = "No relevant code sections found.\n"

      output = context_text

  - engine: natural
    code: |
      {% if summarize %}
      You are a helpful coding assistant.
      Please help with this query:

      `{{ query }}`

      The user is attempting to carry out a local search. This search returned the following results:

      `{{ output }}`

      Please analyze the code sections above and provide a clear, helpful response that directly addresses the query.
      If you reference specific files or code sections in your response, indicate which file they came from.
      Make sure to explain your reasoning and how the provided code relates to the query.
      {% endif %}
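The snippet-extraction idea in isolation; a sketch mirroring the tool's defaults (10-line window, threshold 70) on top of thefuzz:

from thefuzz import fuzz

def best_snippet(query, lines, window=5, threshold=70):
    """Return roughly 10 lines around the line that best fuzzy-matches query, or None."""
    best_i, best_score = -1, 0
    for i, line in enumerate(lines):
        score = fuzz.partial_ratio(query.lower(), line.lower())
        if score > best_score:
            best_i, best_score = i, score
    if best_i == -1 or best_score < threshold:
        return None
    start = max(0, best_i - window)             # 5 lines before the match
    end = min(len(lines), best_i + window + 1)  # 5 lines after
    return "".join(lines[start:end])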
npcsh-0.3.32.data/data/npcsh/npc_team/maurawa.npc
File without changes
npcsh-0.3.32.data/data/npcsh/npc_team/npcsh.ctx
DELETED
@@ -1,11 +0,0 @@

context: |
  The npcsh NPC team is devoted to providing a safe and helpful
  environment for users where they can work and be as successful as possible.
  npcsh is a command-line tool that makes it easy for users to harness
  the power of LLMs from a command line shell.
databases:
  - ~/npcsh_history.db
mcp_servers:
  - /path/to/mcp/server.py
  - @npm for server
npcsh-0.3.32.data/data/npcsh/npc_team/raone.npc
File without changes