npcsh 1.1.3__py3-none-any.whl → 1.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +48 -64
- npcsh/npc_team/corca_example.png +0 -0
- npcsh/npc_team/jinxs/{python_executor.jinx → code/python.jinx} +1 -1
- npcsh/npc_team/jinxs/{bash_executer.jinx → code/sh.jinx} +1 -1
- npcsh/npc_team/jinxs/code/sql.jinx +18 -0
- npcsh/npc_team/jinxs/modes/alicanto.jinx +88 -0
- npcsh/npc_team/jinxs/modes/corca.jinx +28 -0
- npcsh/npc_team/jinxs/modes/guac.jinx +46 -0
- npcsh/npc_team/jinxs/modes/plonk.jinx +57 -0
- npcsh/npc_team/jinxs/modes/pti.jinx +28 -0
- npcsh/npc_team/jinxs/modes/spool.jinx +40 -0
- npcsh/npc_team/jinxs/modes/wander.jinx +81 -0
- npcsh/npc_team/jinxs/modes/yap.jinx +25 -0
- npcsh/npc_team/jinxs/utils/breathe.jinx +20 -0
- npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
- npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
- npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
- npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
- npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
- npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
- npcsh/npc_team/jinxs/{edit_file.jinx → utils/edit_file.jinx} +1 -1
- npcsh/npc_team/jinxs/utils/flush.jinx +39 -0
- npcsh/npc_team/jinxs/utils/npc-studio.jinx +82 -0
- npcsh/npc_team/jinxs/utils/ots.jinx +92 -0
- npcsh/npc_team/jinxs/utils/plan.jinx +33 -0
- npcsh/npc_team/jinxs/utils/roll.jinx +66 -0
- npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
- npcsh/npc_team/jinxs/utils/search/brainblast.jinx +51 -0
- npcsh/npc_team/jinxs/utils/search/rag.jinx +70 -0
- npcsh/npc_team/jinxs/utils/search/search.jinx +192 -0
- npcsh/npc_team/jinxs/utils/serve.jinx +29 -0
- npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
- npcsh/npc_team/jinxs/utils/trigger.jinx +36 -0
- npcsh/npc_team/jinxs/utils/vixynt.jinx +129 -0
- npcsh/npcsh.py +14 -12
- npcsh/routes.py +80 -1420
- npcsh-1.1.5.data/data/npcsh/npc_team/alicanto.jinx +88 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/brainblast.jinx +51 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/breathe.jinx +20 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/build.jinx +65 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/compile.jinx +50 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/corca.jinx +28 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/edit_file.jinx +1 -1
- npcsh-1.1.5.data/data/npcsh/npc_team/flush.jinx +39 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/guac.jinx +46 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/help.jinx +52 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/init.jinx +41 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/jinxs.jinx +32 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/npc-studio.jinx +82 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/ots.jinx +92 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/plan.jinx +33 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/plonk.jinx +57 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/pti.jinx +28 -0
- npcsh-1.1.3.data/data/npcsh/npc_team/python_executor.jinx → npcsh-1.1.5.data/data/npcsh/npc_team/python.jinx +1 -1
- npcsh-1.1.5.data/data/npcsh/npc_team/rag.jinx +70 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/roll.jinx +66 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/sample.jinx +56 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/search.jinx +192 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/serve.jinx +29 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/set.jinx +40 -0
- npcsh-1.1.3.data/data/npcsh/npc_team/bash_executer.jinx → npcsh-1.1.5.data/data/npcsh/npc_team/sh.jinx +1 -1
- npcsh-1.1.5.data/data/npcsh/npc_team/sleep.jinx +116 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/spool.jinx +40 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/sql.jinx +18 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/trigger.jinx +36 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/vixynt.jinx +129 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/wander.jinx +81 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/yap.jinx +25 -0
- {npcsh-1.1.3.dist-info → npcsh-1.1.5.dist-info}/METADATA +1 -1
- npcsh-1.1.5.dist-info/RECORD +132 -0
- npcsh/npc_team/jinxs/image_generation.jinx +0 -29
- npcsh/npc_team/jinxs/internet_search.jinx +0 -31
- npcsh/npc_team/jinxs/screen_cap.jinx +0 -25
- npcsh-1.1.3.data/data/npcsh/npc_team/image_generation.jinx +0 -29
- npcsh-1.1.3.data/data/npcsh/npc_team/internet_search.jinx +0 -31
- npcsh-1.1.3.data/data/npcsh/npc_team/screen_cap.jinx +0 -25
- npcsh-1.1.3.dist-info/RECORD +0 -78
- /npcsh/npc_team/jinxs/{kg_search.jinx → utils/search/kg_search.jinx} +0 -0
- /npcsh/npc_team/jinxs/{memory_search.jinx → utils/search/memory_search.jinx} +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/kg_search.jinx +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/memory_search.jinx +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.3.data → npcsh-1.1.5.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.3.dist-info → npcsh-1.1.5.dist-info}/WHEEL +0 -0
- {npcsh-1.1.3.dist-info → npcsh-1.1.5.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.3.dist-info → npcsh-1.1.5.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.3.dist-info → npcsh-1.1.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,92 @@
+jinx_name: "ots"
+description: "Take screenshot and analyze with vision model"
+inputs:
+  - image_paths_args: "" # Optional comma-separated paths to image files for analysis.
+  - prompt: "" # The prompt for the LLM about the image(s).
+  - vmodel: "" # Vision model to use. Defaults to NPCSH_VISION_MODEL or NPC's model.
+  - vprovider: "" # Vision model provider. Defaults to NPCSH_VISION_PROVIDER or NPC's provider.
+  - stream: False # Whether to stream the output from the LLM.
+  - api_url: "" # API URL for the LLM.
+  - api_key: "" # API key for the LLM.
+steps:
+  - name: "analyze_screenshot_or_image"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.data.image import capture_screenshot
+      # Assuming NPCSH_VISION_MODEL and NPCSH_VISION_PROVIDER are accessible through _state or defaults
+      # For simplicity in Jinx, we'll use fallbacks or assume context will provide
+
+      image_paths_args_str = context.get('image_paths_args')
+      user_prompt = context.get('prompt')
+      vision_model = context.get('vmodel')
+      vision_provider = context.get('vprovider')
+      stream_output = context.get('stream')
+      api_url = context.get('api_url')
+      api_key = context.get('api_key')
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+
+      image_paths = []
+      if image_paths_args_str and image_paths_args_str.strip():
+          for img_path_arg in image_paths_args_str.split(','):
+              full_path = os.path.abspath(os.path.expanduser(img_path_arg.strip()))
+              if os.path.exists(full_path):
+                  image_paths.append(full_path)
+              else:
+                  context['output'] = f"Error: Image file not found at {full_path}"
+                  context['messages'] = output_messages
+                  exit()
+
+      if not image_paths:
+          screenshot_info = capture_screenshot(full=False)
+          if screenshot_info and "file_path" in screenshot_info:
+              image_paths.append(screenshot_info["file_path"])
+              print(f"Screenshot captured: {screenshot_info.get('filename', os.path.basename(screenshot_info['file_path']))}")
+          else:
+              context['output'] = "Error: Failed to capture screenshot."
+              context['messages'] = output_messages
+              exit()
+
+      if not image_paths:
+          context['output'] = "No valid images found or captured."
+          context['messages'] = output_messages
+          exit()
+
+      if not user_prompt or not user_prompt.strip():
+          # In a non-interactive Jinx, a default prompt is better than waiting for input
+          user_prompt = "Describe the image(s)."
+
+      # Fallback for model/provider if not explicitly set in Jinx inputs
+      if not vision_model and current_npc and current_npc.model:
+          vision_model = current_npc.model
+      if not vision_provider and current_npc and current_npc.provider:
+          vision_provider = current_npc.provider
+
+      # Final fallbacks (these would ideally come from npcsh._state config)
+      if not vision_model: vision_model = "gemini-1.5-pro-vision" # Example default
+      if not vision_provider: vision_provider = "gemini" # Example default
+
+      try:
+          response_data = get_llm_response(
+              prompt=user_prompt,
+              model=vision_model,
+              provider=vision_provider,
+              messages=output_messages, # Pass current messages to LLM
+              images=image_paths,
+              stream=stream_output,
+              npc=current_npc,
+              api_url=api_url,
+              api_key=api_key
+          )
+          context['output'] = response_data.get('response')
+          context['messages'] = response_data.get('messages', output_messages)
+          context['model'] = vision_model
+          context['provider'] = vision_provider
+
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error during /ots command: {e}"
+          context['messages'] = output_messages
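For orientation, the step bodies in these new jinxs all communicate through a single `context` dict: declared inputs are read with `context.get(...)` and results are written back to `context['output']` and `context['messages']`. A minimal sketch of what that looks like for the `ots` step above, with hypothetical values; this is not the npcsh runner API, only the keys the code reads and writes:

```python
# Hypothetical context for the "ots" step above; every key mirrors a declared input.
context = {
    "image_paths_args": "~/screenshots/login.png",  # hypothetical path; blank means "take a screenshot"
    "prompt": "Describe the image(s).",
    "vmodel": "",      # empty -> falls back to the NPC's model, then the hardcoded default
    "vprovider": "",
    "stream": False,
    "api_url": "",
    "api_key": "",
    "messages": [],    # running chat history, threaded through every step
    "npc": None,       # the active NPC object, if any
}

# After the step runs, results come back on the same dict:
#   context["output"]   -> the vision model's text response (or an error string)
#   context["messages"] -> updated message history
#   context["model"], context["provider"] -> the model/provider actually used
```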
@@ -0,0 +1,33 @@
+jinx_name: "plan"
+description: "Execute a plan command"
+inputs:
+  - plan_description: "" # Description of the plan to execute.
+steps:
+  - name: "execute_plan"
+    engine: "python"
+    code: |
+      import traceback
+      from npcpy.work.plan import execute_plan_command
+
+      plan_description = context.get('plan_description')
+      output_messages = context.get('messages', [])
+
+      if not plan_description or not plan_description.strip():
+          context['output'] = "Usage: /plan <description_of_plan>"
+          context['messages'] = output_messages
+          exit()
+
+      try:
+          # Pass all current context as kwargs to execute_plan_command
+          result = execute_plan_command(command=plan_description, **context)
+
+          if isinstance(result, dict):
+              context['output'] = result.get('output', 'Plan executed.')
+              context['messages'] = result.get('messages', output_messages)
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error executing plan: {e}"
+          context['messages'] = output_messages
@@ -0,0 +1,57 @@
+jinx_name: "plonk"
+description: "Use vision model to interact with GUI. Usage: /plonk <task description>"
+inputs:
+  - task_description: "" # Required task description for GUI interaction.
+  - vmodel: "" # Vision model to use. Defaults to NPCSH_VISION_MODEL or NPC's model.
+  - vprovider: "" # Vision model provider. Defaults to NPCSH_VISION_PROVIDER or NPC's provider.
+steps:
+  - name: "execute_plonk"
+    engine: "python"
+    code: |
+      import traceback
+      from npcsh.plonk import execute_plonk_command, format_plonk_summary
+      # Assuming NPCSH_VISION_MODEL and NPCSH_VISION_PROVIDER are accessible
+
+      task_description = context.get('task_description')
+      vision_model = context.get('vmodel')
+      vision_provider = context.get('vprovider')
+      plonk_context = context.get('plonk_context') # Passed from original context
+      current_npc = context.get('npc')
+      output_messages = context.get('messages', [])
+
+      if not task_description or not task_description.strip():
+          context['output'] = "Usage: /plonk <task_description> [--vmodel model_name] [--vprovider provider_name]"
+          context['messages'] = output_messages
+          exit()
+
+      # Fallback for model/provider if not explicitly set in Jinx inputs
+      if not vision_model and current_npc and current_npc.model:
+          vision_model = current_npc.model
+      if not vision_provider and current_npc and current_npc.provider:
+          vision_provider = current_npc.provider
+
+      # Final fallbacks (these would ideally come from npcsh._state config)
+      if not vision_model: vision_model = "gemini-1.5-pro-vision" # Example default
+      if not vision_provider: vision_provider = "gemini" # Example default
+
+      try:
+          summary_data = execute_plonk_command(
+              request=task_description,
+              model=vision_model,
+              provider=vision_provider,
+              npc=current_npc,
+              plonk_context=plonk_context,
+              debug=True # Assuming debug is often desired for plonk
+          )
+
+          if summary_data and isinstance(summary_data, list):
+              output_report = format_plonk_summary(summary_data)
+              context['output'] = output_report
+          else:
+              context['output'] = "Plonk command did not complete within the maximum number of iterations."
+
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error executing plonk command: {e}"
+
+      context['messages'] = output_messages
@@ -0,0 +1,28 @@
+jinx_name: "pti"
+description: "Enter Pardon-The-Interruption mode for human-in-the-loop reasoning."
+inputs:
+  - command_args: "" # The full command string or specific arguments for PTI mode.
+steps:
+  - name: "enter_pti"
+    engine: "python"
+    code: |
+      import traceback
+      from npcsh.pti import enter_pti_mode
+
+      command_args = context.get('command_args', '') # The full command string from router
+      output_messages = context.get('messages', [])
+
+      try:
+          # enter_pti_mode likely expects the full command string for its own parsing
+          result = enter_pti_mode(command=command_args, **context)
+
+          if isinstance(result, dict):
+              context['output'] = result.get('output', 'Entered PTI mode.')
+              context['messages'] = result.get('messages', output_messages)
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error entering pti mode: {e}"
+          context['messages'] = output_messages
@@ -0,0 +1,70 @@
+jinx_name: "rag"
+description: "Execute a RAG command using ChromaDB embeddings with optional file input (-f/--file)"
+inputs:
+  - query: "" # Required search query for RAG.
+  - file_paths: "" # Optional comma-separated file paths to include in RAG.
+  - vector_db_path: "~/npcsh_chroma.db" # Path to the ChromaDB vector database.
+  - emodel: "" # Embedding model to use. Defaults to NPCSH_EMBEDDING_MODEL or NPC's model.
+  - eprovider: "" # Embedding provider to use. Defaults to NPCSH_EMBEDDING_PROVIDER or NPC's provider.
+steps:
+  - name: "execute_rag"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.data.load import load_file_contents
+      from npcpy.memory.search import execute_rag_command
+      # Assuming NPCSH_EMBEDDING_MODEL and NPCSH_EMBEDDING_PROVIDER are accessible
+
+      query = context.get('query')
+      file_paths_str = context.get('file_paths')
+      vector_db_path = context.get('vector_db_path')
+      embedding_model = context.get('emodel')
+      embedding_provider = context.get('eprovider')
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+
+      file_paths = []
+      if file_paths_str and file_paths_str.strip():
+          file_paths = [os.path.abspath(os.path.expanduser(p.strip())) for p in file_paths_str.split(',')]
+
+      if not query and not file_paths:
+          context['output'] = "Usage: /rag [-f file_path] <query>"
+          context['messages'] = output_messages
+          exit()
+
+      # Fallback for model/provider if not explicitly set in Jinx inputs
+      if not embedding_model and current_npc and current_npc.model:
+          embedding_model = current_npc.model
+      if not embedding_provider and current_npc and current_npc.provider:
+          embedding_provider = current_npc.provider
+
+      # Final fallbacks (these would ideally come from npcsh._state config)
+      if not embedding_model: embedding_model = "nomic-ai/nomic-embed-text-v1.5" # Example default
+      if not embedding_provider: embedding_provider = "ollama" # Example default
+
+      try:
+          file_contents = []
+          for file_path in file_paths:
+              try:
+                  chunks = load_file_contents(file_path)
+                  file_name = os.path.basename(file_path)
+                  file_contents.extend([f"[{file_name}] {chunk}" for chunk in chunks])
+              except Exception as file_err:
+                  file_contents.append(f"Error processing file {file_path}: {str(file_err)}")
+
+          result = execute_rag_command(
+              command=query,
+              vector_db_path=os.path.expanduser(vector_db_path),
+              embedding_model=embedding_model,
+              embedding_provider=embedding_provider,
+              file_contents=file_contents if file_paths else None,
+              **{k:v for k,v in context.items() if k not in ['messages', 'query', 'file_paths']} # Pass other context
+          )
+          context['output'] = result.get('response')
+          context['messages'] = result.get('messages', output_messages)
+
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error executing RAG command: {e}"
+          context['messages'] = output_messages
@@ -0,0 +1,66 @@
+jinx_name: "roll"
+description: "Generate a video from a text prompt."
+inputs:
+  - prompt: "" # Required text prompt for video generation.
+  - num_frames: 125 # Number of frames for the video.
+  - width: 256 # Width of the video.
+  - height: 256 # Height of the video.
+  - output_path: "output.mp4" # Output file path for the video.
+  - vgmodel: "" # Video generation model to use. Defaults to NPCSH_VIDEO_GEN_MODEL or NPC's model.
+  - vgprovider: "" # Video generation provider to use. Defaults to NPCSH_VIDEO_GEN_PROVIDER or NPC's provider.
+steps:
+  - name: "generate_video"
+    engine: "python"
+    code: |
+      import traceback
+      from npcpy.llm_funcs import gen_video
+      # Assuming NPCSH_VIDEO_GEN_MODEL and NPCSH_VIDEO_GEN_PROVIDER are accessible
+
+      prompt = context.get('prompt')
+      num_frames = int(context.get('num_frames', 125)) # Ensure int type
+      width = int(context.get('width', 256)) # Ensure int type
+      height = int(context.get('height', 256)) # Ensure int type
+      output_path = context.get('output_path')
+      video_gen_model = context.get('vgmodel')
+      video_gen_provider = context.get('vgprovider')
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+
+      if not prompt or not prompt.strip():
+          context['output'] = "Usage: /roll <your prompt>"
+          context['messages'] = output_messages
+          exit()
+
+      # Fallback for model/provider if not explicitly set in Jinx inputs
+      if not video_gen_model and current_npc and current_npc.model:
+          video_gen_model = current_npc.model
+      if not video_gen_provider and current_npc and current_npc.provider:
+          video_gen_provider = current_npc.provider
+
+      # Final fallbacks (these would ideally come from npcsh._state config)
+      if not video_gen_model: video_gen_model = "stable-video-diffusion" # Example default
+      if not video_gen_provider: video_gen_provider = "diffusers" # Example default
+
+      try:
+          result = gen_video(
+              prompt=prompt,
+              model=video_gen_model,
+              provider=video_gen_provider,
+              npc=current_npc,
+              num_frames=num_frames,
+              width=width,
+              height=height,
+              output_path=output_path,
+              **context.get('api_kwargs', {}) # Assuming api_kwargs might be passed
+          )
+
+          if isinstance(result, dict):
+              context['output'] = result.get('output', 'Video generated.')
+              context['messages'] = result.get('messages', output_messages)
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error generating video: {e}"
+          context['messages'] = output_messages
@@ -0,0 +1,56 @@
+jinx_name: "sample"
+description: "Send a prompt directly to the LLM."
+inputs:
+  - prompt: "" # Required text prompt to send to the LLM.
+  - model: "" # LLM model to use. Defaults to NPC's model.
+  - provider: "" # LLM provider to use. Defaults to NPC's provider.
+steps:
+  - name: "send_prompt_to_llm"
+    engine: "python"
+    code: |
+      import traceback
+      from npcpy.llm_funcs import get_llm_response
+
+      prompt = context.get('prompt')
+      llm_model = context.get('model')
+      llm_provider = context.get('provider')
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+
+      if not prompt or not prompt.strip():
+          context['output'] = "Usage: /sample <your prompt> [-m --model] model [-p --provider] provider"
+          context['messages'] = output_messages
+          exit()
+
+      # Fallback for model/provider if not explicitly set in Jinx inputs
+      if not llm_model and current_npc and current_npc.model:
+          llm_model = current_npc.model
+      if not llm_provider and current_npc and current_npc.provider:
+          llm_provider = current_npc.provider
+
+      # Final fallbacks (these would ideally come from npcsh._state config)
+      if not llm_model: llm_model = "gemini-1.5-pro" # Example default
+      if not llm_provider: llm_provider = "gemini" # Example default
+
+      try:
+          result = get_llm_response(
+              prompt=prompt,
+              model=llm_model,
+              provider=llm_provider,
+              npc=current_npc,
+              **{k:v for k,v in context.items() if k not in ['messages', 'prompt', 'model', 'provider']} # Pass other context
+          )
+
+          if isinstance(result, dict):
+              context['output'] = result.get('response')
+              context['messages'] = result.get('messages', output_messages)
+              context['model'] = llm_model
+              context['provider'] = llm_provider
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error sampling LLM: {e}"
+          context['messages'] = output_messages
@@ -0,0 +1,192 @@
+jinx_name: "search"
+description: "Execute web search or memory/KG search. Usage: /search [-m/-mem | -kg] <query>"
+inputs:
+  - query: "" # Required search query.
+  - search_type: "web" # Type of search: "web", "memory", or "kg".
+  - sprovider: "" # Search provider for web search. Defaults to NPCSH_SEARCH_PROVIDER.
+  - history_db_path: "~/npcsh_history.db" # Path to the command history database for memory/KG search.
+steps:
+  - name: "execute_search"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.data.web import search_web
+      from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db # For KG search
+      from npcsh._state import get_relevant_memories # For memory search
+      # Assuming NPCSH_SEARCH_PROVIDER is accessible
+
+      query = context.get('query')
+      search_type = context.get('search_type')
+      search_provider = context.get('sprovider')
+      history_db_path = context.get('history_db_path')
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+      current_team = context.get('team')
+
+      if not query or not query.strip():
+          context['output'] = (
+              "Usage:\n"
+              " /search <query> - Web search\n"
+              " /search -m <query> - Memory search\n"
+              " /search -kg <query> - Knowledge graph search"
+          )
+          context['messages'] = output_messages
+          exit()
+
+      def search_memories(query_str, current_context, output_msgs):
+          command_history = current_context.get('command_history')
+          if not command_history:
+              db_path = os.path.expanduser(current_context.get("history_db_path", "~/npcsh_history.db"))
+              try:
+                  command_history = CommandHistory(db_path)
+                  current_context['command_history'] = command_history
+              except Exception as e:
+                  return {"output": f"Error connecting to history: {e}", "messages": output_msgs}
+
+          npc_name = current_context.get('npc_name', '__none__')
+          team_name = current_context.get('team_name', '__none__')
+          current_path = current_context.get('current_path', os.getcwd())
+          state = current_context.get('state')
+
+          try:
+              memories = get_relevant_memories(
+                  command_history=command_history,
+                  npc_name=npc_name,
+                  team_name=team_name,
+                  path=current_path,
+                  query=query_str,
+                  max_memories=10,
+                  state=state
+              )
+
+              if not memories:
+                  output = f"No memories found for query: '{query_str}'"
+              else:
+                  output = f"Found {len(memories)} memories:\n\n"
+                  for i, mem in enumerate(memories, 1):
+                      final_mem = (
+                          mem.get('final_memory') or
+                          mem.get('initial_memory')
+                      )
+                      timestamp = mem.get('timestamp', 'unknown')
+                      output += f"{i}. [{timestamp}] {final_mem}\n"
+
+              return {"output": output, "messages": output_msgs}
+
+          except Exception as e:
+              traceback.print_exc()
+              return {"output": f"Error searching memories: {e}", "messages": output_msgs}
+
+      def search_knowledge_graph(query_str, current_context, output_msgs):
+          command_history = current_context.get('command_history')
+          if not command_history:
+              db_path = os.path.expanduser(current_context.get("history_db_path", "~/npcsh_history.db"))
+              try:
+                  command_history = CommandHistory(db_path)
+                  current_context['command_history'] = command_history
+              except Exception as e:
+                  return {"output": f"Error connecting to history: {e}", "messages": output_msgs}
+
+          npc_name = current_context.get('npc_name', '__none__')
+          team_name = current_context.get('team_name', '__none__')
+          current_path = current_context.get('current_path', os.getcwd())
+
+          try:
+              engine = command_history.engine
+              kg = load_kg_from_db(
+                  engine,
+                  team_name,
+                  npc_name,
+                  current_path
+              )
+
+              if not kg or not kg.get('facts'):
+                  return {
+                      "output": (
+                          f"No knowledge graph found for current scope.\n"
+                          f"Scope: Team='{team_name}', "
+                          f"NPC='{npc_name}', Path='{current_path}'"
+                      ),
+                      "messages": output_msgs
+                  }
+
+              query_lower = query_str.lower()
+              matching_facts = []
+              matching_concepts = []
+
+              for fact in kg.get('facts', []):
+                  statement = fact.get('statement', '').lower()
+                  if query_lower in statement:
+                      matching_facts.append(fact)
+
+              for concept in kg.get('concepts', []):
+                  name = concept.get('name', '').lower()
+                  desc = concept.get('description', '').lower()
+                  if query_lower in name or query_lower in desc:
+                      matching_concepts.append(concept)
+
+              output = f"Knowledge Graph Search Results for '{query_str}':\n\n"
+
+              if matching_facts:
+                  output += f"## Facts ({len(matching_facts)}):\n"
+                  for i, fact in enumerate(matching_facts, 1):
+                      output += f"{i}. {fact.get('statement')}\n"
+                  output += "\n"
+
+              if matching_concepts:
+                  output += f"## Concepts ({len(matching_concepts)}):\n"
+                  for i, concept in enumerate(matching_concepts, 1):
+                      name = concept.get('name')
+                      desc = concept.get('description', '')
+                      output += f"{i}. {name}: {desc}\n"
+
+              if not matching_facts and not matching_concepts:
+                  output += "No matching facts or concepts found."
+
+              return {"output": output, "messages": output_msgs}
+
+          except Exception as e:
+              traceback.print_exc()
+              return {"output": f"Error searching KG: {e}", "messages": output_msgs}
+
+      def search_web_default(query_str, current_context, output_msgs):
+          # Fallback for search_provider if not explicitly set in Jinx inputs
+          current_search_provider = current_context.get('sprovider')
+          # If NPCSH_SEARCH_PROVIDER is accessible, use it. Otherwise, a default.
+          # For Jinx, let's just use a hardcoded default if not provided.
+          if not current_search_provider:
+              current_search_provider = "google" # Example default
+
+          # Assuming render_markdown is accessible
+          # render_markdown(f'- Searching {current_search_provider} for "{query_str}"') # Not directly supported in Jinx steps
+
+          try:
+              search_results = search_web(query_str, provider=current_search_provider)
+              output = (
+                  "\n".join([f"- {res}" for res in search_results])
+                  if search_results
+                  else "No results found."
+              )
+          except Exception as e:
+              traceback.print_exc()
+              output = f"Error during web search: {e}"
+
+          return {"output": output, "messages": output_msgs}
+
+
+      # Populate npc_name, team_name, current_path for search functions
+      context['npc_name'] = current_npc.name if isinstance(current_npc, type(None).__class__) else "__none__"
+      context['team_name'] = current_team.name if current_team else "__none__"
+      context['current_path'] = os.getcwd() # Or get from context if available
+
+      final_result = None
+      if search_type == 'memory':
+          final_result = search_memories(query, context, output_messages)
+      elif search_type == 'kg':
+          final_result = search_knowledge_graph(query, context, output_messages)
+      else:
+          final_result = search_web_default(query, context, output_messages)
+
+      context['output'] = final_result.get('output')
+      context['messages'] = final_result.get('messages', output_messages)
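The knowledge-graph branch above relies on plain case-insensitive substring matching rather than embeddings. A small self-contained sketch of that matching logic, using made-up facts and concepts (the real `kg` dict would come from `load_kg_from_db`; the shapes here are assumed from the code above):

```python
# Toy data standing in for a KG loaded via load_kg_from_db (shapes assumed from the jinx code).
kg = {
    "facts": [
        {"statement": "The NPC Team server defaults to port 5337."},
        {"statement": "Screenshots are captured before vision analysis."},
    ],
    "concepts": [
        {"name": "Serve", "description": "Runs the Flask server for an NPC Team."},
    ],
}

query_lower = "server"
matching_facts = [f for f in kg.get("facts", []) if query_lower in f.get("statement", "").lower()]
matching_concepts = [
    c for c in kg.get("concepts", [])
    if query_lower in c.get("name", "").lower() or query_lower in c.get("description", "").lower()
]
print(len(matching_facts), len(matching_concepts))  # -> 1 1
```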
@@ -0,0 +1,29 @@
+jinx_name: "serve"
+description: "Serve an NPC Team"
+inputs:
+  - port: 5337 # The port to run the Flask server on.
+  - cors: "" # Comma-separated CORS origins.
+steps:
+  - name: "start_flask_server"
+    engine: "python"
+    code: |
+      from npcpy.serve import start_flask_server
+
+      port = context.get('port')
+      cors_str = context.get('cors')
+      output_messages = context.get('messages', [])
+
+      cors_origins = None
+      if cors_str and cors_str.strip():
+          cors_origins = [origin.strip() for origin in cors_str.split(",")]
+
+      # start_flask_server blocks, so this will hold the Jinx until the server is stopped.
+      # In a real-world scenario, you might want to run this in a separate process
+      # or have a non-blocking server start.
+      start_flask_server(
+          port=int(port), # Ensure port is an integer
+          cors_origins=cors_origins,
+      )
+
+      context['output'] = "NPC Team server started. Execution of this jinx will pause until the server is stopped."
+      context['messages'] = output_messages