npcsh 1.1.9__py3-none-any.whl → 1.1.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +45 -12
- npcsh/guac.py +6 -4
- npcsh/npc_team/jinxs/code/sh.jinx +32 -13
- npcsh/npc_team/jinxs/code/sql.jinx +2 -2
- npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
- npcsh/npc_team/jinxs/utils/chat.jinx +17 -0
- npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
- npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
- npcsh/npc_team/jinxs/utils/serve.jinx +0 -3
- npcsh/npc_team/jinxs/utils/trigger.jinx +39 -14
- npcsh/npc_team/jinxs/utils/vixynt.jinx +104 -77
- npcsh/npcsh.py +2 -1
- npcsh/routes.py +0 -1
- npcsh-1.1.11.data/data/npcsh/npc_team/agent.jinx +17 -0
- npcsh-1.1.11.data/data/npcsh/npc_team/chat.jinx +17 -0
- npcsh-1.1.11.data/data/npcsh/npc_team/compress.jinx +140 -0
- npcsh-1.1.11.data/data/npcsh/npc_team/load_file.jinx +35 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/serve.jinx +0 -3
- npcsh-1.1.11.data/data/npcsh/npc_team/sh.jinx +38 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/sql.jinx +2 -2
- npcsh-1.1.11.data/data/npcsh/npc_team/trigger.jinx +61 -0
- npcsh-1.1.11.data/data/npcsh/npc_team/vixynt.jinx +144 -0
- {npcsh-1.1.9.dist-info → npcsh-1.1.11.dist-info}/METADATA +1 -1
- npcsh-1.1.11.dist-info/RECORD +126 -0
- npcsh/npc_team/jinxs/utils/breathe.jinx +0 -20
- npcsh/npc_team/jinxs/utils/flush.jinx +0 -39
- npcsh/npc_team/jinxs/utils/plan.jinx +0 -33
- npcsh-1.1.9.data/data/npcsh/npc_team/breathe.jinx +0 -20
- npcsh-1.1.9.data/data/npcsh/npc_team/flush.jinx +0 -39
- npcsh-1.1.9.data/data/npcsh/npc_team/plan.jinx +0 -33
- npcsh-1.1.9.data/data/npcsh/npc_team/sh.jinx +0 -19
- npcsh-1.1.9.data/data/npcsh/npc_team/trigger.jinx +0 -36
- npcsh-1.1.9.data/data/npcsh/npc_team/vixynt.jinx +0 -117
- npcsh-1.1.9.dist-info/RECORD +0 -124
- /npcsh/npc_team/jinxs/{utils → npc_studio}/npc-studio.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/alicanto.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/build.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/corca.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/guac.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/plonk.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/pti.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/search.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/sleep.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/spool.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/wander.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/yap.jinx +0 -0
- {npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.9.dist-info → npcsh-1.1.11.dist-info}/WHEEL +0 -0
- {npcsh-1.1.9.dist-info → npcsh-1.1.11.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.9.dist-info → npcsh-1.1.11.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.9.dist-info → npcsh-1.1.11.dist-info}/top_level.txt +0 -0
npcsh/npc_team/jinxs/utils/vixynt.jinx
CHANGED

@@ -2,13 +2,13 @@ jinx_name: "vixynt"
 description: "Generates images from text descriptions or edits existing ones."
 inputs:
 - prompt
-- model:
-- provider:
-- output_name:
-- attachments:
-- n_images:
-- height:
-- width:
+- model: null
+- provider: null
+- output_name: null
+- attachments: null
+- n_images: null
+- height: null
+- width: null
 steps:
 - name: "generate_or_edit_image"
 engine: "python"

@@ -20,98 +20,125 @@ steps:
 from PIL import Image
 from npcpy.llm_funcs import gen_image
 
-# Extract inputs from context
-image_prompt = context.get('prompt', '').strip()
+# Extract inputs from context with proper type conversion
+image_prompt = str(context.get('prompt', '')).strip()
 output_name = context.get('output_name')
 attachments_str = context.get('attachments')
-
-
-
+
+# Handle integer inputs - they may come as strings or ints
+try:
+n_images = int(context.get('n_images', 1))
+except (ValueError, TypeError):
+n_images = 1
+
+try:
+height = int(context.get('height', 1024))
+except (ValueError, TypeError):
+height = 1024
+
+try:
+width = int(context.get('width', 1024))
+except (ValueError, TypeError):
+width = 1024
+
+# Get model and provider, prioritizing context, then NPC, then environment variables
 model = context.get('model')
 provider = context.get('provider')
-
-input_images = []
-if attachments_str and attachments_str.strip():
-input_images = [p.strip() for p in attachments_str.split(',')]
-
+
 # Use NPC's model/provider as fallback
-if not model and npc and npc.model:
+if not model and npc and hasattr(npc, 'model') and npc.model:
 model = npc.model
-if not provider and npc and npc.provider:
+if not provider and npc and hasattr(npc, 'provider') and npc.provider:
 provider = npc.provider
 
-#
+# Fallback to environment variables
+if not model:
+model = os.getenv('NPCSH_IMAGE_GEN_MODEL')
+if not provider:
+provider = os.getenv('NPCSH_IMAGE_GEN_PROVIDER')
+
+# Final hardcoded fallbacks if nothing else is set
 if not model:
 model = "runwayml/stable-diffusion-v1-5"
 if not provider:
 provider = "diffusers"
 
+# Parse attachments
+input_images = []
+if attachments_str and str(attachments_str).strip():
+input_images = [p.strip() for p in str(attachments_str).split(',')]
+
 output_messages = context.get('messages', [])
 
 if not image_prompt:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-input_images=input_images if input_images else None
-)
-
-# Ensure we have a list of images
-if not isinstance(result, list):
-images_list = [result] if result is not None else []
-else:
-images_list = result
-
-saved_files = []
-
-for i, image in enumerate(images_list):
-if image is None:
-continue
+output = "Error: No prompt provided for image generation."
+else:
+try:
+# Generate image(s)
+result = gen_image(
+prompt=image_prompt,
+model=model,
+provider=provider,
+npc=npc,
+height=height,
+width=width,
+n_images=n_images,
+input_images=input_images if input_images else None
+)
 
-#
-if
-
-if not ext:
-ext = ".png"
-current_output_file = f"{base_name}_{i}{ext}" if len(images_list) > 1 else f"{base_name}{ext}"
+# Ensure we have a list of images
+if not isinstance(result, list):
+images_list = [result] if result is not None else []
 else:
-
-current_output_file = (
-os.path.expanduser("~/.npcsh/images/")
-+ f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.png"
-)
+images_list = result
 
-
-
-saved_files.append(current_output_file)
-
-if saved_files:
-if input_images:
-output = f"Image(s) edited and saved to: {', '.join(saved_files)}"
-else:
-output = f"Image(s) generated and saved to: {', '.join(saved_files)}"
+saved_files = []
+html_image_tags = [] # This list will store the raw HTML <img> tags
 
-
-
-
-
+for i, image in enumerate(images_list):
+if image is None:
+continue
+
+# Determine output filename
+if output_name and str(output_name).strip():
+base_name, ext = os.path.splitext(os.path.expanduser(str(output_name)))
+if not ext:
+ext = ".png"
+current_output_file = f"{base_name}_{i}{ext}" if len(images_list) > 1 else f"{base_name}{ext}"
+else:
+os.makedirs(os.path.expanduser("~/.npcsh/images/"), exist_ok=True)
+current_output_file = (
+os.path.expanduser("~/.npcsh/images/")
++ f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.png"
+)
+
+# Save image to file
+image.save(current_output_file)
+saved_files.append(current_output_file)
+
+# Convert image to base64 and create an HTML <img> tag
+with open(current_output_file, 'rb') as f:
+img_data = base64.b64encode(f.read()).decode()
+# Using raw HTML <img> tag with data URI
+html_image_tags.append(f'<img src="data:image/png;base64,{img_data}" alt="Generated Image {i+1}" style="max-width: 100%; display: block; margin-top: 10px;">')
 
-
-
-
-
+if saved_files:
+output_text_message = f"Image(s) generated and saved to: {', '.join(saved_files)}"
+if input_images:
+output_text_message = f"Image(s) edited and saved to: {', '.join(saved_files)}"
+
+output = output_text_message # Keep the text message clean
+output += f"\n\nThe image files have been saved and are ready to view."
+output += "\n\n" + "\n".join(html_image_tags) # Append all HTML <img> tags to the output
+else:
+output = "No images were generated."
+
+except Exception as e:
+import traceback
+traceback.print_exc()
+output = f"Error {'editing' if input_images else 'generating'} image: {str(e)}"
 
 context['output'] = output
 context['messages'] = output_messages
 context['model'] = model
-context['provider'] = provider
+context['provider'] = provider
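Two patterns in the new vixynt code are easy to lift out and test in isolation: inputs are defensively coerced to int (they may arrive as strings from the shell), and saved images are inlined into the output as base64 data URIs. A minimal standalone sketch of both, using hypothetical helper names rather than the jinx's own:

```python
import base64

def to_int(value, default):
    # Defensive coercion: jinx inputs may arrive as str, int, or None.
    try:
        return int(value)
    except (ValueError, TypeError):
        return default

def img_tag(path, index=0):
    # Inline a saved PNG as an HTML <img> tag with a base64 data URI.
    with open(path, "rb") as f:
        data = base64.b64encode(f.read()).decode()
    return f'<img src="data:image/png;base64,{data}" alt="Generated Image {index + 1}">'

print(to_int("1024", 1024), to_int(None, 1024), to_int("abc", 1024))  # 1024 1024 1024
```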
npcsh/npcsh.py
CHANGED

@@ -78,7 +78,8 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState, router)
 render_markdown(f'- Using {state.current_mode} mode. Use /agent, /cmd, or /chat to switch to other modes')
 render_markdown(f'- To switch to a different NPC, type /npc <npc_name> or /n <npc_name> to switch to that NPC.')
 render_markdown('\n- Here are the current NPCs available in your team: ' + ', '.join([npc_name for npc_name in state.team.npcs.keys()]))
-
+render_markdown('\n- Here are the currently available Jinxs: ' + ', '.join([jinx_name for jinx_name in state.team.jinxs_dict.keys()]))
+
 is_windows = platform.system().lower().startswith("win")
 try:
 completer = make_completer(state, router)
npcsh/routes.py
CHANGED

npcsh-1.1.11.data/data/npcsh/npc_team/agent.jinx
ADDED

@@ -0,0 +1,17 @@
+jinx_name: agent
+description: Provides an LLM response with tool use enabled.
+inputs:
+- query
+- auto_process_tool_calls: True
+- use_core_tools: True
+steps:
+- name: get_agent_response
+engine: python
+code: |
+response = npc.get_llm_response(
+request=query,
+messages=context.get('messages', []),
+auto_process_tool_calls={{ auto_process_tool_calls | default(True) }},
+use_core_tools={{ use_core_tools | default(True) }}
+)
+output = response.get('response', '')
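Both the agent jinx above and the chat jinx below rely on Jinja's `default` filter so the boolean flags can be omitted when the jinx is invoked. A minimal sketch of how that template line renders, using plain jinja2 outside the jinx runner:

```python
from jinja2 import Template

line = Template("auto_process_tool_calls={{ auto_process_tool_calls | default(True) }}")
print(line.render())                               # auto_process_tool_calls=True
print(line.render(auto_process_tool_calls=False))  # auto_process_tool_calls=False
```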
npcsh-1.1.11.data/data/npcsh/npc_team/chat.jinx
ADDED

@@ -0,0 +1,17 @@
+jinx_name: chat
+description: Provides a direct LLM response without tool use.
+inputs:
+- query
+- auto_process_tool_calls: False
+- use_core_tools: False
+steps:
+- name: get_chat_response
+engine: python
+code: |
+response = npc.get_llm_response(
+request=query,
+messages=context.get('messages', []),
+auto_process_tool_calls={{ auto_process_tool_calls | default(False) }},
+use_core_tools={{ use_core_tools | default(False) }}
+)
+output = response.get('response', '')
npcsh-1.1.11.data/data/npcsh/npc_team/compress.jinx
ADDED

@@ -0,0 +1,140 @@
+jinx_name: "compress"
+description: "Manages conversation and knowledge context. Defaults to compacting context. Use flags for other operations."
+inputs:
+- flush: "" # The number of recent messages to flush.
+- sleep: False # If true, evolves the knowledge graph.
+- dream: False # Used with --sleep. Runs creative synthesis.
+- ops: "" # Used with --sleep. Comma-separated list of KG operations.
+- model: "" # Used with --sleep. LLM model for KG evolution.
+- provider: "" # Used with --sleep. LLM provider for KG evolution.
+steps:
+- name: "manage_context_and_memory"
+engine: "python"
+code: |
+import os
+import traceback
+from npcpy.llm_funcs import breathe
+from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db
+from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process
+
+# --- Get all inputs from context ---
+flush_n_str = context.get('flush')
+is_sleeping = context.get('sleep')
+is_dreaming = context.get('dream')
+operations_str = context.get('ops')
+llm_model = context.get('model')
+llm_provider = context.get('provider')
+output_messages = context.get('messages', [])
+
+USAGE = """Usage:
+/compress (Compacts conversation context)
+/compress --flush <number> (Removes the last N messages)
+/compress --sleep [...] (Evolves the knowledge graph)
+--dream (With --sleep: enables creative synthesis)
+--ops "op1,op2" (With --sleep: specifies KG operations)
+--model <name> (With --sleep: specifies LLM model)
+--provider <name> (With --sleep: specifies LLM provider)"""
+
+# --- Argument Validation: Ensure mutual exclusivity ---
+is_flushing = flush_n_str is not None and flush_n_str.strip() != ''
+if is_sleeping and is_flushing:
+context['output'] = f"Error: --sleep and --flush are mutually exclusive.\n{USAGE}"
+context['messages'] = output_messages
+exit()
+
+# --- Dispatcher: Route to the correct functionality ---
+
+# 1. SLEEP: Evolve the Knowledge Graph
+if is_sleeping:
+current_npc = context.get('npc')
+current_team = context.get('team')
+
+# Parameter setup for KG process
+operations_config = [op.strip() for op in operations_str.split(',')] if operations_str else None
+if not llm_model and current_npc: llm_model = current_npc.model
+if not llm_provider and current_npc: llm_provider = current_npc.provider
+if not llm_model: llm_model = "gemini-1.5-pro"
+if not llm_provider: llm_provider = "gemini"
+
+team_name = current_team.name if current_team else "__none__"
+npc_name = current_npc.name if current_npc else "__none__"
+current_path = os.getcwd()
+scope_str = f"Team: '{team_name}', NPC: '{npc_name}', Path: '{current_path}'"
+
+command_history = None
+try:
+db_path = os.getenv("NPCSH_DB_PATH", os.path.expanduser("~/npcsh_history.db"))
+command_history = CommandHistory(db_path)
+engine = command_history.engine
+current_kg = load_kg_from_db(engine, team_name, npc_name, current_path)
+
+if not current_kg or not current_kg.get('facts'):
+context['output'] = f"Knowledge graph for the current scope is empty. Nothing to process.\n- Scope: {scope_str}"
+exit()
+
+original_facts = len(current_kg.get('facts', []))
+original_concepts = len(current_kg.get('concepts', []))
+
+evolved_kg, _ = kg_sleep_process(existing_kg=current_kg, model=llm_model, provider=llm_provider, npc=current_npc, operations_config=operations_config)
+process_type = "Sleep"
+
+if is_dreaming:
+evolved_kg, _ = kg_dream_process(existing_kg=evolved_kg, model=llm_model, provider=llm_provider, npc=current_npc)
+process_type += " & Dream"
+
+save_kg_to_db(engine, evolved_kg, team_name, npc_name, current_path)
+
+new_facts = len(evolved_kg.get('facts', []))
+new_concepts = len(evolved_kg.get('concepts', []))
+
+context['output'] = (f"{process_type} process complete.\n"
+f"- Facts: {original_facts} -> {new_facts} ({new_facts - original_facts:+})\n"
+f"- Concepts: {original_concepts} -> {new_concepts} ({new_concepts - original_concepts:+})")
+except Exception as e:
+traceback.print_exc()
+context['output'] = f"Error during KG evolution: {e}"
+finally:
+if command_history: command_history.close()
+context['messages'] = output_messages
+
+# 2. FLUSH: Remove messages from context
+elif is_flushing:
+try:
+n = int(flush_n_str)
+if n <= 0:
+context['output'] = "Error: Number of messages to flush must be positive."
+exit()
+except ValueError:
+context['output'] = f"Error: Invalid number '{flush_n_str}'. {USAGE}"
+exit()
+
+messages_list = list(output_messages)
+original_len = len(messages_list)
+final_messages = []
+
+if messages_list and messages_list[0].get("role") == "system":
+system_message = messages_list.pop(0)
+num_to_remove = min(n, len(messages_list))
+final_messages = [system_message] + messages_list[:-num_to_remove]
+else:
+num_to_remove = min(n, original_len)
+final_messages = messages_list[:-num_to_remove]
+
+removed_count = original_len - len(final_messages)
+context['output'] = f"Flushed {removed_count} message(s). Context is now {len(final_messages)} messages."
+context['messages'] = final_messages
+
+# 3. DEFAULT: Compact conversation context
+else:
+try:
+result = breathe(**context)
+if isinstance(result, dict):
+context['output'] = result.get('output', 'Context compressed.')
+context['messages'] = result.get('messages', output_messages)
+else:
+context['output'] = "Context compression process initiated."
+context['messages'] = output_messages
+except Exception as e:
+traceback.print_exc()
+context['output'] = f"Error during context compression: {e}"
+context['messages'] = output_messages
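The `--flush` branch above preserves a leading system message while dropping the last N messages. A minimal standalone sketch of that logic, assuming OpenAI-style message dicts:

```python
def flush_messages(messages, n):
    # Keep a leading system message, if present; drop the last n of the rest.
    msgs = list(messages)
    if msgs and msgs[0].get("role") == "system":
        system, rest = msgs[0], msgs[1:]
        return [system] + rest[:max(len(rest) - n, 0)]
    return msgs[:max(len(msgs) - n, 0)]

history = [
    {"role": "system", "content": "You are sibiji."},
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "hello"},
]
print(flush_messages(history, 1))  # system + first user message survive
```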
npcsh-1.1.11.data/data/npcsh/npc_team/load_file.jinx
ADDED

@@ -0,0 +1,35 @@
+jinx_name: load_file
+description: Loads and returns the contents of a file using npcpy's file loaders
+inputs:
+- file_path
+steps:
+- name: "load_file"
+engine: "python"
+code: |
+import os
+from npcpy.data.load import load_file_contents
+
+# Expand user path and get absolute path
+file_path = os.path.expanduser("{{ file_path }}")
+
+# Check if file exists
+if not os.path.exists(file_path):
+output = f"Error: File not found at {file_path}"
+else:
+try:
+# Load file contents using npcpy's loader
+# Returns chunks by default with chunk_size=250
+chunks = load_file_contents(file_path)
+
+# Join chunks back together for full content
+if isinstance(chunks, list):
+if chunks and chunks[0].startswith("Error"):
+output = chunks[0]
+else:
+file_content = "\n".join(chunks)
+output = f"File: {file_path}\n\n{file_content}"
+else:
+output = f"File: {file_path}\n\n{chunks}"
+
+except Exception as e:
+output = f"Error reading file {file_path}: {str(e)}"
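For plain-text files, a rough stand-in with the same contract as `load_file_contents` (which, per the comments above, returns chunks of about 250 characters by default) might look like this; the real loader handles many more formats:

```python
import os

def load_text_chunks(path, chunk_size=250):
    # Split a text file into fixed-size character chunks, then let the caller rejoin.
    with open(os.path.expanduser(path), "r", errors="ignore") as f:
        text = f.read()
    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]

chunks = load_text_chunks("~/.bashrc")
print(f"{len(chunks)} chunk(s)" if chunks else "empty file")
```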
{npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/serve.jinx
CHANGED

@@ -17,9 +17,6 @@ steps:
 if cors_str and cors_str.strip():
 cors_origins = [origin.strip() for origin in cors_str.split(",")]
 
-# start_flask_server blocks, so this will hold the Jinx until the server is stopped.
-# In a real-world scenario, you might want to run this in a separate process
-# or have a non-blocking server start.
 start_flask_server(
 port=int(port), # Ensure port is an integer
 cors_origins=cors_origins,
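The deleted comments noted that `start_flask_server` blocks and suggested a separate process as the alternative. A minimal sketch of that alternative, with a hypothetical stand-in for the blocking call:

```python
import multiprocessing
import time

def run_server(port):
    # Hypothetical stand-in for a blocking call such as start_flask_server.
    while True:
        time.sleep(1)

if __name__ == "__main__":
    proc = multiprocessing.Process(target=run_server, args=(5337,), daemon=True)
    proc.start()       # returns immediately; the server runs in its own process
    print("server running in background")
    proc.terminate()   # stop it when done
```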
npcsh-1.1.11.data/data/npcsh/npc_team/sh.jinx
ADDED

@@ -0,0 +1,38 @@
+jinx_name: sh
+description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
+inputs:
+- bash_command
+steps:
+- name: execute_bash
+engine: python
+code: |
+import subprocess
+import sys # Import sys to explicitly write to stderr for visibility
+
+# Force a simple print to see if anything comes out
+print("--- Jinx 'sh' code started ---", file=sys.stderr)
+
+cmd = '{{ bash_command }}'
+
+# Initialize output to an empty string to ensure it always exists
+output = ""
+
+
+process = subprocess.Popen(
+cmd,
+shell=True,
+stdout=subprocess.PIPE,
+stderr=subprocess.PIPE
+)
+stdout, stderr = process.communicate()
+
+# Print raw stdout/stderr to sys.stderr for better visibility in some environments
+print(f"Jinx 'sh' raw stdout: {stdout.decode('utf-8', errors='ignore')}", file=sys.stderr)
+print(f"Jinx 'sh' raw stderr: {stderr.decode('utf-8', errors='ignore')}", file=sys.stderr)
+
+if stderr:
+output = f"Error: {stderr.decode('utf-8')}"
+else:
+output = stdout.decode('utf-8')
+
+
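The Popen block above can be written more compactly with `subprocess.run`; note that, like the jinx, this sketch treats any stderr output as an error, so commands that print warnings on success will be reported as failures:

```python
import subprocess

def run_shell(cmd: str) -> str:
    # Same capture logic as the jinx's Popen/communicate pattern.
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    return f"Error: {result.stderr}" if result.stderr else result.stdout

print(run_shell("ls | head -3"))
```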
{npcsh-1.1.9.data → npcsh-1.1.11.data}/data/npcsh/npc_team/sql.jinx
CHANGED

@@ -1,4 +1,4 @@
-jinx_name:
+jinx_name: sql
 description: Execute queries on the ~/npcsh_history.db to pull data. The database
 contains only information about conversations and other user-provided data. It does
 not store any information about individual files. Avoid using percent signs unless absolutely necessary.

@@ -13,4 +13,4 @@ steps:
 df = pd.read_sql_query(query, npc.db_conn)
 except Exception as e:
 df = pd.DataFrame({'Error': [str(e)]})
-output = df.to_string()
+output = df.to_string()
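The sql jinx wraps `pd.read_sql_query` against the history database. A minimal standalone equivalent that lists the available tables first (the schema itself is not shown in this diff):

```python
import os
import sqlite3
import pandas as pd

conn = sqlite3.connect(os.path.expanduser("~/npcsh_history.db"))
try:
    # sqlite_master always exists, so this avoids guessing table names.
    df = pd.read_sql_query("SELECT name FROM sqlite_master WHERE type='table'", conn)
except Exception as e:
    df = pd.DataFrame({"Error": [str(e)]})
print(df.to_string())
```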
npcsh-1.1.11.data/data/npcsh/npc_team/trigger.jinx
ADDED

@@ -0,0 +1,61 @@
+jinx_name: "trigger"
+description: "Creates a persistent listener (--listen) or a scheduled task (--cron)."
+inputs:
+- listen: "" # The description for a persistent, event-driven listener.
+- cron: "" # The description for a scheduled, time-based task.
+steps:
+- name: "execute_command"
+engine: "python"
+code: |
+import traceback
+from npcpy.work.trigger import execute_trigger_command # For --listen
+from npcpy.work.plan import execute_plan_command # For --cron
+
+listen_description = context.get('listen')
+cron_description = context.get('cron')
+output_messages = context.get('messages', [])
+
+USAGE = 'Usage: /trigger --listen "<description>" OR /trigger --cron "<description>"'
+
+# Determine which command was used and set the appropriate variables
+subcommand = None
+description = None
+executor_func = None
+
+# --- Argument Validation ---
+# Ensure mutual exclusivity
+if listen_description and cron_description:
+context['output'] = f"Error: --listen and --cron are mutually exclusive. {USAGE}"
+context['messages'] = output_messages
+exit()
+
+# --- Command Dispatch ---
+if listen_description:
+subcommand = 'listen'
+description = listen_description
+executor_func = execute_trigger_command
+elif cron_description:
+subcommand = 'cron'
+description = cron_description
+executor_func = execute_plan_command
+else:
+# Handle case where no arguments were provided
+context['output'] = f"Error: You must provide either --listen or --cron. {USAGE}"
+context['messages'] = output_messages
+exit()
+
+# --- Execution ---
+try:
+result = executor_func(command=description, **context)
+
+if isinstance(result, dict):
+output_key = 'Listener' if subcommand == 'listen' else 'Cron job'
+context['output'] = result.get('output', f'{output_key} created successfully.')
+context['messages'] = result.get('messages', output_messages)
+else:
+context['output'] = str(result)
+context['messages'] = output_messages
+except Exception as e:
+traceback.print_exc()
+context['output'] = f"Error creating {subcommand}: {e}"
+context['messages'] = output_messages
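A minimal sketch of the mutually-exclusive dispatch pattern above, with hypothetical stand-ins for the two executor functions:

```python
def create_listener(description):
    # Hypothetical stand-in for execute_trigger_command.
    return f"Listener created: {description}"

def create_cron_job(description):
    # Hypothetical stand-in for execute_plan_command.
    return f"Cron job created: {description}"

def dispatch(listen=None, cron=None):
    if listen and cron:
        return "Error: --listen and --cron are mutually exclusive."
    if listen:
        return create_listener(listen)
    if cron:
        return create_cron_job(cron)
    return "Error: You must provide either --listen or --cron."

print(dispatch(listen="watch ~/Downloads for new PDFs"))
```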