npcsh 1.1.12__py3-none-any.whl → 1.1.14__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in a supported public registry. It is provided for informational purposes only.
- npcsh/_state.py +700 -377
- npcsh/alicanto.py +54 -1153
- npcsh/completion.py +206 -0
- npcsh/config.py +163 -0
- npcsh/corca.py +35 -1462
- npcsh/execution.py +185 -0
- npcsh/guac.py +31 -1986
- npcsh/npc_team/jinxs/code/sh.jinx +11 -15
- npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
- npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
- npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
- npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
- npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
- npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
- npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
- npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
- npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
- npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
- npcsh/npc_team/jinxs/utils/search.jinx +3 -3
- npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
- npcsh/npcsh.py +76 -20
- npcsh/parsing.py +118 -0
- npcsh/plonk.py +41 -329
- npcsh/pti.py +41 -201
- npcsh/spool.py +34 -239
- npcsh/ui.py +199 -0
- npcsh/wander.py +54 -542
- npcsh/yap.py +38 -570
- npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
- npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
- npcsh-1.1.14.dist-info/RECORD +135 -0
- npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
- npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
- npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
- npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
- npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
- npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
- npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
- npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
- npcsh-1.1.12.dist-info/RECORD +0 -126
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
npcsh/npc_team/jinxs/modes/yap.jinx
CHANGED

@@ -1,25 +1,262 @@
-jinx_name:
-description:
-inputs:
+jinx_name: yap
+description: Voice chat mode - speech-to-text input, text-to-speech output
+inputs:
+  - model: null
+  - provider: null
+  - tts_model: kokoro
+  - voice: af_heart
+  - files: null
+
 steps:
-  - name:
-    engine:
+  - name: yap_repl
+    engine: python
     code: |
-      import
-
-
-
+      import os
+      import sys
+      import time
+      import tempfile
+      import threading
+      import queue
+      from termcolor import colored
 
+      # Audio imports with graceful fallback
       try:
-
-
-
-
-
-
-
-
-
-
-
-
+          import torch
+          import pyaudio
+          import wave
+          import numpy as np
+          from faster_whisper import WhisperModel
+          from gtts import gTTS
+          from npcpy.data.audio import (
+              FORMAT, CHANNELS, RATE, CHUNK,
+              transcribe_recording, convert_mp3_to_wav, cleanup_temp_files
+          )
+          AUDIO_AVAILABLE = True
+      except ImportError as e:
+          AUDIO_AVAILABLE = False
+          print(colored(f"Audio dependencies not available: {e}", "yellow"))
+          print("Install with: pip install npcsh[audio]")
+
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.npc_sysenv import get_system_message, render_markdown
+      from npcpy.data.load import load_file_contents
+      from npcpy.data.text import rag_search
+
+      npc = context.get('npc')
+      team = context.get('team')
+      messages = context.get('messages', [])
+      files = context.get('files')
+      tts_model = context.get('tts_model', 'kokoro')
+      voice = context.get('voice', 'af_heart')
+
+      model = context.get('model') or (npc.model if npc else None)
+      provider = context.get('provider') or (npc.provider if npc else None)
+
+      print("""
+      ██╗   ██╗ █████╗ ██████╗
+      ╚██╗ ██╔╝██╔══██╗██╔══██╗
+       ╚████╔╝ ███████║██████╔╝
+        ╚██╔╝  ██╔══██║██╔═══╝
+         ██║   ██║  ██║██║
+         ╚═╝   ╚═╝  ╚═╝╚═╝
+
+      Voice Chat Mode
+      """)
+
+      npc_name = npc.name if npc else "yap"
+      print(f"Entering yap mode (NPC: {npc_name}). Type '/yq' to exit.")
+
+      if not AUDIO_AVAILABLE:
+          print(colored("Audio not available. Falling back to text mode.", "yellow"))
+
+      # Load files for RAG context
+      loaded_chunks = {}
+      if files:
+          if isinstance(files, str):
+              files = [f.strip() for f in files.split(',')]
+          for file_path in files:
+              file_path = os.path.expanduser(file_path)
+              if os.path.exists(file_path):
+                  try:
+                      chunks = load_file_contents(file_path)
+                      loaded_chunks[file_path] = chunks
+                      print(colored(f"Loaded: {file_path}", "green"))
+                  except Exception as e:
+                      print(colored(f"Error loading {file_path}: {e}", "red"))
+
+      # System message for concise voice responses
+      sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
+      sys_msg += "\n\nProvide brief responses of 1-2 sentences unless asked for more detail. Keep responses clear and conversational for voice."
+
+      if not messages or messages[0].get("role") != "system":
+          messages.insert(0, {"role": "system", "content": sys_msg})
+
+      # Audio state
+      vad_model = None
+      whisper_model = None
+
+      if AUDIO_AVAILABLE:
+          try:
+              # Load VAD model for voice activity detection
+              vad_model, _ = torch.hub.load(
+                  repo_or_dir="snakers4/silero-vad",
+                  model="silero_vad",
+                  force_reload=False,
+                  onnx=False,
+                  verbose=False
+              )
+              vad_model.to('cpu')
+              print(colored("VAD model loaded.", "green"))
+
+              # Load Whisper for STT
+              whisper_model = WhisperModel("base", device="cpu", compute_type="int8")
+              print(colored("Whisper model loaded.", "green"))
+          except Exception as e:
+              print(colored(f"Error loading audio models: {e}", "red"))
+              AUDIO_AVAILABLE = False
+
+      def speak_text(text, tts_model='kokoro', voice='af_heart'):
+          """Convert text to speech and play it"""
+          if not AUDIO_AVAILABLE:
+              return
+
+          try:
+              # Use gTTS as fallback
+              tts = gTTS(text=text, lang='en')
+              with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
+                  tts.save(f.name)
+                  wav_path = convert_mp3_to_wav(f.name)
+
+              # Play audio
+              import subprocess
+              if sys.platform == 'darwin':
+                  subprocess.run(['afplay', wav_path], check=True)
+              elif sys.platform == 'linux':
+                  subprocess.run(['aplay', wav_path], check=True)
+              else:
+                  # Windows
+                  import winsound
+                  winsound.PlaySound(wav_path, winsound.SND_FILENAME)
+
+              cleanup_temp_files([f.name, wav_path])
+          except Exception as e:
+              print(colored(f"TTS error: {e}", "red"))
+
+      def record_audio(duration=5):
+          """Record audio from microphone"""
+          if not AUDIO_AVAILABLE:
+              return None
+
+          try:
+              p = pyaudio.PyAudio()
+              stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
+
+              print(colored("Recording...", "cyan"), end='', flush=True)
+              frames = []
+              for _ in range(0, int(RATE / CHUNK * duration)):
+                  data = stream.read(CHUNK)
+                  frames.append(data)
+              print(colored(" Done.", "cyan"))
+
+              stream.stop_stream()
+              stream.close()
+              p.terminate()
+
+              # Save to temp file
+              with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
+                  wf = wave.open(f.name, 'wb')
+                  wf.setnchannels(CHANNELS)
+                  wf.setsampwidth(p.get_sample_size(FORMAT))
+                  wf.setframerate(RATE)
+                  wf.writeframes(b''.join(frames))
+                  wf.close()
+                  return f.name
+          except Exception as e:
+              print(colored(f"Recording error: {e}", "red"))
+              return None
+
+      def transcribe_audio(audio_path):
+          """Transcribe audio to text using Whisper"""
+          if not whisper_model or not audio_path:
+              return ""
+
+          try:
+              segments, _ = whisper_model.transcribe(audio_path, beam_size=5)
+              text = " ".join([seg.text for seg in segments])
+              cleanup_temp_files([audio_path])
+              return text.strip()
+          except Exception as e:
+              print(colored(f"Transcription error: {e}", "red"))
+              return ""
+
+      # REPL loop
+      while True:
+          try:
+              # Voice input or text input
+              if AUDIO_AVAILABLE:
+                  prompt_str = f"{npc_name}:yap> [Press Enter to speak, or type] "
+              else:
+                  prompt_str = f"{npc_name}:yap> "
+
+              user_input = input(prompt_str).strip()
+
+              if user_input.lower() == "/yq":
+                  print("Exiting yap mode.")
+                  break
+
+              # Empty input = record audio
+              if not user_input and AUDIO_AVAILABLE:
+                  audio_path = record_audio(5)
+                  if audio_path:
+                      user_input = transcribe_audio(audio_path)
+                      if user_input:
+                          print(colored(f"You said: {user_input}", "cyan"))
+                      else:
+                          print(colored("Could not transcribe audio.", "yellow"))
+                          continue
+                  else:
+                      continue
+
+              if not user_input:
+                  continue
+
+              # Add RAG context if files loaded
+              current_prompt = user_input
+              if loaded_chunks:
+                  context_content = ""
+                  for filename, chunks in loaded_chunks.items():
+                      full_text = "\n".join(chunks)
+                      retrieved = rag_search(user_input, full_text, similarity_threshold=0.3)
+                      if retrieved:
+                          context_content += f"\n{retrieved}\n"
+                  if context_content:
+                      current_prompt += f"\n\nContext:{context_content}"
+
+              # Get response
+              resp = get_llm_response(
+                  current_prompt,
+                  model=model,
+                  provider=provider,
+                  messages=messages,
+                  stream=False,  # Don't stream for voice
+                  npc=npc
+              )
+
+              messages = resp.get('messages', messages)
+              response_text = str(resp.get('response', ''))
+
+              # Display and speak response
+              print(colored(f"{npc_name}: ", "green") + response_text)
+
+              if AUDIO_AVAILABLE:
+                  speak_text(response_text, tts_model, voice)
+
+          except KeyboardInterrupt:
+              print("\nUse '/yq' to exit or continue.")
+              continue
+          except EOFError:
+              print("\nExiting yap mode.")
+              break
+
+      context['output'] = "Exited yap mode."
+      context['messages'] = messages
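Read end to end, the REPL above reduces to a capture → transcribe → respond → speak cycle. A condensed sketch of one voice turn, using only names defined in the jinx (note that record_audio uses a fixed 5-second window, and that the silero VAD model is loaded but not yet wired into the capture path shown here):

    # One voice turn of the yap REPL, condensed from the jinx code above.
    audio_path = record_audio(5)               # fixed 5-second PyAudio capture -> temp .wav
    user_input = transcribe_audio(audio_path)  # faster-whisper "base" model, int8 on CPU
    resp = get_llm_response(user_input, model=model, provider=provider,
                            messages=messages, stream=False, npc=npc)
    messages = resp.get('messages', messages)  # carry conversation state forward
    speak_text(str(resp.get('response', '')),  # gTTS -> wav -> afplay / aplay / winsound
               tts_model, voice)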
npcsh/npc_team/jinxs/utils/chat.jinx
CHANGED

@@ -1,17 +1,44 @@
 jinx_name: chat
-description:
+description: Simple chat mode - LLM conversation without tool execution
 inputs:
-  - query
-  -
-  -
+  - query: null
+  - model: null
+  - provider: null
+  - stream: true
+
 steps:
-  - name:
+  - name: chat_response
     engine: python
     code: |
-
-
-
-
-
-      )
-
+      from npcpy.llm_funcs import get_llm_response
+
+      npc = context.get('npc')
+      messages = context.get('messages', [])
+      query = context.get('query', '')
+      stream = context.get('stream', True)
+
+      model = context.get('model') or (npc.model if npc else None)
+      provider = context.get('provider') or (npc.provider if npc else None)
+
+      if not query:
+          context['output'] = ''
+          context['messages'] = messages
+      else:
+          response = get_llm_response(
+              query,
+              model=model,
+              provider=provider,
+              npc=npc,
+              stream=stream,
+              messages=messages
+          )
+
+          context['output'] = response.get('response', '')
+          context['messages'] = response.get('messages', messages)
+
+          # Track usage
+          if 'usage' in response and npc and hasattr(npc, 'shared_context'):
+              usage = response['usage']
+              npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+              npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+              npc.shared_context['turn_count'] += 1
npcsh/npc_team/jinxs/utils/cmd.jinx
ADDED

@@ -0,0 +1,44 @@
+jinx_name: cmd
+description: Command mode - LLM generates and executes shell commands
+inputs:
+  - query: null
+  - model: null
+  - provider: null
+  - stream: true
+
+steps:
+  - name: cmd_execute
+    engine: python
+    code: |
+      from npcpy.llm_funcs import execute_llm_command
+
+      npc = context.get('npc')
+      messages = context.get('messages', [])
+      query = context.get('query', '')
+      stream = context.get('stream', True)
+
+      model = context.get('model') or (npc.model if npc else None)
+      provider = context.get('provider') or (npc.provider if npc else None)
+
+      if not query:
+          context['output'] = ''
+          context['messages'] = messages
+      else:
+          response = execute_llm_command(
+              query,
+              model=model,
+              provider=provider,
+              npc=npc,
+              stream=stream,
+              messages=messages
+          )
+
+          context['output'] = response.get('response', '')
+          context['messages'] = response.get('messages', messages)
+
+          # Track usage
+          if 'usage' in response and npc and hasattr(npc, 'shared_context'):
+              usage = response['usage']
+              npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+              npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+              npc.shared_context['turn_count'] += 1
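The chat and cmd jinxs above end with an identical usage-tracking block. A minimal sketch of that pattern factored into a helper; track_usage is hypothetical, and the setdefault calls are a defensive addition the jinxs themselves omit (they assume the shell state has already initialized these counters):

    def track_usage(npc, response):
        # Accumulate per-session token counters on the NPC's shared context.
        if 'usage' not in response or not npc or not hasattr(npc, 'shared_context'):
            return
        ctx = npc.shared_context
        for key in ('session_input_tokens', 'session_output_tokens', 'turn_count'):
            ctx.setdefault(key, 0)  # defensive; not present in the jinxs themselves
        usage = response['usage']
        ctx['session_input_tokens'] += usage.get('input_tokens', 0)
        ctx['session_output_tokens'] += usage.get('output_tokens', 0)
        ctx['turn_count'] += 1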
npcsh/npc_team/jinxs/utils/search.jinx
CHANGED

@@ -41,11 +41,11 @@ steps:
 
       current_npc = current_state.npc
       current_team = current_state.team
-
+
       npc_name = getattr(current_npc, 'name', '__none__') if current_npc else '__none__'
       team_name = getattr(current_team, 'name', '__none__') if current_team else '__none__'
       current_path = os.getcwd()
-      db_path = os.path.expanduser(context.get("history_db_path"))
+      db_path = os.path.expanduser(context.get("history_db_path") or "~/.npcsh/npcsh_history.db")
 
       try:
          cmd_history = CommandHistory(db_path)
@@ -101,7 +101,7 @@ steps:
 
       result = execute_rag_command(
          command=query,
-         vector_db_path=os.path.expanduser(context.get('vector_db_path')),
+         vector_db_path=os.path.expanduser(context.get('vector_db_path') or "~/.npcsh/npcsh_chroma.db"),
          embedding_model=emodel,
          embedding_provider=eprovider,
          file_contents=file_contents or None
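Both fixes in these two hunks use context.get(key) or default rather than context.get(key, default). The distinction matters because the key can be present with a None value, and a .get() default only applies when the key is missing entirely; previously os.path.expanduser(None) raised a TypeError. A quick illustration:

    import os

    context = {"history_db_path": None}  # key present, value None
    context.get("history_db_path", "~/.npcsh/npcsh_history.db")    # -> None
    context.get("history_db_path") or "~/.npcsh/npcsh_history.db"  # -> the fallback path

    # The fixed line therefore always hands expanduser a real string:
    os.path.expanduser(context.get("history_db_path") or "~/.npcsh/npcsh_history.db")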
npcsh/npc_team/jinxs/utils/usage.jinx
ADDED

@@ -0,0 +1,33 @@
+jinx_name: usage
+description: Display current session token usage and cost
+inputs: []
+steps:
+  - name: show_usage
+    engine: python
+    code: |
+      state = context.get('state')
+      if not state:
+          output = "No state available"
+      else:
+          inp = getattr(state, 'session_input_tokens', 0)
+          out = getattr(state, 'session_output_tokens', 0)
+          cost = getattr(state, 'session_cost_usd', 0.0)
+          turns = getattr(state, 'turn_count', 0)
+          total = inp + out
+
+          def fmt(n):
+              return f"{n/1000:.1f}k" if n >= 1000 else str(n)
+
+          def fmt_cost(c):
+              if c == 0:
+                  return "free (local)"
+              elif c < 0.01:
+                  return f"${c:.4f}"
+              else:
+                  return f"${c:.2f}"
+
+          output = f"Session Usage\n"
+          output += f"Tokens: {fmt(inp)} in / {fmt(out)} out ({fmt(total)} total)\n"
+          output += f"Cost: {fmt_cost(cost)}\n"
+          output += f"Turns: {turns}"
+      context['output'] = output
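For reference, the formatting helpers in this jinx behave as follows (values illustrative):

    fmt(950)          # -> '950'
    fmt(12456)        # -> '12.5k'
    fmt_cost(0)       # -> 'free (local)'
    fmt_cost(0.0042)  # -> '$0.0042'
    fmt_cost(1.5)     # -> '$1.50'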
npcsh/npcsh.py
CHANGED
@@ -32,16 +32,43 @@ except importlib.metadata.PackageNotFoundError:
     VERSION = "unknown"
 
 from npcsh._state import (
-    initial_state, 
+    initial_state,
     orange,
     ShellState,
-    execute_command, 
+    execute_command,
     make_completer,
     process_result,
     readline_safe_prompt,
-    setup_shell, 
+    setup_shell,
     get_multiline_input,
-
+)
+
+
+def display_usage(state: ShellState):
+    """Display token usage and cost summary."""
+    inp = state.session_input_tokens
+    out = state.session_output_tokens
+    cost = state.session_cost_usd
+    turns = state.turn_count
+    total = inp + out
+
+    def fmt(n):
+        return f"{n/1000:.1f}k" if n >= 1000 else str(n)
+
+    def fmt_cost(c):
+        if c == 0:
+            return "free"
+        elif c < 0.01:
+            return f"${c:.4f}"
+        else:
+            return f"${c:.2f}"
+
+    print(colored("\n─────────────────────────────", "cyan"))
+    print(colored("📊 Session Usage", "cyan", attrs=["bold"]))
+    print(f"  Tokens: {fmt(inp)} in / {fmt(out)} out ({fmt(total)} total)")
+    print(f"  Cost: {fmt_cost(cost)}")
+    print(f"  Turns: {turns}")
+    print(colored("─────────────────────────────\n", "cyan"))
 
 
 def print_welcome_message():
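display_usage repeats the fmt and fmt_cost helpers from the usage jinx, differing only in the zero-cost label ("free" here, "free (local)" there). With illustrative counter values, the summary prints roughly:

    ─────────────────────────────
    📊 Session Usage
      Tokens: 12.5k in / 3.2k out (15.7k total)
      Cost: $0.0042
      Turns: 9
    ─────────────────────────────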
@@ -143,10 +170,13 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState, router)
         try:
             if state.messages is not None:
                 if len(state.messages) > 20:
+                    # Display usage before compacting
+                    display_usage(state)
+
                     planning_state = {
-                        "goal": "ongoing npcsh session", 
-                        "facts": [f"Working in {state.current_path}", f"Current mode: {state.current_mode}"], 
-                        "successes": [], 
+                        "goal": "ongoing npcsh session",
+                        "facts": [f"Working in {state.current_path}", f"Current mode: {state.current_mode}"],
+                        "successes": [],
                         "mistakes": [],
                         "todos": [],
                         "constraints": ["Follow user requests", "Use appropriate mode for tasks"]
@@ -164,22 +194,48 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState, router)
             if isinstance(state.npc, NPC) and state.npc.model:
                 display_model = state.npc.model
 
+            npc_name = state.npc.name if isinstance(state.npc, NPC) else "npcsh"
+            team_name = state.team.name if state.team else ""
+
+            # Check if model is local (ollama) or remote (has cost)
+            provider = state.chat_provider
+            if isinstance(state.npc, NPC) and state.npc.provider:
+                provider = state.npc.provider
+            is_local = provider and provider.lower() in ['ollama', 'transformers', 'local']
+
+            # Build token/cost string for hint line
+            if state.session_input_tokens > 0 or state.session_output_tokens > 0:
+                usage_str = f"📊 {state.session_input_tokens:,} in / {state.session_output_tokens:,} out"
+                if not is_local and state.session_cost_usd > 0:
+                    usage_str += f" | ${state.session_cost_usd:.4f}"
+                token_hint = colored(usage_str, "white", attrs=["dark"])
+            else:
+                token_hint = ""
+
             if is_windows:
-
-
-
-
-
-
+                print(f"cwd: {state.current_path}")
+                status = f"{npc_name}"
+                if team_name:
+                    status += f" | {team_name}"
+                status += f" | {display_model}"
+                print(status)
+                prompt = "> "
             else:
-
-
-
-
-
-
+                # Line 1: cwd (full path)
+                cwd_line = colored("📁 ", "blue") + colored(state.current_path, "blue")
+                print(cwd_line)
+
+                # Line 2: npc | team | model
+                npc_colored = orange(npc_name) if isinstance(state.npc, NPC) else colored("npcsh", "cyan")
+                parts = [colored("🤖 ", "yellow") + npc_colored]
+                if team_name:
+                    parts.append(colored("👥 ", "magenta") + colored(team_name, "magenta"))
+                parts.append(colored(display_model, "white", attrs=["dark"]))
+                print(" | ".join(parts))
+
+                prompt = colored("> ", "green")
 
-            user_input = get_multiline_input(prompt).strip()
+            user_input = get_multiline_input(prompt, state=state, router=router, token_hint=token_hint).strip()
 
             if user_input == "\x1a":
                 exit_shell(state)