npcsh 0.1.2__py3-none-any.whl → 1.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +3508 -0
- npcsh/alicanto.py +65 -0
- npcsh/build.py +291 -0
- npcsh/completion.py +206 -0
- npcsh/config.py +163 -0
- npcsh/corca.py +50 -0
- npcsh/execution.py +185 -0
- npcsh/guac.py +46 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_server.py +299 -0
- npcsh/npc.py +323 -0
- npcsh/npc_team/alicanto.npc +2 -0
- npcsh/npc_team/alicanto.png +0 -0
- npcsh/npc_team/corca.npc +12 -0
- npcsh/npc_team/corca.png +0 -0
- npcsh/npc_team/corca_example.png +0 -0
- npcsh/npc_team/foreman.npc +7 -0
- npcsh/npc_team/frederic.npc +6 -0
- npcsh/npc_team/frederic4.png +0 -0
- npcsh/npc_team/guac.png +0 -0
- npcsh/npc_team/jinxs/code/python.jinx +11 -0
- npcsh/npc_team/jinxs/code/sh.jinx +34 -0
- npcsh/npc_team/jinxs/code/sql.jinx +16 -0
- npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
- npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
- npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
- npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
- npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
- npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
- npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
- npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
- npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
- npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
- npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
- npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
- npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
- npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
- npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
- npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
- npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
- npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
- npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
- npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
- npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
- npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
- npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
- npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
- npcsh/npc_team/jinxs/utils/search.jinx +130 -0
- npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
- npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
- npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
- npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
- npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
- npcsh/npc_team/kadiefa.npc +3 -0
- npcsh/npc_team/kadiefa.png +0 -0
- npcsh/npc_team/npcsh.ctx +18 -0
- npcsh/npc_team/npcsh_sibiji.png +0 -0
- npcsh/npc_team/plonk.npc +2 -0
- npcsh/npc_team/plonk.png +0 -0
- npcsh/npc_team/plonkjr.npc +2 -0
- npcsh/npc_team/plonkjr.png +0 -0
- npcsh/npc_team/sibiji.npc +3 -0
- npcsh/npc_team/sibiji.png +0 -0
- npcsh/npc_team/spool.png +0 -0
- npcsh/npc_team/yap.png +0 -0
- npcsh/npcsh.py +296 -112
- npcsh/parsing.py +118 -0
- npcsh/plonk.py +54 -0
- npcsh/pti.py +54 -0
- npcsh/routes.py +139 -0
- npcsh/spool.py +48 -0
- npcsh/ui.py +199 -0
- npcsh/wander.py +62 -0
- npcsh/yap.py +50 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
- npcsh-1.1.13.dist-info/METADATA +522 -0
- npcsh-1.1.13.dist-info/RECORD +135 -0
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
- npcsh-1.1.13.dist-info/entry_points.txt +9 -0
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
- npcsh/command_history.py +0 -81
- npcsh/helpers.py +0 -36
- npcsh/llm_funcs.py +0 -295
- npcsh/main.py +0 -5
- npcsh/modes.py +0 -343
- npcsh/npc_compiler.py +0 -124
- npcsh-0.1.2.dist-info/METADATA +0 -99
- npcsh-0.1.2.dist-info/RECORD +0 -14
- npcsh-0.1.2.dist-info/entry_points.txt +0 -2
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,186 @@
+jinx_name: wander
+description: Experimental wandering mode - creative exploration with varied temperatures and random events
+inputs:
+  - problem: null
+  - environment: null
+  - low_temp: 0.5
+  - high_temp: 1.9
+  - interruption_likelihood: 1.0
+  - sample_rate: 0.4
+  - n_streams: 5
+  - include_events: false
+  - num_events: 3
+  - model: null
+  - provider: null
+
+steps:
+  - name: wander_explore
+    engine: python
+    code: |
+      import os
+      import random
+      from termcolor import colored
+
+      from npcpy.llm_funcs import get_llm_response
+
+      npc = context.get('npc')
+      messages = context.get('messages', [])
+
+      problem = context.get('problem')
+      environment = context.get('environment')
+      low_temp = float(context.get('low_temp', 0.5))
+      high_temp = float(context.get('high_temp', 1.9))
+      interruption_likelihood = float(context.get('interruption_likelihood', 1.0))
+      sample_rate = float(context.get('sample_rate', 0.4))
+      n_streams = int(context.get('n_streams', 5))
+      include_events = context.get('include_events', False)
+      num_events = int(context.get('num_events', 3))
+
+      model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
+      provider = context.get('provider') or (npc.provider if npc else 'gemini')
+
+      if not problem:
+          context['output'] = """Usage: /wander <problem to explore>
+
+      Options:
+        --environment DESC   Metaphorical environment for wandering
+        --low-temp F         Low temperature (default: 0.5)
+        --high-temp F        High temperature (default: 1.9)
+        --n-streams N        Number of exploration streams (default: 5)
+        --include-events     Add random events during wandering
+
+      Example: /wander How might we reimagine urban transportation?"""
+          context['messages'] = messages
+          exit()
+
+      print(f"""
+      ██╗    ██╗ █████╗ ███╗   ██╗██████╗ ███████╗██████╗
+      ██║    ██║██╔══██╗████╗  ██║██╔══██╗██╔════╝██╔══██╗
+      ██║ █╗ ██║███████║██╔██╗ ██║██║  ██║█████╗  ██████╔╝
+      ██║███╗██║██╔══██║██║╚██╗██║██║  ██║██╔══╝  ██╔══██╗
+      ╚███╔███╔╝██║  ██║██║ ╚████║██████╔╝███████╗██║  ██║
+       ╚══╝╚══╝  ╚═╝  ╚═╝╚═╝  ╚═══╝╚═════╝ ╚══════╝╚═╝  ╚═╝
+
+      Experimental Wandering Mode
+      Problem: {problem}
+      Temperature range: {low_temp} - {high_temp}
+      Streams: {n_streams}
+      """)
+
+      # Generate environment if not provided
+      if not environment:
+          env_prompt = f"""Create a rich, metaphorical environment for wandering through while thinking about:
+      "{problem}"
+
+      The environment should:
+      1. Have distinct regions or areas
+      2. Include various elements and features
+      3. Be metaphorically related to the problem
+      4. Be described in 3-5 sentences
+
+      Provide only the description, no framing."""
+
+          print(colored("Generating wandering environment...", "cyan"))
+          resp = get_llm_response(env_prompt, model=model, provider=provider, temperature=0.7, npc=npc)
+          environment = str(resp.get('response', 'A vast conceptual landscape stretches before you.'))
+          print(f"\n{environment}\n")
+
+      # Event types for random encounters
+      event_types = ["encounter", "discovery", "obstacle", "insight", "shift", "memory"]
+
+      all_insights = []
+      wandering_log = []
+
+      for stream_idx in range(n_streams):
+          # Alternate between low and high temperature
+          if stream_idx % 2 == 0:
+              temp = low_temp
+              mode = "focused"
+          else:
+              temp = high_temp
+              mode = "creative"
+
+          print(colored(f"\n--- Stream {stream_idx + 1}/{n_streams} ({mode}, temp={temp}) ---", "cyan"))
+
+          # Generate random event if enabled
+          event_context = ""
+          if include_events and random.random() < sample_rate:
+              event_type = random.choice(event_types)
+              event_prompt = f"""In the environment: {environment}
+
+      While exploring the problem "{problem}", generate a {event_type} event.
+      The event should be metaphorical and relate to the problem.
+      Describe it in 2-3 sentences."""
+
+              event_resp = get_llm_response(event_prompt, model=model, provider=provider, temperature=0.9, npc=npc)
+              event = str(event_resp.get('response', ''))
+              event_context = f"\n\nEvent ({event_type}): {event}"
+              print(colored(f"[{event_type.upper()}] {event[:100]}...", "yellow"))
+
+          # Main wandering exploration
+          wander_prompt = f"""You are wandering through: {environment}
+
+      Problem being explored: "{problem}"
+      {event_context}
+
+      Previous insights: {all_insights[-3:] if all_insights else 'Starting fresh'}
+
+      In this {mode} exploration (temperature {temp}):
+      1. Let your mind wander through the conceptual space
+      2. Make unexpected connections
+      3. Notice what emerges from the wandering
+      4. Share any insights, questions, or realizations
+
+      Think freely and explore."""
+
+          resp = get_llm_response(wander_prompt, model=model, provider=provider, temperature=temp, npc=npc)
+          stream_output = str(resp.get('response', ''))
+          print(stream_output)
+
+          all_insights.append(stream_output)
+          wandering_log.append({
+              "stream": stream_idx + 1,
+              "mode": mode,
+              "temperature": temp,
+              "event": event_context if include_events else None,
+              "insight": stream_output
+          })
+
+          # Random interruption
+          if random.random() < interruption_likelihood * 0.2:
+              print(colored("\n[Pause for reflection...]", "magenta"))
+              reflect_prompt = f"Briefly reflect on what's emerged so far about: {problem}"
+              reflect_resp = get_llm_response(reflect_prompt, model=model, provider=provider, temperature=0.4, npc=npc)
+              print(colored(str(reflect_resp.get('response', ''))[:200], "magenta"))
+
+      # Synthesis
+      print(colored("\n--- Synthesizing Wanderings ---", "cyan"))
+
+      synthesis_prompt = f"""After wandering through "{environment}" exploring "{problem}":
+
+      All insights gathered:
+      {chr(10).join(all_insights)}
+
+      Synthesize what emerged from this wandering:
+      1. Key themes that appeared
+      2. Unexpected connections made
+      3. New questions raised
+      4. Potential directions to explore further"""
+
+      resp = get_llm_response(synthesis_prompt, model=model, provider=provider, temperature=0.5, npc=npc)
+      synthesis = str(resp.get('response', ''))
+
+      print("\n" + "="*50)
+      print(colored("WANDERING SYNTHESIS", "green", attrs=['bold']))
+      print("="*50)
+      print(synthesis)
+
+      context['output'] = synthesis
+      context['messages'] = messages
+      context['wander_result'] = {
+          'problem': problem,
+          'environment': environment,
+          'log': wandering_log,
+          'insights': all_insights,
+          'synthesis': synthesis
+      }
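For orientation: the wander_explore step above takes no function arguments and instead reads and writes a single `context` dict. A minimal caller-side sketch of that contract follows; the keys are taken from the `inputs:` block and the `context.get(...)` calls in the step, but the surrounding runner (how npcsh builds and passes `context`) is not part of this hunk, so treat the framing as illustrative only.

    # Illustrative only: keys mirror what wander_explore reads via context.get(...)
    context = {
        'problem': 'How might we reimagine urban transportation?',
        'environment': None,   # generated by the LLM when left unset
        'n_streams': 3,        # smaller than the default of 5 for a quick pass
        'include_events': True,
        'model': None,         # falls back to the NPC's model, else 'gemini-1.5-pro'
        'provider': None,
        'messages': [],
    }
    # After the step runs:
    #   context['output']        -> the synthesis text
    #   context['wander_result'] -> dict with 'problem', 'environment', 'log', 'insights', 'synthesis'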
@@ -0,0 +1,262 @@
+jinx_name: yap
+description: Voice chat mode - speech-to-text input, text-to-speech output
+inputs:
+  - model: null
+  - provider: null
+  - tts_model: kokoro
+  - voice: af_heart
+  - files: null
+
+steps:
+  - name: yap_repl
+    engine: python
+    code: |
+      import os
+      import sys
+      import time
+      import tempfile
+      import threading
+      import queue
+      from termcolor import colored
+
+      # Audio imports with graceful fallback
+      try:
+          import torch
+          import pyaudio
+          import wave
+          import numpy as np
+          from faster_whisper import WhisperModel
+          from gtts import gTTS
+          from npcpy.data.audio import (
+              FORMAT, CHANNELS, RATE, CHUNK,
+              transcribe_recording, convert_mp3_to_wav, cleanup_temp_files
+          )
+          AUDIO_AVAILABLE = True
+      except ImportError as e:
+          AUDIO_AVAILABLE = False
+          print(colored(f"Audio dependencies not available: {e}", "yellow"))
+          print("Install with: pip install npcsh[audio]")
+
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.npc_sysenv import get_system_message, render_markdown
+      from npcpy.data.load import load_file_contents
+      from npcpy.data.text import rag_search
+
+      npc = context.get('npc')
+      team = context.get('team')
+      messages = context.get('messages', [])
+      files = context.get('files')
+      tts_model = context.get('tts_model', 'kokoro')
+      voice = context.get('voice', 'af_heart')
+
+      model = context.get('model') or (npc.model if npc else None)
+      provider = context.get('provider') or (npc.provider if npc else None)
+
+      print("""
+      ██╗   ██╗ █████╗ ██████╗
+      ╚██╗ ██╔╝██╔══██╗██╔══██╗
+       ╚████╔╝ ███████║██████╔╝
+        ╚██╔╝  ██╔══██║██╔═══╝
+         ██║   ██║  ██║██║
+         ╚═╝   ╚═╝  ╚═╝╚═╝
+
+      Voice Chat Mode
+      """)
+
+      npc_name = npc.name if npc else "yap"
+      print(f"Entering yap mode (NPC: {npc_name}). Type '/yq' to exit.")
+
+      if not AUDIO_AVAILABLE:
+          print(colored("Audio not available. Falling back to text mode.", "yellow"))
+
+      # Load files for RAG context
+      loaded_chunks = {}
+      if files:
+          if isinstance(files, str):
+              files = [f.strip() for f in files.split(',')]
+          for file_path in files:
+              file_path = os.path.expanduser(file_path)
+              if os.path.exists(file_path):
+                  try:
+                      chunks = load_file_contents(file_path)
+                      loaded_chunks[file_path] = chunks
+                      print(colored(f"Loaded: {file_path}", "green"))
+                  except Exception as e:
+                      print(colored(f"Error loading {file_path}: {e}", "red"))
+
+      # System message for concise voice responses
+      sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
+      sys_msg += "\n\nProvide brief responses of 1-2 sentences unless asked for more detail. Keep responses clear and conversational for voice."
+
+      if not messages or messages[0].get("role") != "system":
+          messages.insert(0, {"role": "system", "content": sys_msg})
+
+      # Audio state
+      vad_model = None
+      whisper_model = None
+
+      if AUDIO_AVAILABLE:
+          try:
+              # Load VAD model for voice activity detection
+              vad_model, _ = torch.hub.load(
+                  repo_or_dir="snakers4/silero-vad",
+                  model="silero_vad",
+                  force_reload=False,
+                  onnx=False,
+                  verbose=False
+              )
+              vad_model.to('cpu')
+              print(colored("VAD model loaded.", "green"))
+
+              # Load Whisper for STT
+              whisper_model = WhisperModel("base", device="cpu", compute_type="int8")
+              print(colored("Whisper model loaded.", "green"))
+          except Exception as e:
+              print(colored(f"Error loading audio models: {e}", "red"))
+              AUDIO_AVAILABLE = False
+
+      def speak_text(text, tts_model='kokoro', voice='af_heart'):
+          """Convert text to speech and play it"""
+          if not AUDIO_AVAILABLE:
+              return
+
+          try:
+              # Use gTTS as fallback
+              tts = gTTS(text=text, lang='en')
+              with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
+                  tts.save(f.name)
+                  wav_path = convert_mp3_to_wav(f.name)
+
+              # Play audio
+              import subprocess
+              if sys.platform == 'darwin':
+                  subprocess.run(['afplay', wav_path], check=True)
+              elif sys.platform == 'linux':
+                  subprocess.run(['aplay', wav_path], check=True)
+              else:
+                  # Windows
+                  import winsound
+                  winsound.PlaySound(wav_path, winsound.SND_FILENAME)
+
+              cleanup_temp_files([f.name, wav_path])
+          except Exception as e:
+              print(colored(f"TTS error: {e}", "red"))
+
+      def record_audio(duration=5):
+          """Record audio from microphone"""
+          if not AUDIO_AVAILABLE:
+              return None
+
+          try:
+              p = pyaudio.PyAudio()
+              stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
+
+              print(colored("Recording...", "cyan"), end='', flush=True)
+              frames = []
+              for _ in range(0, int(RATE / CHUNK * duration)):
+                  data = stream.read(CHUNK)
+                  frames.append(data)
+              print(colored(" Done.", "cyan"))
+
+              stream.stop_stream()
+              stream.close()
+              p.terminate()
+
+              # Save to temp file
+              with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
+                  wf = wave.open(f.name, 'wb')
+                  wf.setnchannels(CHANNELS)
+                  wf.setsampwidth(p.get_sample_size(FORMAT))
+                  wf.setframerate(RATE)
+                  wf.writeframes(b''.join(frames))
+                  wf.close()
+                  return f.name
+          except Exception as e:
+              print(colored(f"Recording error: {e}", "red"))
+              return None
+
+      def transcribe_audio(audio_path):
+          """Transcribe audio to text using Whisper"""
+          if not whisper_model or not audio_path:
+              return ""
+
+          try:
+              segments, _ = whisper_model.transcribe(audio_path, beam_size=5)
+              text = " ".join([seg.text for seg in segments])
+              cleanup_temp_files([audio_path])
+              return text.strip()
+          except Exception as e:
+              print(colored(f"Transcription error: {e}", "red"))
+              return ""
+
+      # REPL loop
+      while True:
+          try:
+              # Voice input or text input
+              if AUDIO_AVAILABLE:
+                  prompt_str = f"{npc_name}:yap> [Press Enter to speak, or type] "
+              else:
+                  prompt_str = f"{npc_name}:yap> "
+
+              user_input = input(prompt_str).strip()
+
+              if user_input.lower() == "/yq":
+                  print("Exiting yap mode.")
+                  break
+
+              # Empty input = record audio
+              if not user_input and AUDIO_AVAILABLE:
+                  audio_path = record_audio(5)
+                  if audio_path:
+                      user_input = transcribe_audio(audio_path)
+                      if user_input:
+                          print(colored(f"You said: {user_input}", "cyan"))
+                      else:
+                          print(colored("Could not transcribe audio.", "yellow"))
+                          continue
+                  else:
+                      continue
+
+              if not user_input:
+                  continue
+
+              # Add RAG context if files loaded
+              current_prompt = user_input
+              if loaded_chunks:
+                  context_content = ""
+                  for filename, chunks in loaded_chunks.items():
+                      full_text = "\n".join(chunks)
+                      retrieved = rag_search(user_input, full_text, similarity_threshold=0.3)
+                      if retrieved:
+                          context_content += f"\n{retrieved}\n"
+                  if context_content:
+                      current_prompt += f"\n\nContext:{context_content}"
+
+              # Get response
+              resp = get_llm_response(
+                  current_prompt,
+                  model=model,
+                  provider=provider,
+                  messages=messages,
+                  stream=False,  # Don't stream for voice
+                  npc=npc
+              )
+
+              messages = resp.get('messages', messages)
+              response_text = str(resp.get('response', ''))
+
+              # Display and speak response
+              print(colored(f"{npc_name}: ", "green") + response_text)
+
+              if AUDIO_AVAILABLE:
+                  speak_text(response_text, tts_model, voice)
+
+          except KeyboardInterrupt:
+              print("\nUse '/yq' to exit or continue.")
+              continue
+          except EOFError:
+              print("\nExiting yap mode.")
+              break
+
+      context['output'] = "Exited yap mode."
+      context['messages'] = messages
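The yap_repl step uses the same `context` contract. A caller-side sketch follows, with the same caveat that the runner wiring is not shown in this hunk and the file paths are hypothetical; note also that although a `tts_model` input is declared, the step body as written synthesizes speech with gTTS and plays it via afplay, aplay, or winsound depending on platform.

    # Illustrative only: keys mirror the inputs: block and the context.get(...) calls above
    context = {
        'files': '~/notes/todo.md, ~/docs/spec.txt',  # hypothetical paths; comma-separated string or list, used for RAG each turn
        'tts_model': 'kokoro',   # declared default; the step currently falls back to gTTS for synthesis
        'voice': 'af_heart',
        'model': None,           # falls back to the NPC's model/provider when unset
        'provider': None,
        'messages': [],
    }
    # In the REPL: pressing Enter on an empty prompt records ~5 seconds of audio,
    # faster-whisper transcribes it, the reply is printed and spoken, and '/yq' exits,
    # leaving context['output'] == "Exited yap mode."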