npcsh 1.0.26__py3-none-any.whl → 1.0.27__py3-none-any.whl
This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- npcsh/_state.py +105 -105
- npcsh/alicanto.py +88 -88
- npcsh/corca.py +423 -81
- npcsh/guac.py +110 -107
- npcsh/mcp_helpers.py +45 -45
- npcsh/mcp_server.py +16 -17
- npcsh/npc.py +16 -17
- npcsh/npc_team/jinxs/bash_executer.jinx +1 -1
- npcsh/npc_team/jinxs/edit_file.jinx +6 -6
- npcsh/npc_team/jinxs/image_generation.jinx +5 -5
- npcsh/npc_team/jinxs/screen_cap.jinx +2 -2
- npcsh/npcsh.py +5 -2
- npcsh/plonk.py +8 -8
- npcsh/routes.py +77 -77
- npcsh/spool.py +13 -13
- npcsh/wander.py +37 -37
- npcsh/yap.py +72 -72
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/bash_executer.jinx +1 -1
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/edit_file.jinx +6 -6
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/image_generation.jinx +5 -5
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/screen_cap.jinx +2 -2
- {npcsh-1.0.26.dist-info → npcsh-1.0.27.dist-info}/METADATA +1 -1
- npcsh-1.0.27.dist-info/RECORD +73 -0
- npcsh-1.0.26.dist-info/RECORD +0 -73
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/internet_search.jinx +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/python_executor.jinx +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.0.26.data → npcsh-1.0.27.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.0.26.dist-info → npcsh-1.0.27.dist-info}/WHEEL +0 -0
- {npcsh-1.0.26.dist-info → npcsh-1.0.27.dist-info}/entry_points.txt +0 -0
- {npcsh-1.0.26.dist-info → npcsh-1.0.27.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.0.26.dist-info → npcsh-1.0.27.dist-info}/top_level.txt +0 -0
npcsh/wander.py CHANGED

Every change in this file is whitespace-only: each removed/added pair differs only in trailing or leading whitespace, so the two sides render identically.

```diff
@@ -37,7 +37,7 @@ def generate_random_events(
     - impact: How this might impact the problem-solving process
     - location: Where in the environment this occurs
     """
-    
+
     if not environment:
         env_prompt = f"""
         I need to create an imaginative environment for an AI to wander through while thinking about this problem:
@@ -65,29 +65,29 @@ def generate_random_events(
 
         environment = env_response.get('response', '')
         if isinstance(environment, (list, dict)) or hasattr(environment, '__iter__') and not isinstance(environment, (str, bytes)):
-            
+
             environment = ''.join([str(chunk) for chunk in environment])
 
     print(f"\nGenerated wandering environment:\n{environment}\n")
 
-    
+
     event_types = [
-        {"type": "encounter", "weight": 0.25},
-        {"type": "discovery", "weight": 0.2},
-        {"type": "obstacle", "weight": 0.15},
-        {"type": "insight", "weight": 0.2},
-        {"type": "shift", "weight": 0.1},
-        {"type": "memory", "weight": 0.1}
+        {"type": "encounter", "weight": 0.25},
+        {"type": "discovery", "weight": 0.2},
+        {"type": "obstacle", "weight": 0.15},
+        {"type": "insight", "weight": 0.2},
+        {"type": "shift", "weight": 0.1},
+        {"type": "memory", "weight": 0.1}
     ]
 
-    
+
     cumulative_weights = []
     current_sum = 0
     for event in event_types:
        current_sum += event["weight"]
        cumulative_weights.append(current_sum)
 
-    
+
     selected_event_types = []
     for _ in range(num_events):
         r = random.random() * current_sum
```
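The isinstance/hasattr check above is the hunk's most subtle line: it coerces a streamed response (any non-string iterable) into a single string, and it only reads correctly because `and` binds tighter than `or`. A minimal standalone sketch of the same coercion, with the precedence made explicit:

```python
def coerce_to_text(response):
    # Strings and bytes pass through untouched; lists, dicts, and other
    # iterables (e.g. a generator of stream chunks) are joined into one
    # string. Parentheses make Python's precedence explicit: `and` binds
    # tighter than `or`, which is what the original condition relies on.
    if isinstance(response, (list, dict)) or (
        hasattr(response, '__iter__') and not isinstance(response, (str, bytes))
    ):
        return ''.join(str(chunk) for chunk in response)
    return response

assert coerce_to_text("already text") == "already text"
assert coerce_to_text(iter(["a ", "b ", "c"])) == "a b c"
```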
```diff
@@ -96,7 +96,7 @@ def generate_random_events(
             selected_event_types.append(event_types[i]["type"])
             break
 
-    
+
     events_prompt = f"""
     I'm wandering through this environment while thinking about a problem:
 
```
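The cumulative-weight list built in the previous hunk, together with `r = random.random() * current_sum` here, implements weighted sampling with replacement. The standard library's `random.choices` does the same thing in one call; a sketch using the weights from the diff:

```python
import random

event_types = [
    {"type": "encounter", "weight": 0.25},
    {"type": "discovery", "weight": 0.2},
    {"type": "obstacle", "weight": 0.15},
    {"type": "insight", "weight": 0.2},
    {"type": "shift", "weight": 0.1},
    {"type": "memory", "weight": 0.1},
]

# random.choices normalizes the weights itself, so they need not sum to 1.
selected_event_types = random.choices(
    [e["type"] for e in event_types],
    weights=[e["weight"] for e in event_types],
    k=3,  # num_events
)
print(selected_event_types)  # e.g. ['insight', 'encounter', 'memory']
```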
```diff
@@ -126,18 +126,18 @@ def generate_random_events(
 
     events_text = events_response.get('response', '')
     if isinstance(events_text, (list, dict)) or hasattr(events_text, '__iter__') and not isinstance(events_text, (str, bytes)):
-        
+
         events_text = ''.join([str(chunk) for chunk in events_text])
 
-    
+
     try:
         import json
         events = json.loads(events_text)
         if not isinstance(events, list):
-            
+
             events = [{"type": "fallback", "description": events_text, "location": "unknown", "impact": "unknown"}]
     except:
-        
+
         events = []
         event_chunks = events_text.split("\n\n")
         for i, chunk in enumerate(event_chunks[:num_events]):
```
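This hunk shows the usual defensive pattern for LLM output: attempt strict JSON, wrap a non-list result, and on a parse failure fall back to splitting the raw text on blank lines. A condensed, runnable sketch of the same flow (with the bare `except:` narrowed to `json.JSONDecodeError`):

```python
import json

def parse_events(events_text, num_events):
    try:
        events = json.loads(events_text)
        if not isinstance(events, list):
            events = [{"type": "fallback", "description": events_text,
                       "location": "unknown", "impact": "unknown"}]
    except json.JSONDecodeError:
        # Treat blank-line-separated chunks of the raw text as events.
        events = [{"type": "unknown", "description": chunk.strip(),
                   "location": "unknown", "impact": "See description"}
                  for chunk in events_text.split("\n\n")[:num_events]
                  if chunk.strip()]
    return events

print(parse_events('[{"type": "discovery"}]', 3))
print(parse_events("A strange door appears.\n\nThe light shifts.", 2))
```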
```diff
@@ -149,7 +149,7 @@ def generate_random_events(
                 "impact": "See description"
             })
 
-    
+
     while len(events) < num_events:
         i = len(events)
         event_type = selected_event_types[i] if i < len(selected_event_types) else "unknown"
@@ -197,7 +197,7 @@ def perform_single_wandering(problem,
     Returns:
         tuple: (high_temp_streams, high_temp_samples, assistant_insight, events, environment)
     """
-    
+
     events = []
     if include_events:
         events = generate_random_events(
@@ -209,9 +209,9 @@ def perform_single_wandering(problem,
             num_events=num_events,
             **api_kwargs
         )
-        
+
         if not environment and events:
-            
+
             environment = get_llm_response(
                 prompt=f"Summarize the environment described in these events: {events}",
                 model=model,
@@ -221,7 +221,7 @@ def perform_single_wandering(problem,
                 **api_kwargs
             ).get('response', '')
 
-    
+
     event_context = ""
     if events:
         event_descriptions = [f"• {event['type'].capitalize()} at {event['location']}: {event['description']}"
@@ -265,17 +265,17 @@ def perform_single_wandering(problem,
     high_temp_streams = []
     high_temp_samples = []
 
-    
+
     events_to_use = events.copy() if events else []
 
     for n in range(n_high_temp_streams):
         print(f'\nStream #{n+1}')
 
-        
+
         if events_to_use and random.random() < 0.1:
             event = events_to_use.pop(0)
             print(f"\n[EVENT: {event['type']} at {event['location']}]\n{event['description']}\n")
-            
+
             event_prompt = f"\nSuddenly, {event['description']} This happens at {event['location']}."
         else:
             event_prompt = ""
@@ -336,7 +336,7 @@ def perform_single_wandering(problem,
 
     print('\n\n--- Wandering complete ---\n')
 
-    
+
     event_insights = ""
     if events:
         event_insights = "\n\nDuring your wandering, you encountered these events:\n" + "\n".join(
@@ -434,7 +434,7 @@ def enter_wander_mode(problem,
     while True:
         print(f"\nCurrent exploration: {current_problem}\n")
 
-        
+
         high_temp_streams, high_temp_samples, insight, events, env = perform_single_wandering(
             current_problem,
             npc=npc,
@@ -453,11 +453,11 @@ def enter_wander_mode(problem,
             **api_kwargs
         )
 
-        
+
         if not current_environment and env:
             current_environment = env
 
-        
+
         wandering_history.append({
             "problem": current_problem,
             "environment": current_environment,
@@ -468,7 +468,7 @@ def enter_wander_mode(problem,
         })
         if interactive:
 
-            
+
             print("\n\n--- Wandering session complete ---")
             print("Options:")
             print("1. Continue wandering with the same problem and environment")
@@ -480,22 +480,22 @@ def enter_wander_mode(problem,
             choice = input("\nEnter your choice (1-5): ").strip()
 
             if choice == "1":
-                
+
                 pass
             elif choice == "2":
-                
+
                 print("\nBased on the insights gained, what new problem would you like to explore?")
                 new_problem = input("New problem: ").strip()
                 if new_problem:
                     current_problem = new_problem
             elif choice == "3":
-                
+
                 print("\nDescribe a new environment for your wandering:")
                 new_env = input("New environment: ").strip()
                 if new_env:
                     current_environment = new_env
             elif choice == "4":
-                
+
                 print("\nBased on the insights gained, what new problem would you like to explore?")
                 new_problem = input("New problem: ").strip()
                 print("\nDescribe a new environment for your wandering:")
@@ -505,17 +505,17 @@ def enter_wander_mode(problem,
                 if new_env:
                     current_environment = new_env
             else:
-                
+
                 print("\n=== Exiting Wander Mode ===\n")
                 break
         else:
             break
 
-    
+
     return wandering_history
 
 def main():
-    
+
     import argparse
     parser = argparse.ArgumentParser(description="Enter wander mode for chatting with an LLM")
     parser.add_argument("problem", type=str, help="Problem to solve")
@@ -534,7 +534,7 @@ def main():
     print('npc: ', args.npc)
     print(args.stream)
 
-    
+
     enter_wander_mode(
         args.problem,
         npc=npc,
```
npcsh/yap.py CHANGED

As in wander.py, every change in this file is whitespace-only; each removed/added pair renders identically.

```diff
@@ -99,7 +99,7 @@ def enter_yap_mode(
 
     system_message = get_system_message(npc) if npc else "You are a helpful assistant."
 
-    
+
     system_message = system_message + " " + concise_instruction
 
     if messages is None or len(messages) == 0:
@@ -117,14 +117,14 @@ def enter_yap_mode(
 
 
 
-    
+
     pyaudio_instance = pyaudio.PyAudio()
-    audio_stream = None
+    audio_stream = None
     transcription_queue = queue.Queue()
 
-    
+
     is_speaking = threading.Event()
-    is_speaking.clear()
+    is_speaking.clear()
 
     speech_queue = queue.Queue(maxsize=20)
     speech_thread_active = threading.Event()
@@ -134,22 +134,22 @@ def enter_yap_mode(
         nonlocal running, audio_stream
 
         while running and speech_thread_active.is_set():
-            
-            
+
+
             print('.', end='', flush=True)
             if not speech_queue.empty():
                 print('\n')
                 text_to_speak = speech_queue.get(timeout=0.1)
 
-                
+
                 if text_to_speak.strip():
-                    
+
                     is_speaking.set()
 
-                    
+
                     current_audio_stream = audio_stream
                     audio_stream = (
-                        None
+                        None
                     )
 
                     if current_audio_stream and current_audio_stream.is_active():
```
```diff
@@ -158,21 +158,21 @@ def enter_yap_mode(
 
                     print(f"Speaking full response...")
                     print(text_to_speak)
-                    
+
                     generate_and_play_speech(text_to_speak)
 
-                    
+
                     time.sleep(0.005 * len(text_to_speak))
                     print(len(text_to_speak))
 
-                    
+
                     is_speaking.clear()
             else:
                 time.sleep(0.5)
-                
-                
-                
-                
+
+
+
+
 
     def safely_close_audio_stream(stream):
         """Safely close an audio stream with error handling"""
```
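`speech_playback_thread` (previous two hunks) is a standard queue-consumer thread gated by two `threading.Event` flags: `speech_thread_active` keeps the loop alive, and `is_speaking` is raised around playback so the capture side can back off. A self-contained sketch of the pattern, with `print` standing in for `generate_and_play_speech`:

```python
import queue
import threading
import time

speech_queue = queue.Queue(maxsize=20)
speech_thread_active = threading.Event()
is_speaking = threading.Event()

def playback_loop(say):
    # Consume queued text while the active flag is set; raise is_speaking
    # around playback so other threads know to pause audio capture.
    while speech_thread_active.is_set():
        try:
            text = speech_queue.get(timeout=0.1)
        except queue.Empty:
            continue
        is_speaking.set()
        say(text)
        is_speaking.clear()

speech_thread_active.set()
t = threading.Thread(target=playback_loop, args=(print,), daemon=True)
t.start()
speech_queue.put("hello from the playback thread")
time.sleep(0.3)
speech_thread_active.clear()
t.join()
```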
```diff
@@ -184,38 +184,38 @@ def enter_yap_mode(
         except Exception as e:
             print(f"Error closing audio stream: {e}")
 
-    
+
     speech_thread = threading.Thread(target=speech_playback_thread)
     speech_thread.daemon = True
     speech_thread.start()
 
     def generate_and_play_speech(text):
         try:
-            
+
             unique_id = str(time.time()).replace(".", "")
             temp_dir = tempfile.gettempdir()
             wav_file = os.path.join(temp_dir, f"temp_{unique_id}.wav")
 
-            
+
             if tts_model == "kokoro" and kokoro_pipeline:
-                
+
                 generator = kokoro_pipeline(text, voice=voice)
 
-                
+
                 for _, _, audio in generator:
-                    
+
                     import soundfile as sf
 
                     sf.write(wav_file, audio, 24000)
-                    break
+                    break
             else:
-                
+
                 mp3_file = os.path.join(temp_dir, f"temp_{unique_id}.mp3")
                 tts = gTTS(text=text, lang="en", slow=False)
                 tts.save(mp3_file)
                 convert_mp3_to_wav(mp3_file, wav_file)
 
-            
+
             wf = wave.open(wav_file, "rb")
             p = pyaudio.PyAudio()
 
```
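`generate_and_play_speech` has two synthesis paths: a kokoro pipeline written straight to WAV at 24 kHz via soundfile, and a gTTS fallback that saves MP3 and converts it. A sketch of the fallback path; `convert_mp3_to_wav` is npcsh's own helper, so pydub is used below as a stand-in (an assumption, not from the diff):

```python
import os
import tempfile
import time

from gtts import gTTS            # pip install gTTS
from pydub import AudioSegment   # stand-in for npcsh's convert_mp3_to_wav

def synthesize_wav(text):
    # Unique temp names, as in the diff: a timestamp with the dot removed.
    unique_id = str(time.time()).replace(".", "")
    temp_dir = tempfile.gettempdir()
    mp3_file = os.path.join(temp_dir, f"temp_{unique_id}.mp3")
    wav_file = os.path.join(temp_dir, f"temp_{unique_id}.wav")
    gTTS(text=text, lang="en", slow=False).save(mp3_file)
    AudioSegment.from_mp3(mp3_file).export(wav_file, format="wav")
    return wav_file
```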
```diff
@@ -235,7 +235,7 @@ def enter_yap_mode(
             stream.close()
             p.terminate()
 
-            
+
             try:
                 if os.path.exists(wav_file):
                     os.remove(wav_file)
@@ -248,15 +248,15 @@ def enter_yap_mode(
         except Exception as e:
             print(f"Error in TTS process: {e}")
 
-    
+
     def speak_text(text):
         speech_queue.put(text)
 
     def process_input(user_input, messages):
-        
+
         full_response = ""
 
-        
+
         check = check_llm_command(
             user_input,
             npc=npc,
@@ -266,48 +266,48 @@ def enter_yap_mode(
             provider=provider,
             stream=False,
         )
-        
-        
+
+
         assistant_reply = check["output"]
         messages = check['messages']
-        
-        
-        
+
+
+
         if stream and not isinstance(assistant_reply,str) and not isinstance(assistant_reply, dict):
             assistant_reply = print_and_process_stream_with_markdown(assistant_reply, model, provider)
         elif isinstance(assistant_reply,dict):
-            
+
             assistant_reply = assistant_reply.get('output')
         render_markdown(assistant_reply)
         full_response += assistant_reply
 
-        print("\n")
+        print("\n")
 
-        
+
         if full_response.strip():
             processed_text = process_text_for_tts(full_response)
             speak_text(processed_text)
 
-        
+
         messages.append({"role": "assistant", "content": full_response})
         return messages
-        
-        
-        
+
+
+
 
-    
+
 
 
     def capture_audio():
         nonlocal is_recording, recording_data, buffer_data, last_speech_time, running, is_speaking
         nonlocal audio_stream, transcription_queue
 
-        
+
         if is_speaking.is_set():
             return False
 
         try:
-            
+
             if audio_stream is None and not is_speaking.is_set():
                 audio_stream = pyaudio_instance.open(
                     format=FORMAT,
```
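`process_input` above has to cope with three shapes of `assistant_reply`: a generator of chunks when streaming, a dict carrying its text under 'output', or a plain string. Distilled into one helper (`render_stream` stands in for `print_and_process_stream_with_markdown`):

```python
def normalize_reply(assistant_reply, stream, render_stream):
    # Streaming replies are neither str nor dict: consume them to text.
    if stream and not isinstance(assistant_reply, (str, dict)):
        return render_stream(assistant_reply)
    # Dict replies carry their text under 'output'.
    if isinstance(assistant_reply, dict):
        return assistant_reply.get('output')
    return assistant_reply

assert normalize_reply("hi", False, None) == "hi"
assert normalize_reply({"output": "hi"}, False, None) == "hi"
assert normalize_reply(iter(["h", "i"]), True, "".join) == "hi"
```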
```diff
@@ -317,9 +317,9 @@ def enter_yap_mode(
                     frames_per_buffer=CHUNK,
                 )
 
-            
+
             timeout_counter = 0
-            max_timeout = 100
+            max_timeout = 100
 
             print("\nListening for speech...")
 
@@ -331,7 +331,7 @@ def enter_yap_mode(
                 and timeout_counter < max_timeout
             ):
                 try:
-                    
+
                     data = audio_stream.read(CHUNK, exception_on_overflow=False)
 
                     if not data:
@@ -339,7 +339,7 @@ def enter_yap_mode(
                         time.sleep(0.1)
                         continue
 
-                    
+
                     timeout_counter = 0
 
                     audio_array = np.frombuffer(data, dtype=np.int16)
@@ -349,11 +349,11 @@ def enter_yap_mode(
                     audio_float = audio_array.astype(np.float32) / 32768.0
                     tensor = torch.from_numpy(audio_float).to(device)
 
-                    
+
                     speech_prob = vad_model(tensor, RATE).item()
                     current_time = time.time()
 
-                    if speech_prob > 0.5:
+                    if speech_prob > 0.5:
                         last_speech_time = current_time
                         if not is_recording:
                             is_recording = True
```
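The conversion in this hunk, int16 PCM scaled by 1/32768 into a float32 tensor, is the input format Silero-style VAD models expect. A sketch assuming Silero VAD loaded via torch.hub; the diff only shows the `vad_model(tensor, RATE).item()` call, so the loading line is an assumption:

```python
import numpy as np
import torch

RATE = 16000  # assumed sample rate; must match what the model supports

# Assumption: Silero VAD, matching the vad_model(tensor, RATE).item()
# call signature seen in the diff.
vad_model, _utils = torch.hub.load('snakers4/silero-vad', 'silero_vad')

def speech_probability(data: bytes) -> float:
    # int16 PCM -> float32 in [-1.0, 1.0), then a 1-D torch tensor.
    audio_array = np.frombuffer(data, dtype=np.int16)
    audio_float = audio_array.astype(np.float32) / 32768.0
    tensor = torch.from_numpy(audio_float)
    return vad_model(tensor, RATE).item()  # > 0.5 is treated as speech
```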
```diff
@@ -365,28 +365,28 @@ def enter_yap_mode(
                         if is_recording:
                             if (
                                 current_time - last_speech_time > 1
-                            ):
+                            ):
                                 is_recording = False
                                 print("Speech ended, transcribing...")
 
-                                
+
                                 safely_close_audio_stream(audio_stream)
                                 audio_stream = None
 
-                                
+
                                 transcription = transcribe_recording(recording_data)
                                 if transcription:
                                     transcription_queue.put(transcription)
                                 recording_data = []
-                                return True
+                                return True
                     else:
                         buffer_data.append(data)
                         if len(buffer_data) > int(
                             0.65 * RATE / CHUNK
-                        ):
+                        ):
                             buffer_data.pop(0)
 
-                    
+
                     if is_speaking.is_set():
                         safely_close_audio_stream(audio_stream)
                         audio_stream = None
@@ -399,30 +399,30 @@ def enter_yap_mode(
         except Exception as e:
             print(f"Error in audio capture: {e}")
 
-            
+
             safely_close_audio_stream(audio_stream)
             audio_stream = None
 
         return False
 
     def process_text_for_tts(text):
-        
+
         text = re.sub(r"[*<>{}()\[\]&%#@^_=+~]", "", text)
         text = text.strip()
-        
+
         text = re.sub(r"(\w)\.(\w)\.", r"\1 \2 ", text)
         text = re.sub(r"([.!?])(\w)", r"\1 \2", text)
         return text
 
-    
+
     speak_text("Entering yap mode. Please wait.")
 
     try:
-        loaded_content = {}
+        loaded_content = {}
         if not conversation_id:
             conversation_id = start_new_conversation()
         command_history = CommandHistory()
-        
+
         if files:
             for file in files:
                 extension = os.path.splitext(file)[1].lower()
```
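`process_text_for_tts` strips symbol noise and spaces out abbreviations and sentence boundaries so the TTS engine doesn't read punctuation aloud. Tracing the three substitutions from the hunk on a sample input:

```python
import re

def process_text_for_tts(text):
    text = re.sub(r"[*<>{}()\[\]&%#@^_=+~]", "", text)  # drop symbol noise
    text = text.strip()
    text = re.sub(r"(\w)\.(\w)\.", r"\1 \2 ", text)      # "U.S." -> "U S "
    text = re.sub(r"([.!?])(\w)", r"\1 \2", text)        # pad sentence ends
    return text

print(process_text_for_tts("*Hello!*This is the U.S.A."))
# -> "Hello! This is the U S A."
```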
```diff
@@ -503,12 +503,12 @@ def enter_yap_mode(
                 )
 
 
-                continue
-            if not is_speaking.is_set():
+                continue
+            if not is_speaking.is_set():
                 print('capturing audio')
                 got_speech = capture_audio()
 
-                
+
                 if got_speech:
                     try:
                         transcription = transcription_queue.get_nowait()
@@ -517,18 +517,18 @@ def enter_yap_mode(
                     except queue.Empty:
                         pass
             else:
-                
+
                 time.sleep(0.1)
 
     except KeyboardInterrupt:
         print("\nInterrupted by user.")
 
     finally:
-        
+
         running = False
         speech_thread_active.clear()
 
-        
+
         safely_close_audio_stream(audio_stream)
 
         if pyaudio_instance:
@@ -542,7 +542,7 @@ def enter_yap_mode(
     return {"messages": messages, "output": "yap mode session ended."}
 
 def main():
-    
+
     import argparse
     parser = argparse.ArgumentParser(description="Enter yap mode for chatting with an NPC")
     parser.add_argument("--model", default=NPCSH_CHAT_MODEL, help="Model to use")
@@ -567,7 +567,7 @@ def main():
         provider = args.provider
     else:
         provider = sibiji.provider
-    
+
     enter_yap_mode(
         messages=None,
         model= model,
```
npcsh/npc_team/jinxs/bash_executer.jinx CHANGED

The single whitespace-only change below also applies to the identical copy under npcsh-1.0.27.data/data/npcsh/npc_team/, per the file list above.

```diff
@@ -8,7 +8,7 @@ steps:
     code: |
       import subprocess
       import os
-      cmd = '{{bash_command}}'
+      cmd = '{{bash_command}}'
       def run_command(cmd):
           process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
           stdout, stderr = process.communicate()
```
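The hunk cuts off right after `communicate()`; completed into a standalone, runnable form (the decoding and return value are inferred, not shown in the diff):

```python
import subprocess

def run_command(cmd):
    # shell=True mirrors the jinx step; communicate() waits for exit
    # and collects both output pipes.
    process = subprocess.Popen(
        cmd, shell=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
    )
    stdout, stderr = process.communicate()
    return stdout.decode(), stderr.decode(), process.returncode

out, err, code = run_command("echo hello")
print(out.strip(), code)  # hello 0
```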
npcsh/npc_team/jinxs/edit_file.jinx CHANGED

The six whitespace-only changes below also apply to the identical copy under npcsh-1.0.27.data/data/npcsh/npc_team/, per the file list above.

```diff
@@ -12,23 +12,23 @@ steps:
       import os
       from npcpy.llm_funcs import get_llm_response
 
-      
+
       file_path = os.path.expanduser("{{ file_path }}")
       edit_instructions = "{{ edit_instructions }}"
       backup_str = "{{ backup }}"
       create_backup = backup_str.lower() not in ('false', 'no', '0', '')
 
-      
+
       with open(file_path, 'r') as f:
           original_content = f.read()
 
-      
+
       if create_backup:
           backup_path = file_path + ".bak"
           with open(backup_path, 'w') as f:
               f.write(original_content)
 
-      
+
       prompt = """You are a code editing assistant. Analyze this file and make the requested changes.
 
       File content:
```
```diff
@@ -46,14 +46,14 @@ steps:
       - "insertion": For "insert_after" and "insert_before", the text to insert
       2. "explanation": Brief explanation of the changes made
       """
-      
+
       response = get_llm_response(prompt, model=npc.model, provider=npc.provider, npc=npc, format="json")
 
       result = response.get("response", {})
       modifications = result.get("modifications", [])
       explanation = result.get("explanation", "No explanation provided")
 
-      
+
       updated_content = original_content
       changes_applied = 0
 
```
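The jinx asks the model for a JSON object with "modifications" and an "explanation", then replays the modifications over `original_content`. Only the "insertion" key and the insert_after/insert_before operations are visible in this diff, so the "operation" and "target" field names below are hypothetical stand-ins for whatever schema the jinx actually prompts for:

```python
def apply_modification(content, mod):
    # Hypothetical schema; only "insertion", "insert_after" and
    # "insert_before" appear in the visible part of the prompt.
    op = mod["operation"]    # hypothetical key
    target = mod["target"]   # hypothetical key
    if op == "insert_after":
        return content.replace(target, target + mod["insertion"], 1)
    if op == "insert_before":
        return content.replace(target, mod["insertion"] + target, 1)
    return content

updated = apply_modification(
    "def f():\n    pass\n",
    {"operation": "insert_after", "target": "def f():\n",
     "insertion": "    # added by the editor\n"},
)
print(updated)
```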