npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. npcsh/_state.py +942 -0
  2. npcsh/alicanto.py +1074 -0
  3. npcsh/guac.py +785 -0
  4. npcsh/mcp_helpers.py +357 -0
  5. npcsh/mcp_npcsh.py +822 -0
  6. npcsh/mcp_server.py +184 -0
  7. npcsh/npc.py +218 -0
  8. npcsh/npcsh.py +1161 -0
  9. npcsh/plonk.py +387 -269
  10. npcsh/pti.py +234 -0
  11. npcsh/routes.py +958 -0
  12. npcsh/spool.py +315 -0
  13. npcsh/wander.py +550 -0
  14. npcsh/yap.py +573 -0
  15. npcsh-1.0.0.dist-info/METADATA +596 -0
  16. npcsh-1.0.0.dist-info/RECORD +21 -0
  17. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
  18. npcsh-1.0.0.dist-info/entry_points.txt +9 -0
  19. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
  20. npcsh/audio.py +0 -210
  21. npcsh/cli.py +0 -545
  22. npcsh/command_history.py +0 -566
  23. npcsh/conversation.py +0 -291
  24. npcsh/data_models.py +0 -46
  25. npcsh/dataframes.py +0 -163
  26. npcsh/embeddings.py +0 -168
  27. npcsh/helpers.py +0 -641
  28. npcsh/image.py +0 -298
  29. npcsh/image_gen.py +0 -79
  30. npcsh/knowledge_graph.py +0 -1006
  31. npcsh/llm_funcs.py +0 -2027
  32. npcsh/load_data.py +0 -83
  33. npcsh/main.py +0 -5
  34. npcsh/model_runner.py +0 -189
  35. npcsh/npc_compiler.py +0 -2870
  36. npcsh/npc_sysenv.py +0 -383
  37. npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
  38. npcsh/npc_team/corca.npc +0 -13
  39. npcsh/npc_team/foreman.npc +0 -7
  40. npcsh/npc_team/npcsh.ctx +0 -11
  41. npcsh/npc_team/sibiji.npc +0 -4
  42. npcsh/npc_team/templates/analytics/celona.npc +0 -0
  43. npcsh/npc_team/templates/hr_support/raone.npc +0 -0
  44. npcsh/npc_team/templates/humanities/eriane.npc +0 -4
  45. npcsh/npc_team/templates/it_support/lineru.npc +0 -0
  46. npcsh/npc_team/templates/marketing/slean.npc +0 -4
  47. npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
  48. npcsh/npc_team/templates/sales/turnic.npc +0 -4
  49. npcsh/npc_team/templates/software/welxor.npc +0 -0
  50. npcsh/npc_team/tools/bash_executer.tool +0 -32
  51. npcsh/npc_team/tools/calculator.tool +0 -8
  52. npcsh/npc_team/tools/code_executor.tool +0 -16
  53. npcsh/npc_team/tools/generic_search.tool +0 -27
  54. npcsh/npc_team/tools/image_generation.tool +0 -25
  55. npcsh/npc_team/tools/local_search.tool +0 -149
  56. npcsh/npc_team/tools/npcsh_executor.tool +0 -9
  57. npcsh/npc_team/tools/screen_cap.tool +0 -27
  58. npcsh/npc_team/tools/sql_executor.tool +0 -26
  59. npcsh/response.py +0 -623
  60. npcsh/search.py +0 -248
  61. npcsh/serve.py +0 -1460
  62. npcsh/shell.py +0 -538
  63. npcsh/shell_helpers.py +0 -3529
  64. npcsh/stream.py +0 -700
  65. npcsh/video.py +0 -49
  66. npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
  67. npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
  68. npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
  69. npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
  70. npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
  71. npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
  72. npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
  73. npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
  74. npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
  75. npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
  76. npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
  77. npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
  78. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
  79. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
  80. npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
  81. npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
  82. npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
  83. npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
  84. npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
  85. npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
  86. npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
  87. npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
  88. npcsh-0.3.31.dist-info/METADATA +0 -1853
  89. npcsh-0.3.31.dist-info/RECORD +0 -76
  90. npcsh-0.3.31.dist-info/entry_points.txt +0 -3
  91. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/yap.py ADDED
@@ -0,0 +1,573 @@
+ try:
+     from faster_whisper import WhisperModel
+     from gtts import gTTS
+     import torch
+     import pyaudio
+     import wave
+     import queue
+
+     from npcpy.data.audio import (
+         cleanup_temp_files,
+         FORMAT,
+         CHANNELS,
+         RATE,
+         CHUNK,
+         transcribe_recording,
+         convert_mp3_to_wav,
+     )
+     import threading
+     import tempfile
+     import os
+     import re
+     import time
+     import numpy as np
+
+ except Exception as e:
+     print(
+         "Exception: "
+         + str(e)
+         + "\n"
+         + "Could not load the whisper package. If you want to use tts/stt features, please run `pip install npcsh[audio]` and follow the instructions in the npcsh github readme to ensure your OS can handle the audio dependencies."
+     )
+ from npcpy.data.load import load_csv, load_pdf
+ from npcsh._state import (
+     NPCSH_CHAT_MODEL,
+     NPCSH_CHAT_PROVIDER,
+     NPCSH_DB_PATH,
+     NPCSH_API_URL,
+     NPCSH_STREAM_OUTPUT,
+ )
+
+ from npcpy.npc_sysenv import (
+     get_system_message,
+     print_and_process_stream_with_markdown,
+     render_markdown,
+ )
+ from sqlalchemy import create_engine
+ from npcpy.llm_funcs import check_llm_command
+ from npcpy.data.text import rag_search
+ from npcpy.npc_compiler import (
+     NPC, Team
+ )
+ from npcpy.memory.command_history import CommandHistory, save_conversation_message, start_new_conversation
+ from typing import Dict, Any, List
+
+
+ def enter_yap_mode(
+     model: str,
+     provider: str,
+     messages: list = None,
+     npc=None,
+     team=None,
+     tts_model="kokoro",
+     voice="af_heart",
+     files: List[str] = None,
+     rag_similarity_threshold: float = 0.3,
+     stream: bool = NPCSH_STREAM_OUTPUT,
+     conversation_id=None,
+ ) -> Dict[str, Any]:
+     running = True
+     is_recording = False
+     recording_data = []
+     buffer_data = []
+     last_speech_time = 0
+     # Load the Silero voice-activity-detection model used to gate recording
+     vad_model, _ = torch.hub.load(
+         repo_or_dir="snakers4/silero-vad",
+         model="silero_vad",
+         force_reload=False,
+         onnx=False,
+         verbose=False,
+     )
+     device = "cpu"
+     vad_model.to(device)
+
+     print("Entering yap mode. Initializing...")
+
+     concise_instruction = "Please provide brief responses of 1-2 sentences unless the user specifically asks for more detailed information. Keep responses clear and concise."
+
+     provider = (
+         NPCSH_CHAT_PROVIDER if npc is None else npc.provider or NPCSH_CHAT_PROVIDER
+     )
+     api_url = NPCSH_API_URL if npc is None else npc.api_url or NPCSH_API_URL
+
+     print(f"\nUsing model: {model} with provider: {provider}")
+
+     system_message = get_system_message(npc) if npc else "You are a helpful assistant."
+
+     # Add the conciseness instruction to the system message
+     system_message = system_message + " " + concise_instruction
+
+     if messages is None:
+         messages = [{"role": "system", "content": system_message}]
+     elif messages[0]["role"] != "system":
+         messages.insert(0, {"role": "system", "content": system_message})
+
+     kokoro_pipeline = None
+     if tts_model == "kokoro":
+         try:
+             from kokoro import KPipeline
+             import soundfile as sf
+
+             kokoro_pipeline = KPipeline(lang_code="a")
+             print("Kokoro TTS model initialized")
+         except ImportError:
+             print("Kokoro not installed, falling back to gTTS")
+             tts_model = "gtts"
+
+     # Initialize PyAudio
+     pyaudio_instance = pyaudio.PyAudio()
+     audio_stream = None  # Opened and closed as needed
+     transcription_queue = queue.Queue()
+
+     # Event that is set while TTS output is playing, so capture pauses
+     is_speaking = threading.Event()
+     is_speaking.clear()  # Not speaking initially
+
+     speech_queue = queue.Queue(maxsize=20)
+     speech_thread_active = threading.Event()
+     speech_thread_active.set()
+
+     def speech_playback_thread():
+         nonlocal running, audio_stream
+
+         while running and speech_thread_active.is_set():
+             try:
+                 # Get the next speech item from the queue
+                 if not speech_queue.empty():
+                     text_to_speak = speech_queue.get(timeout=0.1)
+
+                     # Only process if there's text to speak
+                     if text_to_speak.strip():
+                         # IMPORTANT: set the is_speaking flag BEFORE starting audio output
+                         is_speaking.set()
+
+                         # Safely close the audio input stream before speaking;
+                         # set it to None so the capture thread cannot use it
+                         current_audio_stream = audio_stream
+                         audio_stream = None
+
+                         if current_audio_stream and current_audio_stream.is_active():
+                             current_audio_stream.stop_stream()
+                             current_audio_stream.close()
+
+                         print("Speaking full response...")
+
+                         # Generate and play speech
+                         generate_and_play_speech(text_to_speak)
+
+                         # Delay after speech, scaled to its length, to prevent echo
+                         time.sleep(0.005 * len(text_to_speak))
+
+                         # Clear the speaking flag to allow listening again
+                         is_speaking.clear()
+                 else:
+                     time.sleep(0.5)
+             except Exception as e:
+                 print(f"Error in speech thread: {e}")
+                 is_speaking.clear()  # Make sure to clear the flag if there's an error
+                 time.sleep(0.1)
+
+     def safely_close_audio_stream(stream):
+         """Safely close an audio stream with error handling."""
+         if stream:
+             try:
+                 if stream.is_active():
+                     stream.stop_stream()
+                 stream.close()
+             except Exception as e:
+                 print(f"Error closing audio stream: {e}")
+
+     # Start the speech playback thread
+     speech_thread = threading.Thread(target=speech_playback_thread)
+     speech_thread.daemon = True
+     speech_thread.start()
+
+     def generate_and_play_speech(text):
+         try:
+             # Create a temporary file for audio
+             unique_id = str(time.time()).replace(".", "")
+             temp_dir = tempfile.gettempdir()
+             wav_file = os.path.join(temp_dir, f"temp_{unique_id}.wav")
+
+             # Generate speech with the selected TTS model
+             if tts_model == "kokoro" and kokoro_pipeline:
+                 generator = kokoro_pipeline(text, voice=voice)
+
+                 # Get the audio from the generator and save it to a WAV file
+                 for _, _, audio in generator:
+                     import soundfile as sf
+
+                     sf.write(wav_file, audio, 24000)
+                     break  # Just use the first chunk for now
+             else:
+                 # Fall back to gTTS
+                 mp3_file = os.path.join(temp_dir, f"temp_{unique_id}.mp3")
+                 tts = gTTS(text=text, lang="en", slow=False)
+                 tts.save(mp3_file)
+                 convert_mp3_to_wav(mp3_file, wav_file)
+
+             # Play the audio
+             wf = wave.open(wav_file, "rb")
+             p = pyaudio.PyAudio()
+
+             stream = p.open(
+                 format=p.get_format_from_width(wf.getsampwidth()),
+                 channels=wf.getnchannels(),
+                 rate=wf.getframerate(),
+                 output=True,
+             )
+
+             data = wf.readframes(4096)
+             while data and running:
+                 stream.write(data)
+                 data = wf.readframes(4096)
+
+             stream.stop_stream()
+             stream.close()
+             p.terminate()
+
+             # Clean up temp files
+             try:
+                 if os.path.exists(wav_file):
+                     os.remove(wav_file)
+                 if tts_model == "gtts" and "mp3_file" in locals():
+                     if os.path.exists(mp3_file):
+                         os.remove(mp3_file)
+             except Exception as e:
+                 print(f"Error removing temp file: {e}")
+
+         except Exception as e:
+             print(f"Error in TTS process: {e}")
+
+     # speak_text just queues text for the playback thread
+     def speak_text(text):
+         speech_queue.put(text)
+
+     def process_input(user_input, messages):
+         full_response = ""
+
+         check = check_llm_command(
+             user_input,
+             npc=npc,
+             team=team,
+             messages=messages,
+             model=model,
+             provider=provider,
+             stream=False,
+         )
+         assistant_reply = check["output"]
+         messages = check["messages"]
+         if stream and not isinstance(assistant_reply, str) and not isinstance(assistant_reply, dict):
+             assistant_reply = print_and_process_stream_with_markdown(
+                 assistant_reply, model, provider
+             )
+         elif isinstance(assistant_reply, dict):
+             # Assume it's a jinx output; to fix later
+             assistant_reply = assistant_reply.get("output")
+         render_markdown(assistant_reply)
+         full_response += assistant_reply
+
+         print("\n")  # End the progress display
+
+         # Process and speak the entire response at once
+         if full_response.strip():
+             processed_text = process_text_for_tts(full_response)
+             speak_text(processed_text)
+
+         # Add the assistant's response to messages
+         messages.append({"role": "assistant", "content": full_response})
+         return messages
+
+     # Capture and process audio from the microphone
+     def capture_audio():
+         nonlocal is_recording, recording_data, buffer_data, last_speech_time, running, is_speaking
+         nonlocal audio_stream, transcription_queue
+
+         # Don't try to record while TTS output is playing
+         if is_speaking.is_set():
+             return False
+
+         try:
+             # Only create a new audio stream if we don't have one
+             if audio_stream is None and not is_speaking.is_set():
+                 audio_stream = pyaudio_instance.open(
+                     format=FORMAT,
+                     channels=CHANNELS,
+                     rate=RATE,
+                     input=True,
+                     frames_per_buffer=CHUNK,
+                 )
+
+             # Initialize or reset the recording variables
+             is_recording = False
+             recording_data = []
+             buffer_data = []
+
+             print("\nListening for speech...")
+
+             while (
+                 running
+                 and audio_stream
+                 and audio_stream.is_active()
+                 and not is_speaking.is_set()
+             ):
+                 try:
+                     data = audio_stream.read(CHUNK, exception_on_overflow=False)
+                     if data:
+                         audio_array = np.frombuffer(data, dtype=np.int16)
+                         audio_float = audio_array.astype(np.float32) / 32768.0
+
+                         tensor = torch.from_numpy(audio_float).to(device)
+                         speech_prob = vad_model(tensor, RATE).item()
+                         current_time = time.time()
+
+                         if speech_prob > 0.5:  # VAD threshold
+                             last_speech_time = current_time
+                             if not is_recording:
+                                 is_recording = True
+                                 print("\nSpeech detected, listening...")
+                                 recording_data.extend(buffer_data)
+                                 buffer_data = []
+                             recording_data.append(data)
+                         else:
+                             if is_recording:
+                                 if current_time - last_speech_time > 1:  # silence duration
+                                     is_recording = False
+                                     print("Speech ended, transcribing...")
+
+                                     # Stop the stream before transcribing
+                                     safely_close_audio_stream(audio_stream)
+                                     audio_stream = None
+
+                                     # Transcribe in this thread to avoid race conditions
+                                     transcription = transcribe_recording(recording_data)
+                                     if transcription:
+                                         transcription_queue.put(transcription)
+                                     recording_data = []
+                                     return True  # Got speech
+                             else:
+                                 # Keep a rolling pre-speech buffer of ~0.65 s
+                                 buffer_data.append(data)
+                                 if len(buffer_data) > int(0.65 * RATE / CHUNK):
+                                     buffer_data.pop(0)
+
+                         # Check frequently whether we need to stop capturing
+                         if is_speaking.is_set():
+                             safely_close_audio_stream(audio_stream)
+                             audio_stream = None
+                             return False
+
+                 except Exception as e:
+                     print(f"Error processing audio frame: {e}")
+                     time.sleep(0.1)
+
+         except Exception as e:
+             print(f"Error in audio capture: {e}")
+
+         # Close the stream if we exit without finding speech
+         safely_close_audio_stream(audio_stream)
+         audio_stream = None
+
+         return False
+
+     def process_text_for_tts(text):
+         # Remove special characters that might cause issues in TTS
+         text = re.sub(r"[*<>{}()\[\]&%#@^_=+~]", "", text)
+         text = text.strip()
+         # Add spaces after periods that are followed by words (for better pronunciation)
+         text = re.sub(r"(\w)\.(\w)\.", r"\1 \2 ", text)
+         text = re.sub(r"([.!?])(\w)", r"\1 \2", text)
+         return text
+
+     # Now that the functions are defined, play the welcome message
+     speak_text("Entering yap mode. Please wait.")
+
+     try:
+         loaded_content = {}  # Holds content loaded from files
+         if not conversation_id:
+             conversation_id = start_new_conversation()
+         command_history = CommandHistory()
+         # Load the specified files, if any
+         if files:
+             for file in files:
+                 extension = os.path.splitext(file)[1].lower()
+                 try:
+                     if extension == ".pdf":
+                         content = load_pdf(file)["texts"].iloc[0]
+                     elif extension == ".csv":
+                         content = load_csv(file)
+                     else:
+                         print(f"Unsupported file type: {file}")
+                         continue
+                     loaded_content[file] = content
+                     print(f"Loaded content from: {file}")
+                 except Exception as e:
+                     print(f"Error loading {file}: {str(e)}")
+
+         while running:
+             # First check for typed input (non-blocking)
+             import select
+             import sys
+
+             # Don't spam the console with prompts while speaking
+             if not is_speaking.is_set():
+                 print(
+                     "🎤🎤🎤🎤\n Speak or type your message (or 'exit' to quit): ",
+                     end="",
+                     flush=True,
+                 )
+
+             rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
+             if rlist:
+                 user_input = sys.stdin.readline().strip()
+                 if user_input.lower() in ("exit", "quit", "goodbye"):
+                     print("\nExiting yap mode.")
+                     break
+                 if user_input:
+                     print(f"\nYou (typed): {user_input}")
+                     # Handle RAG context from loaded files
+                     if loaded_content:
+                         context_content = ""
+                         for filename, content in loaded_content.items():
+                             retrieved_docs = rag_search(
+                                 user_input,
+                                 content,
+                                 similarity_threshold=rag_similarity_threshold,
+                             )
+                             if retrieved_docs:
+                                 context_content += (
+                                     f"\n\nLoaded content from: {filename}\n{content}\n\n"
+                                 )
+                         if len(context_content) > 0:
+                             user_input += f"""
+ Here is the loaded content that may be relevant to your query:
+ {context_content}
+ Please reference it explicitly in your response and use it for answering.
+ """
+                     message_id = save_conversation_message(
+                         command_history,
+                         conversation_id,
+                         "user",
+                         user_input,
+                         wd=os.getcwd(),
+                         model=model,
+                         provider=provider,
+                         npc=npc.name if npc else None,
+                     )
+
+                     messages = process_input(user_input, messages)
+
+                     message_id = save_conversation_message(
+                         command_history,
+                         conversation_id,
+                         "assistant",
+                         messages[-1]["content"],
+                         wd=os.getcwd(),
+                         model=model,
+                         provider=provider,
+                         npc=npc.name if npc else None,
+                     )
+
+                 continue  # Skip audio capture this cycle
+
+             # Then try to capture some audio (if there was no typed input)
+             if not is_speaking.is_set():  # Only capture if not currently speaking
+                 got_speech = capture_audio()
+
+                 # If we got speech, process it
+                 if got_speech:
+                     try:
+                         transcription = transcription_queue.get_nowait()
+                         print(f"\nYou (spoke): {transcription}")
+                         messages = process_input(transcription, messages)
+                     except queue.Empty:
+                         pass
+             else:
+                 # If we're speaking, just wait a bit without spamming the console
+                 time.sleep(0.1)
+
+     except KeyboardInterrupt:
+         print("\nInterrupted by user.")
+
+     finally:
+         # Set running to False to signal threads to exit
+         running = False
+         speech_thread_active.clear()
+
+         # Clean up audio resources
+         safely_close_audio_stream(audio_stream)
+
+         if pyaudio_instance:
+             pyaudio_instance.terminate()
+
+         print("\nExiting yap mode.")
+         speak_text("Exiting yap mode. Goodbye!")
+         time.sleep(1)
+         cleanup_temp_files()
+
+     return {"messages": messages, "output": "yap mode session ended."}
+
+
+ def main():
+     import argparse
+
+     parser = argparse.ArgumentParser(description="Enter yap mode for chatting with an NPC")
+     parser.add_argument("--model", default=NPCSH_CHAT_MODEL, help="Model to use")
+     parser.add_argument("--provider", default=NPCSH_CHAT_PROVIDER, help="Provider to use")
+     parser.add_argument("--files", nargs="*", help="Files to load into context")
+     parser.add_argument("--stream", default="true", help="Use streaming mode")
+     parser.add_argument(
+         "--npc",
+         type=str,
+         default=os.path.expanduser("~/.npcsh/npc_team/sibiji.npc"),
+         help="Path to NPC file",
+     )
+     args = parser.parse_args()
+     npc_db_conn = create_engine(f"sqlite:///{NPCSH_DB_PATH}")
+
+     sibiji = NPC(file=args.npc, db_conn=npc_db_conn)
+
+     team = Team(team_path="~/.npcsh/npc_team/", db_conn=npc_db_conn, forenpc=sibiji)
+     if sibiji.model is None:
+         sibiji.model = args.model
+         model = args.model
+     else:
+         model = sibiji.model
+     if sibiji.provider is None:
+         sibiji.provider = args.provider
+         provider = args.provider
+     else:
+         provider = sibiji.provider
+     # Enter yap mode
+     enter_yap_mode(
+         model,
+         provider,
+         messages=None,
+         npc=sibiji,
+         team=team,
+         files=args.files,
+         stream=args.stream.lower() == "true",
+     )
+
+
+ if __name__ == "__main__":
+     main()
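
For orientation, a minimal sketch of driving the new yap mode programmatically, using only names visible in the diff above (enter_yap_mode, NPC, and the npcsh._state constants). It assumes the audio extras are installed (`pip install npcsh[audio]`) and that the default sibiji NPC file from the diff's main() exists on disk; this mirrors main() rather than documenting an official API:

import os
from sqlalchemy import create_engine
from npcpy.npc_compiler import NPC
from npcsh._state import NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER, NPCSH_DB_PATH
from npcsh.yap import enter_yap_mode

# Mirror main(): one NPC backed by the npcsh sqlite history DB (paths assumed).
engine = create_engine(f"sqlite:///{NPCSH_DB_PATH}")
sibiji = NPC(file=os.path.expanduser("~/.npcsh/npc_team/sibiji.npc"), db_conn=engine)

# Blocks until the user types or says 'exit'; returns the session's messages.
result = enter_yap_mode(NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER, npc=sibiji)
print(result["output"])  # "yap mode session ended."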