npcsh 1.1.20__py3-none-any.whl → 1.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (186) hide show
  1. npcsh/_state.py +15 -76
  2. npcsh/benchmark/npcsh_agent.py +22 -14
  3. npcsh/benchmark/templates/install-npcsh.sh.j2 +2 -2
  4. npcsh/diff_viewer.py +3 -3
  5. npcsh/mcp_server.py +9 -1
  6. npcsh/npc_team/alicanto.npc +12 -6
  7. npcsh/npc_team/corca.npc +0 -1
  8. npcsh/npc_team/frederic.npc +2 -3
  9. npcsh/npc_team/jinxs/lib/core/compress.jinx +373 -85
  10. npcsh/npc_team/jinxs/lib/core/edit_file.jinx +83 -61
  11. npcsh/npc_team/jinxs/lib/core/search/db_search.jinx +17 -6
  12. npcsh/npc_team/jinxs/lib/core/search/file_search.jinx +17 -6
  13. npcsh/npc_team/jinxs/lib/core/search/web_search.jinx +52 -14
  14. npcsh/npc_team/jinxs/{bin → lib/utils}/benchmark.jinx +2 -2
  15. npcsh/npc_team/jinxs/{bin → lib/utils}/jinxs.jinx +12 -12
  16. npcsh/npc_team/jinxs/{bin → lib/utils}/models.jinx +7 -7
  17. npcsh/npc_team/jinxs/{bin → lib/utils}/setup.jinx +6 -6
  18. npcsh/npc_team/jinxs/modes/alicanto.jinx +1633 -295
  19. npcsh/npc_team/jinxs/modes/arxiv.jinx +5 -5
  20. npcsh/npc_team/jinxs/modes/build.jinx +378 -0
  21. npcsh/npc_team/jinxs/modes/config_tui.jinx +300 -0
  22. npcsh/npc_team/jinxs/modes/convene.jinx +597 -0
  23. npcsh/npc_team/jinxs/modes/corca.jinx +777 -387
  24. npcsh/npc_team/jinxs/modes/git.jinx +795 -0
  25. {npcsh-1.1.20.data/data/npcsh/npc_team → npcsh/npc_team/jinxs/modes}/kg.jinx +82 -15
  26. npcsh/npc_team/jinxs/modes/memories.jinx +414 -0
  27. npcsh/npc_team/jinxs/{bin → modes}/nql.jinx +10 -21
  28. npcsh/npc_team/jinxs/modes/papers.jinx +578 -0
  29. npcsh/npc_team/jinxs/modes/plonk.jinx +503 -308
  30. npcsh/npc_team/jinxs/modes/reattach.jinx +3 -3
  31. npcsh/npc_team/jinxs/modes/spool.jinx +3 -3
  32. npcsh/npc_team/jinxs/{bin → modes}/team.jinx +12 -12
  33. npcsh/npc_team/jinxs/modes/vixynt.jinx +388 -0
  34. npcsh/npc_team/jinxs/modes/wander.jinx +454 -181
  35. npcsh/npc_team/jinxs/modes/yap.jinx +630 -182
  36. npcsh/npc_team/kadiefa.npc +2 -1
  37. npcsh/npc_team/sibiji.npc +3 -3
  38. npcsh/npcsh.py +112 -47
  39. npcsh/routes.py +4 -1
  40. npcsh/salmon_simulation.py +0 -0
  41. npcsh-1.1.22.data/data/npcsh/npc_team/alicanto.jinx +1694 -0
  42. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/alicanto.npc +12 -6
  43. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/arxiv.jinx +5 -5
  44. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/benchmark.jinx +2 -2
  45. npcsh-1.1.22.data/data/npcsh/npc_team/build.jinx +378 -0
  46. npcsh-1.1.22.data/data/npcsh/npc_team/compress.jinx +428 -0
  47. npcsh-1.1.22.data/data/npcsh/npc_team/config_tui.jinx +300 -0
  48. npcsh-1.1.22.data/data/npcsh/npc_team/corca.jinx +820 -0
  49. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/corca.npc +0 -1
  50. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/db_search.jinx +17 -6
  51. npcsh-1.1.22.data/data/npcsh/npc_team/edit_file.jinx +119 -0
  52. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/file_search.jinx +17 -6
  53. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/frederic.npc +2 -3
  54. npcsh-1.1.22.data/data/npcsh/npc_team/git.jinx +795 -0
  55. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/jinxs.jinx +12 -12
  56. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/kadiefa.npc +2 -1
  57. {npcsh/npc_team/jinxs/bin → npcsh-1.1.22.data/data/npcsh/npc_team}/kg.jinx +82 -15
  58. npcsh-1.1.22.data/data/npcsh/npc_team/memories.jinx +414 -0
  59. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/models.jinx +7 -7
  60. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/nql.jinx +10 -21
  61. npcsh-1.1.22.data/data/npcsh/npc_team/papers.jinx +578 -0
  62. npcsh-1.1.22.data/data/npcsh/npc_team/plonk.jinx +574 -0
  63. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/reattach.jinx +3 -3
  64. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/setup.jinx +6 -6
  65. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/sibiji.npc +3 -3
  66. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/spool.jinx +3 -3
  67. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/team.jinx +12 -12
  68. npcsh-1.1.22.data/data/npcsh/npc_team/vixynt.jinx +388 -0
  69. npcsh-1.1.22.data/data/npcsh/npc_team/wander.jinx +728 -0
  70. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/web_search.jinx +52 -14
  71. npcsh-1.1.22.data/data/npcsh/npc_team/yap.jinx +716 -0
  72. {npcsh-1.1.20.dist-info → npcsh-1.1.22.dist-info}/METADATA +246 -281
  73. npcsh-1.1.22.dist-info/RECORD +240 -0
  74. npcsh-1.1.22.dist-info/entry_points.txt +11 -0
  75. npcsh/npc_team/jinxs/bin/config_tui.jinx +0 -300
  76. npcsh/npc_team/jinxs/bin/memories.jinx +0 -317
  77. npcsh/npc_team/jinxs/bin/vixynt.jinx +0 -122
  78. npcsh/npc_team/jinxs/lib/core/search/kg_search.jinx +0 -418
  79. npcsh/npc_team/jinxs/lib/core/search/mem_review.jinx +0 -73
  80. npcsh/npc_team/jinxs/lib/core/search/mem_search.jinx +0 -388
  81. npcsh/npc_team/jinxs/lib/core/search.jinx +0 -54
  82. npcsh/npc_team/jinxs/lib/research/paper_search.jinx +0 -412
  83. npcsh/npc_team/jinxs/lib/research/semantic_scholar.jinx +0 -386
  84. npcsh/npc_team/jinxs/lib/utils/build.jinx +0 -65
  85. npcsh/npc_team/plonkjr.npc +0 -23
  86. npcsh-1.1.20.data/data/npcsh/npc_team/alicanto.jinx +0 -356
  87. npcsh-1.1.20.data/data/npcsh/npc_team/build.jinx +0 -65
  88. npcsh-1.1.20.data/data/npcsh/npc_team/compress.jinx +0 -140
  89. npcsh-1.1.20.data/data/npcsh/npc_team/config_tui.jinx +0 -300
  90. npcsh-1.1.20.data/data/npcsh/npc_team/corca.jinx +0 -430
  91. npcsh-1.1.20.data/data/npcsh/npc_team/edit_file.jinx +0 -97
  92. npcsh-1.1.20.data/data/npcsh/npc_team/kg_search.jinx +0 -418
  93. npcsh-1.1.20.data/data/npcsh/npc_team/mem_review.jinx +0 -73
  94. npcsh-1.1.20.data/data/npcsh/npc_team/mem_search.jinx +0 -388
  95. npcsh-1.1.20.data/data/npcsh/npc_team/memories.jinx +0 -317
  96. npcsh-1.1.20.data/data/npcsh/npc_team/paper_search.jinx +0 -412
  97. npcsh-1.1.20.data/data/npcsh/npc_team/plonk.jinx +0 -379
  98. npcsh-1.1.20.data/data/npcsh/npc_team/plonkjr.npc +0 -23
  99. npcsh-1.1.20.data/data/npcsh/npc_team/search.jinx +0 -54
  100. npcsh-1.1.20.data/data/npcsh/npc_team/semantic_scholar.jinx +0 -386
  101. npcsh-1.1.20.data/data/npcsh/npc_team/vixynt.jinx +0 -122
  102. npcsh-1.1.20.data/data/npcsh/npc_team/wander.jinx +0 -455
  103. npcsh-1.1.20.data/data/npcsh/npc_team/yap.jinx +0 -268
  104. npcsh-1.1.20.dist-info/RECORD +0 -248
  105. npcsh-1.1.20.dist-info/entry_points.txt +0 -25
  106. /npcsh/npc_team/jinxs/lib/{orchestration → core}/convene.jinx +0 -0
  107. /npcsh/npc_team/jinxs/lib/{orchestration → core}/delegate.jinx +0 -0
  108. /npcsh/npc_team/jinxs/{bin → lib/core}/sample.jinx +0 -0
  109. /npcsh/npc_team/jinxs/lib/{core → utils}/chat.jinx +0 -0
  110. /npcsh/npc_team/jinxs/lib/{core → utils}/cmd.jinx +0 -0
  111. /npcsh/npc_team/jinxs/{bin → lib/utils}/sync.jinx +0 -0
  112. /npcsh/npc_team/jinxs/{bin → modes}/roll.jinx +0 -0
  113. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/add_tab.jinx +0 -0
  114. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/alicanto.png +0 -0
  115. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/browser_action.jinx +0 -0
  116. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/browser_screenshot.jinx +0 -0
  117. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/chat.jinx +0 -0
  118. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/click.jinx +0 -0
  119. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/close_browser.jinx +0 -0
  120. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/close_pane.jinx +0 -0
  121. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/close_tab.jinx +0 -0
  122. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/cmd.jinx +0 -0
  123. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/compile.jinx +0 -0
  124. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/confirm.jinx +0 -0
  125. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/convene.jinx +0 -0
  126. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/corca.png +0 -0
  127. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/corca_example.png +0 -0
  128. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/delegate.jinx +0 -0
  129. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/focus_pane.jinx +0 -0
  130. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/frederic4.png +0 -0
  131. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/guac.jinx +0 -0
  132. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/guac.npc +0 -0
  133. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/guac.png +0 -0
  134. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/help.jinx +0 -0
  135. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/incognide.jinx +0 -0
  136. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/init.jinx +0 -0
  137. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  138. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/key_press.jinx +0 -0
  139. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/launch_app.jinx +0 -0
  140. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/list_panes.jinx +0 -0
  141. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/load_file.jinx +0 -0
  142. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/navigate.jinx +0 -0
  143. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/notify.jinx +0 -0
  144. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  145. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  146. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/open_browser.jinx +0 -0
  147. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/open_pane.jinx +0 -0
  148. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/ots.jinx +0 -0
  149. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/paste.jinx +0 -0
  150. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/plonk.npc +0 -0
  151. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/plonk.png +0 -0
  152. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  153. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/pti.jinx +0 -0
  154. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/python.jinx +0 -0
  155. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/read_pane.jinx +0 -0
  156. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/roll.jinx +0 -0
  157. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/run_terminal.jinx +0 -0
  158. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/sample.jinx +0 -0
  159. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/screenshot.jinx +0 -0
  160. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/send_message.jinx +0 -0
  161. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/serve.jinx +0 -0
  162. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/set.jinx +0 -0
  163. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/sh.jinx +0 -0
  164. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/shh.jinx +0 -0
  165. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/sibiji.png +0 -0
  166. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/sleep.jinx +0 -0
  167. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/split_pane.jinx +0 -0
  168. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/spool.png +0 -0
  169. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/sql.jinx +0 -0
  170. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/switch.jinx +0 -0
  171. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/switch_npc.jinx +0 -0
  172. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/switch_tab.jinx +0 -0
  173. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/switches.jinx +0 -0
  174. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/sync.jinx +0 -0
  175. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/teamviz.jinx +0 -0
  176. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/trigger.jinx +0 -0
  177. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/type_text.jinx +0 -0
  178. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/usage.jinx +0 -0
  179. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/verbose.jinx +0 -0
  180. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/wait.jinx +0 -0
  181. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/write_file.jinx +0 -0
  182. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/yap.png +0 -0
  183. {npcsh-1.1.20.data → npcsh-1.1.22.data}/data/npcsh/npc_team/zen_mode.jinx +0 -0
  184. {npcsh-1.1.20.dist-info → npcsh-1.1.22.dist-info}/WHEEL +0 -0
  185. {npcsh-1.1.20.dist-info → npcsh-1.1.22.dist-info}/licenses/LICENSE +0 -0
  186. {npcsh-1.1.20.dist-info → npcsh-1.1.22.dist-info}/top_level.txt +0 -0
@@ -1,5 +1,6 @@
1
1
  jinx_name: yap
2
- description: Voice chat mode - speech-to-text input, text-to-speech output
2
+ description: Voice chat TUI - speech-to-text input, text-to-speech output
3
+ interactive: true
3
4
  inputs:
4
5
  - model: null
5
6
  - provider: null
@@ -8,18 +9,14 @@ inputs:
8
9
  - files: null
9
10
 
10
11
  steps:
11
- - name: yap_repl
12
+ - name: yap_tui
12
13
  engine: python
13
14
  code: |
14
- import os
15
- import sys
16
- import time
17
- import tempfile
18
- import threading
19
- import queue
15
+ import os, sys, tty, termios, time, tempfile, threading, queue
16
+ import select as _sel
20
17
  from termcolor import colored
21
18
 
22
- # Audio imports with graceful fallback
19
+ # Audio imports
23
20
  try:
24
21
  import torch
25
22
  import pyaudio
@@ -29,13 +26,11 @@ steps:
29
26
  from gtts import gTTS
30
27
  from npcpy.data.audio import (
31
28
  FORMAT, CHANNELS, RATE, CHUNK,
32
- transcribe_recording, convert_mp3_to_wav, cleanup_temp_files
29
+ transcribe_recording, convert_mp3_to_wav
33
30
  )
34
31
  AUDIO_AVAILABLE = True
35
- except ImportError as e:
32
+ except ImportError:
36
33
  AUDIO_AVAILABLE = False
37
- print(colored(f"Audio dependencies not available: {e}", "yellow"))
38
- print("Install with: pip install npcsh[audio]")
39
34
 
40
35
  from npcpy.llm_funcs import get_llm_response
41
36
  from npcpy.npc_sysenv import get_system_message, render_markdown
@@ -46,10 +41,9 @@ steps:
46
41
  team = context.get('team')
47
42
  messages = context.get('messages', [])
48
43
  files = context.get('files')
49
- tts_model = context.get('tts_model', 'kokoro')
50
- voice = context.get('voice', 'af_heart')
44
+ tts_model_name = context.get('tts_model', 'kokoro')
45
+ voice_name = context.get('voice', 'af_heart')
51
46
 
52
- # Resolve npc if it's a string (npc name) rather than NPC object
53
47
  if isinstance(npc, str) and team:
54
48
  npc = team.get(npc) if hasattr(team, 'get') else None
55
49
  elif isinstance(npc, str):
@@ -57,53 +51,24 @@ steps:
57
51
 
58
52
  model = context.get('model') or (npc.model if npc and hasattr(npc, 'model') else None)
59
53
  provider = context.get('provider') or (npc.provider if npc and hasattr(npc, 'provider') else None)
60
-
61
- print("""
62
- ██╗ ██╗ █████╗ ██████╗
63
- ╚██╗ ██╔╝██╔══██╗██╔══██╗
64
- ╚████╔╝ ███████║██████╔╝
65
- ╚██╔╝ ██╔══██║██╔═══╝
66
- ██║ ██║ ██║██║
67
- ╚═╝ ╚═╝ ╚═╝╚═╝
68
-
69
- Voice Chat Mode
70
- """)
71
-
72
54
  npc_name = npc.name if npc else "yap"
73
- print(f"Entering yap mode (NPC: {npc_name}). Type '/yq' to exit.")
74
-
75
- if not AUDIO_AVAILABLE:
76
- print(colored("Audio not available. Falling back to text mode.", "yellow"))
77
-
78
- # Load files for RAG context
79
- loaded_chunks = {}
80
- if files:
81
- if isinstance(files, str):
82
- files = [f.strip() for f in files.split(',')]
83
- for file_path in files:
84
- file_path = os.path.expanduser(file_path)
85
- if os.path.exists(file_path):
86
- try:
87
- chunks = load_file_contents(file_path)
88
- loaded_chunks[file_path] = chunks
89
- print(colored(f"Loaded: {file_path}", "green"))
90
- except Exception as e:
91
- print(colored(f"Error loading {file_path}: {e}", "red"))
92
55
 
93
- # System message for concise voice responses
94
- sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
95
- sys_msg += "\n\nProvide brief responses of 1-2 sentences unless asked for more detail. Keep responses clear and conversational for voice."
96
-
97
- if not messages or messages[0].get("role") != "system":
98
- messages.insert(0, {"role": "system", "content": sys_msg})
99
-
100
- # Audio state
56
+ # ================================================================
57
+ # Non-interactive fallback
58
+ # ================================================================
59
+ if not sys.stdin.isatty():
60
+ context['output'] = "Yap requires an interactive terminal."
61
+ context['messages'] = messages
62
+ exit()
63
+
64
+ # ================================================================
65
+ # Audio models
66
+ # ================================================================
101
67
  vad_model = None
102
68
  whisper_model = None
103
69
 
104
70
  if AUDIO_AVAILABLE:
105
71
  try:
106
- # Load VAD model for voice activity detection
107
72
  vad_model, _ = torch.hub.load(
108
73
  repo_or_dir="snakers4/silero-vad",
109
74
  model="silero_vad",
@@ -112,157 +77,640 @@ steps:
112
77
  verbose=False
113
78
  )
114
79
  vad_model.to('cpu')
115
- print(colored("VAD model loaded.", "green"))
116
-
117
- # Load Whisper for STT
80
+ except Exception:
81
+ pass
82
+ try:
118
83
  whisper_model = WhisperModel("base", device="cpu", compute_type="int8")
119
- print(colored("Whisper model loaded.", "green"))
120
- except Exception as e:
121
- print(colored(f"Error loading audio models: {e}", "red"))
84
+ except Exception:
122
85
  AUDIO_AVAILABLE = False
123
86
 
124
- def speak_text(text, tts_model='kokoro', voice='af_heart'):
125
- """Convert text to speech and play it"""
126
- if not AUDIO_AVAILABLE:
127
- return
87
+ # ================================================================
88
+ # File loading for RAG
89
+ # ================================================================
90
+ loaded_chunks = {}
91
+ if files:
92
+ if isinstance(files, str):
93
+ files = [f.strip() for f in files.split(',')]
94
+ for fp in files:
95
+ fp = os.path.expanduser(fp)
96
+ if os.path.exists(fp):
97
+ try:
98
+ loaded_chunks[fp] = load_file_contents(fp)
99
+ except Exception:
100
+ pass
128
101
 
102
+ # System message
103
+ sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
104
+ sys_msg += "\n\nProvide brief responses of 1-2 sentences unless asked for more detail. Keep responses clear and conversational for voice."
105
+ if not messages or messages[0].get("role") != "system":
106
+ messages.insert(0, {"role": "system", "content": sys_msg})
107
+
108
+ # ================================================================
109
+ # State
110
+ # ================================================================
111
+ class UI:
112
+ tab = 0 # 0=chat, 1=settings
113
+ TAB_NAMES = ['Chat', 'Settings']
114
+
115
+ # chat
116
+ chat_log = [] # [(role, text)]
117
+ chat_scroll = -1
118
+ input_buf = ""
119
+ thinking = False
120
+ spinner_frame = 0
121
+ recording = False
122
+ rec_seconds = 0.0
123
+ transcribing = False
124
+ speaking = False
125
+
126
+ # VAD listening
127
+ listening = AUDIO_AVAILABLE # auto-listen by default
128
+ listen_stop = False # signal to stop listener thread
129
+
130
+ # settings
131
+ set_sel = 0
132
+ tts_enabled = AUDIO_AVAILABLE
133
+ auto_speak = True
134
+ vad_threshold = 0.4 # speech probability threshold
135
+ silence_timeout = 1.5 # seconds of silence before cut
136
+ min_speech = 0.3 # minimum speech duration to process
137
+ editing = False
138
+ edit_buf = ""
139
+ edit_key = ""
140
+
141
+ ui = UI()
142
+
143
+ # ================================================================
144
+ # Helpers
145
+ # ================================================================
146
+ def sz():
129
147
  try:
130
- # Use gTTS as fallback
131
- tts = gTTS(text=text, lang='en')
132
- with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
133
- tts.save(f.name)
134
- wav_path = convert_mp3_to_wav(f.name)
148
+ s = os.get_terminal_size()
149
+ return s.columns, s.lines
150
+ except:
151
+ return 80, 24
152
+
153
+ TURQ = '\033[38;2;64;224;208m'
154
+ PURPLE = '\033[38;2;180;130;255m'
155
+ ORANGE = '\033[38;2;255;165;0m'
156
+ GREEN = '\033[32m'
157
+ DIM = '\033[90m'
158
+ BOLD = '\033[1m'
159
+ REV = '\033[7m'
160
+ RST = '\033[0m'
161
+ RED = '\033[31m'
162
+ SPINNERS = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
163
+
164
+ def wrap_text(text, width):
165
+ lines = []
166
+ for line in text.split('\n'):
167
+ while len(line) > width:
168
+ lines.append(line[:width])
169
+ line = line[width:]
170
+ lines.append(line)
171
+ return lines
172
+
173
+ # ================================================================
174
+ # Audio functions
175
+ # ================================================================
176
+ def transcribe_audio(audio_path):
177
+ if not whisper_model or not audio_path:
178
+ return ""
179
+ try:
180
+ segments, _ = whisper_model.transcribe(audio_path, beam_size=5)
181
+ text = " ".join([seg.text for seg in segments]).strip()
182
+ try: os.remove(audio_path)
183
+ except: pass
184
+ return text
185
+ except Exception as e:
186
+ ui.chat_log.append(('error', f'Transcribe error: {e}'))
187
+ return ""
135
188
 
136
- # Play audio
189
+ def speak_text(text):
190
+ if not AUDIO_AVAILABLE or not ui.tts_enabled:
191
+ return
192
+ try:
193
+ ui.speaking = True
194
+ tts = gTTS(text=text, lang='en')
195
+ mp3_f = tempfile.NamedTemporaryFile(suffix='.mp3', delete=False)
196
+ mp3_path = mp3_f.name
197
+ mp3_f.close()
198
+ tts.save(mp3_path)
199
+ wav_path = mp3_path.replace('.mp3', '.wav')
200
+ convert_mp3_to_wav(mp3_path, wav_path)
137
201
  import subprocess
138
202
  if sys.platform == 'darwin':
139
- subprocess.run(['afplay', wav_path], check=True)
203
+ subprocess.run(['afplay', wav_path], check=True, timeout=30)
140
204
  elif sys.platform == 'linux':
141
- subprocess.run(['aplay', wav_path], check=True)
142
- else:
143
- # Windows
144
- import winsound
145
- winsound.PlaySound(wav_path, winsound.SND_FILENAME)
146
-
147
- cleanup_temp_files([f.name, wav_path])
205
+ subprocess.run(['aplay', wav_path], check=True, timeout=30,
206
+ stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
207
+ for _p in [mp3_path, wav_path]:
208
+ try: os.remove(_p)
209
+ except: pass
148
210
  except Exception as e:
149
- print(colored(f"TTS error: {e}", "red"))
150
-
151
- def record_audio(duration=5):
152
- """Record audio from microphone"""
153
- if not AUDIO_AVAILABLE:
154
- return None
155
-
211
+ ui.chat_log.append(('error', f'TTS error: {e}'))
212
+ finally:
213
+ ui.speaking = False
214
+
215
+ def save_frames_to_wav(frames, sample_width):
216
+ f = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
217
+ path = f.name
218
+ f.close()
219
+ wf = wave.open(path, 'wb')
220
+ wf.setnchannels(CHANNELS)
221
+ wf.setsampwidth(sample_width)
222
+ wf.setframerate(RATE)
223
+ wf.writeframes(b''.join(frames))
224
+ wf.close()
225
+ return path
226
+
227
+ # ================================================================
228
+ # VAD continuous listener
229
+ # ================================================================
230
+ def vad_listener_loop():
231
+ """Background thread: continuously monitors mic, detects speech via
232
+ VAD, records until silence, then transcribes and sends."""
156
233
  try:
157
234
  p = pyaudio.PyAudio()
158
- stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
235
+ sw = p.get_sample_size(FORMAT)
236
+ stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
237
+ input=True, frames_per_buffer=CHUNK)
238
+ except Exception as e:
239
+ ui.chat_log.append(('error', f'Mic open failed: {e}'))
240
+ ui.listening = False
241
+ return
159
242
 
160
- print(colored("Recording...", "cyan"), end='', flush=True)
161
- frames = []
162
- for _ in range(0, int(RATE / CHUNK * duration)):
163
- data = stream.read(CHUNK)
164
- frames.append(data)
165
- print(colored(" Done.", "cyan"))
243
+ chunk_dur = CHUNK / RATE # duration of one chunk in seconds
166
244
 
167
- stream.stop_stream()
168
- stream.close()
169
- p.terminate()
245
+ while not ui.listen_stop:
246
+ # Skip if busy
247
+ if ui.thinking or ui.speaking or ui.transcribing:
248
+ time.sleep(0.1)
249
+ continue
250
+ if not ui.listening:
251
+ time.sleep(0.1)
252
+ continue
170
253
 
171
- # Save to temp file
172
- with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
173
- wf = wave.open(f.name, 'wb')
174
- wf.setnchannels(CHANNELS)
175
- wf.setsampwidth(p.get_sample_size(FORMAT))
176
- wf.setframerate(RATE)
177
- wf.writeframes(b''.join(frames))
178
- wf.close()
179
- return f.name
180
- except Exception as e:
181
- print(colored(f"Recording error: {e}", "red"))
182
- return None
254
+ # Read a chunk and run VAD
255
+ try:
256
+ data = stream.read(CHUNK, exception_on_overflow=False)
257
+ except Exception:
258
+ time.sleep(0.05)
259
+ continue
183
260
 
184
- def transcribe_audio(audio_path):
185
- """Transcribe audio to text using Whisper"""
186
- if not whisper_model or not audio_path:
187
- return ""
261
+ audio_np = np.frombuffer(data, dtype=np.int16).astype(np.float32) / 32768.0
262
+ if len(audio_np) != CHUNK:
263
+ continue
188
264
 
189
- try:
190
- segments, _ = whisper_model.transcribe(audio_path, beam_size=5)
191
- text = " ".join([seg.text for seg in segments])
192
- cleanup_temp_files([audio_path])
193
- return text.strip()
194
- except Exception as e:
195
- print(colored(f"Transcription error: {e}", "red"))
196
- return ""
265
+ try:
266
+ tensor = torch.from_numpy(audio_np)
267
+ prob = vad_model(tensor, RATE).item()
268
+ except Exception:
269
+ continue
197
270
 
198
- # REPL loop
199
- while True:
200
- try:
201
- # Voice input or text input
202
- if AUDIO_AVAILABLE:
203
- prompt_str = f"{npc_name}:yap> [Press Enter to speak, or type] "
204
- else:
205
- prompt_str = f"{npc_name}:yap> "
206
-
207
- user_input = input(prompt_str).strip()
208
-
209
- if user_input.lower() == "/yq":
210
- print("Exiting yap mode.")
211
- break
212
-
213
- # Empty input = record audio
214
- if not user_input and AUDIO_AVAILABLE:
215
- audio_path = record_audio(5)
216
- if audio_path:
217
- user_input = transcribe_audio(audio_path)
218
- if user_input:
219
- print(colored(f"You said: {user_input}", "cyan"))
220
- else:
221
- print(colored("Could not transcribe audio.", "yellow"))
222
- continue
271
+ if prob < ui.vad_threshold:
272
+ continue
273
+
274
+ # Speech detected start collecting frames
275
+ ui.recording = True
276
+ ui.rec_seconds = 0.0
277
+ ui.chat_scroll = -1
278
+ speech_frames = [data]
279
+ speech_dur = chunk_dur
280
+ silence_dur = 0.0
281
+
282
+ while not ui.listen_stop:
283
+ try:
284
+ data = stream.read(CHUNK, exception_on_overflow=False)
285
+ except Exception:
286
+ break
287
+
288
+ speech_frames.append(data)
289
+ speech_dur += chunk_dur
290
+ ui.rec_seconds = speech_dur
291
+
292
+ audio_np = np.frombuffer(data, dtype=np.int16).astype(np.float32) / 32768.0
293
+ try:
294
+ tensor = torch.from_numpy(audio_np)
295
+ prob = vad_model(tensor, RATE).item()
296
+ except Exception:
297
+ prob = 0.0
298
+
299
+ if prob < ui.vad_threshold:
300
+ silence_dur += chunk_dur
223
301
  else:
224
- continue
302
+ silence_dur = 0.0
225
303
 
226
- if not user_input:
227
- continue
304
+ if silence_dur >= ui.silence_timeout:
305
+ break
228
306
 
229
- # Add RAG context if files loaded
230
- current_prompt = user_input
231
- if loaded_chunks:
232
- context_content = ""
233
- for filename, chunks in loaded_chunks.items():
234
- full_text = "\n".join(chunks)
235
- retrieved = rag_search(user_input, full_text, similarity_threshold=0.3)
236
- if retrieved:
237
- context_content += f"\n{retrieved}\n"
238
- if context_content:
239
- current_prompt += f"\n\nContext:{context_content}"
240
-
241
- # Get response
242
- resp = get_llm_response(
243
- current_prompt,
244
- model=model,
245
- provider=provider,
246
- messages=messages,
247
- stream=False, # Don't stream for voice
248
- npc=npc
249
- )
307
+ # Safety: max 60 seconds
308
+ if speech_dur > 60.0:
309
+ break
310
+
311
+ ui.recording = False
312
+
313
+ # Only process if enough speech
314
+ if speech_dur - silence_dur < ui.min_speech:
315
+ continue
250
316
 
251
- messages = resp.get('messages', messages)
252
- response_text = str(resp.get('response', ''))
317
+ # Transcribe
318
+ ui.transcribing = True
319
+ audio_path = save_frames_to_wav(speech_frames, sw)
320
+ text = transcribe_audio(audio_path)
321
+ ui.transcribing = False
253
322
 
254
- # Display and speak response
255
- print(colored(f"{npc_name}: ", "green") + response_text)
323
+ if text and text.strip():
324
+ ui.chat_log.append(('info', f'Heard: "{text}"'))
325
+ ui.chat_scroll = -1
326
+ send_message(text)
256
327
 
328
+ # Cleanup
329
+ try:
330
+ stream.stop_stream()
331
+ stream.close()
332
+ p.terminate()
333
+ except Exception:
334
+ pass
335
+
336
+ # ================================================================
337
+ # Chat send
338
+ # ================================================================
339
+ def send_message(text):
340
+ ui.chat_log.append(('user', text))
341
+ ui.thinking = True
342
+ ui.chat_scroll = -1
343
+
344
+ def worker():
345
+ try:
346
+ current_prompt = text
347
+ if loaded_chunks:
348
+ ctx_content = ""
349
+ for fn, chunks in loaded_chunks.items():
350
+ full = "\n".join(chunks)
351
+ ret = rag_search(text, full, similarity_threshold=0.3)
352
+ if ret:
353
+ ctx_content += f"\n{ret}\n"
354
+ if ctx_content:
355
+ current_prompt += f"\n\nContext:{ctx_content}"
356
+
357
+ resp = get_llm_response(
358
+ current_prompt, model=model, provider=provider,
359
+ messages=messages, stream=False, npc=npc
360
+ )
361
+ messages[:] = resp.get('messages', messages)
362
+ response_text = str(resp.get('response', ''))
363
+ if response_text:
364
+ ui.chat_log.append(('assistant', response_text))
365
+ if ui.auto_speak and ui.tts_enabled:
366
+ speak_text(response_text)
367
+ except Exception as e:
368
+ ui.chat_log.append(('error', str(e)))
369
+ ui.thinking = False
370
+
371
+ threading.Thread(target=worker, daemon=True).start()
372
+
373
+ # ================================================================
374
+ # Rendering
375
+ # ================================================================
376
def render():
    """Draw one full frame: header/tab bar plus the active tab's body."""
    width, height = sz()
    out = ['\033[H']  # home the cursor; the frame repaints in place

    # Tab bar — active tab in reverse video, others dimmed.
    segments = []
    for idx, title in enumerate(ui.TAB_NAMES):
        if idx == ui.tab:
            segments.append(f' {REV}{BOLD} {title} {RST} ')
        else:
            segments.append(f' {DIM} {title} {RST} ')
    tabs = ''.join(segments)

    # Activity indicator, most urgent state first.
    if ui.recording:
        status = f'{RED}● REC {ui.rec_seconds:.1f}s{RST}'
    elif ui.transcribing:
        status = f'{ORANGE}● transcribing...{RST}'
    elif ui.speaking:
        status = f'{GREEN}● speaking...{RST}'
    elif ui.thinking:
        frame = SPINNERS[ui.spinner_frame % len(SPINNERS)]
        status = f'{ORANGE}{frame} thinking...{RST}'
    elif ui.listening:
        status = f'{TURQ}● listening{RST}'
    else:
        status = ''

    # Microphone glyph summarizing audio state.
    if ui.listening:
        audio_st = '🎤'
    elif not AUDIO_AVAILABLE:
        audio_st = '🔇'
    else:
        audio_st = '⏸'

    right = f'{npc_name} | {audio_st} | {model or "?"}@{provider or "?"}'
    gap = width - 12 - len(right) - 20
    header = f'{PURPLE}YAP{RST} {tabs}{" " * max(0, gap)}{status} {DIM}{right}{RST}'
    out.append(f'\033[1;1H{REV} {header[:width-2].ljust(width-2)} {RST}')

    if ui.tab == 0:
        render_chat(out, width, height)
    elif ui.tab == 1:
        render_settings(out, width, height)

    sys.stdout.write(''.join(out))
    sys.stdout.flush()
414
+
415
def render_chat(buf, w, h):
    """Paint the chat tab into *buf* (a list of ANSI strings, flushed later).

    Layout, top to bottom: scrollback region, divider line, input line,
    key-hint status bar. Nothing is written to the terminal here.
    """
    input_h = 3
    # Rows available for the scrollback: total minus header and input area.
    chat_h = h - 2 - input_h

    # Flatten the chat log into pre-wrapped display lines with role prefixes.
    all_lines = []
    _asst_pw = len(npc_name) + 2  # "name: "
    _cont_pw = _asst_pw  # continuation indent matches
    for role, text in ui.chat_log:
        if role == 'user':
            tw = w - 6
            wrapped = wrap_text(text, tw)
            for i, l in enumerate(wrapped):
                # Only the first wrapped line carries the "you:" prefix.
                prefix = f'{BOLD}you:{RST} ' if i == 0 else ' '
                all_lines.append(f'{prefix}{l}')
        elif role == 'assistant':
            tw = w - _asst_pw - 1
            wrapped = wrap_text(text, tw)
            pad = ' ' * _asst_pw
            for i, l in enumerate(wrapped):
                prefix = f'{PURPLE}{BOLD}{npc_name}:{RST} ' if i == 0 else pad
                all_lines.append(f'{prefix}{l}')
        elif role == 'info':
            tw = w - 5
            wrapped = wrap_text(text, tw)
            for i, l in enumerate(wrapped):
                prefix = f' {TURQ}ℹ ' if i == 0 else ' '
                all_lines.append(f'{prefix}{l}{RST}' if i == 0 else f' {l}')
        elif role == 'error':
            tw = w - 5
            wrapped = wrap_text(text, tw)
            for i, l in enumerate(wrapped):
                prefix = f' {RED}✗ ' if i == 0 else ' '
                all_lines.append(f'{prefix}{l}{RST}' if i == 0 else f' {l}')

    # Transient activity indicator appended after the newest message.
    if ui.recording:
        secs = ui.rec_seconds
        all_lines.append(f' {RED}🎙 Recording... {secs:.1f}s{RST}')
    elif ui.transcribing:
        sp = SPINNERS[ui.spinner_frame % len(SPINNERS)]
        all_lines.append(f' {ORANGE}{sp} Transcribing...{RST}')
    elif ui.thinking:
        sp = SPINNERS[ui.spinner_frame % len(SPINNERS)]
        all_lines.append(f' {ORANGE}{sp} thinking...{RST}')
    elif ui.speaking:
        all_lines.append(f' {GREEN}🔊 Speaking...{RST}')

    # Scrolling: chat_scroll == -1 means "pin the view to the newest line".
    if ui.chat_scroll == -1:
        scroll = max(0, len(all_lines) - chat_h)
    else:
        scroll = ui.chat_scroll

    for i in range(chat_h):
        y = 2 + i
        li = scroll + i
        # Clear the row first so shorter frames don't leave stale text.
        buf.append(f'\033[{y};1H\033[K')
        if li < len(all_lines):
            buf.append(all_lines[li])

    # Input area
    div_y = 2 + chat_h
    buf.append(f'\033[{div_y};1H\033[K{DIM}{"─" * w}{RST}')
    input_y = div_y + 1
    # Show only the tail of the input when it exceeds the visible width.
    visible = ui.input_buf[-(w-4):] if len(ui.input_buf) > w - 4 else ui.input_buf
    buf.append(f'\033[{input_y};1H\033[K {BOLD}>{RST} {visible}\033[?25h')

    # Status bar — hint text adapts to audio availability and listen state.
    if AUDIO_AVAILABLE:
        ltog = 'Ctrl+L:Pause' if ui.listening else 'Ctrl+L:Listen'
        hints = f'Enter:Send {ltog} PgUp/PgDn:Scroll Tab:Settings Ctrl+Q:Quit'
    else:
        hints = 'Enter:Send PgUp/PgDn:Scroll Tab:Settings Ctrl+Q:Quit'
    buf.append(f'\033[{h};1H\033[K{REV} {hints[:w-2].ljust(w-2)} {RST}')
488
+
489
def render_settings(buf, w, h):
    """Paint the Voice Settings tab into *buf* (list of ANSI strings)."""
    rows = [
        ('tts_enabled', 'TTS Enabled', 'On' if ui.tts_enabled else 'Off'),
        ('auto_speak', 'Auto-Speak', 'On' if ui.auto_speak else 'Off'),
        ('listening', 'Auto-Listen', 'On' if ui.listening else 'Off'),
        ('silence_timeout', 'Silence Timeout', f'{ui.silence_timeout}s'),
        ('vad_threshold', 'VAD Sensitivity', f'{ui.vad_threshold:.1f}'),
    ]

    buf.append(f'\033[3;3H{BOLD}Voice Settings{RST}')
    buf.append(f'\033[4;3H{DIM}{"─" * (w - 6)}{RST}')

    y = 6
    for idx, (key, label, val) in enumerate(rows):
        if ui.editing and ui.edit_key == key:
            # Row under edit: show the live buffer with a fake cursor.
            buf.append(f'\033[{y};3H{ORANGE}{label}:{RST} {REV} {ui.edit_buf}_ {RST}')
        elif idx == ui.set_sel:
            # Highlighted (selected) row.
            buf.append(f'\033[{y};3H{REV} {label}: {val} {RST}')
        else:
            buf.append(f'\033[{y};3H {BOLD}{label}:{RST} {val}')
        y += 2

    # Environment/info footer beneath the settings list.
    y += 1
    buf.append(f'\033[{y};3H{DIM}Audio: {"Available" if AUDIO_AVAILABLE else "Not available"}{RST}')
    y += 1
    if loaded_chunks:
        buf.append(f'\033[{y};3H{DIM}Files loaded: {len(loaded_chunks)}{RST}')
        y += 1
    buf.append(f'\033[{y};3H{DIM}Whisper: {"Loaded" if whisper_model else "Not loaded"}{RST}')

    # Blank the remainder of the body so stale rows don't linger.
    for row in range(y + 1, h - 1):
        buf.append(f'\033[{row};1H\033[K')

    # Key-hint status bar, editing-aware.
    if ui.editing:
        hint = ' Enter:Save Esc:Cancel '
    else:
        hint = ' j/k:Navigate Space:Toggle e:Edit Tab:Chat Ctrl+Q:Quit '
    buf.append(f'\033[{h};1H\033[K{REV}{hint}{RST}')
526
+
527
+ # ================================================================
528
+ # Input handling
529
+ # ================================================================
530
def handle_key(c, fd):
    """Top-level key dispatcher; returns False only when the app should quit."""
    if c == '\t':
        # Tab cycles tabs, but not while a settings field is being edited.
        if not ui.editing:
            ui.tab = (ui.tab + 1) % 2
        return True
    if c == '\x11':  # Ctrl+Q
        return False
    if c == '\x03':  # Ctrl+C is swallowed, not fatal
        return True

    # Escape sequences (arrows, PgUp/PgDn, F-keys) and bare Esc.
    if c == '\x1b':
        def cancel_edit():
            # A lone Esc aborts an in-progress settings edit.
            if ui.tab == 1 and ui.editing:
                ui.editing = False
                ui.edit_buf = ""

        if not _sel.select([fd], [], [], 0.05)[0]:
            # No follow-up byte arrived: this was a bare Esc press.
            cancel_edit()
            return True

        c2 = os.read(fd, 1).decode('latin-1')
        if c2 == '[':
            c3 = os.read(fd, 1).decode('latin-1')
            if c3 == 'A':  # Up arrow
                if ui.tab == 0:
                    _chat_scroll_up()
                elif ui.tab == 1 and not ui.editing and ui.set_sel > 0:
                    ui.set_sel -= 1
            elif c3 == 'B':  # Down arrow
                if ui.tab == 0:
                    _chat_scroll_down()
                elif ui.tab == 1 and not ui.editing and ui.set_sel < 4:
                    ui.set_sel += 1
            elif c3 == '5':  # PgUp (consume trailing '~')
                os.read(fd, 1)
                if ui.tab == 0:
                    _chat_page_up()
            elif c3 == '6':  # PgDn (consume trailing '~')
                os.read(fd, 1)
                if ui.tab == 0:
                    _chat_page_down()
        elif c2 == 'O':
            c3 = os.read(fd, 1).decode('latin-1')
            if c3 == 'P':    # F1 -> chat tab
                ui.tab = 0
            elif c3 == 'Q':  # F2 -> settings tab
                ui.tab = 1
        else:
            # Esc followed by an unrelated byte: treat as bare Esc.
            cancel_edit()
        return True

    # Plain characters go to the active tab's handler.
    if ui.tab == 0:
        return handle_chat(c, fd)
    if ui.tab == 1:
        return handle_settings(c, fd)
    return True
578
+
579
def _chat_scroll_up():
    """Scroll the chat view up one line, unpinning from the bottom first."""
    _, h = sz()
    chat_h = h - 5
    if ui.chat_scroll == -1:
        # -1 means "follow the newest line"; convert to a concrete offset.
        # NOTE(review): assumes ~2 rendered lines per log entry — heuristic.
        ui.chat_scroll = max(0, 2 * len(ui.chat_log) - chat_h - 1)
    ui.chat_scroll = max(0, ui.chat_scroll - 1)
585
+
586
def _chat_scroll_down():
    """Scroll down one line; a bottom-pinned view (-1) stays pinned."""
    if ui.chat_scroll != -1:
        ui.chat_scroll += 1
588
+
589
def _chat_page_up():
    """Jump the chat view up by one screenful."""
    _, h = sz()
    chat_h = h - 5
    if ui.chat_scroll == -1:
        # From the bottom anchor, back off roughly one full page.
        # NOTE(review): assumes ~2 rendered lines per log entry — heuristic.
        ui.chat_scroll = max(0, 2 * len(ui.chat_log) - 2 * chat_h)
    else:
        ui.chat_scroll = max(0, ui.chat_scroll - chat_h)
596
+
597
def _chat_page_down():
    """Jump to the bottom of the chat and resume auto-follow (-1 sentinel)."""
    ui.chat_scroll = -1
599
+
600
def handle_chat(c, fd):
    """Key handling for the chat tab; always returns True (never quits)."""
    if c == '\x0c':  # Ctrl+L toggles the background listener
        if AUDIO_AVAILABLE:
            ui.listening = not ui.listening
            st = 'on' if ui.listening else 'off'
            ui.chat_log.append(('info', f'Listening {st}.'))
        return True

    # Ignore keystrokes while audio capture or the LLM request is in flight.
    if ui.recording or ui.transcribing or ui.thinking:
        return True

    if c in ('\r', '\n'):
        # Enter: submit the (non-empty) input buffer.
        text = ui.input_buf.strip()
        ui.input_buf = ""
        if text:
            send_message(text)
        return True

    if c in ('\x7f', '\x08'):
        # Backspace: drop the last character.
        ui.input_buf = ui.input_buf[:-1]
        return True

    if ' ' <= c <= '~':
        # Printable ASCII: append and re-pin the chat to the bottom.
        ui.input_buf += c
        ui.chat_scroll = -1
        return True

    return True
632
+
633
def handle_settings(c, fd):
    """Key handling for the settings tab.

    j/k navigate rows, Space toggles boolean settings, 'e' opens an inline
    editor for the numeric fields. While editing, Enter commits (clamped to
    a sane range) and input is restricted to printable ASCII. Always returns
    True — no key on this tab quits the app.
    """
    SETTINGS_KEYS = ['tts_enabled', 'auto_speak', 'listening', 'silence_timeout', 'vad_threshold']

    if ui.editing:
        if c in ('\r', '\n'):
            val = ui.edit_buf.strip()
            if ui.edit_key == 'silence_timeout':
                try:
                    # Clamp to 0.3–10.0 seconds; ignore non-numeric input.
                    ui.silence_timeout = max(0.3, min(10.0, float(val)))
                except ValueError:
                    # Narrowed from a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit; only bad floats are expected.
                    pass
            elif ui.edit_key == 'vad_threshold':
                try:
                    # Clamp sensitivity to 0.1–0.9.
                    ui.vad_threshold = max(0.1, min(0.9, float(val)))
                except ValueError:
                    pass
            ui.editing = False
            ui.edit_buf = ""
        elif c == '\x7f' or c == '\x08':
            ui.edit_buf = ui.edit_buf[:-1]
        elif c >= ' ' and c <= '~':
            ui.edit_buf += c
        return True

    if c == 'j' and ui.set_sel < len(SETTINGS_KEYS) - 1:
        ui.set_sel += 1
    elif c == 'k' and ui.set_sel > 0:
        ui.set_sel -= 1
    elif c == ' ':
        # Space toggles the boolean settings only.
        key = SETTINGS_KEYS[ui.set_sel]
        if key == 'tts_enabled':
            ui.tts_enabled = not ui.tts_enabled
        elif key == 'auto_speak':
            ui.auto_speak = not ui.auto_speak
        elif key == 'listening':
            ui.listening = not ui.listening
            st = 'on' if ui.listening else 'off'
            ui.chat_log.append(('info', f'Listening {st}.'))
    elif c == 'e':
        # 'e' begins editing a numeric field, seeded with its current value.
        key = SETTINGS_KEYS[ui.set_sel]
        if key in ('silence_timeout', 'vad_threshold'):
            ui.editing = True
            ui.edit_key = key
            ui.edit_buf = str(ui.silence_timeout if key == 'silence_timeout' else ui.vad_threshold)
    return True
674
+
675
# ================================================================
# Welcome
# ================================================================
# Seed the chat log with startup info appropriate to the audio setup.
ui.chat_log.append(('info', f'YAP voice chat. NPC: {npc_name}.'))
if AUDIO_AVAILABLE:
    ui.chat_log.append(('info', 'Listening for speech. Just start talking, or type text.'))
    ui.chat_log.append(('info', 'Ctrl+L to pause/resume listening.'))
else:
    ui.chat_log.append(('info', 'Audio not available. Text mode only.'))
if loaded_chunks:
    ui.chat_log.append(('info', f'{len(loaded_chunks)} files loaded for context.'))

# Start VAD listener thread
# Daemon thread so it can't keep the process alive after the UI exits.
_listener_thread = None
if AUDIO_AVAILABLE and vad_model is not None:
    _listener_thread = threading.Thread(target=vad_listener_loop, daemon=True)
    _listener_thread.start()

# ================================================================
# Main loop
# ================================================================
fd = sys.stdin.fileno()
# Save terminal attributes so they can be restored exactly on exit.
old_settings = termios.tcgetattr(fd)
try:
    tty.setcbreak(fd)  # unbuffered, no-echo key input
    sys.stdout.write('\033[?25l\033[2J')  # hide cursor, clear screen
    running = True
    while running:
        render()
        # Advance the spinner only while some background activity is live.
        if ui.thinking or ui.recording or ui.transcribing or ui.speaking or ui.listening:
            ui.spinner_frame += 1
        # Short select timeout keeps the spinner animating between keypresses.
        if _sel.select([fd], [], [], 0.15)[0]:
            c = os.read(fd, 1).decode('latin-1')
            running = handle_key(c, fd)
finally:
    # Signal the VAD listener thread to stop, then restore the terminal.
    ui.listen_stop = True
    termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    sys.stdout.write('\033[?25h\033[2J\033[H')  # show cursor, clear screen
    sys.stdout.flush()

context['output'] = "Exited yap mode."
context['messages'] = messages