npcsh 0.1.2__py3-none-any.whl → 1.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (143)
  1. npcsh/_state.py +3508 -0
  2. npcsh/alicanto.py +65 -0
  3. npcsh/build.py +291 -0
  4. npcsh/completion.py +206 -0
  5. npcsh/config.py +163 -0
  6. npcsh/corca.py +50 -0
  7. npcsh/execution.py +185 -0
  8. npcsh/guac.py +46 -0
  9. npcsh/mcp_helpers.py +357 -0
  10. npcsh/mcp_server.py +299 -0
  11. npcsh/npc.py +323 -0
  12. npcsh/npc_team/alicanto.npc +2 -0
  13. npcsh/npc_team/alicanto.png +0 -0
  14. npcsh/npc_team/corca.npc +12 -0
  15. npcsh/npc_team/corca.png +0 -0
  16. npcsh/npc_team/corca_example.png +0 -0
  17. npcsh/npc_team/foreman.npc +7 -0
  18. npcsh/npc_team/frederic.npc +6 -0
  19. npcsh/npc_team/frederic4.png +0 -0
  20. npcsh/npc_team/guac.png +0 -0
  21. npcsh/npc_team/jinxs/code/python.jinx +11 -0
  22. npcsh/npc_team/jinxs/code/sh.jinx +34 -0
  23. npcsh/npc_team/jinxs/code/sql.jinx +16 -0
  24. npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
  25. npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
  26. npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
  27. npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
  28. npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
  29. npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
  30. npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
  31. npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
  32. npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
  33. npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
  34. npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
  35. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  36. npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
  37. npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
  38. npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
  39. npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
  40. npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
  41. npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
  42. npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
  43. npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
  44. npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
  45. npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
  46. npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
  47. npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
  48. npcsh/npc_team/jinxs/utils/search.jinx +130 -0
  49. npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
  50. npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
  51. npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
  52. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  53. npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
  54. npcsh/npc_team/kadiefa.npc +3 -0
  55. npcsh/npc_team/kadiefa.png +0 -0
  56. npcsh/npc_team/npcsh.ctx +18 -0
  57. npcsh/npc_team/npcsh_sibiji.png +0 -0
  58. npcsh/npc_team/plonk.npc +2 -0
  59. npcsh/npc_team/plonk.png +0 -0
  60. npcsh/npc_team/plonkjr.npc +2 -0
  61. npcsh/npc_team/plonkjr.png +0 -0
  62. npcsh/npc_team/sibiji.npc +3 -0
  63. npcsh/npc_team/sibiji.png +0 -0
  64. npcsh/npc_team/spool.png +0 -0
  65. npcsh/npc_team/yap.png +0 -0
  66. npcsh/npcsh.py +296 -112
  67. npcsh/parsing.py +118 -0
  68. npcsh/plonk.py +54 -0
  69. npcsh/pti.py +54 -0
  70. npcsh/routes.py +139 -0
  71. npcsh/spool.py +48 -0
  72. npcsh/ui.py +199 -0
  73. npcsh/wander.py +62 -0
  74. npcsh/yap.py +50 -0
  75. npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
  76. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  77. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
  78. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
  79. npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
  80. npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
  81. npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
  82. npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
  83. npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
  84. npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
  85. npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
  86. npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
  87. npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
  88. npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
  89. npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
  90. npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
  91. npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
  92. npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
  93. npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
  94. npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
  95. npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
  96. npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
  97. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
  98. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
  99. npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
  100. npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
  101. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
  102. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  103. npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
  104. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
  105. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
  106. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
  107. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
  108. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
  109. npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
  110. npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
  111. npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
  112. npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
  113. npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
  114. npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
  115. npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
  116. npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
  117. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
  118. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
  119. npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
  120. npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
  121. npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
  122. npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
  123. npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
  124. npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
  125. npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
  126. npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
  127. npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
  128. npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
  129. npcsh-1.1.13.dist-info/METADATA +522 -0
  130. npcsh-1.1.13.dist-info/RECORD +135 -0
  131. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
  132. npcsh-1.1.13.dist-info/entry_points.txt +9 -0
  133. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
  134. npcsh/command_history.py +0 -81
  135. npcsh/helpers.py +0 -36
  136. npcsh/llm_funcs.py +0 -295
  137. npcsh/main.py +0 -5
  138. npcsh/modes.py +0 -343
  139. npcsh/npc_compiler.py +0 -124
  140. npcsh-0.1.2.dist-info/METADATA +0 -99
  141. npcsh-0.1.2.dist-info/RECORD +0 -14
  142. npcsh-0.1.2.dist-info/entry_points.txt +0 -2
  143. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
npcsh/npc_team/jinxs/modes/wander.jinx
@@ -0,0 +1,186 @@
+ jinx_name: wander
+ description: Experimental wandering mode - creative exploration with varied temperatures and random events
+ inputs:
+   - problem: null
+   - environment: null
+   - low_temp: 0.5
+   - high_temp: 1.9
+   - interruption_likelihood: 1.0
+   - sample_rate: 0.4
+   - n_streams: 5
+   - include_events: false
+   - num_events: 3
+   - model: null
+   - provider: null
+
+ steps:
+   - name: wander_explore
+     engine: python
+     code: |
+       import os
+       import random
+       from termcolor import colored
+
+       from npcpy.llm_funcs import get_llm_response
+
+       npc = context.get('npc')
+       messages = context.get('messages', [])
+
+       problem = context.get('problem')
+       environment = context.get('environment')
+       low_temp = float(context.get('low_temp', 0.5))
+       high_temp = float(context.get('high_temp', 1.9))
+       interruption_likelihood = float(context.get('interruption_likelihood', 1.0))
+       sample_rate = float(context.get('sample_rate', 0.4))
+       n_streams = int(context.get('n_streams', 5))
+       include_events = context.get('include_events', False)
+       num_events = int(context.get('num_events', 3))
+
+       model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
+       provider = context.get('provider') or (npc.provider if npc else 'gemini')
+
+       if not problem:
+           context['output'] = """Usage: /wander <problem to explore>
+
+       Options:
+         --environment DESC    Metaphorical environment for wandering
+         --low-temp F          Low temperature (default: 0.5)
+         --high-temp F         High temperature (default: 1.9)
+         --n-streams N         Number of exploration streams (default: 5)
+         --include-events      Add random events during wandering
+
+       Example: /wander How might we reimagine urban transportation?"""
+           context['messages'] = messages
+           exit()
+
+       print(f"""
+       ██╗ ██╗ █████╗ ███╗ ██╗██████╗ ███████╗██████╗
+       ██║ ██║██╔══██╗████╗ ██║██╔══██╗██╔════╝██╔══██╗
+       ██║ █╗ ██║███████║██╔██╗ ██║██║ ██║█████╗ ██████╔╝
+       ██║███╗██║██╔══██║██║╚██╗██║██║ ██║██╔══╝ ██╔══██╗
+       ╚███╔███╔╝██║ ██║██║ ╚████║██████╔╝███████╗██║ ██║
+       ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚══════╝╚═╝ ╚═╝
+
+       Experimental Wandering Mode
+       Problem: {problem}
+       Temperature range: {low_temp} - {high_temp}
+       Streams: {n_streams}
+       """)
+
+       # Generate environment if not provided
+       if not environment:
+           env_prompt = f"""Create a rich, metaphorical environment for wandering through while thinking about:
+       "{problem}"
+
+       The environment should:
+       1. Have distinct regions or areas
+       2. Include various elements and features
+       3. Be metaphorically related to the problem
+       4. Be described in 3-5 sentences
+
+       Provide only the description, no framing."""
+
+           print(colored("Generating wandering environment...", "cyan"))
+           resp = get_llm_response(env_prompt, model=model, provider=provider, temperature=0.7, npc=npc)
+           environment = str(resp.get('response', 'A vast conceptual landscape stretches before you.'))
+           print(f"\n{environment}\n")
+
+       # Event types for random encounters
+       event_types = ["encounter", "discovery", "obstacle", "insight", "shift", "memory"]
+
+       all_insights = []
+       wandering_log = []
+
+       for stream_idx in range(n_streams):
+           # Alternate between low and high temperature
+           if stream_idx % 2 == 0:
+               temp = low_temp
+               mode = "focused"
+           else:
+               temp = high_temp
+               mode = "creative"
+
+           print(colored(f"\n--- Stream {stream_idx + 1}/{n_streams} ({mode}, temp={temp}) ---", "cyan"))
+
+           # Generate random event if enabled
+           event_context = ""
+           if include_events and random.random() < sample_rate:
+               event_type = random.choice(event_types)
+               event_prompt = f"""In the environment: {environment}
+
+       While exploring the problem "{problem}", generate a {event_type} event.
+       The event should be metaphorical and relate to the problem.
+       Describe it in 2-3 sentences."""
+
+               event_resp = get_llm_response(event_prompt, model=model, provider=provider, temperature=0.9, npc=npc)
+               event = str(event_resp.get('response', ''))
+               event_context = f"\n\nEvent ({event_type}): {event}"
+               print(colored(f"[{event_type.upper()}] {event[:100]}...", "yellow"))
+
+           # Main wandering exploration
+           wander_prompt = f"""You are wandering through: {environment}
+
+       Problem being explored: "{problem}"
+       {event_context}
+
+       Previous insights: {all_insights[-3:] if all_insights else 'Starting fresh'}
+
+       In this {mode} exploration (temperature {temp}):
+       1. Let your mind wander through the conceptual space
+       2. Make unexpected connections
+       3. Notice what emerges from the wandering
+       4. Share any insights, questions, or realizations
+
+       Think freely and explore."""
+
+           resp = get_llm_response(wander_prompt, model=model, provider=provider, temperature=temp, npc=npc)
+           stream_output = str(resp.get('response', ''))
+           print(stream_output)
+
+           all_insights.append(stream_output)
+           wandering_log.append({
+               "stream": stream_idx + 1,
+               "mode": mode,
+               "temperature": temp,
+               "event": event_context if include_events else None,
+               "insight": stream_output
+           })
+
+           # Random interruption
+           if random.random() < interruption_likelihood * 0.2:
+               print(colored("\n[Pause for reflection...]", "magenta"))
+               reflect_prompt = f"Briefly reflect on what's emerged so far about: {problem}"
+               reflect_resp = get_llm_response(reflect_prompt, model=model, provider=provider, temperature=0.4, npc=npc)
+               print(colored(str(reflect_resp.get('response', ''))[:200], "magenta"))
+
+       # Synthesis
+       print(colored("\n--- Synthesizing Wanderings ---", "cyan"))
+
+       synthesis_prompt = f"""After wandering through "{environment}" exploring "{problem}":
+
+       All insights gathered:
+       {chr(10).join(all_insights)}
+
+       Synthesize what emerged from this wandering:
+       1. Key themes that appeared
+       2. Unexpected connections made
+       3. New questions raised
+       4. Potential directions to explore further"""
+
+       resp = get_llm_response(synthesis_prompt, model=model, provider=provider, temperature=0.5, npc=npc)
+       synthesis = str(resp.get('response', ''))
+
+       print("\n" + "="*50)
+       print(colored("WANDERING SYNTHESIS", "green", attrs=['bold']))
+       print("="*50)
+       print(synthesis)
+
+       context['output'] = synthesis
+       context['messages'] = messages
+       context['wander_result'] = {
+           'problem': problem,
+           'environment': environment,
+           'log': wandering_log,
+           'insights': all_insights,
+           'synthesis': synthesis
+       }
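The heart of wander.jinx is the stream loop: even-numbered streams run at low_temp ("focused"), odd-numbered at high_temp ("creative"), and random events fire with probability sample_rate. A minimal standalone sketch of that control flow, with a hypothetical fake_llm standing in for npcpy's get_llm_response:

import random

def fake_llm(prompt, temperature):
    # Stand-in for get_llm_response; returns a canned string.
    return f"(insight at temp={temperature} on: {prompt[:40]})"

low_temp, high_temp = 0.5, 1.9
n_streams, sample_rate = 5, 0.4
event_types = ["encounter", "discovery", "obstacle", "insight", "shift", "memory"]

insights = []
for i in range(n_streams):
    temp = low_temp if i % 2 == 0 else high_temp  # alternate focused/creative
    prompt = "wander through the problem space"
    if random.random() < sample_rate:             # occasional metaphorical event
        prompt += f" [event: {random.choice(event_types)}]"
    insights.append(fake_llm(prompt, temp))

print("\n".join(insights))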
npcsh/npc_team/jinxs/modes/yap.jinx
@@ -0,0 +1,262 @@
+ jinx_name: yap
+ description: Voice chat mode - speech-to-text input, text-to-speech output
+ inputs:
+   - model: null
+   - provider: null
+   - tts_model: kokoro
+   - voice: af_heart
+   - files: null
+
+ steps:
+   - name: yap_repl
+     engine: python
+     code: |
+       import os
+       import sys
+       import time
+       import tempfile
+       import threading
+       import queue
+       from termcolor import colored
+
+       # Audio imports with graceful fallback
+       try:
+           import torch
+           import pyaudio
+           import wave
+           import numpy as np
+           from faster_whisper import WhisperModel
+           from gtts import gTTS
+           from npcpy.data.audio import (
+               FORMAT, CHANNELS, RATE, CHUNK,
+               transcribe_recording, convert_mp3_to_wav, cleanup_temp_files
+           )
+           AUDIO_AVAILABLE = True
+       except ImportError as e:
+           AUDIO_AVAILABLE = False
+           print(colored(f"Audio dependencies not available: {e}", "yellow"))
+           print("Install with: pip install npcsh[audio]")
+
+       from npcpy.llm_funcs import get_llm_response
+       from npcpy.npc_sysenv import get_system_message, render_markdown
+       from npcpy.data.load import load_file_contents
+       from npcpy.data.text import rag_search
+
+       npc = context.get('npc')
+       team = context.get('team')
+       messages = context.get('messages', [])
+       files = context.get('files')
+       tts_model = context.get('tts_model', 'kokoro')
+       voice = context.get('voice', 'af_heart')
+
+       model = context.get('model') or (npc.model if npc else None)
+       provider = context.get('provider') or (npc.provider if npc else None)
+
+       print("""
+       ██╗ ██╗ █████╗ ██████╗
+       ╚██╗ ██╔╝██╔══██╗██╔══██╗
+       ╚████╔╝ ███████║██████╔╝
+       ╚██╔╝ ██╔══██║██╔═══╝
+       ██║ ██║ ██║██║
+       ╚═╝ ╚═╝ ╚═╝╚═╝
+
+       Voice Chat Mode
+       """)
+
+       npc_name = npc.name if npc else "yap"
+       print(f"Entering yap mode (NPC: {npc_name}). Type '/yq' to exit.")
+
+       if not AUDIO_AVAILABLE:
+           print(colored("Audio not available. Falling back to text mode.", "yellow"))
+
+       # Load files for RAG context
+       loaded_chunks = {}
+       if files:
+           if isinstance(files, str):
+               files = [f.strip() for f in files.split(',')]
+           for file_path in files:
+               file_path = os.path.expanduser(file_path)
+               if os.path.exists(file_path):
+                   try:
+                       chunks = load_file_contents(file_path)
+                       loaded_chunks[file_path] = chunks
+                       print(colored(f"Loaded: {file_path}", "green"))
+                   except Exception as e:
+                       print(colored(f"Error loading {file_path}: {e}", "red"))
+
+       # System message for concise voice responses
+       sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
+       sys_msg += "\n\nProvide brief responses of 1-2 sentences unless asked for more detail. Keep responses clear and conversational for voice."
+
+       if not messages or messages[0].get("role") != "system":
+           messages.insert(0, {"role": "system", "content": sys_msg})
+
+       # Audio state
+       vad_model = None
+       whisper_model = None
+
+       if AUDIO_AVAILABLE:
+           try:
+               # Load VAD model for voice activity detection
+               vad_model, _ = torch.hub.load(
+                   repo_or_dir="snakers4/silero-vad",
+                   model="silero_vad",
+                   force_reload=False,
+                   onnx=False,
+                   verbose=False
+               )
+               vad_model.to('cpu')
+               print(colored("VAD model loaded.", "green"))
+
+               # Load Whisper for STT
+               whisper_model = WhisperModel("base", device="cpu", compute_type="int8")
+               print(colored("Whisper model loaded.", "green"))
+           except Exception as e:
+               print(colored(f"Error loading audio models: {e}", "red"))
+               AUDIO_AVAILABLE = False
+
+       def speak_text(text, tts_model='kokoro', voice='af_heart'):
+           """Convert text to speech and play it"""
+           if not AUDIO_AVAILABLE:
+               return
+
+           try:
+               # Use gTTS as fallback
+               tts = gTTS(text=text, lang='en')
+               with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
+                   tts.save(f.name)
+                   wav_path = convert_mp3_to_wav(f.name)
+
+               # Play audio
+               import subprocess
+               if sys.platform == 'darwin':
+                   subprocess.run(['afplay', wav_path], check=True)
+               elif sys.platform == 'linux':
+                   subprocess.run(['aplay', wav_path], check=True)
+               else:
+                   # Windows
+                   import winsound
+                   winsound.PlaySound(wav_path, winsound.SND_FILENAME)
+
+               cleanup_temp_files([f.name, wav_path])
+           except Exception as e:
+               print(colored(f"TTS error: {e}", "red"))
+
+       def record_audio(duration=5):
+           """Record audio from microphone"""
+           if not AUDIO_AVAILABLE:
+               return None
+
+           try:
+               p = pyaudio.PyAudio()
+               stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
+
+               print(colored("Recording...", "cyan"), end='', flush=True)
+               frames = []
+               for _ in range(0, int(RATE / CHUNK * duration)):
+                   data = stream.read(CHUNK)
+                   frames.append(data)
+               print(colored(" Done.", "cyan"))
+
+               stream.stop_stream()
+               stream.close()
+               p.terminate()
+
+               # Save to temp file
+               with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
+                   wf = wave.open(f.name, 'wb')
+                   wf.setnchannels(CHANNELS)
+                   wf.setsampwidth(p.get_sample_size(FORMAT))
+                   wf.setframerate(RATE)
+                   wf.writeframes(b''.join(frames))
+                   wf.close()
+                   return f.name
+           except Exception as e:
+               print(colored(f"Recording error: {e}", "red"))
+               return None
+
+       def transcribe_audio(audio_path):
+           """Transcribe audio to text using Whisper"""
+           if not whisper_model or not audio_path:
+               return ""
+
+           try:
+               segments, _ = whisper_model.transcribe(audio_path, beam_size=5)
+               text = " ".join([seg.text for seg in segments])
+               cleanup_temp_files([audio_path])
+               return text.strip()
+           except Exception as e:
+               print(colored(f"Transcription error: {e}", "red"))
+               return ""
+
+       # REPL loop
+       while True:
+           try:
+               # Voice input or text input
+               if AUDIO_AVAILABLE:
+                   prompt_str = f"{npc_name}:yap> [Press Enter to speak, or type] "
+               else:
+                   prompt_str = f"{npc_name}:yap> "
+
+               user_input = input(prompt_str).strip()
+
+               if user_input.lower() == "/yq":
+                   print("Exiting yap mode.")
+                   break
+
+               # Empty input = record audio
+               if not user_input and AUDIO_AVAILABLE:
+                   audio_path = record_audio(5)
+                   if audio_path:
+                       user_input = transcribe_audio(audio_path)
+                       if user_input:
+                           print(colored(f"You said: {user_input}", "cyan"))
+                       else:
+                           print(colored("Could not transcribe audio.", "yellow"))
+                           continue
+                   else:
+                       continue
+
+               if not user_input:
+                   continue
+
+               # Add RAG context if files loaded
+               current_prompt = user_input
+               if loaded_chunks:
+                   context_content = ""
+                   for filename, chunks in loaded_chunks.items():
+                       full_text = "\n".join(chunks)
+                       retrieved = rag_search(user_input, full_text, similarity_threshold=0.3)
+                       if retrieved:
+                           context_content += f"\n{retrieved}\n"
+                   if context_content:
+                       current_prompt += f"\n\nContext:{context_content}"
+
+               # Get response
+               resp = get_llm_response(
+                   current_prompt,
+                   model=model,
+                   provider=provider,
+                   messages=messages,
+                   stream=False,  # Don't stream for voice
+                   npc=npc
+               )
+
+               messages = resp.get('messages', messages)
+               response_text = str(resp.get('response', ''))
+
+               # Display and speak response
+               print(colored(f"{npc_name}: ", "green") + response_text)
+
+               if AUDIO_AVAILABLE:
+                   speak_text(response_text, tts_model, voice)
+
+           except KeyboardInterrupt:
+               print("\nUse '/yq' to exit or continue.")
+               continue
+           except EOFError:
+               print("\nExiting yap mode.")
+               break
+
+       context['output'] = "Exited yap mode."
+       context['messages'] = messages
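yap.jinx's audio stack is entirely optional: a single try/except ImportError probe sets AUDIO_AVAILABLE, and every audio path checks the flag before touching hardware, so the mode degrades to plain text input when the extras are not installed. A stripped-down sketch of the same pattern (the transcription stub is hypothetical):

try:
    import pyaudio  # any missing audio dependency drops us to text mode
    AUDIO_AVAILABLE = True
except ImportError:
    AUDIO_AVAILABLE = False

def read_turn(prompt="yap> "):
    text = input(prompt).strip()
    if not text and AUDIO_AVAILABLE:
        # In the real jinx, empty input triggers record_audio() + Whisper.
        return "(transcribed speech placeholder)"
    return text

print(read_turn())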
npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx
@@ -0,0 +1,77 @@
+ jinx_name: "npc-studio"
+ description: "Start npc studio"
+ inputs:
+   - user_command: ""
+ steps:
+   - name: "launch_npc_studio"
+     engine: "python"
+     code: |
+       import os
+       import subprocess
+       import sys
+       from pathlib import Path
+       import traceback
+
+       NPC_STUDIO_DIR = Path.home() / ".npcsh" / "npc-studio"
+
+       user_command = context.get('user_command')
+       output_messages = context.get('messages', [])
+       output_result = ""
+
+       try:
+           if not NPC_STUDIO_DIR.exists():
+               os.makedirs(NPC_STUDIO_DIR.parent, exist_ok=True)
+               subprocess.check_call([
+                   "git", "clone",
+                   "https://github.com/npc-worldwide/npc-studio.git",
+                   str(NPC_STUDIO_DIR)
+               ])
+           else:
+               subprocess.check_call(
+                   ["git", "pull"],
+                   cwd=NPC_STUDIO_DIR
+               )
+
+           subprocess.check_call(
+               ["npm", "install"],
+               cwd=NPC_STUDIO_DIR
+           )
+
+           req_file = NPC_STUDIO_DIR / "requirements.txt"
+           if req_file.exists():
+               subprocess.check_call([
+                   sys.executable,
+                   "-m",
+                   "pip",
+                   "install",
+                   "-r",
+                   str(req_file)
+               ])
+
+           backend = subprocess.Popen(
+               [sys.executable, "npc_studio_serve.py"],
+               cwd=NPC_STUDIO_DIR
+           )
+
+           dev_server = subprocess.Popen(
+               ["npm", "run", "dev"],
+               cwd=NPC_STUDIO_DIR
+           )
+
+           frontend = subprocess.Popen(
+               ["npm", "start"],
+               cwd=NPC_STUDIO_DIR
+           )
+
+           output_result = (
+               f"NPC Studio started!\n"
+               f"Backend PID={backend.pid}, "
+               f"Dev Server PID={dev_server.pid}, "
+               f"Frontend PID={frontend.pid}"
+           )
+       except Exception as e:
+           traceback.print_exc()
+           output_result = f"Failed to start NPC Studio: {e}"
+
+       context['output'] = output_result
+       context['messages'] = output_messages
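The launcher's setup/serve split is the key design choice above: subprocess.check_call blocks and raises CalledProcessError if any setup step (clone, pull, npm install, pip install) fails, while subprocess.Popen returns immediately so the three servers keep running after the jinx step finishes. A condensed sketch of that pattern:

import subprocess
import sys
from pathlib import Path

repo = Path.home() / ".npcsh" / "npc-studio"

# Setup: blocking, fail-fast (raises CalledProcessError on nonzero exit).
if not repo.exists():
    subprocess.check_call(["git", "clone",
                           "https://github.com/npc-worldwide/npc-studio.git",
                           str(repo)])
else:
    subprocess.check_call(["git", "pull"], cwd=repo)

# Serve: non-blocking; the child process outlives this script.
backend = subprocess.Popen([sys.executable, "npc_studio_serve.py"], cwd=repo)
print(f"backend running, PID={backend.pid}")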
npcsh/npc_team/jinxs/utils/agent.jinx
@@ -0,0 +1,17 @@
+ jinx_name: agent
+ description: Provides an LLM response with tool use enabled.
+ inputs:
+   - query
+   - auto_process_tool_calls: True
+   - use_core_tools: True
+ steps:
+   - name: get_agent_response
+     engine: python
+     code: |
+       response = npc.get_llm_response(
+           request=query,
+           messages=context.get('messages', []),
+           auto_process_tool_calls={{ auto_process_tool_calls | default(True) }},
+           use_core_tools={{ use_core_tools | default(True) }}
+       )
+       output = response.get('response', '')
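Note that agent.jinx mixes two evaluation phases: the {{ ... | default(True) }} placeholders are Jinja template syntax, rendered into the step's Python source before it runs. A small sketch of that rendering step using the jinja2 library directly (the jinx engine presumably does something equivalent):

from jinja2 import Template

# One line of the step source, with its Jinja placeholder intact.
step_src = ("npc.get_llm_response(request=query, "
            "auto_process_tool_calls={{ auto_process_tool_calls | default(True) }})")

# No input supplied: the default(True) filter fills in True.
print(Template(step_src).render())
# Input supplied: the caller's value wins.
print(Template(step_src).render(auto_process_tool_calls=False))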
npcsh/npc_team/jinxs/utils/chat.jinx
@@ -0,0 +1,44 @@
+ jinx_name: chat
+ description: Simple chat mode - LLM conversation without tool execution
+ inputs:
+   - query: null
+   - model: null
+   - provider: null
+   - stream: true
+
+ steps:
+   - name: chat_response
+     engine: python
+     code: |
+       from npcpy.llm_funcs import get_llm_response
+
+       npc = context.get('npc')
+       messages = context.get('messages', [])
+       query = context.get('query', '')
+       stream = context.get('stream', True)
+
+       model = context.get('model') or (npc.model if npc else None)
+       provider = context.get('provider') or (npc.provider if npc else None)
+
+       if not query:
+           context['output'] = ''
+           context['messages'] = messages
+       else:
+           response = get_llm_response(
+               query,
+               model=model,
+               provider=provider,
+               npc=npc,
+               stream=stream,
+               messages=messages
+           )
+
+           context['output'] = response.get('response', '')
+           context['messages'] = response.get('messages', messages)
+
+           # Track usage
+           if 'usage' in response and npc and hasattr(npc, 'shared_context'):
+               usage = response['usage']
+               npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+               npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+               npc.shared_context['turn_count'] += 1
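The usage-tracking tail of chat.jinx assumes npc.shared_context already holds running counters. A minimal sketch of that accounting, with a plain dict standing in for shared_context:

shared_context = {'session_input_tokens': 0,
                  'session_output_tokens': 0,
                  'turn_count': 0}

def track(usage):
    # Same keys the jinx increments after each response.
    shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
    shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
    shared_context['turn_count'] += 1

track({'input_tokens': 120, 'output_tokens': 45})
print(shared_context)  # totals survive across turns within the session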
npcsh/npc_team/jinxs/utils/cmd.jinx
@@ -0,0 +1,44 @@
+ jinx_name: cmd
+ description: Command mode - LLM generates and executes shell commands
+ inputs:
+   - query: null
+   - model: null
+   - provider: null
+   - stream: true
+
+ steps:
+   - name: cmd_execute
+     engine: python
+     code: |
+       from npcpy.llm_funcs import execute_llm_command
+
+       npc = context.get('npc')
+       messages = context.get('messages', [])
+       query = context.get('query', '')
+       stream = context.get('stream', True)
+
+       model = context.get('model') or (npc.model if npc else None)
+       provider = context.get('provider') or (npc.provider if npc else None)
+
+       if not query:
+           context['output'] = ''
+           context['messages'] = messages
+       else:
+           response = execute_llm_command(
+               query,
+               model=model,
+               provider=provider,
+               npc=npc,
+               stream=stream,
+               messages=messages
+           )
+
+           context['output'] = response.get('response', '')
+           context['messages'] = response.get('messages', messages)
+
+           # Track usage
+           if 'usage' in response and npc and hasattr(npc, 'shared_context'):
+               usage = response['usage']
+               npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+               npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+               npc.shared_context['turn_count'] += 1
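chat.jinx and cmd.jinx are the same skeleton with one swapped entry point (get_llm_response vs execute_llm_command). A sketch of factoring that shared shape into a dispatch table, with hypothetical stand-in handlers in place of the npcpy calls:

def chat_handler(query, messages):
    return {'response': f"chat reply to: {query}", 'messages': messages}

def cmd_handler(query, messages):
    return {'response': f"would generate and run a shell command for: {query}",
            'messages': messages}

HANDLERS = {'chat': chat_handler, 'cmd': cmd_handler}

def run_mode(mode, query, messages=None):
    # Mirrors the jinxs' empty-query short-circuit, then dispatches.
    if not query:
        return '', messages or []
    resp = HANDLERS[mode](query, messages or [])
    return resp.get('response', ''), resp.get('messages', [])

print(run_mode('cmd', 'list files')[0])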