rampup 0.1.4 → 0.1.6

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (2)
  1. package/index.js +440 -109
  2. package/package.json +4 -2
package/index.js CHANGED
@@ -704,6 +704,7 @@ program
  .command('voice')
  .description('Voice-based codebase learning (talk to your code)')
  .option('-p, --path <path>', 'Project path', '.')
+ .option('-t, --text', 'Use text input instead of microphone')
  .action(async (options) => {
  console.log(chalk.bold.blue('\nšŸŽ™ļø Voice Mode\n'));
  console.log(chalk.gray('Talk to your codebase. Say "exit" or press Ctrl+C to quit.\n'));
@@ -738,7 +739,7 @@ program
  // Get fresh token after potential login
  const authToken = await getIdToken();

- const API_URL = process.env.RAMP_API_URL || 'https://entitlement-service.rian-19c.workers.dev';
+ const RAMP_API_URL = process.env.RAMP_API_URL || 'https://ramp-api-946191982468.us-central1.run.app';

  // Track usage
  const usageFile = path.join(process.env.HOME, '.ramp', 'voice-usage.json');
@@ -749,7 +750,6 @@ program
  } catch {}

  const sessionStart = Date.now();
- let sessionMinutes = 0;

  // Gather codebase context once
  const spinner = ora('Reading codebase...').start();
@@ -781,85 +781,415 @@ program
  await getStructure(projectPath);
  context += `\nStructure:\n${structure.slice(0, 2000)}\n`;

- spinner.succeed('Ready! Listening...\n');
+ spinner.succeed('Codebase loaded');
  } catch (error) {
  spinner.fail(`Error: ${error.message}`);
  process.exit(1);
  }

- const conversationHistory = [];
+ // Check for text-only mode
+ if (options.text) {
+ await runTextVoiceMode(authToken, context, projectPath, usage, usageFile, sessionStart, RAMP_API_URL);
+ return;
+ }

- // Helper function to call backend chat API
- async function chatWithBackend(messages, systemPrompt) {
- const response = await fetch(`${API_URL}/ai/chat`, {
- method: 'POST',
- headers: {
- 'Authorization': `Bearer ${authToken}`,
- 'Content-Type': 'application/json',
+ // Try to use realtime voice with microphone
+ try {
+ await runRealtimeVoiceMode(authToken, context, projectPath, usage, usageFile, sessionStart, RAMP_API_URL);
+ } catch (micError) {
+ console.log(chalk.yellow(`\nāš ļø Microphone not available: ${micError.message}`));
+ console.log(chalk.dim('Falling back to text input mode...\n'));
+ await runTextVoiceMode(authToken, context, projectPath, usage, usageFile, sessionStart, RAMP_API_URL);
+ }
+ });
+
+ // Realtime voice mode using OpenAI Realtime API
+ async function runRealtimeVoiceMode(authToken, context, projectPath, usage, usageFile, sessionStart, RAMP_API_URL) {
+ const WebSocket = (await import('ws')).default;
+ let mic;
+ try {
+ mic = (await import('mic')).default;
+ } catch (e) {
+ throw new Error('mic package not available - run: npm install -g rampup');
+ }
+
+ console.log(chalk.cyan('Connecting to voice service...\n'));
+
+ // Get ephemeral token from our API
+ const sessionResponse = await fetch(`${RAMP_API_URL}/api/ramp/realtime/session`, {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${authToken}`,
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ model: 'gpt-4o-realtime-preview-2024-12-17',
+ voice: 'verse',
+ }),
+ });
+
+ if (!sessionResponse.ok) {
+ const error = await sessionResponse.json().catch(() => ({}));
+ throw new Error(error.message || error.error || 'Failed to create voice session');
+ }
+
+ const session = await sessionResponse.json();
+ const { clientSecret, sessionId } = session;
+
+ // Connect to OpenAI Realtime API
+ const ws = new WebSocket('wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview-2024-12-17', {
+ headers: {
+ 'Authorization': `Bearer ${clientSecret}`,
+ 'OpenAI-Beta': 'realtime=v1',
+ },
+ });
+
+ let isConnected = false;
+ let micInstance = null;
+ let micInputStream = null;
+ let audioChunks = [];
+ let isListening = false;
+ let sessionDurationSeconds = 0;
+ const sessionTimer = setInterval(() => sessionDurationSeconds++, 1000);
+
+ // Handle WebSocket events
+ ws.on('open', () => {
+ isConnected = true;
+ console.log(chalk.green('āœ“ Connected to voice service'));
+
+ // Configure the session with codebase context
+ ws.send(JSON.stringify({
+ type: 'session.update',
+ session: {
+ modalities: ['text', 'audio'],
+ instructions: `You are Ramp, a helpful voice assistant for developers exploring codebases.
+ Keep responses concise (1-3 sentences) since they'll be spoken aloud.
+
+ Project context:
+ ${context}
+
+ Be friendly, practical, and reference specific files when relevant. If asked about code structure, explain it clearly.`,
+ voice: 'verse',
+ input_audio_format: 'pcm16',
+ output_audio_format: 'pcm16',
+ input_audio_transcription: { model: 'whisper-1' },
+ turn_detection: {
+ type: 'server_vad',
+ threshold: 0.5,
+ prefix_padding_ms: 300,
+ silence_duration_ms: 500,
  },
- body: JSON.stringify({
- product: 'ramp',
- messages,
- system: systemPrompt,
- max_tokens: 500,
- }),
+ },
+ }));
+
+ // Start microphone
+ startMicrophone();
+ });
+
+ ws.on('message', async (data) => {
+ try {
+ const event = JSON.parse(data.toString());
+
+ switch (event.type) {
+ case 'session.created':
+ case 'session.updated':
+ console.log(chalk.green('āœ“ Session ready - speak now!\n'));
+ break;
+
+ case 'input_audio_buffer.speech_started':
+ process.stdout.write(chalk.dim('šŸŽ¤ Listening... '));
+ break;
+
+ case 'input_audio_buffer.speech_stopped':
+ console.log(chalk.dim('processing...'));
+ break;
+
+ case 'conversation.item.input_audio_transcription.completed':
+ if (event.transcript) {
+ console.log(chalk.green(`\nYou: ${event.transcript}`));
+ if (event.transcript.toLowerCase().includes('exit') ||
+ event.transcript.toLowerCase().includes('quit') ||
+ event.transcript.toLowerCase().includes('goodbye')) {
+ cleanup();
+ }
+ }
+ break;
+
+ case 'response.audio.delta':
+ // Collect audio chunks
+ if (event.delta) {
+ audioChunks.push(Buffer.from(event.delta, 'base64'));
+ }
+ break;
+
+ case 'response.audio_transcript.delta':
+ // Stream transcript to console
+ if (event.delta) {
+ process.stdout.write(chalk.cyan(event.delta));
+ }
+ break;
+
+ case 'response.audio_transcript.done':
+ console.log('\n');
+ break;
+
+ case 'response.audio.done':
+ // Play collected audio
+ if (audioChunks.length > 0) {
+ await playAudioChunks(audioChunks);
+ audioChunks = [];
+ }
+ break;
+
+ case 'response.done':
+ // Response complete, ready for next input
+ break;
+
+ case 'error':
+ console.error(chalk.red(`\nError: ${event.error?.message || 'Unknown error'}`));
+ break;
+ }
+ } catch (e) {
+ // Ignore parse errors
+ }
+ });
+
+ ws.on('error', (error) => {
+ console.error(chalk.red(`\nConnection error: ${error.message}`));
+ cleanup();
+ });
+
+ ws.on('close', () => {
+ if (isConnected) {
+ console.log(chalk.dim('\nConnection closed'));
+ cleanup();
+ }
+ });
+
+ function startMicrophone() {
+ try {
+ micInstance = mic({
+ rate: '24000',
+ channels: '1',
+ bitwidth: '16',
+ encoding: 'signed-integer',
+ endian: 'little',
+ device: 'default',
+ debug: false,
+ });
+
+ micInputStream = micInstance.getAudioStream();
+
+ micInputStream.on('data', (chunk) => {
+ if (isConnected && ws.readyState === WebSocket.OPEN) {
+ // Send audio to OpenAI
+ ws.send(JSON.stringify({
+ type: 'input_audio_buffer.append',
+ audio: chunk.toString('base64'),
+ }));
+ }
+ });
+
+ micInputStream.on('error', (err) => {
+ console.error(chalk.red(`Microphone error: ${err.message}`));
  });

- if (!response.ok) {
- const error = await response.json().catch(() => ({}));
- throw new Error(error.message || `API error: ${response.status}`);
+ micInstance.start();
+ isListening = true;
+ } catch (err) {
+ throw new Error(`Failed to start microphone: ${err.message}`);
+ }
+ }
+
+ async function playAudioChunks(chunks) {
+ try {
+ // Combine all chunks into one buffer
+ const pcmData = Buffer.concat(chunks);
+
+ // Create WAV file with proper headers (no external tools needed)
+ const wavBuffer = createWavBuffer(pcmData, 24000, 1, 16);
+ const wavPath = `/tmp/ramp-voice-${Date.now()}.wav`;
+
+ await fs.writeFile(wavPath, wavBuffer);
+
+ // Play audio
+ if (process.platform === 'darwin') {
+ await execAsync(`afplay "${wavPath}"`);
+ } else if (process.platform === 'linux') {
+ await execAsync(`aplay "${wavPath}" 2>/dev/null || paplay "${wavPath}" 2>/dev/null`).catch(() => {});
  }

- return await response.json();
+ // Clean up
+ await fs.unlink(wavPath).catch(() => {});
+ } catch (err) {
+ // Log error for debugging but don't crash
+ console.error(chalk.dim(`Audio playback error: ${err.message}`));
+ }
+ }
+
+ // Create WAV buffer from raw PCM data
+ function createWavBuffer(pcmData, sampleRate, numChannels, bitsPerSample) {
+ const byteRate = sampleRate * numChannels * (bitsPerSample / 8);
+ const blockAlign = numChannels * (bitsPerSample / 8);
+ const dataSize = pcmData.length;
+ const headerSize = 44;
+ const fileSize = headerSize + dataSize;
+
+ const buffer = Buffer.alloc(fileSize);
+ let offset = 0;
+
+ // RIFF header
+ buffer.write('RIFF', offset); offset += 4;
+ buffer.writeUInt32LE(fileSize - 8, offset); offset += 4;
+ buffer.write('WAVE', offset); offset += 4;
+
+ // fmt subchunk
+ buffer.write('fmt ', offset); offset += 4;
+ buffer.writeUInt32LE(16, offset); offset += 4; // Subchunk1Size (16 for PCM)
+ buffer.writeUInt16LE(1, offset); offset += 2; // AudioFormat (1 = PCM)
+ buffer.writeUInt16LE(numChannels, offset); offset += 2;
+ buffer.writeUInt32LE(sampleRate, offset); offset += 4;
+ buffer.writeUInt32LE(byteRate, offset); offset += 4;
+ buffer.writeUInt16LE(blockAlign, offset); offset += 2;
+ buffer.writeUInt16LE(bitsPerSample, offset); offset += 2;
+
+ // data subchunk
+ buffer.write('data', offset); offset += 4;
+ buffer.writeUInt32LE(dataSize, offset); offset += 4;
+
+ // Copy PCM data
+ pcmData.copy(buffer, offset);
+
+ return buffer;
+ }
+
+ async function cleanup() {
+ clearInterval(sessionTimer);
+
+ if (micInstance) {
+ try {
+ micInstance.stop();
+ } catch {}
  }

- // Helper function to call backend TTS API
- async function textToSpeech(text) {
- const response = await fetch(`${API_URL}/ai/tts`, {
+ if (ws.readyState === WebSocket.OPEN) {
+ ws.close();
+ }
+
+ // Report session end to our API
+ try {
+ await fetch(`${RAMP_API_URL}/api/ramp/realtime/session/${sessionId}/end`, {
  method: 'POST',
  headers: {
  'Authorization': `Bearer ${authToken}`,
  'Content-Type': 'application/json',
  },
- body: JSON.stringify({
- product: 'ramp',
- text,
- voice: 'nova',
- }),
+ body: JSON.stringify({ durationSeconds: sessionDurationSeconds }),
  });
+ } catch {}

- if (!response.ok) {
- throw new Error(`TTS error: ${response.status}`);
- }
+ // Save usage
+ const totalSessionMinutes = sessionDurationSeconds / 60;
+ usage.totalMinutes += totalSessionMinutes;
+ usage.sessions.push({
+ date: new Date().toISOString(),
+ project: path.basename(projectPath),
+ minutes: totalSessionMinutes,
+ type: 'realtime',
+ });
+ await fs.writeFile(usageFile, JSON.stringify(usage, null, 2));
+
+ console.log(chalk.cyan('\nšŸ‘‹ Ending voice session...'));
+ console.log(chalk.dim(`Session: ${totalSessionMinutes.toFixed(2)} min`));
+ console.log(chalk.dim(`Total usage: ${usage.totalMinutes.toFixed(2)} min\n`));
+
+ process.exit(0);
+ }

- return Buffer.from(await response.arrayBuffer());
+ // Handle Ctrl+C
+ process.on('SIGINT', cleanup);
+
+ // Keep process alive
+ await new Promise(() => {});
+ }
+
+ // Text input with voice output (fallback mode)
+ async function runTextVoiceMode(authToken, context, projectPath, usage, usageFile, sessionStart, RAMP_API_URL) {
+ const API_URL = process.env.ENTITLEMENT_API_URL || 'https://entitlement-service.rian-19c.workers.dev';
+ const conversationHistory = [];
+ let sessionMinutes = 0;
+
+ console.log(chalk.dim('Using text input with voice output.\n'));
+
+ // Helper function to call backend chat API
+ async function chatWithBackend(messages, systemPrompt) {
+ const response = await fetch(`${API_URL}/ai/chat`, {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${authToken}`,
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ product: 'ramp',
+ messages,
+ system: systemPrompt,
+ max_tokens: 500,
+ }),
+ });
+
+ if (!response.ok) {
+ const error = await response.json().catch(() => ({}));
+ throw new Error(error.message || `API error: ${response.status}`);
  }

- // Voice interaction loop
- async function voiceLoop() {
- while (true) {
- try {
- // For now, use text input with voice output
- // Full voice input requires native audio recording
- const { input } = await inquirer.prompt([{
- type: 'input',
- name: 'input',
- message: chalk.green('šŸŽ¤ You:'),
- prefix: ''
- }]);
-
- if (!input.trim()) continue;
- if (input.toLowerCase() === 'exit' || input.toLowerCase() === 'quit') {
- break;
- }
+ return await response.json();
+ }
+
+ // Helper function to call backend TTS API
+ async function textToSpeech(text) {
+ const response = await fetch(`${API_URL}/ai/tts`, {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${authToken}`,
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ product: 'ramp',
+ text,
+ voice: 'nova',
+ }),
+ });
+
+ if (!response.ok) {
+ throw new Error(`TTS error: ${response.status}`);
+ }
+
+ return Buffer.from(await response.arrayBuffer());
+ }
+
+ // Voice interaction loop
+ async function voiceLoop() {
+ while (true) {
+ try {
+ const { input } = await inquirer.prompt([{
+ type: 'input',
+ name: 'input',
+ message: chalk.green('šŸŽ¤ You:'),
+ prefix: ''
+ }]);

- const startTime = Date.now();
- conversationHistory.push({ role: 'user', content: input });
+ if (!input.trim()) continue;
+ if (input.toLowerCase() === 'exit' || input.toLowerCase() === 'quit') {
+ break;
+ }

- // Get AI response
- const thinkingSpinner = ora('Thinking...').start();
+ const startTime = Date.now();
+ conversationHistory.push({ role: 'user', content: input });

- const systemPrompt = `You are Ramp, a voice assistant helping a developer understand a codebase.
+ // Get AI response
+ const thinkingSpinner = ora('Thinking...').start();
+
+ const systemPrompt = `You are Ramp, a voice assistant helping a developer understand a codebase.
  Keep responses concise (2-3 sentences) since they'll be spoken aloud.

  Project context:
@@ -867,77 +1197,78 @@ ${context}

  Be helpful, friendly, and practical. Reference specific files when relevant.`;

- const chatResponse = await chatWithBackend(conversationHistory, systemPrompt);
- const answer = chatResponse.content || chatResponse.text || '';
- conversationHistory.push({ role: 'assistant', content: answer });
-
- thinkingSpinner.stop();
+ const chatResponse = await chatWithBackend(conversationHistory, systemPrompt);
+ const answer = chatResponse.content || chatResponse.text || '';
+ conversationHistory.push({ role: 'assistant', content: answer });

- // Generate speech
- const speechSpinner = ora('Speaking...').start();
-
- try {
- const audioBuffer = await textToSpeech(answer);
+ thinkingSpinner.stop();

- // Save and play audio
- const audioPath = `/tmp/ramp-voice-${Date.now()}.mp3`;
- await fs.writeFile(audioPath, audioBuffer);
+ // Generate speech
+ const speechSpinner = ora('Speaking...').start();

- speechSpinner.stop();
- console.log(chalk.cyan(`\nšŸ”Š Ramp: ${answer}\n`));
+ try {
+ const audioBuffer = await textToSpeech(answer);

- // Play audio (macOS)
- if (process.platform === 'darwin') {
- await execAsync(`afplay "${audioPath}"`).catch(() => {});
- } else if (process.platform === 'linux') {
- await execAsync(`mpg123 "${audioPath}" 2>/dev/null || play "${audioPath}" 2>/dev/null`).catch(() => {});
- }
+ // Save and play audio
+ const audioPath = `/tmp/ramp-voice-${Date.now()}.mp3`;
+ await fs.writeFile(audioPath, audioBuffer);

- // Clean up
- await fs.unlink(audioPath).catch(() => {});
+ speechSpinner.stop();
+ console.log(chalk.cyan(`\nšŸ”Š Ramp: ${answer}\n`));

- } catch (ttsError) {
- speechSpinner.stop();
- // Fallback to text if TTS fails
- console.log(chalk.cyan(`\nšŸ’¬ Ramp: ${answer}\n`));
+ // Play audio (macOS)
+ if (process.platform === 'darwin') {
+ await execAsync(`afplay "${audioPath}"`).catch(() => {});
+ } else if (process.platform === 'linux') {
+ await execAsync(`mpg123 "${audioPath}" 2>/dev/null || play "${audioPath}" 2>/dev/null`).catch(() => {});
  }

- // Track usage
- const elapsed = (Date.now() - startTime) / 1000 / 60;
- sessionMinutes += elapsed;
+ // Clean up
+ await fs.unlink(audioPath).catch(() => {});

- } catch (error) {
- if (error.name === 'ExitPromptError') break;
- console.error(chalk.red(`Error: ${error.message}`));
+ } catch (ttsError) {
+ speechSpinner.stop();
+ // Fallback to text if TTS fails
+ console.log(chalk.cyan(`\nšŸ’¬ Ramp: ${answer}\n`));
  }
- }
- }

- // Handle exit
- process.on('SIGINT', async () => {
- console.log(chalk.cyan('\n\nšŸ‘‹ Ending voice session...\n'));
- await saveUsage();
- process.exit(0);
- });
-
- async function saveUsage() {
- const totalSessionMinutes = (Date.now() - sessionStart) / 1000 / 60;
- usage.totalMinutes += totalSessionMinutes;
- usage.sessions.push({
- date: new Date().toISOString(),
- project: path.basename(projectPath),
- minutes: totalSessionMinutes
- });
- await fs.writeFile(usageFile, JSON.stringify(usage, null, 2));
+ // Track usage
+ const elapsed = (Date.now() - startTime) / 1000 / 60;
+ sessionMinutes += elapsed;

- console.log(chalk.dim(`Session: ${totalSessionMinutes.toFixed(2)} min`));
- console.log(chalk.dim(`Total usage: ${usage.totalMinutes.toFixed(2)} min\n`));
+ } catch (error) {
+ if (error.name === 'ExitPromptError') break;
+ console.error(chalk.red(`Error: ${error.message}`));
+ }
  }
+ }

- await voiceLoop();
+ // Handle exit
+ process.on('SIGINT', async () => {
+ console.log(chalk.cyan('\n\nšŸ‘‹ Ending voice session...\n'));
  await saveUsage();
+ process.exit(0);
  });

+ async function saveUsage() {
+ const totalSessionMinutes = (Date.now() - sessionStart) / 1000 / 60;
+ usage.totalMinutes += totalSessionMinutes;
+ usage.sessions.push({
+ date: new Date().toISOString(),
+ project: path.basename(projectPath),
+ minutes: totalSessionMinutes,
+ type: 'text',
+ });
+ await fs.writeFile(usageFile, JSON.stringify(usage, null, 2));
+
+ console.log(chalk.dim(`Session: ${totalSessionMinutes.toFixed(2)} min`));
+ console.log(chalk.dim(`Total usage: ${usage.totalMinutes.toFixed(2)} min\n`));
+ }
+
+ await voiceLoop();
+ await saveUsage();
+ }
+
  // Voice usage stats
  program
  .command('voice:usage')
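
Note on the audio handling added above: the new createWavBuffer helper prefixes the raw Realtime audio (24 kHz, mono, 16-bit PCM, roughly 24000 Ɨ 1 Ɨ 2 = 48,000 bytes per second of speech) with a standard 44-byte RIFF/WAVE header so afplay/aplay can play it without external tools. As an illustration only (not part of the package; the file name and readWavHeader helper below are hypothetical), that header can be read back field by field:

// inspect-wav.mjs — illustrative sketch, not shipped with rampup
import { readFile } from 'node:fs/promises';

function readWavHeader(buf) {
  return {
    riff: buf.toString('ascii', 0, 4),     // 'RIFF'
    wave: buf.toString('ascii', 8, 12),    // 'WAVE'
    audioFormat: buf.readUInt16LE(20),     // 1 = PCM
    numChannels: buf.readUInt16LE(22),     // 1 (mono)
    sampleRate: buf.readUInt32LE(24),      // 24000
    byteRate: buf.readUInt32LE(28),        // 24000 * 1 * 2 = 48000 bytes/s
    blockAlign: buf.readUInt16LE(32),      // 2
    bitsPerSample: buf.readUInt16LE(34),   // 16
    dataSize: buf.readUInt32LE(40),        // PCM payload length in bytes
  };
}

// Example: inspect a temp file the CLI wrote during a voice session
const buf = await readFile('/tmp/ramp-voice-example.wav');
console.log(readWavHeader(buf));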
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "rampup",
- "version": "0.1.4",
+ "version": "0.1.6",
  "description": "Ramp - Understand any codebase in hours. AI-powered developer onboarding CLI.",
  "type": "module",
  "bin": {
@@ -42,8 +42,10 @@
  "commander": "^11.1.0",
  "firebase": "^10.14.1",
  "inquirer": "^8.2.6",
+ "mic": "^2.1.2",
  "open": "^9.1.0",
  "openai": "^4.0.0",
- "ora": "^5.4.1"
+ "ora": "^5.4.1",
+ "ws": "^8.18.0"
  }
  }
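
The two new dependencies back the realtime path added in index.js: ws supplies the WebSocket client for the OpenAI Realtime API, and mic captures raw PCM from the default input device. A stripped-down sketch of how they fit together (illustrative only; it assumes an ephemeral client secret is already in CLIENT_SECRET, which in the CLI comes from the /api/ramp/realtime/session endpoint):

// realtime-sketch.mjs — illustrative only, mirrors the wiring added in index.js
import WebSocket from 'ws';
import mic from 'mic';

const ws = new WebSocket(
  'wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview-2024-12-17',
  { headers: { Authorization: `Bearer ${process.env.CLIENT_SECRET}`, 'OpenAI-Beta': 'realtime=v1' } },
);

ws.on('open', () => {
  // 24 kHz mono 16-bit PCM matches the session's input_audio_format of 'pcm16'
  const micInstance = mic({ rate: '24000', channels: '1', bitwidth: '16', encoding: 'signed-integer' });
  micInstance.getAudioStream().on('data', (chunk) => {
    ws.send(JSON.stringify({ type: 'input_audio_buffer.append', audio: chunk.toString('base64') }));
  });
  micInstance.start();
});

// Print incoming event types (transcripts, audio deltas, errors)
ws.on('message', (data) => console.log(JSON.parse(data.toString()).type));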