rampup 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.js +57 -25
  2. package/package.json +1 -2
package/index.js CHANGED
@@ -848,6 +848,7 @@ async function runRealtimeVoiceMode(authToken, context, projectPath, usage, usag
848
848
  let micInstance = null;
849
849
  let micInputStream = null;
850
850
  let audioChunks = [];
851
+ let transcriptChunks = [];
851
852
  let isListening = false;
852
853
  let sessionDurationSeconds = 0;
853
854
  const sessionTimer = setInterval(() => sessionDurationSeconds++, 1000);
@@ -923,21 +924,26 @@ Be friendly, practical, and reference specific files when relevant. If asked abo
923
924
  break;
924
925
 
925
926
  case 'response.audio_transcript.delta':
926
- // Stream transcript to console
927
+ // Collect transcript (don't show yet - will display after audio)
927
928
  if (event.delta) {
928
- process.stdout.write(chalk.cyan(event.delta));
929
+ transcriptChunks.push(event.delta);
929
930
  }
930
931
  break;
931
932
 
932
933
  case 'response.audio_transcript.done':
933
- console.log('\n');
934
+ // Transcript complete
934
935
  break;
935
936
 
936
937
  case 'response.audio.done':
937
- // Play collected audio
938
+ // Play audio first, then show transcript
938
939
  if (audioChunks.length > 0) {
939
940
  await playAudioChunks(audioChunks);
941
+ // Show transcript after audio finishes
942
+ if (transcriptChunks.length > 0) {
943
+ console.log(chalk.cyan(`\nšŸ”Š Ramp: ${transcriptChunks.join('')}\n`));
944
+ }
940
945
  audioChunks = [];
946
+ transcriptChunks = [];
941
947
  }
942
948
  break;
943
949
 
@@ -1004,39 +1010,65 @@ Be friendly, practical, and reference specific files when relevant. If asked abo
1004
1010
  async function playAudioChunks(chunks) {
1005
1011
  try {
1006
1012
  // Combine all chunks into one buffer
1007
- const audioBuffer = Buffer.concat(chunks);
1013
+ const pcmData = Buffer.concat(chunks);
1008
1014
 
1009
- // Save as raw PCM and convert to playable format
1010
- const rawPath = `/tmp/ramp-voice-${Date.now()}.raw`;
1015
+ // Create WAV file with proper headers (no external tools needed)
1016
+ const wavBuffer = createWavBuffer(pcmData, 24000, 1, 16);
1011
1017
  const wavPath = `/tmp/ramp-voice-${Date.now()}.wav`;
1012
1018
 
1013
- await fs.writeFile(rawPath, audioBuffer);
1019
+ await fs.writeFile(wavPath, wavBuffer);
1014
1020
 
1015
- // Convert raw PCM to WAV using sox or ffmpeg
1021
+ // Play audio
1016
1022
  if (process.platform === 'darwin') {
1017
- try {
1018
- // Try sox first
1019
- await execAsync(`sox -r 24000 -c 1 -b 16 -e signed-integer "${rawPath}" "${wavPath}" 2>/dev/null`);
1020
- await execAsync(`afplay "${wavPath}"`);
1021
- } catch {
1022
- // Try ffmpeg as fallback
1023
- try {
1024
- await execAsync(`ffmpeg -f s16le -ar 24000 -ac 1 -i "${rawPath}" "${wavPath}" -y 2>/dev/null`);
1025
- await execAsync(`afplay "${wavPath}"`);
1026
- } catch {
1027
- // Just try to play raw with afplay (may not work)
1028
- }
1029
- }
1023
+ await execAsync(`afplay "${wavPath}"`);
1024
+ } else if (process.platform === 'linux') {
1025
+ await execAsync(`aplay "${wavPath}" 2>/dev/null || paplay "${wavPath}" 2>/dev/null`).catch(() => {});
1030
1026
  }
1031
1027
 
1032
- // Clean up temp files
1033
- await fs.unlink(rawPath).catch(() => {});
1028
+ // Clean up
1034
1029
  await fs.unlink(wavPath).catch(() => {});
1035
1030
  } catch (err) {
1036
- // Silently fail audio playback
1031
+ // Log error for debugging but don't crash
1032
+ console.error(chalk.dim(`Audio playback error: ${err.message}`));
1037
1033
  }
1038
1034
  }
1039
1035
 
1036
+ // Create WAV buffer from raw PCM data
1037
+ function createWavBuffer(pcmData, sampleRate, numChannels, bitsPerSample) {
1038
+ const byteRate = sampleRate * numChannels * (bitsPerSample / 8);
1039
+ const blockAlign = numChannels * (bitsPerSample / 8);
1040
+ const dataSize = pcmData.length;
1041
+ const headerSize = 44;
1042
+ const fileSize = headerSize + dataSize;
1043
+
1044
+ const buffer = Buffer.alloc(fileSize);
1045
+ let offset = 0;
1046
+
1047
+ // RIFF header
1048
+ buffer.write('RIFF', offset); offset += 4;
1049
+ buffer.writeUInt32LE(fileSize - 8, offset); offset += 4;
1050
+ buffer.write('WAVE', offset); offset += 4;
1051
+
1052
+ // fmt subchunk
1053
+ buffer.write('fmt ', offset); offset += 4;
1054
+ buffer.writeUInt32LE(16, offset); offset += 4; // Subchunk1Size (16 for PCM)
1055
+ buffer.writeUInt16LE(1, offset); offset += 2; // AudioFormat (1 = PCM)
1056
+ buffer.writeUInt16LE(numChannels, offset); offset += 2;
1057
+ buffer.writeUInt32LE(sampleRate, offset); offset += 4;
1058
+ buffer.writeUInt32LE(byteRate, offset); offset += 4;
1059
+ buffer.writeUInt16LE(blockAlign, offset); offset += 2;
1060
+ buffer.writeUInt16LE(bitsPerSample, offset); offset += 2;
1061
+
1062
+ // data subchunk
1063
+ buffer.write('data', offset); offset += 4;
1064
+ buffer.writeUInt32LE(dataSize, offset); offset += 4;
1065
+
1066
+ // Copy PCM data
1067
+ pcmData.copy(buffer, offset);
1068
+
1069
+ return buffer;
1070
+ }
1071
+
1040
1072
  async function cleanup() {
1041
1073
  clearInterval(sessionTimer);
1042
1074
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "rampup",
3
- "version": "0.1.5",
3
+ "version": "0.1.7",
4
4
  "description": "Ramp - Understand any codebase in hours. AI-powered developer onboarding CLI.",
5
5
  "type": "module",
6
6
  "bin": {
@@ -46,7 +46,6 @@
46
46
  "open": "^9.1.0",
47
47
  "openai": "^4.0.0",
48
48
  "ora": "^5.4.1",
49
- "speaker": "^0.5.5",
50
49
  "ws": "^8.18.0"
51
50
  }
52
51
  }