rampup 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.js +20 -6
  2. package/package.json +1 -1
package/index.js CHANGED
@@ -848,7 +848,9 @@ async function runRealtimeVoiceMode(authToken, context, projectPath, usage, usag
  let micInstance = null;
  let micInputStream = null;
  let audioChunks = [];
+ let transcriptChunks = [];
  let isListening = false;
+ let isPlayingAudio = false; // Mute mic while playing to prevent feedback loop
  let sessionDurationSeconds = 0;
  const sessionTimer = setInterval(() => sessionDurationSeconds++, 1000);

@@ -915,6 +917,11 @@ Be friendly, practical, and reference specific files when relevant. If asked abo
  }
  break;

+ case 'response.created':
+ // AI is starting to respond - mute mic to prevent feedback
+ isPlayingAudio = true;
+ break;
+
  case 'response.audio.delta':
  // Collect audio chunks
  if (event.delta) {
@@ -923,22 +930,29 @@ Be friendly, practical, and reference specific files when relevant. If asked abo
  break;

  case 'response.audio_transcript.delta':
- // Stream transcript to console
+ // Collect transcript (don't show yet - will display after audio)
  if (event.delta) {
- process.stdout.write(chalk.cyan(event.delta));
+ transcriptChunks.push(event.delta);
  }
  break;

  case 'response.audio_transcript.done':
- console.log('\n');
+ // Transcript complete
  break;

  case 'response.audio.done':
- // Play collected audio
+ // Play audio first, then show transcript
  if (audioChunks.length > 0) {
  await playAudioChunks(audioChunks);
+ // Show transcript after audio finishes
+ if (transcriptChunks.length > 0) {
+ console.log(chalk.cyan(`\nšŸ”Š Ramp: ${transcriptChunks.join('')}\n`));
+ }
  audioChunks = [];
+ transcriptChunks = [];
  }
+ // Resume listening after audio finishes
+ isPlayingAudio = false;
  break;

  case 'response.done':
@@ -981,8 +995,8 @@ Be friendly, practical, and reference specific files when relevant. If asked abo
  micInputStream = micInstance.getAudioStream();

  micInputStream.on('data', (chunk) => {
- if (isConnected && ws.readyState === WebSocket.OPEN) {
- // Send audio to OpenAI
+ // Don't send audio while AI is speaking (prevents feedback loop)
+ if (isConnected && ws.readyState === WebSocket.OPEN && !isPlayingAudio) {
  ws.send(JSON.stringify({
  type: 'input_audio_buffer.append',
  audio: chunk.toString('base64'),
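
The index.js changes above amount to one pattern: buffer the assistant's audio and transcript as they stream in, mute the microphone from `response.created` until playback finishes, then print the transcript and unmute. Below is a minimal sketch of that flow, assuming an OpenAI Realtime-style event stream over a `ws` WebSocket; the URL, the `playAudioChunks` body, and the `onMicChunk` hookup are illustrative placeholders, not the package's actual wiring (which also colorizes output with chalk and feeds `onMicChunk`-style logic from the `mic` stream shown in the diff).

// Sketch of the 0.1.8 mute-while-speaking pattern (assumptions noted above).
import WebSocket from 'ws';

const ws = new WebSocket('wss://example.invalid/realtime'); // placeholder URL

let audioChunks = [];
let transcriptChunks = [];
let isPlayingAudio = false; // true while assistant audio plays; mic is muted

// Placeholder playback helper: resolves once all queued audio has been played.
async function playAudioChunks(chunks) {
  // decode and play the base64 audio chunks here
}

ws.on('message', async (raw) => {
  const event = JSON.parse(raw.toString());
  switch (event.type) {
    case 'response.created':
      isPlayingAudio = true; // assistant is about to speak: stop sending mic audio
      break;
    case 'response.audio.delta':
      if (event.delta) audioChunks.push(event.delta);
      break;
    case 'response.audio_transcript.delta':
      if (event.delta) transcriptChunks.push(event.delta); // buffer, print later
      break;
    case 'response.audio.done':
      if (audioChunks.length > 0) {
        await playAudioChunks(audioChunks);
        if (transcriptChunks.length > 0) {
          console.log(`\nšŸ”Š Ramp: ${transcriptChunks.join('')}\n`);
        }
        audioChunks = [];
        transcriptChunks = [];
      }
      isPlayingAudio = false; // resume forwarding mic audio
      break;
  }
});

// Mic chunks are only forwarded while the assistant is silent, which is what
// breaks the speaker-to-microphone feedback loop.
function onMicChunk(chunk) {
  if (ws.readyState === WebSocket.OPEN && !isPlayingAudio) {
    ws.send(JSON.stringify({
      type: 'input_audio_buffer.append',
      audio: chunk.toString('base64'),
    }));
  }
}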
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "rampup",
- "version": "0.1.6",
+ "version": "0.1.8",
  "description": "Ramp - Understand any codebase in hours. AI-powered developer onboarding CLI.",
  "type": "module",
  "bin": {