@superinterface/react 3.13.2 → 3.14.1

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -10757,6 +10757,43 @@ function _asyncToGenerator12(n) {
  });
  };
  }
+ var sentTypes = [
+ "session.created",
+ "response.done",
+ "conversation.item.input_audio_transcription.completed"
+ ];
+ var handleThreadEvent = function(_ref) {
+ var event = _ref.event, superinterfaceContext = _ref.superinterfaceContext;
+ if (event.data.event === "thread.created") {
+ threadCreated({
+ value: event.data,
+ superinterfaceContext: superinterfaceContext
+ });
+ } else if (event.data.event === "thread.run.requires_action") {
+ threadRunRequiresAction({
+ value: event.data,
+ superinterfaceContext: superinterfaceContext
+ });
+ }
+ };
+ var handleOpenaiEvent = function(_ref2) {
+ var event = _ref2.event, openaiEventsDataChannel = _ref2.openaiEventsDataChannel;
+ openaiEventsDataChannel.send(JSON.stringify(event.data));
+ };
+ var handleEvent = function(_ref3) {
+ var event = _ref3.event, superinterfaceContext = _ref3.superinterfaceContext, openaiEventsDataChannel = _ref3.openaiEventsDataChannel;
+ if (event.type === "openaiEvent") {
+ return handleOpenaiEvent({
+ event: event,
+ openaiEventsDataChannel: openaiEventsDataChannel
+ });
+ } else if (event.type === "threadEvent") {
+ return handleThreadEvent({
+ event: event,
+ superinterfaceContext: superinterfaceContext
+ });
+ }
+ };
  var useWebrtcAudioRuntime = function() {
  var startSessionIfNeeded = function startSessionIfNeeded() {
  return _startSessionIfNeeded.apply(this, arguments);
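
In untranspiled form, the helpers added in this hunk amount to roughly the following sketch (reconstructed from the transpiled output above; threadCreated and threadRunRequiresAction are existing functions in the surrounding bundle):

// Event types the client forwards to the server-side events endpoint.
const sentTypes = [
  'session.created',
  'response.done',
  'conversation.item.input_audio_transcription.completed',
]

// Routes thread lifecycle events to the matching handler.
const handleThreadEvent = ({ event, superinterfaceContext }) => {
  if (event.data.event === 'thread.created') {
    threadCreated({ value: event.data, superinterfaceContext })
  } else if (event.data.event === 'thread.run.requires_action') {
    threadRunRequiresAction({ value: event.data, superinterfaceContext })
  }
}

// Sends a server-produced OpenAI realtime event back over the data channel.
const handleOpenaiEvent = ({ event, openaiEventsDataChannel }) => {
  openaiEventsDataChannel.send(JSON.stringify(event.data))
}

// Dispatches on the envelope type emitted by the events endpoint.
const handleEvent = ({ event, superinterfaceContext, openaiEventsDataChannel }) => {
  if (event.type === 'openaiEvent') {
    return handleOpenaiEvent({ event, openaiEventsDataChannel })
  } else if (event.type === 'threadEvent') {
    return handleThreadEvent({ event, superinterfaceContext })
  }
}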
@@ -10834,41 +10871,18 @@ var useWebrtcAudioRuntime = function() {
  }
  function _initRealtimeSession() {
  _initRealtimeSession = _asyncToGenerator12(function() {
- var searchParams, iceServersResponse, iceServersData, peerConn, audioEl, ms, offer, sdpResponse, reader, decoder, answerSdp, _ref, value, done, answer, err;
+ var peerConn, audioEl, openaiEventsDataChannel, ms, offer, searchParams_0, sdpResponse, answerSdp, answer, err1;
  return _ts_generator(this, function(_state) {
  switch(_state.label){
  case 0:
  _state.trys.push([
  0,
- 9,
+ 7,
  ,
- 10
+ 8
  ]);
  setUserIsPending(true);
- searchParams = new URLSearchParams(variableParams({
- variables: superinterfaceContext.variables,
- superinterfaceContext: superinterfaceContext
- }));
- return [
- 4,
- fetch("".concat(superinterfaceContext.baseUrl, "/audio-runtimes/webrtc/ice-servers?").concat(searchParams), {
- method: "GET",
- headers: {
- "Content-Type": "application/json"
- }
- })
- ];
- case 1:
- iceServersResponse = _state.sent();
- return [
- 4,
- iceServersResponse.json()
- ];
- case 2:
- iceServersData = _state.sent();
- peerConn = new RTCPeerConnection({
- iceServers: iceServersData.iceServers
- });
+ peerConn = new RTCPeerConnection();
  pcRef.current = peerConn;
  audioEl = document.createElement("audio");
  audioEl.autoplay = true;
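
The hunk above drops the pre-flight ICE-server lookup: 3.13.2 fetched TURN/STUN configuration from the /audio-runtimes/webrtc/ice-servers endpoint and passed it to the RTCPeerConnection constructor, while 3.14.1 creates the connection with the browser's default ICE configuration. Simplified:

// 3.13.2 (simplified from the removed generator states):
// const res = await fetch(`${superinterfaceContext.baseUrl}/audio-runtimes/webrtc/ice-servers?${searchParams}`, {
//   method: 'GET',
//   headers: { 'Content-Type': 'application/json' },
// })
// const { iceServers } = await res.json()
// const peerConn = new RTCPeerConnection({ iceServers })

// 3.14.1: no ICE-server fetch; default configuration is used.
const peerConn = new RTCPeerConnection()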
@@ -10881,35 +10895,115 @@ var useWebrtcAudioRuntime = function() {
  setAssistantPaused(false);
  setAssistantAudioPlayed(true);
  };
- peerConn.createDataChannel("unused-negotiation-only");
- peerConn.addEventListener("datachannel", function(event) {
- var channel = event.channel;
- if (channel.label === "thread-events") {
- channel.onmessage = function(_ref) {
- var data2 = _ref.data;
- console.log("Data channel message:", data2);
- var parsedData = JSON.parse(data2);
- if (parsedData.event === "thread.created") {
- threadCreated({
- value: parsedData,
- superinterfaceContext: superinterfaceContext
- });
- } else if (parsedData.event === "thread.run.requires_action") {
- threadRunRequiresAction({
- value: parsedData,
- superinterfaceContext: superinterfaceContext
- });
+ openaiEventsDataChannel = peerConn.createDataChannel("oai-events");
+ openaiEventsDataChannel.addEventListener("message", /* @__PURE__ */ function() {
+ var _ref4 = _asyncToGenerator12(function(e) {
+ var parsedData, searchParams, eventsResponse, reader, decoder, _ref, value, done, buffer, lines, _iteratorNormalCompletion, _didIteratorError, _iteratorError, _iterator, _step, line, event, ref;
+ return _ts_generator(this, function(_state) {
+ switch(_state.label){
+ case 0:
+ parsedData = JSON.parse(e.data);
+ if (!sentTypes.includes(parsedData.type)) return [
+ 2
+ ];
+ searchParams = new URLSearchParams(variableParams({
+ variables: superinterfaceContext.variables,
+ superinterfaceContext: superinterfaceContext
+ }));
+ return [
+ 4,
+ fetch("".concat(superinterfaceContext.baseUrl, "/audio-runtimes/webrtc/events?").concat(searchParams), {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json"
+ },
+ body: e.data
+ })
+ ];
+ case 1:
+ eventsResponse = _state.sent();
+ if (!eventsResponse.body) {
+ throw new Error("No body in events response");
+ }
+ reader = eventsResponse.body.getReader();
+ decoder = new TextDecoder("utf-8");
+ return [
+ 4,
+ reader.read()
+ ];
+ case 2:
+ _ref = _state.sent(), value = _ref.value, done = _ref.done;
+ buffer = "";
+ _state.label = 3;
+ case 3:
+ if (!!done) return [
+ 3,
+ 5
+ ];
+ buffer += decoder.decode(value, {
+ stream: true
+ });
+ lines = buffer.split("\n");
+ buffer = lines.pop() || "";
+ _iteratorNormalCompletion = true, _didIteratorError = false, _iteratorError = undefined;
+ try {
+ for(_iterator = lines[Symbol.iterator](); !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true){
+ line = _step.value;
+ if (line.trim()) {
+ try {
+ event = JSON.parse(line);
+ handleEvent({
+ event: event,
+ superinterfaceContext: superinterfaceContext,
+ openaiEventsDataChannel: openaiEventsDataChannel
+ });
+ } catch (error) {
+ console.error("JSON parse error:", error, "Line:", line);
+ }
+ }
+ }
+ } catch (err) {
+ _didIteratorError = true;
+ _iteratorError = err;
+ } finally{
+ try {
+ if (!_iteratorNormalCompletion && _iterator.return != null) {
+ _iterator.return();
+ }
+ } finally{
+ if (_didIteratorError) {
+ throw _iteratorError;
+ }
+ }
+ }
+ return [
+ 4,
+ reader.read()
+ ];
+ case 4:
+ ref = _state.sent(), value = ref.value, done = ref.done, ref;
+ return [
+ 3,
+ 3
+ ];
+ case 5:
+ return [
+ 2
+ ];
  }
- };
- }
- });
+ });
+ });
+ return function(_x) {
+ return _ref4.apply(this, arguments);
+ };
+ }());
  return [
  4,
  navigator.mediaDevices.getUserMedia({
  audio: true
  })
  ];
- case 3:
+ case 1:
  ms = _state.sent();
  localStreamRef.current = ms;
  ms.getTracks().forEach(function(t) {
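
This hunk replaces the old server-initiated "thread-events" channel with a client-created "oai-events" data channel. De-transpiled, the new message handler parses each realtime event, forwards only the whitelisted sentTypes to the /audio-runtimes/webrtc/events endpoint, then reads the streamed newline-delimited JSON response and dispatches each line through handleEvent. A sketch of the equivalent async/await code (identifiers taken from the diff):

openaiEventsDataChannel.addEventListener('message', async (e) => {
  const parsedData = JSON.parse(e.data)
  if (!sentTypes.includes(parsedData.type)) return

  const searchParams = new URLSearchParams(variableParams({
    variables: superinterfaceContext.variables,
    superinterfaceContext,
  }))

  // POST the raw realtime event to the server.
  const eventsResponse = await fetch(
    `${superinterfaceContext.baseUrl}/audio-runtimes/webrtc/events?${searchParams}`,
    { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: e.data },
  )
  if (!eventsResponse.body) throw new Error('No body in events response')

  // The response streams newline-delimited JSON events back.
  const reader = eventsResponse.body.getReader()
  const decoder = new TextDecoder('utf-8')
  let buffer = ''

  for (;;) {
    const { value, done } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    const lines = buffer.split('\n')
    buffer = lines.pop() || '' // keep a trailing partial line for the next chunk

    for (const line of lines) {
      if (!line.trim()) continue
      try {
        handleEvent({ event: JSON.parse(line), superinterfaceContext, openaiEventsDataChannel })
      } catch (error) {
        console.error('JSON parse error:', error, 'Line:', line)
      }
    }
  }
})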
@@ -10920,17 +11014,21 @@ var useWebrtcAudioRuntime = function() {
  4,
  peerConn.createOffer()
  ];
- case 4:
+ case 2:
  offer = _state.sent();
  return [
  4,
  peerConn.setLocalDescription(offer)
  ];
- case 5:
+ case 3:
  _state.sent();
+ searchParams_0 = new URLSearchParams(variableParams({
+ variables: superinterfaceContext.variables,
+ superinterfaceContext: superinterfaceContext
+ }));
  return [
  4,
- fetch("".concat(superinterfaceContext.baseUrl, "/audio-runtimes/webrtc?").concat(searchParams), {
+ fetch("".concat(superinterfaceContext.baseUrl, "/audio-runtimes/webrtc?").concat(searchParams_0), {
  method: "POST",
  body: offer.sdp,
  headers: {
@@ -10938,30 +11036,17 @@ var useWebrtcAudioRuntime = function() {
  }
  })
  ];
- case 6:
+ case 4:
  sdpResponse = _state.sent();
  if (!sdpResponse.ok) {
  throw new Error("Server responded with status ".concat(sdpResponse.status));
  }
- if (!sdpResponse.body) {
- throw new Error("ReadableStream not supported in this browser.");
- }
- reader = sdpResponse.body.getReader();
- decoder = new TextDecoder("utf-8");
- answerSdp = "";
  return [
  4,
- reader.read()
+ sdpResponse.text()
  ];
- case 7:
- _ref = _state.sent(), value = _ref.value, done = _ref.done;
- if (done) {
- throw new Error("Stream closed before SDP was received");
- }
- answerSdp += decoder.decode(value, {
- stream: true
- });
- console.log("Received SDP Answer:", answerSdp);
+ case 5:
+ answerSdp = _state.sent();
  answer = {
  type: "answer",
  sdp: answerSdp
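
This hunk also simplifies SDP answer handling: 3.13.2 read only the first chunk of the response body with a ReadableStream reader before applying it, while 3.14.1 buffers the whole body with Response.text(). A sketch of the new flow:

// 3.14.1: buffer the complete SDP answer, then apply it.
const answerSdp = await sdpResponse.text()
await peerConn.setRemoteDescription({ type: 'answer', sdp: answerSdp })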
@@ -10970,7 +11055,7 @@ var useWebrtcAudioRuntime = function() {
  4,
  peerConn.setRemoteDescription(answer)
  ];
- case 8:
+ case 6:
  _state.sent();
  buildAnalyzers(ms, audioEl);
  setUserIsPending(false);
@@ -10979,11 +11064,11 @@ var useWebrtcAudioRuntime = function() {
  setAssistantPlaying(true);
  return [
  3,
- 10
+ 8
  ];
- case 9:
- err = _state.sent();
- console.error("Error initRealtimeSession:", err);
+ case 7:
+ err1 = _state.sent();
+ console.error("Error initRealtimeSession:", err1);
  setUserIsPending(false);
  setRecorderStatus("stopped");
  setAssistantPlaying(false);
@@ -10993,9 +11078,9 @@ var useWebrtcAudioRuntime = function() {
  setAssistantAudioPlayed(false);
  return [
  3,
- 10
+ 8
  ];
- case 10:
+ case 8:
  return [
  2
  ];