@superinterface/react 3.13.2 → 3.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -10757,6 +10757,43 @@ function _asyncToGenerator12(n) {
  });
  };
  }
+ var sentTypes = [
+ "session.created",
+ "response.done",
+ "conversation.item.input_audio_transcription.completed"
+ ];
+ var handleThreadEvent = function(_ref) {
+ var event = _ref.event, superinterfaceContext = _ref.superinterfaceContext;
+ if (event.data.event === "thread.created") {
+ threadCreated({
+ value: event.data,
+ superinterfaceContext: superinterfaceContext
+ });
+ } else if (event.data.event === "thread.run.requires_action") {
+ threadRunRequiresAction({
+ value: event.data,
+ superinterfaceContext: superinterfaceContext
+ });
+ }
+ };
+ var handleOpenaiEvent = function(_ref2) {
+ var event = _ref2.event, openaiEventsDataChannel = _ref2.openaiEventsDataChannel;
+ openaiEventsDataChannel.send(JSON.stringify(event.data));
+ };
+ var handleEvent = function(_ref3) {
+ var event = _ref3.event, superinterfaceContext = _ref3.superinterfaceContext, openaiEventsDataChannel = _ref3.openaiEventsDataChannel;
+ if (event.type === "openaiEvent") {
+ return handleOpenaiEvent({
+ event: event,
+ openaiEventsDataChannel: openaiEventsDataChannel
+ });
+ } else if (event.type === "threadEvent") {
+ return handleThreadEvent({
+ event: event,
+ superinterfaceContext: superinterfaceContext
+ });
+ }
+ };
  var useWebrtcAudioRuntime = function() {
  var startSessionIfNeeded = function startSessionIfNeeded() {
  return _startSessionIfNeeded.apply(this, arguments);
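This hunk hoists event handling out of the old inline data-channel listener into three module-level helpers: `sentTypes` whitelists the realtime events the client will forward to the backend (used in a later hunk), while `handleEvent` routes streamed server events either back into the `oai-events` data channel or into the thread lifecycle handlers. A rough de-transpiled sketch of the routing, in TypeScript — the types and the `threadCreated` / `threadRunRequiresAction` signatures are illustrative assumptions, not exports of the package:

// Sketch only: types are assumptions; in the dist these are internal untyped functions.
type ThreadEventData = { event: string }
type StreamedEvent =
  | { type: 'threadEvent'; data: ThreadEventData }
  | { type: 'openaiEvent'; data: unknown }

declare const threadCreated: (args: { value: ThreadEventData; superinterfaceContext: unknown }) => void
declare const threadRunRequiresAction: (args: { value: ThreadEventData; superinterfaceContext: unknown }) => void

const sentTypes = [
  'session.created',
  'response.done',
  'conversation.item.input_audio_transcription.completed',
]

const handleEvent = (args: {
  event: StreamedEvent
  superinterfaceContext: unknown
  openaiEventsDataChannel: RTCDataChannel
}) => {
  const { event, superinterfaceContext, openaiEventsDataChannel } = args
  if (event.type === 'openaiEvent') {
    // OpenAI realtime events produced server-side are relayed back
    // into the peer connection's data channel.
    openaiEventsDataChannel.send(JSON.stringify(event.data))
  } else if (event.type === 'threadEvent') {
    // Thread lifecycle events are dispatched to the existing handlers.
    if (event.data.event === 'thread.created') {
      threadCreated({ value: event.data, superinterfaceContext })
    } else if (event.data.event === 'thread.run.requires_action') {
      threadRunRequiresAction({ value: event.data, superinterfaceContext })
    }
  }
}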
@@ -10834,41 +10871,22 @@ var useWebrtcAudioRuntime = function() {
  }
  function _initRealtimeSession() {
  _initRealtimeSession = _asyncToGenerator12(function() {
- var searchParams, iceServersResponse, iceServersData, peerConn, audioEl, ms, offer, sdpResponse, reader, decoder, answerSdp, _ref, value, done, answer, err;
+ var searchParams, peerConn, audioEl, openaiEventsDataChannel, ms, offer, sdpResponse, answerSdp, answer, err1;
  return _ts_generator(this, function(_state) {
  switch(_state.label){
  case 0:
  _state.trys.push([
  0,
- 9,
+ 7,
  ,
- 10
+ 8
  ]);
  setUserIsPending(true);
  searchParams = new URLSearchParams(variableParams({
  variables: superinterfaceContext.variables,
  superinterfaceContext: superinterfaceContext
  }));
- return [
- 4,
- fetch("".concat(superinterfaceContext.baseUrl, "/audio-runtimes/webrtc/ice-servers?").concat(searchParams), {
- method: "GET",
- headers: {
- "Content-Type": "application/json"
- }
- })
- ];
- case 1:
- iceServersResponse = _state.sent();
- return [
- 4,
- iceServersResponse.json()
- ];
- case 2:
- iceServersData = _state.sent();
- peerConn = new RTCPeerConnection({
- iceServers: iceServersData.iceServers
- });
+ peerConn = new RTCPeerConnection();
  pcRef.current = peerConn;
  audioEl = document.createElement("audio");
  audioEl.autoplay = true;
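Session setup no longer fetches ICE servers from the backend: the `GET .../audio-runtimes/webrtc/ice-servers` round trip and its two generator states are removed, and the peer connection is created with the browser's default configuration. In plain async/await terms the change is roughly this (sketch; `baseUrl` and `searchParams` are the values in scope in the dist code):

// Before (3.13.2), roughly: one extra round trip before the peer connection.
// const res = await fetch(`${baseUrl}/audio-runtimes/webrtc/ice-servers?${searchParams}`)
// const { iceServers } = await res.json()
// const peerConn = new RTCPeerConnection({ iceServers })

// After (3.14.0): default ICE configuration, no backend call.
const peerConn = new RTCPeerConnection()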
@@ -10881,35 +10899,111 @@ var useWebrtcAudioRuntime = function() {
  setAssistantPaused(false);
  setAssistantAudioPlayed(true);
  };
- peerConn.createDataChannel("unused-negotiation-only");
- peerConn.addEventListener("datachannel", function(event) {
- var channel = event.channel;
- if (channel.label === "thread-events") {
- channel.onmessage = function(_ref) {
- var data2 = _ref.data;
- console.log("Data channel message:", data2);
- var parsedData = JSON.parse(data2);
- if (parsedData.event === "thread.created") {
- threadCreated({
- value: parsedData,
- superinterfaceContext: superinterfaceContext
- });
- } else if (parsedData.event === "thread.run.requires_action") {
- threadRunRequiresAction({
- value: parsedData,
- superinterfaceContext: superinterfaceContext
- });
+ openaiEventsDataChannel = peerConn.createDataChannel("oai-events");
+ openaiEventsDataChannel.addEventListener("message", /* @__PURE__ */ function() {
+ var _ref4 = _asyncToGenerator12(function(e) {
+ var parsedData, eventsResponse, reader, decoder, _ref, value, done, buffer, lines, _iteratorNormalCompletion, _didIteratorError, _iteratorError, _iterator, _step, line, event, ref;
+ return _ts_generator(this, function(_state) {
+ switch(_state.label){
+ case 0:
+ parsedData = JSON.parse(e.data);
+ if (!sentTypes.includes(parsedData.type)) return [
+ 2
+ ];
+ return [
+ 4,
+ fetch("".concat(superinterfaceContext.baseUrl, "/audio-runtimes/webrtc/events?").concat(searchParams), {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json"
+ },
+ body: e.data
+ })
+ ];
+ case 1:
+ eventsResponse = _state.sent();
+ if (!eventsResponse.body) {
+ throw new Error("No body in events response");
+ }
+ reader = eventsResponse.body.getReader();
+ decoder = new TextDecoder("utf-8");
+ return [
+ 4,
+ reader.read()
+ ];
+ case 2:
+ _ref = _state.sent(), value = _ref.value, done = _ref.done;
+ buffer = "";
+ _state.label = 3;
+ case 3:
+ if (!!done) return [
+ 3,
+ 5
+ ];
+ buffer += decoder.decode(value, {
+ stream: true
+ });
+ lines = buffer.split("\n");
+ buffer = lines.pop() || "";
+ _iteratorNormalCompletion = true, _didIteratorError = false, _iteratorError = undefined;
+ try {
+ for(_iterator = lines[Symbol.iterator](); !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true){
+ line = _step.value;
+ if (line.trim()) {
+ try {
+ event = JSON.parse(line);
+ handleEvent({
+ event: event,
+ superinterfaceContext: superinterfaceContext,
+ openaiEventsDataChannel: openaiEventsDataChannel
+ });
+ } catch (error) {
+ console.error("JSON parse error:", error, "Line:", line);
+ }
+ }
+ }
+ } catch (err) {
+ _didIteratorError = true;
+ _iteratorError = err;
+ } finally{
+ try {
+ if (!_iteratorNormalCompletion && _iterator.return != null) {
+ _iterator.return();
+ }
+ } finally{
+ if (_didIteratorError) {
+ throw _iteratorError;
+ }
+ }
+ }
+ return [
+ 4,
+ reader.read()
+ ];
+ case 4:
+ ref = _state.sent(), value = ref.value, done = ref.done, ref;
+ return [
+ 3,
+ 3
+ ];
+ case 5:
+ return [
+ 2
+ ];
  }
- };
- }
- });
+ });
+ });
+ return function(_x) {
+ return _ref4.apply(this, arguments);
+ };
+ }());
  return [
  4,
  navigator.mediaDevices.getUserMedia({
  audio: true
  })
  ];
- case 3:
+ case 1:
  ms = _state.sent();
  localStreamRef.current = ms;
  ms.getTracks().forEach(function(t) {
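The replacement wiring is the bulk of this hunk: instead of waiting for a server-initiated `thread-events` channel, the client now creates an `oai-events` data channel itself, filters incoming realtime events against `sentTypes`, POSTs matching ones to `.../audio-runtimes/webrtc/events`, and consumes the response as newline-delimited JSON, feeding each complete line through `handleEvent`. The `_ts_generator` states above encode, roughly, this async/await loop (sketch; `baseUrl`, `searchParams`, `superinterfaceContext` and `openaiEventsDataChannel` are the values in scope in the dist code):

// Async/await sketch of the transpiled NDJSON read loop above.
openaiEventsDataChannel.addEventListener('message', async (e: MessageEvent<string>) => {
  const parsedData = JSON.parse(e.data)
  if (!sentTypes.includes(parsedData.type)) return

  const res = await fetch(`${baseUrl}/audio-runtimes/webrtc/events?${searchParams}`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: e.data,
  })
  if (!res.body) throw new Error('No body in events response')

  const reader = res.body.getReader()
  const decoder = new TextDecoder('utf-8')
  let buffer = ''
  let { value, done } = await reader.read()

  while (!done) {
    buffer += decoder.decode(value, { stream: true })
    const lines = buffer.split('\n')
    buffer = lines.pop() || '' // keep any trailing partial line for the next chunk
    for (const line of lines) {
      if (!line.trim()) continue
      try {
        handleEvent({ event: JSON.parse(line), superinterfaceContext, openaiEventsDataChannel })
      } catch (error) {
        console.error('JSON parse error:', error, 'Line:', line)
      }
    }
    ;({ value, done } = await reader.read())
  }
})

Buffering on `\n` boundaries means a JSON event split across two network chunks is only parsed once its closing newline arrives, which is why the partial tail is pushed back into `buffer` each iteration.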
@@ -10920,13 +11014,13 @@ var useWebrtcAudioRuntime = function() {
  4,
  peerConn.createOffer()
  ];
- case 4:
+ case 2:
  offer = _state.sent();
  return [
  4,
  peerConn.setLocalDescription(offer)
  ];
- case 5:
+ case 3:
  _state.sent();
  return [
  4,
@@ -10938,30 +11032,17 @@ var useWebrtcAudioRuntime = function() {
  }
  })
  ];
- case 6:
+ case 4:
  sdpResponse = _state.sent();
  if (!sdpResponse.ok) {
  throw new Error("Server responded with status ".concat(sdpResponse.status));
  }
- if (!sdpResponse.body) {
- throw new Error("ReadableStream not supported in this browser.");
- }
- reader = sdpResponse.body.getReader();
- decoder = new TextDecoder("utf-8");
- answerSdp = "";
  return [
  4,
- reader.read()
+ sdpResponse.text()
  ];
- case 7:
- _ref = _state.sent(), value = _ref.value, done = _ref.done;
- if (done) {
- throw new Error("Stream closed before SDP was received");
- }
- answerSdp += decoder.decode(value, {
- stream: true
- });
- console.log("Received SDP Answer:", answerSdp);
+ case 5:
+ answerSdp = _state.sent();
  answer = {
  type: "answer",
  sdp: answerSdp
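SDP answer handling is also simplified: the old code read exactly one chunk from the response stream (throwing if the stream closed first, and truncating any answer that spanned multiple chunks) and logged it; the new code lets `fetch` buffer the complete body. Roughly:

// Before, roughly: a single reader.read() chunk was assumed to hold the whole SDP.
// After: sdpResponse.text() buffers the full body, correct for multi-chunk answers too.
const answerSdp = await sdpResponse.text()
await peerConn.setRemoteDescription({ type: 'answer', sdp: answerSdp })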
@@ -10970,7 +11051,7 @@ var useWebrtcAudioRuntime = function() {
  4,
  peerConn.setRemoteDescription(answer)
  ];
- case 8:
+ case 6:
  _state.sent();
  buildAnalyzers(ms, audioEl);
  setUserIsPending(false);
@@ -10979,11 +11060,11 @@ var useWebrtcAudioRuntime = function() {
  setAssistantPlaying(true);
  return [
  3,
- 10
+ 8
  ];
- case 9:
- err = _state.sent();
- console.error("Error initRealtimeSession:", err);
+ case 7:
+ err1 = _state.sent();
+ console.error("Error initRealtimeSession:", err1);
  setUserIsPending(false);
  setRecorderStatus("stopped");
  setAssistantPlaying(false);
@@ -10993,9 +11074,9 @@ var useWebrtcAudioRuntime = function() {
  setAssistantAudioPlayed(false);
  return [
  3,
- 10
+ 8
  ];
- case 10:
+ case 8:
  return [
  2
  ];