sera-ai 1.0.5 → 1.0.7

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/index.d.mts CHANGED
@@ -46,6 +46,9 @@ interface AudioDictationProps {
  specialty?: string;
  selectedFormat?: "json" | "hl7" | "fhir";
  onDictationComplete: (message: string) => void;
+ onDictationStart?: () => void;
+ onProcessingStart?: () => void;
+ onError?: (error: string) => void;
  className?: string;
  style?: React$1.CSSProperties;
  buttonText?: string;
package/dist/index.d.ts CHANGED
@@ -46,6 +46,9 @@ interface AudioDictationProps {
  specialty?: string;
  selectedFormat?: "json" | "hl7" | "fhir";
  onDictationComplete: (message: string) => void;
+ onDictationStart?: () => void;
+ onProcessingStart?: () => void;
+ onError?: (error: string) => void;
  className?: string;
  style?: React$1.CSSProperties;
  buttonText?: string;
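
Both declaration files gain the same three optional props on AudioDictationProps: onDictationStart, onProcessingStart, and onError. A minimal consumer sketch of wiring them up follows, assuming AudioDictation is a named export of the sera-ai package root and that the remaining props keep the shapes shown above; the import path, placeholder apiKey, and handler bodies are illustrative only.

import React from "react";
import { AudioDictation } from "sera-ai"; // assumed export name and import path

export function DictationPanel() {
  return (
    <AudioDictation
      apiKey="YOUR_API_KEY" // placeholder credential
      specialty="general"
      selectedFormat="json"
      onDictationComplete={(message) => console.log("Transcript:", message)}
      // New in this release: optional lifecycle and error callbacks
      onDictationStart={() => console.log("Recording started")}
      onProcessingStart={() => console.log("Processing audio")}
      onError={(error) => console.error("Dictation failed:", error)}
    />
  );
}
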
package/dist/index.js CHANGED
@@ -4605,6 +4605,9 @@ var AudioRecorder_default = AudioRecorder;
  var API_BASE_URL2 = "https://nuxera.cloud";
  var useAudioDictation = ({
  onDictationComplete,
+ onDictationStart,
+ onProcessingStart,
+ onError,
  apiKey,
  apiBaseUrl = API_BASE_URL2,
  appendMode = true,
@@ -4693,7 +4696,9 @@ var useAudioDictation = ({
  setAudioBuffer(combinedBuffer);
  } else {
  console.warn("Final chunk received but no audio data accumulated");
- setDictationError("No audio data was recorded");
+ const errorMessage = "No audio data was recorded";
+ setDictationError(errorMessage);
+ onError?.(errorMessage);
  }
  }
  };
@@ -4702,11 +4707,14 @@ var useAudioDictation = ({
  audioContextRef.current = audioContext;
  processorRef.current = processor;
  setIsDictating(true);
+ onDictationStart?.();
  console.log("Recording started successfully");
  } catch (error) {
  console.error("Error starting dictation:", error);
  setIsDictating(false);
- setDictationError("An error occurred while starting dictation");
+ const errorMessage = "An error occurred while starting dictation";
+ setDictationError(errorMessage);
+ onError?.(errorMessage);
  }
  };
  const stopDictating = async () => {
@@ -4765,15 +4773,21 @@ var useAudioDictation = ({
  await processDictationAudio(combinedBuffer);
  } else {
  console.error("No valid audio data found");
- setDictationError("No audio data recorded");
+ const errorMessage = "No audio data recorded";
+ setDictationError(errorMessage);
+ onError?.(errorMessage);
  }
  } else {
  console.error("No audio data to process");
- setDictationError("No audio data to process");
+ const errorMessage = "No audio data to process";
+ setDictationError(errorMessage);
+ onError?.(errorMessage);
  }
  } catch (error) {
  console.error("Error stopping recording:", error);
- setDictationError("An error occurred while stopping dictation");
+ const errorMessage = "An error occurred while stopping dictation";
+ setDictationError(errorMessage);
+ onError?.(errorMessage);
  } finally {
  setIsDictating(false);
  audioSamplesRef.current = [];
@@ -4819,6 +4833,7 @@ var useAudioDictation = ({
  console.log(`Processing dictation with audio data length: ${audioData.length}`);
  console.log(`Using format: ${selectedFormat}`);
  setIsProcessing(true);
+ onProcessingStart?.();
  const wavBlob = encodeWAV(audioData);
  console.log(`Sending audio to dictation API (${wavBlob.size / 1024} KB)`);
  const requestData = {
@@ -4891,11 +4906,15 @@ var useAudioDictation = ({
  onDictationComplete(convertedData.dictation);
  } else {
  console.error("No dictation text in response");
- setDictationError("No dictation text in response");
+ const errorMessage = "No dictation text in response";
+ setDictationError(errorMessage);
+ onError?.(errorMessage);
  }
  } catch (error) {
  console.error("Error processing dictation audio:", error);
- setDictationError("An error occurred while processing dictation");
+ const errorMessage = "An error occurred while processing dictation";
+ setDictationError(errorMessage);
+ onError?.(errorMessage);
  } finally {
  setIsProcessing(false);
  setIsDictating(false);
@@ -4982,6 +5001,9 @@ var AudioDictation = ({
  specialty = "general",
  selectedFormat = "json",
  onDictationComplete,
+ onDictationStart,
+ onProcessingStart,
+ onError,
  className = "",
  style,
  buttonText,
@@ -4992,6 +5014,9 @@ var AudioDictation = ({
  }, []);
  const { startDictating, stopDictating, dictationError, isDictating, isProcessing } = useAudioDictation_default({
  onDictationComplete,
+ onDictationStart,
+ onProcessingStart,
+ onError,
  apiKey,
  apiBaseUrl,
  appendMode,
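
In index.js, the hook invokes onDictationStart immediately after recording starts, onProcessingStart when audio processing begins, and onError alongside every setDictationError call; the AudioDictation component simply forwards all three callbacks to the hook. A consumer can therefore drive a status indicator from the callbacks alone. The sketch below is a hypothetical consumer-side helper, not part of the package; the DictationStatus type and the helper's name are illustrative.

import { useState } from "react";

type DictationStatus = "idle" | "recording" | "processing" | "error";

// Hypothetical helper that maps the sera-ai callbacks onto a simple status machine.
function useDictationStatus() {
  const [status, setStatus] = useState<DictationStatus>("idle");
  const [lastError, setLastError] = useState<string | null>(null);

  return {
    status,
    lastError,
    callbacks: {
      onDictationStart: () => setStatus("recording"),
      onProcessingStart: () => setStatus("processing"),
      onDictationComplete: (_message: string) => setStatus("idle"),
      onError: (error: string) => {
        setLastError(error);
        setStatus("error");
      },
    },
  };
}

The returned callbacks object can then be spread onto the AudioDictation component alongside its other props.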