sera-ai 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +62 -0
- package/dist/index.d.mts +19 -1
- package/dist/index.d.ts +19 -1
- package/dist/index.js +473 -0
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +474 -2
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -2
package/README.md
CHANGED

@@ -55,6 +55,68 @@ export default App;
 
 That's it! No configuration files, no worker files to copy, no CSS frameworks to install.
 
+## Audio Dictation Component
+
+For shorter dictation tasks, use the `AudioDictation` component which provides push-to-talk functionality:
+
+```tsx
+import React, { useState } from 'react';
+import { AudioDictation } from 'sera-ai';
+
+function DictationApp() {
+  const [dictatedText, setDictatedText] = useState('');
+
+  return (
+    <div style={{ padding: '20px' }}>
+      <h1>Medical Dictation</h1>
+
+      <AudioDictation
+        apiKey="your-api-key"
+        doctorName="Dr. Smith"
+        patientId="12345"
+        specialty="cardiology"
+        selectedFormat="json"
+        onDictationComplete={(text) => {
+          setDictatedText(prev => prev + ' ' + text);
+        }}
+      />
+
+      <div style={{ marginTop: '20px', padding: '10px', border: '1px solid #ccc' }}>
+        <h3>Dictated Text:</h3>
+        <p>{dictatedText}</p>
+      </div>
+    </div>
+  );
+}
+```
+
+### AudioDictation Props
+
+| Prop | Type | Default | Description |
+|------|------|---------|-------------|
+| `apiKey` | `string` | - | Your Sera AI API key |
+| `appendMode` | `boolean` | `true` | Whether to append to existing text |
+| `doctorName` | `string` | `"doctor"` | Doctor's name for the dictation |
+| `patientId` | `string` | - | Patient identifier |
+| `sessionId` | `string` | - | Session identifier |
+| `language` | `string` | `"en"` | Language code for dictation |
+| `specialty` | `string` | `"general"` | Medical specialty |
+| `selectedFormat` | `"json" \| "hl7" \| "fhir"` | `"json"` | Output format |
+| `onDictationComplete` | `(text: string) => void` | **Required** | Callback when dictation is complete |
+| `className` | `string` | - | Custom CSS classes |
+| `style` | `CSSProperties` | - | Inline styles |
+| `buttonText` | `string` | `"Hold to Dictate"` | Custom button text |
+| `placeholder` | `string` | `"Click and hold to dictate..."` | Tooltip text |
+
+### AudioDictation Features
+
+- **Push-to-talk**: Hold mouse button or spacebar to dictate
+- **Mobile support**: Touch and hold on mobile devices
+- **Visual feedback**: Button animates while recording
+- **Error handling**: Built-in error display and recovery
+- **Multiple formats**: Support for JSON, HL7, and FHIR output
+- **Real-time processing**: Immediate transcription after release
+
 ## Advanced Usage
 
 ### Medical Specialties
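The README hunk above documents the component's public surface. As a quick illustration of how it composes with ordinary form state, here is a small sketch that routes the dictated text into an editable `<textarea>`. It assumes only the props documented in the table above; the `DictationNotes` component name is illustrative, not part of the package:

```tsx
import React, { useState } from 'react';
import { AudioDictation } from 'sera-ai';

// Illustrative consumer: dictated text lands in an editable notes field.
function DictationNotes() {
  const [notes, setNotes] = useState('');

  return (
    <div>
      <AudioDictation
        apiKey="your-api-key"
        specialty="cardiology"
        selectedFormat="json"
        onDictationComplete={(text) =>
          // Same append pattern as the README example, minus the stray
          // leading space when the field is still empty.
          setNotes(prev => (prev ? prev + ' ' + text : text))
        }
      />
      {/* The transcript stays editable between dictation passes. */}
      <textarea
        value={notes}
        onChange={(e) => setNotes(e.target.value)}
        rows={6}
      />
    </div>
  );
}

export default DictationNotes;
```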
package/dist/index.d.mts
CHANGED

@@ -35,4 +35,22 @@ interface ClassificationInfoResponse {
 
 declare const AudioRecorder: React$1.FC<AudioRecorderProps>;
 
-export { type APIOptions, type APIResponse, AudioRecorder, type AudioRecorderProps };
+interface AudioDictationProps {
+    apiKey?: string;
+    apiBaseUrl?: string;
+    appendMode?: boolean;
+    doctorName?: string;
+    patientId?: string;
+    sessionId?: string;
+    language?: string;
+    specialty?: string;
+    selectedFormat?: "json" | "hl7" | "fhir";
+    onDictationComplete: (message: string) => void;
+    className?: string;
+    style?: React$1.CSSProperties;
+    buttonText?: string;
+    placeholder?: string;
+}
+declare const AudioDictation: React$1.FC<AudioDictationProps>;
+
+export { type APIOptions, type APIResponse, AudioDictation, type AudioDictationProps, AudioRecorder, type AudioRecorderProps };
package/dist/index.d.ts
CHANGED

@@ -35,4 +35,22 @@ interface ClassificationInfoResponse {
 
 declare const AudioRecorder: React$1.FC<AudioRecorderProps>;
 
-export { type APIOptions, type APIResponse, AudioRecorder, type AudioRecorderProps };
+interface AudioDictationProps {
+    apiKey?: string;
+    apiBaseUrl?: string;
+    appendMode?: boolean;
+    doctorName?: string;
+    patientId?: string;
+    sessionId?: string;
+    language?: string;
+    specialty?: string;
+    selectedFormat?: "json" | "hl7" | "fhir";
+    onDictationComplete: (message: string) => void;
+    className?: string;
+    style?: React$1.CSSProperties;
+    buttonText?: string;
+    placeholder?: string;
+}
+declare const AudioDictation: React$1.FC<AudioDictationProps>;
+
+export { type APIOptions, type APIResponse, AudioDictation, type AudioDictationProps, AudioRecorder, type AudioRecorderProps };
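Both declaration files now export the `AudioDictationProps` type alongside the component, so consumers can derive their own typed wrappers instead of re-declaring the prop shape. A minimal sketch, assuming the typings exactly as shipped above (the `ClinicDictation` wrapper and its pinned defaults are illustrative):

```tsx
import React from 'react';
import { AudioDictation, type AudioDictationProps } from 'sera-ai';

// Illustrative wrapper: pin app-wide defaults while leaving the
// required onDictationComplete callback to the call site.
type ClinicDictationProps = Omit<AudioDictationProps, 'language' | 'specialty'>;

const ClinicDictation: React.FC<ClinicDictationProps> = (props) => (
  <AudioDictation language="en" specialty="cardiology" {...props} />
);

export default ClinicDictation;
```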
package/dist/index.js
CHANGED

@@ -4602,7 +4602,480 @@ var AudioRecorder = ({
 ] });
 };
 var AudioRecorder_default = AudioRecorder;
+var API_BASE_URL2 = "https://nuxera.cloud";
+var useAudioDictation = ({
+  setIsProcessing,
+  setIsDictating,
+  onDictationComplete,
+  apiKey,
+  apiBaseUrl = API_BASE_URL2,
+  appendMode = true,
+  doctorName = "asad",
+  patientId,
+  sessionId,
+  language,
+  specialty,
+  selectedFormat = "json"
+}) => {
+  const [dictationError, setDictationError] = React3.useState(null);
+  const audioContextRef = React3.useRef(null);
+  const processorRef = React3.useRef(null);
+  const mediaStreamRef = React3.useRef(null);
+  const audioSamplesRef = React3.useRef([]);
+  const [audioBuffer, setAudioBuffer] = React3.useState(null);
+  const { createHL7DictationRequest, createFHIRDictationRequest, convertDictationResponse } = useHL7FHIRConverter_default();
+  const effectiveApiKey = apiKey;
+  React3.useEffect(() => {
+    return () => {
+      if (mediaStreamRef.current) {
+        mediaStreamRef.current.getTracks().forEach((track) => track.stop());
+        mediaStreamRef.current = null;
+      }
+      if (processorRef.current) {
+        try {
+          processorRef.current.disconnect();
+        } catch (e) {
+          console.error("Error disconnecting processor:", e);
+        }
+        processorRef.current = null;
+      }
+      if (audioContextRef.current && audioContextRef.current.state !== "closed") {
+        try {
+          audioContextRef.current.close();
+        } catch (e) {
+          console.error("Error closing AudioContext:", e);
+        }
+      }
+      audioContextRef.current = null;
+    };
+  }, []);
+  const startDictating = async () => {
+    try {
+      console.log("Starting recording");
+      setAudioBuffer(null);
+      audioSamplesRef.current = [];
+      setDictationError(null);
+      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+      mediaStreamRef.current = stream;
+      const audioContext = new (window.AudioContext || window.webkitAudioContext)();
+      console.log("Loading audio-processor.js module...");
+      try {
+        await audioContext.audioWorklet.addModule("/audio-processor.js");
+        console.log("Audio worklet module loaded successfully");
+      } catch (err) {
+        console.error("Error loading audio worklet module:", err);
+        throw err;
+      }
+      const processor = new AudioWorkletNode(audioContext, "audio-processor");
+      processor.port.onmessage = (event) => {
+        console.log("Received message from processor:", event.data.command);
+        if (event.data.audioBuffer) {
+          console.log("Received audio buffer", event.data.audioBuffer.length);
+          audioSamplesRef.current.push(new Float32Array(event.data.audioBuffer));
+        }
+        if (event.data.command === "finalChunk") {
+          console.log("Received final chunk");
+          if (event.data.audioBuffer) {
+            audioSamplesRef.current.push(new Float32Array(event.data.audioBuffer));
+          }
+          const totalLength = audioSamplesRef.current.reduce(
+            (acc, buffer) => acc + buffer.length,
+            0
+          );
+          if (totalLength > 0) {
+            const combinedBuffer = new Float32Array(totalLength);
+            let offset = 0;
+            audioSamplesRef.current.forEach((buffer) => {
+              combinedBuffer.set(buffer, offset);
+              offset += buffer.length;
+            });
+            console.log("Combined buffer length:", combinedBuffer.length);
+            setAudioBuffer(combinedBuffer);
+          } else {
+            console.warn("Final chunk received but no audio data accumulated");
+            setDictationError("No audio data was recorded");
+          }
+        }
+      };
+      const source = audioContext.createMediaStreamSource(stream);
+      source.connect(processor);
+      audioContextRef.current = audioContext;
+      processorRef.current = processor;
+      setIsDictating(true);
+      console.log("Recording started successfully");
+    } catch (error) {
+      console.error("Error starting dictation:", error);
+      setIsDictating(false);
+      setDictationError("An error occurred while starting dictation");
+    }
+  };
+  const stopDictating = async () => {
+    console.log("Stopping dictation");
+    try {
+      if (processorRef.current) {
+        console.log("Sending stop command to processor");
+        processorRef.current.port.postMessage({ command: "stop" });
+        await new Promise((resolve) => setTimeout(resolve, 100));
+      }
+      if (mediaStreamRef.current) {
+        console.log("Stopping media stream tracks");
+        mediaStreamRef.current.getTracks().forEach((track) => {
+          track.stop();
+          console.log("Track stopped:", track.kind, track.readyState);
+        });
+        mediaStreamRef.current = null;
+      }
+      if (processorRef.current) {
+        console.log("Disconnecting processor");
+        try {
+          processorRef.current.disconnect();
+          processorRef.current = null;
+        } catch (e) {
+          console.error("Error disconnecting processor:", e);
+          processorRef.current = null;
+        }
+      }
+      if (audioContextRef.current) {
+        console.log("Closing audio context, current state:", audioContextRef.current.state);
+        try {
+          if (audioContextRef.current.state !== "closed") {
+            await audioContextRef.current.close();
+            console.log("Audio context closed successfully");
+          }
+        } catch (e) {
+          console.error("Error closing AudioContext:", e);
+        }
+        audioContextRef.current = null;
+      }
+      await new Promise((resolve) => setTimeout(resolve, 200));
+      if (audioBuffer && audioBuffer.length > 0) {
+        console.log(`Processing audio buffer of size ${audioBuffer.length} samples`);
+        await processDictationAudio(audioBuffer);
+      } else if (audioSamplesRef.current.length > 0) {
+        console.log("No audio buffer but have samples, combining now");
+        const totalLength = audioSamplesRef.current.reduce((acc, buffer) => acc + buffer.length, 0);
+        if (totalLength > 0) {
+          const combinedBuffer = new Float32Array(totalLength);
+          let offset = 0;
+          audioSamplesRef.current.forEach((buffer) => {
+            combinedBuffer.set(buffer, offset);
+            offset += buffer.length;
+          });
+          console.log(`Created combined buffer of size ${combinedBuffer.length} samples`);
+          await processDictationAudio(combinedBuffer);
+        } else {
+          console.error("No valid audio data found");
+          setDictationError("No audio data recorded");
+        }
+      } else {
+        console.error("No audio data to process");
+        setDictationError("No audio data to process");
+      }
+    } catch (error) {
+      console.error("Error stopping recording:", error);
+      setDictationError("An error occurred while stopping dictation");
+    } finally {
+      setIsDictating(false);
+      audioSamplesRef.current = [];
+      setAudioBuffer(null);
+    }
+  };
+  const encodeWAV = (samples) => {
+    console.log(`Encoding WAV with ${samples.length} samples`);
+    const sampleRate = audioContextRef.current?.sampleRate || 44100;
+    console.log(`Using sample rate: ${sampleRate}Hz`);
+    const buffer = new ArrayBuffer(44 + samples.length * 2);
+    const view = new DataView(buffer);
+    const writeString = (offset2, str) => {
+      for (let i = 0; i < str.length; i++) {
+        view.setUint8(offset2 + i, str.charCodeAt(i));
+      }
+    };
+    writeString(0, "RIFF");
+    view.setUint32(4, 36 + samples.length * 2, true);
+    writeString(8, "WAVE");
+    writeString(12, "fmt ");
+    view.setUint32(16, 16, true);
+    view.setUint16(20, 1, true);
+    view.setUint16(22, 1, true);
+    view.setUint32(24, sampleRate, true);
+    view.setUint32(28, sampleRate * 2, true);
+    view.setUint16(32, 2, true);
+    view.setUint16(34, 16, true);
+    writeString(36, "data");
+    view.setUint32(40, samples.length * 2, true);
+    let offset = 44;
+    for (let i = 0; i < samples.length; i++) {
+      const sample = Math.max(-1, Math.min(1, samples[i]));
+      view.setInt16(offset, sample < 0 ? sample * 32768 : sample * 32767, true);
+      offset += 2;
+    }
+    const wavBlob = new Blob([view], { type: "audio/wav" });
+    console.log(`Created WAV blob: ${(wavBlob.size / 1024).toFixed(2)} KB`);
+    return wavBlob;
+  };
+  const processDictationAudio = async (audioData) => {
+    try {
+      console.log(`Processing dictation with audio data length: ${audioData.length}`);
+      console.log(`Using format: ${selectedFormat}`);
+      setIsProcessing(true);
+      const wavBlob = encodeWAV(audioData);
+      console.log(`Sending audio to dictation API (${wavBlob.size / 1024} KB)`);
+      const requestData = {
+        doctorName,
+        patientId,
+        sessionId,
+        language,
+        specialty
+      };
+      let requestBody;
+      let headers = {
+        "x-api-key": effectiveApiKey || ""
+      };
+      switch (selectedFormat) {
+        case "hl7":
+          requestBody = createHL7DictationRequest(
+            new File([wavBlob], "dictation.wav", { type: "audio/wav" }),
+            requestData
+          );
+          headers["x-request-format"] = "hl7";
+          headers["x-response-format"] = "hl7";
+          console.log("Created HL7-formatted dictation request");
+          break;
+        case "fhir":
+          requestBody = createFHIRDictationRequest(
+            new File([wavBlob], "dictation.wav", { type: "audio/wav" }),
+            requestData
+          );
+          headers["x-request-format"] = "fhir";
+          headers["x-response-format"] = "fhir";
+          console.log("Created FHIR-formatted dictation request");
+          break;
+        case "json":
+        default:
+          requestBody = new FormData();
+          requestBody.append("audio", wavBlob);
+          requestBody.append("doctorName", doctorName);
+          if (patientId) requestBody.append("patientId", patientId);
+          if (sessionId) requestBody.append("sessionId", sessionId);
+          if (language) requestBody.append("language", language);
+          if (specialty) requestBody.append("specialty", specialty);
+          headers["x-request-format"] = "json";
+          headers["x-response-format"] = "json";
+          console.log("Created JSON-formatted dictation request");
+          break;
+      }
+      console.log("Request headers:", headers);
+      console.log("Sending request to API...");
+      const response = await fetch(`${apiBaseUrl}/api/dictate`, {
+        method: "POST",
+        body: requestBody,
+        headers
+      });
+      console.log("API response status:", response.status);
+      if (!response.ok) {
+        throw new Error(`API request failed with status ${response.status}`);
+      }
+      let responseData;
+      const contentType = response.headers.get("content-type") || "";
+      if (selectedFormat === "json" || contentType.includes("application/json")) {
+        responseData = await response.json();
+      } else {
+        responseData = await response.text();
+      }
+      console.log("Raw API response data:", responseData);
+      const convertedData = convertDictationResponse(responseData, selectedFormat);
+      console.log("Converted response data:", convertedData);
+      if (convertedData.dictation) {
+        console.log("Dictation text received:", convertedData.dictation);
+        onDictationComplete(convertedData.dictation);
+      } else {
+        console.error("No dictation text in response");
+        setDictationError("No dictation text in response");
+      }
+    } catch (error) {
+      console.error("Error processing dictation audio:", error);
+      setDictationError("An error occurred while processing dictation");
+    } finally {
+      setIsProcessing(false);
+      setIsDictating(false);
+    }
+  };
+  return { startDictating, stopDictating, dictationError };
+};
+var useAudioDictation_default = useAudioDictation;
+var dictationStyles = `
+.dictation-button-recording {
+  background: linear-gradient(-45deg, #ee7752, #e73c7e, #23a6d5, #23d5ab);
+  background-size: 400% 400%;
+  animation: gradientShift 2s ease infinite;
+}
+
+@keyframes gradientShift {
+  0% { background-position: 0% 50%; }
+  50% { background-position: 100% 50%; }
+  100% { background-position: 0% 50%; }
+}
+
+.dictation-pulse {
+  animation: dictationPulse 1.5s ease-in-out infinite;
+}
+
+@keyframes dictationPulse {
+  0%, 100% { transform: scale(1); opacity: 1; }
+  50% { transform: scale(1.05); opacity: 0.8; }
+}
+
+.bg-blue-600 { background-color: rgb(37 99 235); }
+.hover\\:bg-blue-700:hover { background-color: rgb(29 78 216); }
+.bg-red-600 { background-color: rgb(220 38 38); }
+.hover\\:bg-red-700:hover { background-color: rgb(185 28 28); }
+.bg-gray-600 { background-color: rgb(75 85 99); }
+.hover\\:bg-gray-700:hover { background-color: rgb(55 65 81); }
+.text-white { color: rgb(255 255 255); }
+.flex { display: flex; }
+.items-center { align-items: center; }
+.justify-center { justify-content: center; }
+.space-x-2 > :not([hidden]) ~ :not([hidden]) { margin-left: 0.5rem; }
+.px-4 { padding-left: 1rem; padding-right: 1rem; }
+.py-2 { padding-top: 0.5rem; padding-bottom: 0.5rem; }
+.rounded-lg { border-radius: 0.5rem; }
+.rounded-full { border-radius: 9999px; }
+.transition-all { transition-property: all; transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); transition-duration: 150ms; }
+.duration-200 { transition-duration: 200ms; }
+.disabled\\:opacity-50:disabled { opacity: 0.5; }
+.disabled\\:cursor-not-allowed:disabled { cursor: not-allowed; }
+.h-5 { height: 1.25rem; }
+.w-5 { width: 1.25rem; }
+.h-6 { height: 1.5rem; }
+.w-6 { width: 1.5rem; }
+.text-sm { font-size: 0.875rem; line-height: 1.25rem; }
+.font-medium { font-weight: 500; }
+.animate-spin { animation: spin 1s linear infinite; }
+@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
+.bg-red-50 { background-color: rgb(254 242 242); }
+.border { border-width: 1px; }
+.border-red-200 { border-color: rgb(254 202 202); }
+.text-red-800 { color: rgb(153 27 27); }
+.text-red-700 { color: rgb(185 28 28); }
+.p-3 { padding: 0.75rem; }
+.mt-2 { margin-top: 0.5rem; }
+.text-xs { font-size: 0.75rem; line-height: 1rem; }
+`;
+var dictationStylesInjected = false;
+var injectDictationStyles = () => {
+  if (!dictationStylesInjected && typeof document !== "undefined") {
+    const styleElement = document.createElement("style");
+    styleElement.textContent = dictationStyles;
+    document.head.appendChild(styleElement);
+    dictationStylesInjected = true;
+  }
+};
+var AudioDictation = ({
+  apiKey,
+  apiBaseUrl,
+  appendMode = true,
+  doctorName = "doctor",
+  patientId,
+  sessionId,
+  language = "en",
+  specialty = "general",
+  selectedFormat = "json",
+  onDictationComplete,
+  className = "",
+  style,
+  buttonText,
+  placeholder = "Click to dictate..."
+}) => {
+  const [isDictating, setIsDictating] = React3__namespace.useState(false);
+  const [isProcessing, setIsProcessing] = React3__namespace.useState(false);
+  React3__namespace.useEffect(() => {
+    injectDictationStyles();
+  }, []);
+  const { startDictating, stopDictating, dictationError } = useAudioDictation_default({
+    setIsProcessing,
+    setIsDictating,
+    onDictationComplete,
+    apiKey,
+    apiBaseUrl,
+    appendMode,
+    doctorName,
+    patientId,
+    sessionId,
+    language,
+    specialty,
+    selectedFormat
+  });
+  const handleToggleDictation = (e) => {
+    e.preventDefault();
+    if (isProcessing) return;
+    if (isDictating) {
+      stopDictating();
+    } else {
+      startDictating();
+    }
+  };
+  React3__namespace.useEffect(() => {
+    const handleKeyDown = (e) => {
+      if (e.code === "Space" && !e.repeat && !isProcessing) {
+        e.preventDefault();
+        if (isDictating) {
+          stopDictating();
+        } else {
+          startDictating();
+        }
+      }
+    };
+    document.addEventListener("keydown", handleKeyDown);
+    return () => {
+      document.removeEventListener("keydown", handleKeyDown);
+    };
+  }, [isDictating, isProcessing, startDictating, stopDictating]);
+  const getButtonContent = () => {
+    if (isProcessing) {
+      return /* @__PURE__ */ jsxRuntime.jsx(jsxRuntime.Fragment, { children: /* @__PURE__ */ jsxRuntime.jsx(lucideReact.Loader2, { className: "h-5 w-5 animate-spin" }) });
+    }
+    if (isDictating) {
+      return /* @__PURE__ */ jsxRuntime.jsx(jsxRuntime.Fragment, { children: /* @__PURE__ */ jsxRuntime.jsx(lucideReact.Square, { className: "h-5 w-5" }) });
+    }
+    return /* @__PURE__ */ jsxRuntime.jsx(jsxRuntime.Fragment, { children: /* @__PURE__ */ jsxRuntime.jsx(lucideReact.Mic, { className: "h-5 w-5" }) });
+  };
+  const getButtonClass = () => {
+    const baseClass = "flex items-center justify-center space-x-2 px-4 py-2 rounded-lg font-medium transition-all duration-200 disabled:opacity-50 disabled:cursor-not-allowed";
+    if (className) {
+      return `${baseClass} ${className}`;
+    }
+    if (isProcessing) {
+      return `${baseClass} bg-gray-600 text-white`;
+    }
+    if (isDictating) {
+      return `${baseClass} dictation-button-recording text-white dictation-pulse`;
+    }
+    return `${baseClass} bg-blue-600 hover:bg-blue-700 text-white`;
+  };
+  if (dictationError) {
+    return /* @__PURE__ */ jsxRuntime.jsx("div", { className: "bg-red-50 border border-red-200 rounded-lg p-3", children: /* @__PURE__ */ jsxRuntime.jsxs("div", { className: "flex items-center space-x-2", children: [
+      /* @__PURE__ */ jsxRuntime.jsx(lucideReact.AlertTriangle, { className: "h-5 w-5 text-red-600" }),
+      /* @__PURE__ */ jsxRuntime.jsxs("div", { children: [
+        /* @__PURE__ */ jsxRuntime.jsx("p", { className: "text-sm font-medium text-red-800", children: "Dictation Error" }),
+        /* @__PURE__ */ jsxRuntime.jsx("p", { className: "text-xs text-red-700 mt-1", children: dictationError })
+      ] })
+    ] }) });
+  }
+  return /* @__PURE__ */ jsxRuntime.jsx("div", { style, children: /* @__PURE__ */ jsxRuntime.jsx(
+    "button",
+    {
+      className: getButtonClass(),
+      onClick: handleToggleDictation,
+      disabled: isProcessing,
+      title: isDictating ? "Click to stop dictating" : placeholder,
+      children: getButtonContent()
+    }
+  ) });
+};
+var AudioDictation_default = AudioDictation;
 
+exports.AudioDictation = AudioDictation_default;
 exports.AudioRecorder = AudioRecorder_default;
 //# sourceMappingURL=index.js.map
 //# sourceMappingURL=index.js.map
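Two implementation details are worth flagging from the hunk above. First, the shipped `AudioDictation` toggles recording on click or on a spacebar press (see `handleToggleDictation` and the `keydown` handler) rather than requiring press-and-hold, and its default tooltip is "Click to dictate...", which differs slightly from the push-to-talk wording in the README hunk. Second, `useAudioDictation` loads an AudioWorklet module from `/audio-processor.js` at the host app's root, and no such file appears in this diff: the hook expects it to post `{ audioBuffer }` messages while recording and a `{ command: "finalChunk" }` message after receiving `{ command: "stop" }`. Below is a minimal worklet sketch compatible with that message protocol; it is inferred from the dist code above, and the real file the package expects may differ:

```js
// audio-processor.js — hypothetical worklet matching the protocol the hook expects.
class AudioProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    this.stopped = false;
    // The hook posts { command: "stop" } from stopDictating().
    this.port.onmessage = (event) => {
      if (event.data.command === "stop") {
        this.stopped = true;
        // Tell the hook no more audio is coming.
        this.port.postMessage({ command: "finalChunk" });
      }
    };
  }

  process(inputs) {
    if (this.stopped) return false; // detach after stop
    const channel = inputs[0] && inputs[0][0];
    if (channel && channel.length > 0) {
      // Forward each 128-sample render quantum; the hook wraps it in a
      // Float32Array and accumulates the chunks until "finalChunk".
      this.port.postMessage({ audioBuffer: channel.slice(0) });
    }
    return true;
  }
}

registerProcessor("audio-processor", AudioProcessor);
```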