reneco-advanced-input-module 0.0.21 → 0.0.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/dist/cjs/loader.cjs.js +1 -1
  2. package/dist/cjs/voice-input-module.cjs.entry.js +274 -62
  3. package/dist/cjs/voice-input-module.cjs.entry.js.map +1 -1
  4. package/dist/cjs/voice-input-module.cjs.js +1 -1
  5. package/dist/cjs/voice-input-module.entry.cjs.js.map +1 -1
  6. package/dist/collection/components/voice-input-module/voice-input-module.js +87 -4
  7. package/dist/collection/components/voice-input-module/voice-input-module.js.map +1 -1
  8. package/dist/collection/services/audio-recorder.service.js +61 -44
  9. package/dist/collection/services/audio-recorder.service.js.map +1 -1
  10. package/dist/collection/services/llm.service.js +137 -8
  11. package/dist/collection/services/llm.service.js.map +1 -1
  12. package/dist/collection/services/speech-to-text.service.js +39 -5
  13. package/dist/collection/services/speech-to-text.service.js.map +1 -1
  14. package/dist/collection/types/service-providers.types.js +9 -1
  15. package/dist/collection/types/service-providers.types.js.map +1 -1
  16. package/dist/components/voice-input-module.js +281 -63
  17. package/dist/components/voice-input-module.js.map +1 -1
  18. package/dist/esm/loader.js +1 -1
  19. package/dist/esm/voice-input-module.entry.js +274 -62
  20. package/dist/esm/voice-input-module.entry.js.map +1 -1
  21. package/dist/esm/voice-input-module.js +1 -1
  22. package/dist/types/components/voice-input-module/voice-input-module.d.ts +3 -0
  23. package/dist/types/components.d.ts +18 -0
  24. package/dist/types/services/audio-recorder.service.d.ts +5 -0
  25. package/dist/types/services/llm.service.d.ts +22 -0
  26. package/dist/types/services/speech-to-text.service.d.ts +8 -0
  27. package/dist/types/types/service-providers.types.d.ts +6 -2
  28. package/dist/voice-input-module/p-0e2b9ca0.entry.js +3 -0
  29. package/dist/voice-input-module/p-0e2b9ca0.entry.js.map +1 -0
  30. package/dist/voice-input-module/voice-input-module.entry.esm.js.map +1 -1
  31. package/dist/voice-input-module/voice-input-module.esm.js +1 -1
  32. package/package.json +1 -1
  33. package/readme.md +81 -0
  34. package/www/build/p-0e2b9ca0.entry.js +3 -0
  35. package/www/build/p-0e2b9ca0.entry.js.map +1 -0
  36. package/www/build/p-812b92c7.js +2 -0
  37. package/www/build/voice-input-module.entry.esm.js.map +1 -1
  38. package/www/build/voice-input-module.esm.js +1 -1
  39. package/www/index.html +12 -1
  40. package/dist/voice-input-module/p-4e449895.entry.js +0 -3
  41. package/dist/voice-input-module/p-4e449895.entry.js.map +0 -1
  42. package/www/build/p-3a11e8d2.js +0 -2
  43. package/www/build/p-4e449895.entry.js +0 -3
  44. package/www/build/p-4e449895.entry.js.map +0 -1
@@ -6,7 +6,7 @@ var appGlobals = require('./app-globals-V2Kpy_OQ.js');
6
6
  const defineCustomElements = async (win, options) => {
7
7
  if (typeof window === 'undefined') return undefined;
8
8
  await appGlobals.globalScripts();
9
- return index.bootstrapLazy([["ocr-file-uploader.cjs",[[257,"ocr-file-uploader",{"batch":[4],"callback":[16],"jsonSchema":[1,"json-schema"],"theme":[1],"parsedTheme":[32]},null,{"theme":["parseTheme"]}]]],["voice-input-module.cjs",[[257,"voice-input-module",{"formJson":[1,"form-json"],"serviceConfig":[1,"service-config"],"apiKey":[1,"api-key"],"apiProxyUrl":[1,"api-proxy-url"],"transcriptionModel":[1,"transcription-model"],"completionModel":[1,"completion-model"],"context":[1],"classificationRootUrl":[1,"classification-root-url"],"language":[1],"inputTypes":[1,"input-types"],"theme":[1],"debug":[4],"renderForm":[4,"render-form"],"displayStatus":[4,"display-status"],"isRecording":[32],"isProcessing":[32],"statusMessage":[32],"hasError":[32],"transcription":[32],"filledData":[32],"debugInfo":[32],"isReadonlyMode":[32],"convertXmlToJson":[64],"convertJsonToXml":[64],"convertXmlToJsonLegacy":[64],"convertJsonToXmlLegacy":[64]},null,{"formJson":["initializeServices"],"serviceConfig":["initializeServices"],"theme":["initializeServices"]}]]]], options);
9
+ return index.bootstrapLazy([["ocr-file-uploader.cjs",[[257,"ocr-file-uploader",{"batch":[4],"callback":[16],"jsonSchema":[1,"json-schema"],"theme":[1],"parsedTheme":[32]},null,{"theme":["parseTheme"]}]]],["voice-input-module.cjs",[[257,"voice-input-module",{"formJson":[1,"form-json"],"serviceConfig":[1,"service-config"],"apiKey":[1,"api-key"],"apiProxyUrl":[1,"api-proxy-url"],"transcriptionModel":[1,"transcription-model"],"completionModel":[1,"completion-model"],"transcriptionProvider":[1,"transcription-provider"],"completionProvider":[1,"completion-provider"],"context":[1],"classificationRootUrl":[1,"classification-root-url"],"language":[1],"inputTypes":[1,"input-types"],"theme":[1],"debug":[4],"renderForm":[4,"render-form"],"displayStatus":[4,"display-status"],"isRecording":[32],"isProcessing":[32],"statusMessage":[32],"hasError":[32],"transcription":[32],"filledData":[32],"debugInfo":[32],"isReadonlyMode":[32],"convertXmlToJson":[64],"convertJsonToXml":[64],"convertXmlToJsonLegacy":[64],"convertJsonToXmlLegacy":[64]},null,{"formJson":["initializeServices"],"serviceConfig":["initializeServices"],"theme":["initializeServices"],"transcriptionProvider":["initializeServices"],"completionProvider":["initializeServices"],"transcriptionModel":["initializeServices"],"completionModel":["initializeServices"]}]]]], options);
10
10
  };
11
11
 
12
12
  exports.setNonce = index.setNonce;
@@ -2,69 +2,96 @@
2
2
 
3
3
  var index = require('./index-BTSzTkSZ.js');
4
4
 
5
+ const TRANSCRIPTION_MODELS = {
6
+ openai: ['gpt-4o-transcribe', 'gpt-4o-mini-transcribe', 'whisper-1'],
7
+ mistral: ['voxtral-mini-latest', 'voxtral-mini-transcribe-realtime-latest'],
8
+ };
9
+ const COMPLETION_MODELS = {
10
+ openai: ['gpt-5', 'gpt-5-mini', 'gpt-5.5', 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4o', 'gpt-4o-mini', 'o4-mini', 'gpt-5.2', 'gpt-5.3', 'gpt-5.4', 'gpt-5.4-mini', 'gpt-5.4-pro', 'gpt-5.4-nano'],
11
+ anthropic: ['claude-opus-4', 'claude-sonnet-4', 'claude-haiku-4', 'claude-3.7-sonnet', 'claude-3.5-sonnet', 'claude-opus-4.7', 'claude-opus-4.6', 'claude-opus-4.5', 'claude-sonnet-4.6', 'claude-sonnet-4.5'],
12
+ mistral: ['mistral-large-latest', 'mistral-medium-latest', 'mistral-small-latest', 'ministral', 'mistral-nemo'],
13
+ };
14
+
5
15
  class AudioRecorderService {
6
16
  constructor() {
7
17
  this.mediaRecorder = null;
8
18
  this.audioChunks = [];
9
19
  this.stream = null;
20
+ this.audioContext = null;
21
+ this.scriptProcessor = null;
22
+ this.pcmChunks = [];
23
+ this.sampleRate = 16000;
10
24
  }
11
25
  async startRecording() {
12
- try {
13
- // Check if the API exists before calling
14
- if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
15
- console.error('Failed to start recording:', 'Microphone access is not supported in this browser or the page is not served over HTTPS/localhost.');
16
- return; // Exit gracefully instead of throwing
17
- }
18
- this.stream = await navigator.mediaDevices.getUserMedia({
19
- audio: {
20
- echoCancellation: true,
21
- noiseSuppression: true,
22
- autoGainControl: true
23
- }
24
- });
25
- this.audioChunks = [];
26
- this.mediaRecorder = new MediaRecorder(this.stream, {
27
- mimeType: 'audio/webm;codecs=opus'
28
- });
29
- this.mediaRecorder.ondataavailable = (event) => {
30
- if (event.data && event.data.size > 0) {
31
- this.audioChunks.push(event.data);
32
- }
33
- };
34
- this.mediaRecorder.start(100); // Collect data every 100ms
35
- }
36
- catch (error) {
37
- console.error('Failed to start recording:', error);
26
+ if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
27
+ throw new Error('Microphone access is not supported in this browser or the page is not served over HTTPS/localhost.');
38
28
  }
29
+ this.stream = await navigator.mediaDevices.getUserMedia({
30
+ audio: { echoCancellation: true, noiseSuppression: true, autoGainControl: true }
31
+ });
32
+ this.audioContext = new AudioContext({ sampleRate: this.sampleRate });
33
+ const source = this.audioContext.createMediaStreamSource(this.stream);
34
+ this.scriptProcessor = this.audioContext.createScriptProcessor(4096, 1, 1);
35
+ this.pcmChunks = [];
36
+ this.scriptProcessor.onaudioprocess = (e) => {
37
+ const input = e.inputBuffer.getChannelData(0);
38
+ this.pcmChunks.push(new Float32Array(input));
39
+ };
40
+ source.connect(this.scriptProcessor);
41
+ this.scriptProcessor.connect(this.audioContext.destination);
39
42
  }
40
43
  async stopRecording() {
41
- return new Promise((resolve, reject) => {
42
- if (!this.mediaRecorder) {
43
- reject(new Error('No active recording found'));
44
- return;
44
+ if (!this.audioContext || !this.scriptProcessor) {
45
+ throw new Error('No active recording found');
46
+ }
47
+ this.scriptProcessor.disconnect();
48
+ await this.audioContext.close();
49
+ const wavBlob = this.encodeWav(this.pcmChunks, this.sampleRate);
50
+ this.cleanup();
51
+ return wavBlob;
52
+ }
53
+ encodeWav(chunks, sampleRate) {
54
+ const totalSamples = chunks.reduce((acc, c) => acc + c.length, 0);
55
+ const buffer = new ArrayBuffer(44 + totalSamples * 2);
56
+ const view = new DataView(buffer);
57
+ const writeStr = (offset, str) => {
58
+ for (let i = 0; i < str.length; i++)
59
+ view.setUint8(offset + i, str.charCodeAt(i));
60
+ };
61
+ writeStr(0, 'RIFF');
62
+ view.setUint32(4, 36 + totalSamples * 2, true);
63
+ writeStr(8, 'WAVE');
64
+ writeStr(12, 'fmt ');
65
+ view.setUint32(16, 16, true); // PCM chunk size
66
+ view.setUint16(20, 1, true); // PCM format
67
+ view.setUint16(22, 1, true); // mono
68
+ view.setUint32(24, sampleRate, true);
69
+ view.setUint32(28, sampleRate * 2, true); // byte rate
70
+ view.setUint16(32, 2, true); // block align
71
+ view.setUint16(34, 16, true); // bits per sample
72
+ writeStr(36, 'data');
73
+ view.setUint32(40, totalSamples * 2, true);
74
+ let offset = 44;
75
+ for (const chunk of chunks) {
76
+ for (let i = 0; i < chunk.length; i++) {
77
+ const s = Math.max(-1, Math.min(1, chunk[i]));
78
+ view.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
79
+ offset += 2;
45
80
  }
46
- this.mediaRecorder.onstop = () => {
47
- const audioBlob = new Blob(this.audioChunks, { type: 'audio/webm' });
48
- this.cleanup();
49
- resolve(audioBlob);
50
- };
51
- this.mediaRecorder.onerror = (event) => {
52
- reject(new Error(`Recording error: ${event}`));
53
- };
54
- this.mediaRecorder.stop();
55
- });
81
+ }
82
+ return new Blob([buffer], { type: 'audio/wav' });
56
83
  }
57
84
  isRecording() {
58
- var _a;
59
- return ((_a = this.mediaRecorder) === null || _a === void 0 ? void 0 : _a.state) === 'recording';
85
+ return this.scriptProcessor !== null;
60
86
  }
61
87
  cleanup() {
62
88
  if (this.stream) {
63
89
  this.stream.getTracks().forEach(track => track.stop());
64
90
  this.stream = null;
65
91
  }
66
- this.mediaRecorder = null;
67
- this.audioChunks = [];
92
+ this.audioContext = null;
93
+ this.scriptProcessor = null;
94
+ this.pcmChunks = [];
68
95
  }
69
96
  }
70
97
 
@@ -126,15 +153,49 @@ class WhisperSpeechToTextService {
126
153
  }
127
154
  }
128
155
  }
156
+ class MistralSpeechToTextService {
157
+ constructor(config) {
158
+ this.useProxy = (config === null || config === void 0 ? void 0 : config.useProxy) || false;
159
+ this.proxyUrl = (config === null || config === void 0 ? void 0 : config.proxyUrl) || 'http://localhost:8492';
160
+ this.model = (config === null || config === void 0 ? void 0 : config.model) || 'voxtral-mini-latest';
161
+ this.apiKey = this.useProxy ? '' : ((config === null || config === void 0 ? void 0 : config.apiKey) || '');
162
+ if (!this.useProxy && !this.apiKey) {
163
+ throw new Error('Mistral API key is required for speech-to-text service');
164
+ }
165
+ }
166
+ async transcribe(audioContent, lang = 'en') {
167
+ var _a;
168
+ try {
169
+ const formData = new FormData();
170
+ formData.append('file', audioContent);
171
+ formData.append('model', this.model);
172
+ formData.append('language', lang);
173
+ const endpoint = this.useProxy ? `${this.proxyUrl}/transcribe-mistral` : 'https://api.mistral.ai/v1/audio/transcriptions';
174
+ const headers = {};
175
+ if (!this.useProxy) {
176
+ headers['Authorization'] = `Bearer ${this.apiKey}`;
177
+ }
178
+ const response = await fetch(endpoint, { method: 'POST', headers, body: formData });
179
+ if (!response.ok) {
180
+ const err = await response.json().catch(() => ({ error: 'Unknown error' }));
181
+ throw new Error(`Transcription failed: ${((_a = err.error) === null || _a === void 0 ? void 0 : _a.message) || response.statusText}`);
182
+ }
183
+ const result = await response.json();
184
+ return result.text || '';
185
+ }
186
+ catch (error) {
187
+ throw new Error(`Mistral speech-to-text failed: ${error.message}`);
188
+ }
189
+ }
190
+ }
129
191
  class SpeechToTextServiceFactory {
130
192
  static create(config) {
131
193
  var _a;
132
- const provider = ((_a = config.speechToText) === null || _a === void 0 ? void 0 : _a.provider) || 'whisper';
194
+ const provider = ((_a = config.speechToText) === null || _a === void 0 ? void 0 : _a.provider) || 'openai';
133
195
  switch (provider) {
134
- case 'whisper':
135
- return new WhisperSpeechToTextService(config.speechToText);
136
- default:
137
- throw new Error(`Unsupported speech-to-text provider: ${provider}`);
196
+ case 'openai': return new WhisperSpeechToTextService(config.speechToText);
197
+ case 'mistral': return new MistralSpeechToTextService(config.speechToText);
198
+ default: throw new Error(`Unsupported speech-to-text provider: ${provider}`);
138
199
  }
139
200
  }
140
201
  }
@@ -326,7 +387,8 @@ class OpenAILLMService {
326
387
  `${field.readonly ? ', readonly' : ''}` +
327
388
  `${field.default !== undefined && field.default !== null && field.default !== '' ? ', default=' + field.default : ''}` +
328
389
  `${field.min && field.min !== "" ? ', min=' + field.min : ''}` +
329
- `${field.max && field.max !== "" ? ', max=' + field.max : ''}`;
390
+ `${field.max && field.max !== "" ? ', max=' + field.max : ''}` +
391
+ `${field.pattern && field.pattern !== "" ? ', pattern=' + field.pattern : ''}`;
330
392
  if (field.options && field.options.length > 0) {
331
393
  if (field.options.length < 50) {
332
394
  description += `) - options: ${field.options.join(', ')}`;
@@ -348,7 +410,8 @@ class OpenAILLMService {
348
410
  `${field.readonly ? ', readonly' : ''}` +
349
411
  `${field.default !== undefined && field.default !== null && field.default !== '' ? ', default=' + field.default : ''}` +
350
412
  `${field.min && field.min !== "" ? ', min=' + field.min : ''}` +
351
- `${field.max && field.max !== "" ? ', max=' + field.max : ''}`;
413
+ `${field.max && field.max !== "" ? ', max=' + field.max : ''}` +
414
+ `${field.pattern && field.pattern !== "" ? ', pattern=' + field.pattern : ''}`;
352
415
  if (field.options && field.options.length > 0) {
353
416
  if (field.options.length < 50) {
354
417
  description += `) - options: ${field.options.join(', ')}`;
@@ -492,12 +555,14 @@ class OpenAILLMService {
492
555
  7. Only include fields where relevant information is found
493
556
  8. The current GMT datetime is ${new Date().toGMTString()}
494
557
  9. Respect the constraints written between parenthesis for readonly status (readonly fields MUST NOT be filled with values), min and max values, whatever the transcription says
558
+ 9b. CRITICAL - DATE/DATETIME OUT OF RANGE: For date and datetime fields with min and/or max constraints, if the value from the transcription falls outside the allowed range, leave the field EMPTY. NEVER clamp, adjust, or substitute a boundary date - either the date is valid and within range, or the field is left empty
495
559
  10. IMPORTANT: Fields marked as "readonly" are presentation elements (headers, labels) that provide context but MUST NOT receive values
496
560
  11. CRITICAL - DUPLICATE FIELDS: When you see multiple fields with the same name but different IDs (e.g., "ID:field1 | Event date" and "ID:field2 | Event date"), these are DIFFERENT fields in DIFFERENT sections. The user may explicitly say which section they are filling (e.g., "for the first/second sub-form"). Listen VERY CAREFULLY to these contextual clues. Use the readonly headers to understand which section each field belongs to. If the user says "for the clinical sign, the event date is X", you must fill the Event date field that comes AFTER the "MC Clinical sign" header, NOT the first Event date you see
497
561
  12. Use readonly fields as contextual hints to understand form structure and field grouping, but never fill them
498
- 13. CRITICAL: For select fields, options like "No", "Non", "Non applicable", "Inconnu", "Unknown", "N/A" are VALID VALUES that can be selected. When the user says these words, treat them as legitimate option choices, NOT always as negations or refusals to answer. HOWEVER, for non-select fields (string, number, date, etc.), if the user says "I don't know", "unknown", "not known", "inconnu", "je ne sais pas", this usually means they have NO VALUE to provide, unless explicitely precised - leave the field empty, do NOT fill it with the literal text "unknown" or "I don't know"
562
+ 13. CRITICAL - UNKNOWN/NO/N-A VALUES: The interpretation of words like "Unknown", "Inconnu", "No", "Non", "N/A", "Non applicable" depends ENTIRELY on whether the field has those words as explicit options. Rule: if the user says "field X is Unknown" (or any similar phrasing) AND "Unknown" (or a close variant) exists in the options list for that field, then SELECT that option - it is a valid value. If the user says such words for a field that does NOT have them as options (string, number, date, etc.), then it means the user has no value to provide - leave the field empty, do NOT fill it with the literal text
499
563
  14. CRITICAL: Each field has a unique ID shown as "ID:xxx" at the start of its description. You MUST include this exact ID in your response for each field you fill. The ID is the ONLY way to distinguish between fields with the same name
500
564
  15. CRITICAL - DEFAULT VALUES: Some fields have default values shown as "default=xxx". If the user does NOT mention a specific value for that field in the transcription, you MUST return the default value. Default values should be preserved unless explicitly overridden by the user
565
+ 16. CRITICAL - PATTERN VALIDATION: Some fields have a regex pattern shown as "pattern=xxx". The value you extract MUST match this pattern exactly. If the transcription is too ambiguous to produce a value that matches the pattern, leave the field EMPTY
501
566
 
502
567
  Respond with JSON in this exact format: {"fields": [{"id": "field_id", "name": "field_name", "value": "extracted_value"}]}`;
503
568
  let userPrompt = `
@@ -566,10 +631,12 @@ class OpenAILLMService {
566
631
  8. Return the same schema structure with 'default' values filled
567
632
  9. The current GMT datetime is ${new Date().toGMTString()}
568
633
  10. Respect the constraints written between parenthesis for readonly status (readonly fields MUST NOT be filled with values), min and max values, whatever the transcription says
634
+ 10b. CRITICAL - DATE/DATETIME OUT OF RANGE: For date and datetime fields with min and/or max constraints, if the value from the transcription falls outside the allowed range, leave the field EMPTY. NEVER clamp, adjust, or substitute a boundary date - either the date is valid and within range, or the field is left empty
569
635
  11. IMPORTANT: Fields marked as "readonly" are presentation elements (headers, labels) that provide context but MUST NOT receive values
570
636
  12. CRITICAL - DUPLICATE FIELDS: When you see multiple fields with the same name but different IDs (e.g., "ID:field1 | Event date" and "ID:field2 | Event date"), these are DIFFERENT fields in DIFFERENT sections. The user will explicitly say which section they are filling (e.g., "for the first/second sub-form", "in the anamnesis section", "for the clinical sign"). Listen VERY CAREFULLY to these contextual clues. Use the readonly headers to understand which section each field belongs to. If the user says "for the clinical sign, the event date is X", you must fill the Event date field that comes AFTER the "MC Clinical sign" header, NOT the first Event date you see
571
637
  13. Use readonly fields as contextual hints to understand form structure and field grouping, but never fill them
572
- 14. CRITICAL: For select fields, options like "No", "Non", "Non applicable", "Inconnu", "Unknown", "N/A" are VALID VALUES that can be selected. When the user says these words, treat them as legitimate option choices, NOT always as negations or refusals to answer. HOWEVER, for non-select fields (string, number, date, etc.), if the user says "I don't know", "unknown", "not known", "inconnu", "je ne sais pas", this usually means they have NO VALUE to provide, unless explicitely precised - leave the field empty, do NOT fill it with the literal text "unknown" or "I don't know"
638
+ 14. CRITICAL - UNKNOWN/NO/N-A VALUES: The interpretation of words like "Unknown", "Inconnu", "No", "Non", "N/A", "Non applicable" depends ENTIRELY on whether the field has those words as explicit options. Rule: if the user says "field X is Unknown" (or any similar phrasing) AND "Unknown" (or a close variant) exists in the options list for that field, then SELECT that option - it is a valid value. If the user says such words for a field that does NOT have them as options (string, number, date, etc.), then it means the user has no value to provide - leave the field empty, do NOT fill it with the literal text
639
+ 15. CRITICAL - PATTERN VALIDATION: Some fields have a regex pattern shown as "pattern=xxx". The value you extract MUST match this pattern exactly. If the transcription is too ambiguous to produce a value that matches the pattern, leave the field EMPTY
573
640
 
574
641
  Respond with JSON in this exact format: {"schema": {...}}`;
575
642
  let userPrompt = `
@@ -633,15 +700,138 @@ class OpenAILLMService {
633
700
  }
634
701
  }
635
702
  }
703
+ class AnthropicLLMService {
704
+ constructor(config) {
705
+ this.useProxy = (config === null || config === void 0 ? void 0 : config.useProxy) || false;
706
+ this.proxyUrl = (config === null || config === void 0 ? void 0 : config.proxyUrl) || 'http://localhost:8492';
707
+ this.model = (config === null || config === void 0 ? void 0 : config.model) || 'claude-sonnet-4.5';
708
+ this.apiKey = this.useProxy ? '' : ((config === null || config === void 0 ? void 0 : config.apiKey) || '');
709
+ if (!this.useProxy && !this.apiKey) {
710
+ throw new Error('Anthropic API key is required');
711
+ }
712
+ }
713
+ async fillFormFromTranscription(transcription, schema) {
714
+ return this.fillForm(transcription, schema);
715
+ }
716
+ async fillFormFromJson(json, schema) {
717
+ return this.fillForm(json, schema);
718
+ }
719
+ async fillForm(data, schema) {
720
+ var _a;
721
+ const endpoint = this.useProxy ? `${this.proxyUrl}/complete-anthropic` : 'https://api.anthropic.com/v1/messages';
722
+ const headers = { 'Content-Type': 'application/json' };
723
+ if (!this.useProxy) {
724
+ headers['x-api-key'] = this.apiKey;
725
+ headers['anthropic-version'] = '2023-06-01';
726
+ }
727
+ const finalSchema = (schema === null || schema === void 0 ? void 0 : schema.fields) || (schema === null || schema === void 0 ? void 0 : schema.schema) || schema;
728
+ const systemPrompt = this.buildSystemPrompt();
729
+ const userPrompt = `Data: "${data}"
730
+
731
+ Form fields:
732
+ ${JSON.stringify(finalSchema, null, 2)}
733
+
734
+ Respond with JSON: {"fields": [{"id": "field_id", "name": "field_name", "value": "extracted_value"}]}`;
735
+ const body = this.useProxy
736
+ ? { model: this.model, messages: [{ role: 'user', content: userPrompt }], system: systemPrompt }
737
+ : { model: this.model, max_tokens: 4096, system: systemPrompt, messages: [{ role: 'user', content: userPrompt }] };
738
+ const response = await fetch(endpoint, { method: 'POST', headers, body: JSON.stringify(body) });
739
+ if (!response.ok) {
740
+ const err = await response.json().catch(() => ({ error: 'Unknown error' }));
741
+ throw new Error(`Anthropic API failed: ${((_a = err.error) === null || _a === void 0 ? void 0 : _a.message) || response.statusText}`);
742
+ }
743
+ const result = await response.json();
744
+ const content = this.useProxy ? result.choices[0].message.content : result.content[0].text;
745
+ return JSON.parse(content);
746
+ }
747
+ buildSystemPrompt() {
748
+ return `You are an expert form-filling assistant. Extract values from the input data and fill form fields.
749
+ Rules:
750
+ 1. Only extract values that can be confidently determined
751
+ 2. Respect field types (string, number, datetime, boolean, select)
752
+ 3. For datetime fields, use ISO format (YYYY-MM-DDTHH:MM)
753
+ 4. For date fields, use DD/MM/YYYY format
754
+ 5. For select fields, use exact option values from the provided choices
755
+ 6. Leave fields empty if no relevant information is found
756
+ 7. Fields marked readonly MUST NOT receive values
757
+ 8. DATE/DATETIME OUT OF RANGE: if a date falls outside min/max constraints, leave the field EMPTY
758
+ 9. PATTERN VALIDATION: if a field has a pattern, the value MUST match it exactly, otherwise leave empty
759
+ 10. UNKNOWN VALUES: if the user says "Unknown" and it exists as an option, select it; otherwise leave empty
760
+ Respond ONLY with valid JSON.`;
761
+ }
762
+ }
763
+ class MistralLLMService {
764
+ constructor(config) {
765
+ this.useProxy = (config === null || config === void 0 ? void 0 : config.useProxy) || false;
766
+ this.proxyUrl = (config === null || config === void 0 ? void 0 : config.proxyUrl) || 'http://localhost:8492';
767
+ this.model = (config === null || config === void 0 ? void 0 : config.model) || 'mistral-medium-latest';
768
+ this.apiKey = this.useProxy ? '' : ((config === null || config === void 0 ? void 0 : config.apiKey) || '');
769
+ if (!this.useProxy && !this.apiKey) {
770
+ throw new Error('Mistral API key is required');
771
+ }
772
+ }
773
+ async fillFormFromTranscription(transcription, schema) {
774
+ return this.fillForm(transcription, schema);
775
+ }
776
+ async fillFormFromJson(json, schema) {
777
+ return this.fillForm(json, schema);
778
+ }
779
+ async fillForm(data, schema) {
780
+ var _a;
781
+ const endpoint = this.useProxy ? `${this.proxyUrl}/complete-mistral` : 'https://api.mistral.ai/v1/chat/completions';
782
+ const headers = { 'Content-Type': 'application/json' };
783
+ if (!this.useProxy) {
784
+ headers['Authorization'] = `Bearer ${this.apiKey}`;
785
+ }
786
+ const finalSchema = (schema === null || schema === void 0 ? void 0 : schema.fields) || (schema === null || schema === void 0 ? void 0 : schema.schema) || schema;
787
+ const systemPrompt = this.buildSystemPrompt();
788
+ const userPrompt = `Data: "${data}"
789
+
790
+ Form fields:
791
+ ${JSON.stringify(finalSchema, null, 2)}
792
+
793
+ Respond with JSON: {"fields": [{"id": "field_id", "name": "field_name", "value": "extracted_value"}]}`;
794
+ const response = await fetch(endpoint, {
795
+ method: 'POST',
796
+ headers,
797
+ body: JSON.stringify({
798
+ model: this.model,
799
+ messages: [{ role: 'system', content: systemPrompt }, { role: 'user', content: userPrompt }],
800
+ response_format: { type: 'json_object' },
801
+ }),
802
+ });
803
+ if (!response.ok) {
804
+ const err = await response.json().catch(() => ({ error: 'Unknown error' }));
805
+ throw new Error(`Mistral API failed: ${((_a = err.error) === null || _a === void 0 ? void 0 : _a.message) || response.statusText}`);
806
+ }
807
+ const result = await response.json();
808
+ return JSON.parse(result.choices[0].message.content);
809
+ }
810
+ buildSystemPrompt() {
811
+ return `You are an expert form-filling assistant. Extract values from the input data and fill form fields.
812
+ Rules:
813
+ 1. Only extract values that can be confidently determined
814
+ 2. Respect field types (string, number, datetime, boolean, select)
815
+ 3. For datetime fields, use ISO format (YYYY-MM-DDTHH:MM)
816
+ 4. For date fields, use DD/MM/YYYY format
817
+ 5. For select fields, use exact option values from the provided choices
818
+ 6. Leave fields empty if no relevant information is found
819
+ 7. Fields marked readonly MUST NOT receive values
820
+ 8. DATE/DATETIME OUT OF RANGE: if a date falls outside min/max constraints, leave the field EMPTY
821
+ 9. PATTERN VALIDATION: if a field has a pattern, the value MUST match it exactly, otherwise leave empty
822
+ 10. UNKNOWN VALUES: if the user says "Unknown" and it exists as an option, select it; otherwise leave empty
823
+ Respond ONLY with valid JSON.`;
824
+ }
825
+ }
636
826
  class LLMServiceFactory {
637
827
  static create(config) {
638
828
  var _a;
639
829
  const provider = ((_a = config.llm) === null || _a === void 0 ? void 0 : _a.provider) || 'openai';
640
830
  switch (provider) {
641
- case 'openai':
642
- return new OpenAILLMService(config.llm);
643
- default:
644
- throw new Error(`Unsupported LLM provider: ${provider}`);
831
+ case 'openai': return new OpenAILLMService(config.llm);
832
+ case 'anthropic': return new AnthropicLLMService(config.llm);
833
+ case 'mistral': return new MistralLLMService(config.llm);
834
+ default: throw new Error(`Unsupported LLM provider: ${provider}`);
645
835
  }
646
836
  }
647
837
  }
@@ -4161,6 +4351,8 @@ const VoiceFormRecorder = class {
4161
4351
  this.apiProxyUrl = 'http://localhost:8492';
4162
4352
  this.transcriptionModel = 'gpt-4o-transcribe';
4163
4353
  this.completionModel = 'gpt-5-mini';
4354
+ this.transcriptionProvider = 'openai';
4355
+ this.completionProvider = 'openai';
4164
4356
  this.context = undefined;
4165
4357
  this.classificationRootUrl = 'http://localhost';
4166
4358
  this.language = 'en';
@@ -4208,17 +4400,33 @@ const VoiceFormRecorder = class {
4208
4400
  this.updateDebugInfo(errorMessage, { error: errorMessage });
4209
4401
  }
4210
4402
  else {
4403
+ // Validate transcription provider/model
4404
+ const allowedTranscriptionModels = TRANSCRIPTION_MODELS[this.transcriptionProvider];
4405
+ if (!allowedTranscriptionModels) {
4406
+ throw new Error(`Unsupported transcription provider: '${this.transcriptionProvider}'. Allowed: openai, mistral`);
4407
+ }
4408
+ if (!allowedTranscriptionModels.includes(this.transcriptionModel)) {
4409
+ throw new Error(`Model '${this.transcriptionModel}' is not allowed for transcription provider '${this.transcriptionProvider}'. Allowed: ${allowedTranscriptionModels.join(', ')}`);
4410
+ }
4411
+ // Validate completion provider/model
4412
+ const allowedCompletionModels = COMPLETION_MODELS[this.completionProvider];
4413
+ if (!allowedCompletionModels) {
4414
+ throw new Error(`Unsupported completion provider: '${this.completionProvider}'. Allowed: openai, anthropic, mistral`);
4415
+ }
4416
+ if (!allowedCompletionModels.includes(this.completionModel)) {
4417
+ throw new Error(`Model '${this.completionModel}' is not allowed for completion provider '${this.completionProvider}'. Allowed: ${allowedCompletionModels.join(', ')}`);
4418
+ }
4211
4419
  // Parse form schema
4212
4420
  this.parsedSchema = JSON.parse(this.formJson || '{}');
4213
4421
  // Parse service configuration
4214
4422
  this.parsedConfig = JSON.parse(this.serviceConfig || '{}');
4215
4423
  // Add API key to config if provided via prop
4216
4424
  if (this.apiKey) {
4217
- this.parsedConfig = Object.assign(Object.assign({}, this.parsedConfig), { speechToText: Object.assign(Object.assign({}, this.parsedConfig.speechToText), { apiKey: this.apiKey, model: this.transcriptionModel }), llm: Object.assign(Object.assign({}, this.parsedConfig.llm), { apiKey: this.apiKey, model: this.completionModel }) });
4425
+ this.parsedConfig = Object.assign(Object.assign({}, this.parsedConfig), { speechToText: Object.assign(Object.assign({}, this.parsedConfig.speechToText), { provider: this.transcriptionProvider, apiKey: this.apiKey, model: this.transcriptionModel }), llm: Object.assign(Object.assign({}, this.parsedConfig.llm), { provider: this.completionProvider, apiKey: this.apiKey, model: this.completionModel }) });
4218
4426
  }
4219
4427
  else {
4220
4428
  // Use proxy API if no API key provided
4221
- this.parsedConfig = Object.assign(Object.assign({}, this.parsedConfig), { speechToText: Object.assign(Object.assign({}, this.parsedConfig.speechToText), { useProxy: true, proxyUrl: this.apiProxyUrl, model: this.transcriptionModel }), llm: Object.assign(Object.assign({}, this.parsedConfig.llm), { useProxy: true, proxyUrl: this.apiProxyUrl, model: this.completionModel }) });
4429
+ this.parsedConfig = Object.assign(Object.assign({}, this.parsedConfig), { speechToText: Object.assign(Object.assign({}, this.parsedConfig.speechToText), { provider: this.transcriptionProvider, useProxy: true, proxyUrl: this.apiProxyUrl, model: this.transcriptionModel }), llm: Object.assign(Object.assign({}, this.parsedConfig.llm), { provider: this.completionProvider, useProxy: true, proxyUrl: this.apiProxyUrl, model: this.completionModel }) });
4222
4430
  }
4223
4431
  // Initialize services
4224
4432
  this.speechToTextService = SpeechToTextServiceFactory.create(this.parsedConfig);
@@ -4384,7 +4592,7 @@ const VoiceFormRecorder = class {
4384
4592
  });
4385
4593
  // Stop recording and get audio blob
4386
4594
  const audioBlob = await this.audioRecorder.stopRecording();
4387
- const audioContent = new File([audioBlob], 'audio.webm', { type: 'audio/webm' });
4595
+ const audioContent = new File([audioBlob], 'audio.wav', { type: 'audio/wav' });
4388
4596
  this.processAudioContent(audioContent);
4389
4597
  }
4390
4598
  catch (error) {
@@ -4889,12 +5097,16 @@ const VoiceFormRecorder = class {
4889
5097
  render() {
4890
5098
  const containerStyle = this.getContainerStyle();
4891
5099
  const statusStyle = this.getStatusStyle();
4892
- return (index.h("div", { key: '8503450dbd4de2cb91172bd2bdbdf16a0c4cdce0' }, index.h("div", { key: '8b3f553574a64754312f68f2abe0d64f57c56d3e', class: "voice-recorder-container" + (this.debug || this.renderForm ? "-debug" : ""), style: containerStyle }, index.h("div", { key: '3d08a37d611bc91dbe11c065c633f0d36c8b362e', class: "row-audio-area" }, this.renderRecordButton(), this.renderUploadRecordButton(), this.renderUploadButton()), this.displayStatus ? index.h("div", { class: "status-text", style: statusStyle }, this.statusMessage) : "", this.renderForm ? this.renderFormPreview() : "", this.debug ? this.renderDebugPanel() : "")));
5100
+ return (index.h("div", { key: '8378e98f02e5b7482929d255c5ec12ff8c2731e4' }, index.h("div", { key: 'b8a0d873bd4e1b4c8936747c0919ac8c4c15301b', class: "voice-recorder-container" + (this.debug || this.renderForm ? "-debug" : ""), style: containerStyle }, index.h("div", { key: '86e7783e3686db378ee16aa91a640f33c3255923', class: "row-audio-area" }, this.renderRecordButton(), this.renderUploadRecordButton(), this.renderUploadButton()), this.displayStatus ? index.h("div", { class: "status-text", style: statusStyle }, this.statusMessage) : "", this.renderForm ? this.renderFormPreview() : "", this.debug ? this.renderDebugPanel() : "")));
4893
5101
  }
4894
5102
  static get watchers() { return {
4895
5103
  "formJson": ["initializeServices"],
4896
5104
  "serviceConfig": ["initializeServices"],
4897
- "theme": ["initializeServices"]
5105
+ "theme": ["initializeServices"],
5106
+ "transcriptionProvider": ["initializeServices"],
5107
+ "completionProvider": ["initializeServices"],
5108
+ "transcriptionModel": ["initializeServices"],
5109
+ "completionModel": ["initializeServices"]
4898
5110
  }; }
4899
5111
  };
4900
5112
  VoiceFormRecorder.style = voiceInputModuleCss;