reneco-advanced-input-module 0.0.21 → 0.0.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. package/dist/cjs/loader.cjs.js +1 -1
  2. package/dist/cjs/voice-input-module.cjs.entry.js +274 -62
  3. package/dist/cjs/voice-input-module.cjs.entry.js.map +1 -1
  4. package/dist/cjs/voice-input-module.cjs.js +1 -1
  5. package/dist/cjs/voice-input-module.entry.cjs.js.map +1 -1
  6. package/dist/collection/components/voice-input-module/voice-input-module.js +87 -4
  7. package/dist/collection/components/voice-input-module/voice-input-module.js.map +1 -1
  8. package/dist/collection/services/audio-recorder.service.js +61 -44
  9. package/dist/collection/services/audio-recorder.service.js.map +1 -1
  10. package/dist/collection/services/llm.service.js +137 -8
  11. package/dist/collection/services/llm.service.js.map +1 -1
  12. package/dist/collection/services/speech-to-text.service.js +39 -5
  13. package/dist/collection/services/speech-to-text.service.js.map +1 -1
  14. package/dist/collection/types/service-providers.types.js +9 -1
  15. package/dist/collection/types/service-providers.types.js.map +1 -1
  16. package/dist/components/voice-input-module.js +281 -63
  17. package/dist/components/voice-input-module.js.map +1 -1
  18. package/dist/esm/loader.js +1 -1
  19. package/dist/esm/voice-input-module.entry.js +274 -62
  20. package/dist/esm/voice-input-module.entry.js.map +1 -1
  21. package/dist/esm/voice-input-module.js +1 -1
  22. package/dist/types/components/voice-input-module/voice-input-module.d.ts +3 -0
  23. package/dist/types/components.d.ts +18 -0
  24. package/dist/types/services/audio-recorder.service.d.ts +5 -0
  25. package/dist/types/services/llm.service.d.ts +22 -0
  26. package/dist/types/services/speech-to-text.service.d.ts +8 -0
  27. package/dist/types/types/service-providers.types.d.ts +6 -2
  28. package/dist/voice-input-module/p-0e2b9ca0.entry.js +3 -0
  29. package/dist/voice-input-module/p-0e2b9ca0.entry.js.map +1 -0
  30. package/dist/voice-input-module/voice-input-module.entry.esm.js.map +1 -1
  31. package/dist/voice-input-module/voice-input-module.esm.js +1 -1
  32. package/package.json +1 -1
  33. package/readme.md +81 -0
  34. package/www/build/p-0e2b9ca0.entry.js +3 -0
  35. package/www/build/p-0e2b9ca0.entry.js.map +1 -0
  36. package/www/build/p-812b92c7.js +2 -0
  37. package/www/build/voice-input-module.entry.esm.js.map +1 -1
  38. package/www/build/voice-input-module.esm.js +1 -1
  39. package/www/index.html +12 -1
  40. package/dist/voice-input-module/p-4e449895.entry.js +0 -3
  41. package/dist/voice-input-module/p-4e449895.entry.js.map +0 -1
  42. package/www/build/p-3a11e8d2.js +0 -2
  43. package/www/build/p-4e449895.entry.js +0 -3
  44. package/www/build/p-4e449895.entry.js.map +0 -1
@@ -56,15 +56,49 @@ export class WhisperSpeechToTextService {
56
56
  }
57
57
  }
58
58
  }
59
+ export class MistralSpeechToTextService {
60
+ constructor(config) {
61
+ this.useProxy = (config === null || config === void 0 ? void 0 : config.useProxy) || false;
62
+ this.proxyUrl = (config === null || config === void 0 ? void 0 : config.proxyUrl) || 'http://localhost:8492';
63
+ this.model = (config === null || config === void 0 ? void 0 : config.model) || 'voxtral-mini-latest';
64
+ this.apiKey = this.useProxy ? '' : ((config === null || config === void 0 ? void 0 : config.apiKey) || '');
65
+ if (!this.useProxy && !this.apiKey) {
66
+ throw new Error('Mistral API key is required for speech-to-text service');
67
+ }
68
+ }
69
+ async transcribe(audioContent, lang = 'en') {
70
+ var _a;
71
+ try {
72
+ const formData = new FormData();
73
+ formData.append('file', audioContent);
74
+ formData.append('model', this.model);
75
+ formData.append('language', lang);
76
+ const endpoint = this.useProxy ? `${this.proxyUrl}/transcribe-mistral` : 'https://api.mistral.ai/v1/audio/transcriptions';
77
+ const headers = {};
78
+ if (!this.useProxy) {
79
+ headers['Authorization'] = `Bearer ${this.apiKey}`;
80
+ }
81
+ const response = await fetch(endpoint, { method: 'POST', headers, body: formData });
82
+ if (!response.ok) {
83
+ const err = await response.json().catch(() => ({ error: 'Unknown error' }));
84
+ throw new Error(`Transcription failed: ${((_a = err.error) === null || _a === void 0 ? void 0 : _a.message) || response.statusText}`);
85
+ }
86
+ const result = await response.json();
87
+ return result.text || '';
88
+ }
89
+ catch (error) {
90
+ throw new Error(`Mistral speech-to-text failed: ${error.message}`);
91
+ }
92
+ }
93
+ }
59
94
  export class SpeechToTextServiceFactory {
60
95
  static create(config) {
61
96
  var _a;
62
- const provider = ((_a = config.speechToText) === null || _a === void 0 ? void 0 : _a.provider) || 'whisper';
97
+ const provider = ((_a = config.speechToText) === null || _a === void 0 ? void 0 : _a.provider) || 'openai';
63
98
  switch (provider) {
64
- case 'whisper':
65
- return new WhisperSpeechToTextService(config.speechToText);
66
- default:
67
- throw new Error(`Unsupported speech-to-text provider: ${provider}`);
99
+ case 'openai': return new WhisperSpeechToTextService(config.speechToText);
100
+ case 'mistral': return new MistralSpeechToTextService(config.speechToText);
101
+ default: throw new Error(`Unsupported speech-to-text provider: ${provider}`);
68
102
  }
69
103
  }
70
104
  }
@@ -1 +1 @@
1
- {"version":3,"file":"speech-to-text.service.js","sourceRoot":"","sources":["../../src/services/speech-to-text.service.ts"],"names":[],"mappings":"AAEA,MAAM,OAAO,0BAA0B;IAOrC,YAAY,MAA6C;QACvD,IAAI,CAAC,QAAQ,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,QAAQ,KAAI,KAAK,CAAC;QAC1C,IAAI,CAAC,QAAQ,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,QAAQ,KAAI,uBAAuB,CAAC;QAC5D,IAAI,CAAC,KAAK,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,KAAK,KAAI,mBAAmB,CAAC;QAElD,IAAI,IAAI,CAAC,QAAQ,EAAE,CAAC;YAClB,mCAAmC;YACnC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC;YACjB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,QAAQ,CAAC;QAC/B,CAAC;aAAM,CAAC;YACN,+BAA+B;YAC/B,IAAI,CAAC,MAAM,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,MAAM,KAAI,IAAI,CAAC,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,EAAE,CAAC;YACpF,IAAI,CAAC,OAAO,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,OAAO,KAAI,2BAA2B,CAAC;YAE9D,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;gBACjB,MAAM,IAAI,KAAK,CAAC,gDAAgD,CAAC,CAAC;YACpE,CAAC;QACH,CAAC;IACH,CAAC;IAEO,sBAAsB,CAAC,IAAY;QACzC,oEAAoE;QACpE,IAAI,OAAO,OAAO,KAAK,WAAW,IAAI,OAAO,CAAC,GAAG,EAAE,CAAC;YAClD,OAAO,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;QAC3B,CAAC;QACD,sEAAsE;QACtE,OAAQ,MAAc,CAAC,IAAI,CAAC,IAAI,SAAS,CAAC;IAC5C,CAAC;IAED,KAAK,CAAC,UAAU,CAAC,YAAkB,EAAE,OAAe,IAAI;;QACtD,IAAI,CAAC;YACH,MAAM,QAAQ,GAAG,IAAI,QAAQ,EAAE,CAAC;YAEhC,QAAQ,CAAC,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAAC;YACtC,QAAQ,CAAC,MAAM,CAAC,OAAO,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC;YACrC,QAAQ,CAAC,MAAM,CAAC,UAAU,EAAE,IAAI,CAAC,CAAC;YAClC,QAAQ,CAAC,MAAM,CAAC,iBAAiB,EAAE,MAAM,CAAC,CAAC;YAC3C,QAAQ,CAAC,MAAM,CAAC,mBAAmB,EAAE,MAAM,CAAC,CAAC;YAE7C,MAAM,QAAQ,GAAG,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,OAAO,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,OAAO,uBAAuB,CAAC;YACvG,MAAM,OAAO,GAAQ,EAAE,CAAC;YAExB,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC;gBACnB,OAAO,CAAC,eAAe,CAAC,GAAG,UAAU,IAAI,CAAC,MAAM,EAAE,CAAC;YACrD,CAAC;YAED,MAAM,QAAQ,GAAG,MAAM,KAAK,CAAC,QAAQ,EAAE;gBACrC,MAAM,EAAE,MAAM;gBACd,OAAO,EAAE,OAAO;gBAChB,IAAI,EAAE,QAAQ;aACf,CAAC,CAAC;YAEH,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,CAAC;gBACjB,MAAM,SAAS,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC
,KAAK,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,KAAK,EAAE,eAAe,EAAE,CAAC,CAAC,CAAC;gBAClF,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAA,MAAA,SAAS,CAAC,KAAK,0CAAE,OAAO,KAAI,QAAQ,CAAC,UAAU,EAAE,CAAC,CAAC;YAC9F,CAAC;YAED,MAAM,MAAM,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;YACrC,OAAO,MAAM,CAAC,IAAI,IAAI,EAAE,CAAC;QAC3B,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,MAAM,IAAI,KAAK,CAAC,wCAAwC,KAAK,CAAC,OAAO,EAAE,CAAC,CAAC;QAC3E,CAAC;IACH,CAAC;CACF;AAED,MAAM,OAAO,0BAA0B;IACrC,MAAM,CAAC,MAAM,CAAC,MAA6B;;QACzC,MAAM,QAAQ,GAAG,CAAA,MAAA,MAAM,CAAC,YAAY,0CAAE,QAAQ,KAAI,SAAS,CAAC;QAE5D,QAAQ,QAAQ,EAAE,CAAC;YACjB,KAAK,SAAS;gBACZ,OAAO,IAAI,0BAA0B,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC;YAC7D;gBACE,MAAM,IAAI,KAAK,CAAC,wCAAwC,QAAQ,EAAE,CAAC,CAAC;QACxE,CAAC;IACH,CAAC;CACF","sourcesContent":["import { SpeechToTextProvider, ServiceProviderConfig } from '../types/service-providers.types';\r\n\r\nexport class WhisperSpeechToTextService implements SpeechToTextProvider {\r\n private apiKey: string;\r\n private baseUrl: string;\r\n private useProxy: boolean;\r\n private proxyUrl: string;\r\n private model: string;\r\n\r\n constructor(config: ServiceProviderConfig['speechToText']) {\r\n this.useProxy = config?.useProxy || false;\r\n this.proxyUrl = config?.proxyUrl || 'http://localhost:8492';\r\n this.model = config?.model || 'gpt-4o-transcribe';\r\n \r\n if (this.useProxy) {\r\n // Mode proxy: pas besoin d'API key\r\n this.apiKey = '';\r\n this.baseUrl = this.proxyUrl;\r\n } else {\r\n // Mode direct: API key requise\r\n this.apiKey = config?.apiKey || this.getEnvironmentVariable('OPENAI_API_KEY') || '';\r\n this.baseUrl = config?.baseUrl || 'https://api.openai.com/v1';\r\n \r\n if (!this.apiKey) {\r\n throw new Error('OpenAI API key is required for Whisper service');\r\n }\r\n }\r\n }\r\n\r\n private getEnvironmentVariable(name: string): string | undefined {\r\n // In browser environment, we might get env vars through other means\r\n if (typeof process !== 'undefined' && process.env) {\r\n return process.env[name];\r\n }\r\n // Check if it's 
available as a global variable or through other means\r\n return (window as any)[name] || undefined;\r\n }\r\n\r\n async transcribe(audioContent: File, lang: string = 'en'): Promise<string> {\r\n try {\r\n const formData = new FormData();\r\n \r\n formData.append('file', audioContent);\r\n formData.append('model', this.model);\r\n formData.append('language', lang);\r\n formData.append('response_format', 'json');\r\n formData.append('max_output_tokens', '2000');\r\n\r\n const endpoint = this.useProxy ? `${this.baseUrl}/transcribe` : `${this.baseUrl}/audio/transcriptions`;\r\n const headers: any = {};\r\n \r\n if (!this.useProxy) {\r\n headers['Authorization'] = `Bearer ${this.apiKey}`;\r\n }\r\n\r\n const response = await fetch(endpoint, {\r\n method: 'POST',\r\n headers: headers,\r\n body: formData,\r\n });\r\n\r\n if (!response.ok) {\r\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' }));\r\n throw new Error(`Transcription failed: ${errorData.error?.message || response.statusText}`);\r\n }\r\n\r\n const result = await response.json();\r\n return result.text || '';\r\n } catch (error) {\r\n throw new Error(`Speech-to-text transcription failed: ${error.message}`);\r\n }\r\n }\r\n}\r\n\r\nexport class SpeechToTextServiceFactory {\r\n static create(config: ServiceProviderConfig): SpeechToTextProvider {\r\n const provider = config.speechToText?.provider || 'whisper';\r\n \r\n switch (provider) {\r\n case 'whisper':\r\n return new WhisperSpeechToTextService(config.speechToText);\r\n default:\r\n throw new Error(`Unsupported speech-to-text provider: ${provider}`);\r\n }\r\n }\r\n}\r\n"]}
1
+ {"version":3,"file":"speech-to-text.service.js","sourceRoot":"","sources":["../../src/services/speech-to-text.service.ts"],"names":[],"mappings":"AAEA,MAAM,OAAO,0BAA0B;IAOrC,YAAY,MAA6C;QACvD,IAAI,CAAC,QAAQ,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,QAAQ,KAAI,KAAK,CAAC;QAC1C,IAAI,CAAC,QAAQ,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,QAAQ,KAAI,uBAAuB,CAAC;QAC5D,IAAI,CAAC,KAAK,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,KAAK,KAAI,mBAAmB,CAAC;QAElD,IAAI,IAAI,CAAC,QAAQ,EAAE,CAAC;YAClB,mCAAmC;YACnC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC;YACjB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,QAAQ,CAAC;QAC/B,CAAC;aAAM,CAAC;YACN,+BAA+B;YAC/B,IAAI,CAAC,MAAM,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,MAAM,KAAI,IAAI,CAAC,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,EAAE,CAAC;YACpF,IAAI,CAAC,OAAO,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,OAAO,KAAI,2BAA2B,CAAC;YAE9D,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;gBACjB,MAAM,IAAI,KAAK,CAAC,gDAAgD,CAAC,CAAC;YACpE,CAAC;QACH,CAAC;IACH,CAAC;IAEO,sBAAsB,CAAC,IAAY;QACzC,oEAAoE;QACpE,IAAI,OAAO,OAAO,KAAK,WAAW,IAAI,OAAO,CAAC,GAAG,EAAE,CAAC;YAClD,OAAO,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;QAC3B,CAAC;QACD,sEAAsE;QACtE,OAAQ,MAAc,CAAC,IAAI,CAAC,IAAI,SAAS,CAAC;IAC5C,CAAC;IAED,KAAK,CAAC,UAAU,CAAC,YAAkB,EAAE,OAAe,IAAI;;QACtD,IAAI,CAAC;YACH,MAAM,QAAQ,GAAG,IAAI,QAAQ,EAAE,CAAC;YAEhC,QAAQ,CAAC,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAAC;YACtC,QAAQ,CAAC,MAAM,CAAC,OAAO,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC;YACrC,QAAQ,CAAC,MAAM,CAAC,UAAU,EAAE,IAAI,CAAC,CAAC;YAClC,QAAQ,CAAC,MAAM,CAAC,iBAAiB,EAAE,MAAM,CAAC,CAAC;YAC3C,QAAQ,CAAC,MAAM,CAAC,mBAAmB,EAAE,MAAM,CAAC,CAAC;YAE7C,MAAM,QAAQ,GAAG,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,OAAO,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,OAAO,uBAAuB,CAAC;YACvG,MAAM,OAAO,GAAQ,EAAE,CAAC;YAExB,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC;gBACnB,OAAO,CAAC,eAAe,CAAC,GAAG,UAAU,IAAI,CAAC,MAAM,EAAE,CAAC;YACrD,CAAC;YAED,MAAM,QAAQ,GAAG,MAAM,KAAK,CAAC,QAAQ,EAAE;gBACrC,MAAM,EAAE,MAAM;gBACd,OAAO,EAAE,OAAO;gBAChB,IAAI,EAAE,QAAQ;aACf,CAAC,CAAC;YAEH,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,CAAC;gBACjB,MAAM,SAAS,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC
,KAAK,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,KAAK,EAAE,eAAe,EAAE,CAAC,CAAC,CAAC;gBAClF,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAA,MAAA,SAAS,CAAC,KAAK,0CAAE,OAAO,KAAI,QAAQ,CAAC,UAAU,EAAE,CAAC,CAAC;YAC9F,CAAC;YAED,MAAM,MAAM,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;YACrC,OAAO,MAAM,CAAC,IAAI,IAAI,EAAE,CAAC;QAC3B,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,MAAM,IAAI,KAAK,CAAC,wCAAwC,KAAK,CAAC,OAAO,EAAE,CAAC,CAAC;QAC3E,CAAC;IACH,CAAC;CACF;AAED,MAAM,OAAO,0BAA0B;IAMrC,YAAY,MAA6C;QACvD,IAAI,CAAC,QAAQ,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,QAAQ,KAAI,KAAK,CAAC;QAC1C,IAAI,CAAC,QAAQ,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,QAAQ,KAAI,uBAAuB,CAAC;QAC5D,IAAI,CAAC,KAAK,GAAG,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,KAAK,KAAI,qBAAqB,CAAC;QACpD,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,MAAM,KAAI,EAAE,CAAC,CAAC;QAC1D,IAAI,CAAC,IAAI,CAAC,QAAQ,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACnC,MAAM,IAAI,KAAK,CAAC,wDAAwD,CAAC,CAAC;QAC5E,CAAC;IACH,CAAC;IAED,KAAK,CAAC,UAAU,CAAC,YAAkB,EAAE,OAAe,IAAI;;QACtD,IAAI,CAAC;YACH,MAAM,QAAQ,GAAG,IAAI,QAAQ,EAAE,CAAC;YAChC,QAAQ,CAAC,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAAC;YACtC,QAAQ,CAAC,MAAM,CAAC,OAAO,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC;YACrC,QAAQ,CAAC,MAAM,CAAC,UAAU,EAAE,IAAI,CAAC,CAAC;YAElC,MAAM,QAAQ,GAAG,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,QAAQ,qBAAqB,CAAC,CAAC,CAAC,gDAAgD,CAAC;YAC1H,MAAM,OAAO,GAAQ,EAAE,CAAC;YACxB,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC;gBACnB,OAAO,CAAC,eAAe,CAAC,GAAG,UAAU,IAAI,CAAC,MAAM,EAAE,CAAC;YACrD,CAAC;YAED,MAAM,QAAQ,GAAG,MAAM,KAAK,CAAC,QAAQ,EAAE,EAAE,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,CAAC,CAAC;YACpF,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,CAAC;gBACjB,MAAM,GAAG,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC,KAAK,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,KAAK,EAAE,eAAe,EAAE,CAAC,CAAC,CAAC;gBAC5E,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAA,MAAA,GAAG,CAAC,KAAK,0CAAE,OAAO,KAAI,QAAQ,CAAC,UAAU,EAAE,CAAC,CAAC;YACxF,CAAC;YACD,MAAM,MAAM,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;YACrC,OAAO,MAAM,CAAC,IAAI,IAAI,EAAE,CAAC;QAC3B,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,
MAAM,IAAI,KAAK,CAAC,kCAAkC,KAAK,CAAC,OAAO,EAAE,CAAC,CAAC;QACrE,CAAC;IACH,CAAC;CACF;AAED,MAAM,OAAO,0BAA0B;IACrC,MAAM,CAAC,MAAM,CAAC,MAA6B;;QACzC,MAAM,QAAQ,GAAG,CAAA,MAAA,MAAM,CAAC,YAAY,0CAAE,QAAQ,KAAI,QAAQ,CAAC;QAC3D,QAAQ,QAAQ,EAAE,CAAC;YACjB,KAAK,QAAQ,CAAC,CAAC,OAAO,IAAI,0BAA0B,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC;YAC1E,KAAK,SAAS,CAAC,CAAC,OAAO,IAAI,0BAA0B,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC;YAC3E,OAAO,CAAC,CAAC,MAAM,IAAI,KAAK,CAAC,wCAAwC,QAAQ,EAAE,CAAC,CAAC;QAC/E,CAAC;IACH,CAAC;CACF","sourcesContent":["import { SpeechToTextProvider, ServiceProviderConfig } from '../types/service-providers.types';\r\n\r\nexport class WhisperSpeechToTextService implements SpeechToTextProvider {\r\n private apiKey: string;\r\n private baseUrl: string;\r\n private useProxy: boolean;\r\n private proxyUrl: string;\r\n private model: string;\r\n\r\n constructor(config: ServiceProviderConfig['speechToText']) {\r\n this.useProxy = config?.useProxy || false;\r\n this.proxyUrl = config?.proxyUrl || 'http://localhost:8492';\r\n this.model = config?.model || 'gpt-4o-transcribe';\r\n \r\n if (this.useProxy) {\r\n // Mode proxy: pas besoin d'API key\r\n this.apiKey = '';\r\n this.baseUrl = this.proxyUrl;\r\n } else {\r\n // Mode direct: API key requise\r\n this.apiKey = config?.apiKey || this.getEnvironmentVariable('OPENAI_API_KEY') || '';\r\n this.baseUrl = config?.baseUrl || 'https://api.openai.com/v1';\r\n \r\n if (!this.apiKey) {\r\n throw new Error('OpenAI API key is required for Whisper service');\r\n }\r\n }\r\n }\r\n\r\n private getEnvironmentVariable(name: string): string | undefined {\r\n // In browser environment, we might get env vars through other means\r\n if (typeof process !== 'undefined' && process.env) {\r\n return process.env[name];\r\n }\r\n // Check if it's available as a global variable or through other means\r\n return (window as any)[name] || undefined;\r\n }\r\n\r\n async transcribe(audioContent: File, lang: string = 'en'): Promise<string> {\r\n try {\r\n const formData = new 
FormData();\r\n \r\n formData.append('file', audioContent);\r\n formData.append('model', this.model);\r\n formData.append('language', lang);\r\n formData.append('response_format', 'json');\r\n formData.append('max_output_tokens', '2000');\r\n\r\n const endpoint = this.useProxy ? `${this.baseUrl}/transcribe` : `${this.baseUrl}/audio/transcriptions`;\r\n const headers: any = {};\r\n \r\n if (!this.useProxy) {\r\n headers['Authorization'] = `Bearer ${this.apiKey}`;\r\n }\r\n\r\n const response = await fetch(endpoint, {\r\n method: 'POST',\r\n headers: headers,\r\n body: formData,\r\n });\r\n\r\n if (!response.ok) {\r\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' }));\r\n throw new Error(`Transcription failed: ${errorData.error?.message || response.statusText}`);\r\n }\r\n\r\n const result = await response.json();\r\n return result.text || '';\r\n } catch (error) {\r\n throw new Error(`Speech-to-text transcription failed: ${error.message}`);\r\n }\r\n }\r\n}\r\n\r\nexport class MistralSpeechToTextService implements SpeechToTextProvider {\r\n private apiKey: string;\r\n private model: string;\r\n private useProxy: boolean;\r\n private proxyUrl: string;\r\n\r\n constructor(config: ServiceProviderConfig['speechToText']) {\r\n this.useProxy = config?.useProxy || false;\r\n this.proxyUrl = config?.proxyUrl || 'http://localhost:8492';\r\n this.model = config?.model || 'voxtral-mini-latest';\r\n this.apiKey = this.useProxy ? '' : (config?.apiKey || '');\r\n if (!this.useProxy && !this.apiKey) {\r\n throw new Error('Mistral API key is required for speech-to-text service');\r\n }\r\n }\r\n\r\n async transcribe(audioContent: File, lang: string = 'en'): Promise<string> {\r\n try {\r\n const formData = new FormData();\r\n formData.append('file', audioContent);\r\n formData.append('model', this.model);\r\n formData.append('language', lang);\r\n\r\n const endpoint = this.useProxy ? 
`${this.proxyUrl}/transcribe-mistral` : 'https://api.mistral.ai/v1/audio/transcriptions';\r\n const headers: any = {};\r\n if (!this.useProxy) {\r\n headers['Authorization'] = `Bearer ${this.apiKey}`;\r\n }\r\n\r\n const response = await fetch(endpoint, { method: 'POST', headers, body: formData });\r\n if (!response.ok) {\r\n const err = await response.json().catch(() => ({ error: 'Unknown error' }));\r\n throw new Error(`Transcription failed: ${err.error?.message || response.statusText}`);\r\n }\r\n const result = await response.json();\r\n return result.text || '';\r\n } catch (error) {\r\n throw new Error(`Mistral speech-to-text failed: ${error.message}`);\r\n }\r\n }\r\n}\r\n\r\nexport class SpeechToTextServiceFactory {\r\n static create(config: ServiceProviderConfig): SpeechToTextProvider {\r\n const provider = config.speechToText?.provider || 'openai';\r\n switch (provider) {\r\n case 'openai': return new WhisperSpeechToTextService(config.speechToText);\r\n case 'mistral': return new MistralSpeechToTextService(config.speechToText);\r\n default: throw new Error(`Unsupported speech-to-text provider: ${provider}`);\r\n }\r\n }\r\n}\r\n"]}
@@ -1,2 +1,10 @@
1
- export {};
1
+ export const TRANSCRIPTION_MODELS = {
2
+ openai: ['gpt-4o-transcribe', 'gpt-4o-mini-transcribe', 'whisper-1'],
3
+ mistral: ['voxtral-mini-latest', 'voxtral-mini-transcribe-realtime-latest'],
4
+ };
5
+ export const COMPLETION_MODELS = {
6
+ openai: ['gpt-5', 'gpt-5-mini', 'gpt-5.5', 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4o', 'gpt-4o-mini', 'o4-mini', 'gpt-5.2', 'gpt-5.3', 'gpt-5.4', 'gpt-5.4-mini', 'gpt-5.4-pro', 'gpt-5.4-nano'],
7
+ anthropic: ['claude-opus-4', 'claude-sonnet-4', 'claude-haiku-4', 'claude-3.7-sonnet', 'claude-3.5-sonnet', 'claude-opus-4.7', 'claude-opus-4.6', 'claude-opus-4.5', 'claude-sonnet-4.6', 'claude-sonnet-4.5'],
8
+ mistral: ['mistral-large-latest', 'mistral-medium-latest', 'mistral-small-latest', 'ministral', 'mistral-nemo'],
9
+ };
2
10
  //# sourceMappingURL=service-providers.types.js.map
@@ -1 +1 @@
1
- {"version":3,"file":"service-providers.types.js","sourceRoot":"","sources":["../../src/types/service-providers.types.ts"],"names":[],"mappings":"","sourcesContent":["export interface SpeechToTextProvider {\r\n transcribe(audioBlob: Blob, lang: string): Promise<string>;\r\n}\r\n\r\nexport interface LLMProvider {\r\n fillFormFromTranscription(transcription: string, schema: any): Promise<any>;\r\n fillFormFromJson(json: string, schema: any): Promise<any>;\r\n}\r\n\r\nexport interface ServiceProviderConfig {\r\n speechToText?: {\r\n provider: 'whisper' | 'custom';\r\n apiKey?: string;\r\n baseUrl?: string;\r\n useProxy?: boolean;\r\n proxyUrl?: string;\r\n model?: string;\r\n };\r\n llm?: {\r\n provider: 'openai' | 'custom';\r\n apiKey?: string;\r\n model?: string;\r\n baseUrl?: string;\r\n useProxy?: boolean;\r\n proxyUrl?: string;\r\n };\r\n}\r\n"]}
1
+ {"version":3,"file":"service-providers.types.js","sourceRoot":"","sources":["../../src/types/service-providers.types.ts"],"names":[],"mappings":"AAGA,MAAM,CAAC,MAAM,oBAAoB,GAA4C;IAC3E,MAAM,EAAE,CAAC,mBAAmB,EAAE,wBAAwB,EAAE,WAAW,CAAC;IACpE,OAAO,EAAE,CAAC,qBAAqB,EAAE,yCAAyC,CAAC;CAC5E,CAAC;AAEF,MAAM,CAAC,MAAM,iBAAiB,GAAyC;IACrE,MAAM,EAAE,CAAC,OAAO,EAAE,YAAY,EAAE,SAAS,EAAE,SAAS,EAAE,cAAc,EAAE,QAAQ,EAAE,aAAa,EAAE,SAAS,EAAE,SAAS,EAAE,SAAS,EAAE,SAAS,EAAE,cAAc,EAAE,aAAa,EAAE,cAAc,CAAC;IACzL,SAAS,EAAE,CAAC,eAAe,EAAE,iBAAiB,EAAE,gBAAgB,EAAE,mBAAmB,EAAE,mBAAmB,EAAE,iBAAiB,EAAE,iBAAiB,EAAE,iBAAiB,EAAE,mBAAmB,EAAE,mBAAmB,CAAC;IAC9M,OAAO,EAAE,CAAC,sBAAsB,EAAE,uBAAuB,EAAE,sBAAsB,EAAE,WAAW,EAAE,cAAc,CAAC;CAChH,CAAC","sourcesContent":["export type TranscriptionProvider = 'openai' | 'mistral';\r\nexport type CompletionProvider = 'openai' | 'anthropic' | 'mistral';\r\n\r\nexport const TRANSCRIPTION_MODELS: Record<TranscriptionProvider, string[]> = {\r\n openai: ['gpt-4o-transcribe', 'gpt-4o-mini-transcribe', 'whisper-1'],\r\n mistral: ['voxtral-mini-latest', 'voxtral-mini-transcribe-realtime-latest'],\r\n};\r\n\r\nexport const COMPLETION_MODELS: Record<CompletionProvider, string[]> = {\r\n openai: ['gpt-5', 'gpt-5-mini', 'gpt-5.5', 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4o', 'gpt-4o-mini', 'o4-mini', 'gpt-5.2', 'gpt-5.3', 'gpt-5.4', 'gpt-5.4-mini', 'gpt-5.4-pro', 'gpt-5.4-nano'],\r\n anthropic: ['claude-opus-4', 'claude-sonnet-4', 'claude-haiku-4', 'claude-3.7-sonnet', 'claude-3.5-sonnet', 'claude-opus-4.7', 'claude-opus-4.6', 'claude-opus-4.5', 'claude-sonnet-4.6', 'claude-sonnet-4.5'],\r\n mistral: ['mistral-large-latest', 'mistral-medium-latest', 'mistral-small-latest', 'ministral', 'mistral-nemo'],\r\n};\r\n\r\nexport interface SpeechToTextProvider {\r\n transcribe(audioBlob: Blob, lang: string): Promise<string>;\r\n}\r\n\r\nexport interface LLMProvider {\r\n fillFormFromTranscription(transcription: string, schema: any): Promise<any>;\r\n fillFormFromJson(json: string, schema: any): 
Promise<any>;\r\n}\r\n\r\nexport interface ServiceProviderConfig {\r\n speechToText?: {\r\n provider: TranscriptionProvider;\r\n apiKey?: string;\r\n baseUrl?: string;\r\n useProxy?: boolean;\r\n proxyUrl?: string;\r\n model?: string;\r\n };\r\n llm?: {\r\n provider: CompletionProvider;\r\n apiKey?: string;\r\n model?: string;\r\n baseUrl?: string;\r\n useProxy?: boolean;\r\n proxyUrl?: string;\r\n };\r\n}\r\n"]}
@@ -1,69 +1,96 @@
1
1
  import { proxyCustomElement, HTMLElement as HTMLElement$1, createEvent, h as h$1 } from '@stencil/core/internal/client';
2
2
  import { d as defineCustomElement$2 } from './ocr-file-uploader2.js';
3
3
 
4
+ const TRANSCRIPTION_MODELS = {
5
+ openai: ['gpt-4o-transcribe', 'gpt-4o-mini-transcribe', 'whisper-1'],
6
+ mistral: ['voxtral-mini-latest', 'voxtral-mini-transcribe-realtime-latest'],
7
+ };
8
+ const COMPLETION_MODELS = {
9
+ openai: ['gpt-5', 'gpt-5-mini', 'gpt-5.5', 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4o', 'gpt-4o-mini', 'o4-mini', 'gpt-5.2', 'gpt-5.3', 'gpt-5.4', 'gpt-5.4-mini', 'gpt-5.4-pro', 'gpt-5.4-nano'],
10
+ anthropic: ['claude-opus-4', 'claude-sonnet-4', 'claude-haiku-4', 'claude-3.7-sonnet', 'claude-3.5-sonnet', 'claude-opus-4.7', 'claude-opus-4.6', 'claude-opus-4.5', 'claude-sonnet-4.6', 'claude-sonnet-4.5'],
11
+ mistral: ['mistral-large-latest', 'mistral-medium-latest', 'mistral-small-latest', 'ministral', 'mistral-nemo'],
12
+ };
13
+
4
14
  class AudioRecorderService {
5
15
  constructor() {
6
16
  this.mediaRecorder = null;
7
17
  this.audioChunks = [];
8
18
  this.stream = null;
19
+ this.audioContext = null;
20
+ this.scriptProcessor = null;
21
+ this.pcmChunks = [];
22
+ this.sampleRate = 16000;
9
23
  }
10
24
  async startRecording() {
11
- try {
12
- // Check if the API exists before calling
13
- if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
14
- console.error('Failed to start recording:', 'Microphone access is not supported in this browser or the page is not served over HTTPS/localhost.');
15
- return; // Exit gracefully instead of throwing
16
- }
17
- this.stream = await navigator.mediaDevices.getUserMedia({
18
- audio: {
19
- echoCancellation: true,
20
- noiseSuppression: true,
21
- autoGainControl: true
22
- }
23
- });
24
- this.audioChunks = [];
25
- this.mediaRecorder = new MediaRecorder(this.stream, {
26
- mimeType: 'audio/webm;codecs=opus'
27
- });
28
- this.mediaRecorder.ondataavailable = (event) => {
29
- if (event.data && event.data.size > 0) {
30
- this.audioChunks.push(event.data);
31
- }
32
- };
33
- this.mediaRecorder.start(100); // Collect data every 100ms
34
- }
35
- catch (error) {
36
- console.error('Failed to start recording:', error);
25
+ if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
26
+ throw new Error('Microphone access is not supported in this browser or the page is not served over HTTPS/localhost.');
37
27
  }
28
+ this.stream = await navigator.mediaDevices.getUserMedia({
29
+ audio: { echoCancellation: true, noiseSuppression: true, autoGainControl: true }
30
+ });
31
+ this.audioContext = new AudioContext({ sampleRate: this.sampleRate });
32
+ const source = this.audioContext.createMediaStreamSource(this.stream);
33
+ this.scriptProcessor = this.audioContext.createScriptProcessor(4096, 1, 1);
34
+ this.pcmChunks = [];
35
+ this.scriptProcessor.onaudioprocess = (e) => {
36
+ const input = e.inputBuffer.getChannelData(0);
37
+ this.pcmChunks.push(new Float32Array(input));
38
+ };
39
+ source.connect(this.scriptProcessor);
40
+ this.scriptProcessor.connect(this.audioContext.destination);
38
41
  }
39
42
  async stopRecording() {
40
- return new Promise((resolve, reject) => {
41
- if (!this.mediaRecorder) {
42
- reject(new Error('No active recording found'));
43
- return;
43
+ if (!this.audioContext || !this.scriptProcessor) {
44
+ throw new Error('No active recording found');
45
+ }
46
+ this.scriptProcessor.disconnect();
47
+ await this.audioContext.close();
48
+ const wavBlob = this.encodeWav(this.pcmChunks, this.sampleRate);
49
+ this.cleanup();
50
+ return wavBlob;
51
+ }
52
+ encodeWav(chunks, sampleRate) {
53
+ const totalSamples = chunks.reduce((acc, c) => acc + c.length, 0);
54
+ const buffer = new ArrayBuffer(44 + totalSamples * 2);
55
+ const view = new DataView(buffer);
56
+ const writeStr = (offset, str) => {
57
+ for (let i = 0; i < str.length; i++)
58
+ view.setUint8(offset + i, str.charCodeAt(i));
59
+ };
60
+ writeStr(0, 'RIFF');
61
+ view.setUint32(4, 36 + totalSamples * 2, true);
62
+ writeStr(8, 'WAVE');
63
+ writeStr(12, 'fmt ');
64
+ view.setUint32(16, 16, true); // PCM chunk size
65
+ view.setUint16(20, 1, true); // PCM format
66
+ view.setUint16(22, 1, true); // mono
67
+ view.setUint32(24, sampleRate, true);
68
+ view.setUint32(28, sampleRate * 2, true); // byte rate
69
+ view.setUint16(32, 2, true); // block align
70
+ view.setUint16(34, 16, true); // bits per sample
71
+ writeStr(36, 'data');
72
+ view.setUint32(40, totalSamples * 2, true);
73
+ let offset = 44;
74
+ for (const chunk of chunks) {
75
+ for (let i = 0; i < chunk.length; i++) {
76
+ const s = Math.max(-1, Math.min(1, chunk[i]));
77
+ view.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
78
+ offset += 2;
44
79
  }
45
- this.mediaRecorder.onstop = () => {
46
- const audioBlob = new Blob(this.audioChunks, { type: 'audio/webm' });
47
- this.cleanup();
48
- resolve(audioBlob);
49
- };
50
- this.mediaRecorder.onerror = (event) => {
51
- reject(new Error(`Recording error: ${event}`));
52
- };
53
- this.mediaRecorder.stop();
54
- });
80
+ }
81
+ return new Blob([buffer], { type: 'audio/wav' });
55
82
  }
56
83
  isRecording() {
57
- var _a;
58
- return ((_a = this.mediaRecorder) === null || _a === void 0 ? void 0 : _a.state) === 'recording';
84
+ return this.scriptProcessor !== null;
59
85
  }
60
86
  cleanup() {
61
87
  if (this.stream) {
62
88
  this.stream.getTracks().forEach(track => track.stop());
63
89
  this.stream = null;
64
90
  }
65
- this.mediaRecorder = null;
66
- this.audioChunks = [];
91
+ this.audioContext = null;
92
+ this.scriptProcessor = null;
93
+ this.pcmChunks = [];
67
94
  }
68
95
  }
69
96
 
@@ -125,15 +152,49 @@ class WhisperSpeechToTextService {
125
152
  }
126
153
  }
127
154
  }
155
+ class MistralSpeechToTextService {
156
+ constructor(config) {
157
+ this.useProxy = (config === null || config === void 0 ? void 0 : config.useProxy) || false;
158
+ this.proxyUrl = (config === null || config === void 0 ? void 0 : config.proxyUrl) || 'http://localhost:8492';
159
+ this.model = (config === null || config === void 0 ? void 0 : config.model) || 'voxtral-mini-latest';
160
+ this.apiKey = this.useProxy ? '' : ((config === null || config === void 0 ? void 0 : config.apiKey) || '');
161
+ if (!this.useProxy && !this.apiKey) {
162
+ throw new Error('Mistral API key is required for speech-to-text service');
163
+ }
164
+ }
165
+ async transcribe(audioContent, lang = 'en') {
166
+ var _a;
167
+ try {
168
+ const formData = new FormData();
169
+ formData.append('file', audioContent);
170
+ formData.append('model', this.model);
171
+ formData.append('language', lang);
172
+ const endpoint = this.useProxy ? `${this.proxyUrl}/transcribe-mistral` : 'https://api.mistral.ai/v1/audio/transcriptions';
173
+ const headers = {};
174
+ if (!this.useProxy) {
175
+ headers['Authorization'] = `Bearer ${this.apiKey}`;
176
+ }
177
+ const response = await fetch(endpoint, { method: 'POST', headers, body: formData });
178
+ if (!response.ok) {
179
+ const err = await response.json().catch(() => ({ error: 'Unknown error' }));
180
+ throw new Error(`Transcription failed: ${((_a = err.error) === null || _a === void 0 ? void 0 : _a.message) || response.statusText}`);
181
+ }
182
+ const result = await response.json();
183
+ return result.text || '';
184
+ }
185
+ catch (error) {
186
+ throw new Error(`Mistral speech-to-text failed: ${error.message}`);
187
+ }
188
+ }
189
+ }
128
190
  class SpeechToTextServiceFactory {
129
191
  static create(config) {
130
192
  var _a;
131
- const provider = ((_a = config.speechToText) === null || _a === void 0 ? void 0 : _a.provider) || 'whisper';
193
+ const provider = ((_a = config.speechToText) === null || _a === void 0 ? void 0 : _a.provider) || 'openai';
132
194
  switch (provider) {
133
- case 'whisper':
134
- return new WhisperSpeechToTextService(config.speechToText);
135
- default:
136
- throw new Error(`Unsupported speech-to-text provider: ${provider}`);
195
+ case 'openai': return new WhisperSpeechToTextService(config.speechToText);
196
+ case 'mistral': return new MistralSpeechToTextService(config.speechToText);
197
+ default: throw new Error(`Unsupported speech-to-text provider: ${provider}`);
137
198
  }
138
199
  }
139
200
  }
@@ -325,7 +386,8 @@ class OpenAILLMService {
325
386
  `${field.readonly ? ', readonly' : ''}` +
326
387
  `${field.default !== undefined && field.default !== null && field.default !== '' ? ', default=' + field.default : ''}` +
327
388
  `${field.min && field.min !== "" ? ', min=' + field.min : ''}` +
328
- `${field.max && field.max !== "" ? ', max=' + field.max : ''}`;
389
+ `${field.max && field.max !== "" ? ', max=' + field.max : ''}` +
390
+ `${field.pattern && field.pattern !== "" ? ', pattern=' + field.pattern : ''}`;
329
391
  if (field.options && field.options.length > 0) {
330
392
  if (field.options.length < 50) {
331
393
  description += `) - options: ${field.options.join(', ')}`;
@@ -347,7 +409,8 @@ class OpenAILLMService {
347
409
  `${field.readonly ? ', readonly' : ''}` +
348
410
  `${field.default !== undefined && field.default !== null && field.default !== '' ? ', default=' + field.default : ''}` +
349
411
  `${field.min && field.min !== "" ? ', min=' + field.min : ''}` +
350
- `${field.max && field.max !== "" ? ', max=' + field.max : ''}`;
412
+ `${field.max && field.max !== "" ? ', max=' + field.max : ''}` +
413
+ `${field.pattern && field.pattern !== "" ? ', pattern=' + field.pattern : ''}`;
351
414
  if (field.options && field.options.length > 0) {
352
415
  if (field.options.length < 50) {
353
416
  description += `) - options: ${field.options.join(', ')}`;
@@ -491,12 +554,14 @@ class OpenAILLMService {
491
554
  7. Only include fields where relevant information is found
492
555
  8. The current GMT datetime is ${new Date().toGMTString()}
493
556
  9. Respect the constraints written between parenthesis for readonly status (readonly fields MUST NOT be filled with values), min and max values, whatever the transcription says
557
+ 9b. CRITICAL - DATE/DATETIME OUT OF RANGE: For date and datetime fields with min and/or max constraints, if the value from the transcription falls outside the allowed range, leave the field EMPTY. NEVER clamp, adjust, or substitute a boundary date - either the date is valid and within range, or the field is left empty
494
558
  10. IMPORTANT: Fields marked as "readonly" are presentation elements (headers, labels) that provide context but MUST NOT receive values
495
559
  11. CRITICAL - DUPLICATE FIELDS: When you see multiple fields with the same name but different IDs (e.g., "ID:field1 | Event date" and "ID:field2 | Event date"), these are DIFFERENT fields in DIFFERENT sections. The user may explicitly say which section they are filling (e.g., "for the first/second sub-form"). Listen VERY CAREFULLY to these contextual clues. Use the readonly headers to understand which section each field belongs to. If the user says "for the clinical sign, the event date is X", you must fill the Event date field that comes AFTER the "MC Clinical sign" header, NOT the first Event date you see
496
560
  12. Use readonly fields as contextual hints to understand form structure and field grouping, but never fill them
497
- 13. CRITICAL: For select fields, options like "No", "Non", "Non applicable", "Inconnu", "Unknown", "N/A" are VALID VALUES that can be selected. When the user says these words, treat them as legitimate option choices, NOT always as negations or refusals to answer. HOWEVER, for non-select fields (string, number, date, etc.), if the user says "I don't know", "unknown", "not known", "inconnu", "je ne sais pas", this usually means they have NO VALUE to provide, unless explicitely precised - leave the field empty, do NOT fill it with the literal text "unknown" or "I don't know"
561
+ 13. CRITICAL - UNKNOWN/NO/N-A VALUES: The interpretation of words like "Unknown", "Inconnu", "No", "Non", "N/A", "Non applicable" depends ENTIRELY on whether the field has those words as explicit options. Rule: if the user says "field X is Unknown" (or any similar phrasing) AND "Unknown" (or a close variant) exists in the options list for that field, then SELECT that option - it is a valid value. If the user says such words for a field that does NOT have them as options (string, number, date, etc.), then it means the user has no value to provide - leave the field empty, do NOT fill it with the literal text
498
562
  14. CRITICAL: Each field has a unique ID shown as "ID:xxx" at the start of its description. You MUST include this exact ID in your response for each field you fill. The ID is the ONLY way to distinguish between fields with the same name
499
563
  15. CRITICAL - DEFAULT VALUES: Some fields have default values shown as "default=xxx". If the user does NOT mention a specific value for that field in the transcription, you MUST return the default value. Default values should be preserved unless explicitly overridden by the user
564
+ 16. CRITICAL - PATTERN VALIDATION: Some fields have a regex pattern shown as "pattern=xxx". The value you extract MUST match this pattern exactly. If the transcription is too ambiguous to produce a value that matches the pattern, leave the field EMPTY
500
565
 
501
566
  Respond with JSON in this exact format: {"fields": [{"id": "field_id", "name": "field_name", "value": "extracted_value"}]}`;
502
567
  let userPrompt = `
@@ -565,10 +630,12 @@ class OpenAILLMService {
565
630
  8. Return the same schema structure with 'default' values filled
566
631
  9. The current GMT datetime is ${new Date().toGMTString()}
567
632
  10. Respect the constraints written between parenthesis for readonly status (readonly fields MUST NOT be filled with values), min and max values, whatever the transcription says
633
+ 10b. CRITICAL - DATE/DATETIME OUT OF RANGE: For date and datetime fields with min and/or max constraints, if the value from the transcription falls outside the allowed range, leave the field EMPTY. NEVER clamp, adjust, or substitute a boundary date - either the date is valid and within range, or the field is left empty
568
634
  11. IMPORTANT: Fields marked as "readonly" are presentation elements (headers, labels) that provide context but MUST NOT receive values
569
635
  12. CRITICAL - DUPLICATE FIELDS: When you see multiple fields with the same name but different IDs (e.g., "ID:field1 | Event date" and "ID:field2 | Event date"), these are DIFFERENT fields in DIFFERENT sections. The user will explicitly say which section they are filling (e.g., "for the first/second sub-form", "in the anamnesis section", "for the clinical sign"). Listen VERY CAREFULLY to these contextual clues. Use the readonly headers to understand which section each field belongs to. If the user says "for the clinical sign, the event date is X", you must fill the Event date field that comes AFTER the "MC Clinical sign" header, NOT the first Event date you see
570
636
  13. Use readonly fields as contextual hints to understand form structure and field grouping, but never fill them
571
- 14. CRITICAL: For select fields, options like "No", "Non", "Non applicable", "Inconnu", "Unknown", "N/A" are VALID VALUES that can be selected. When the user says these words, treat them as legitimate option choices, NOT always as negations or refusals to answer. HOWEVER, for non-select fields (string, number, date, etc.), if the user says "I don't know", "unknown", "not known", "inconnu", "je ne sais pas", this usually means they have NO VALUE to provide, unless explicitely precised - leave the field empty, do NOT fill it with the literal text "unknown" or "I don't know"
637
+ 14. CRITICAL - UNKNOWN/NO/N-A VALUES: The interpretation of words like "Unknown", "Inconnu", "No", "Non", "N/A", "Non applicable" depends ENTIRELY on whether the field has those words as explicit options. Rule: if the user says "field X is Unknown" (or any similar phrasing) AND "Unknown" (or a close variant) exists in the options list for that field, then SELECT that option - it is a valid value. If the user says such words for a field that does NOT have them as options (string, number, date, etc.), then it means the user has no value to provide - leave the field empty, do NOT fill it with the literal text
638
+ 15. CRITICAL - PATTERN VALIDATION: Some fields have a regex pattern shown as "pattern=xxx". The value you extract MUST match this pattern exactly. If the transcription is too ambiguous to produce a value that matches the pattern, leave the field EMPTY
572
639
 
573
640
  Respond with JSON in this exact format: {"schema": {...}}`;
574
641
  let userPrompt = `
@@ -632,15 +699,138 @@ class OpenAILLMService {
632
699
  }
633
700
  }
634
701
  }
702
/**
 * LLM form-filling service backed by the Anthropic Messages API.
 *
 * Two transports are supported:
 *  - direct: POSTs to https://api.anthropic.com/v1/messages with x-api-key /
 *    anthropic-version headers and reads the native reply shape
 *    (result.content[0].text);
 *  - proxy: POSTs to `${proxyUrl}/complete-anthropic`; the proxy is expected
 *    to answer in the OpenAI chat-completions shape
 *    (result.choices[0].message.content).
 */
class AnthropicLLMService {
    /**
     * @param {{useProxy?: boolean, proxyUrl?: string, model?: string, apiKey?: string}} [config]
     * @throws {Error} when proxy mode is off and no API key is provided
     */
    constructor(config) {
        this.useProxy = (config === null || config === void 0 ? void 0 : config.useProxy) || false;
        this.proxyUrl = (config === null || config === void 0 ? void 0 : config.proxyUrl) || 'http://localhost:8492';
        // NOTE(review): Anthropic's published model ids use dashes
        // ('claude-sonnet-4-5'); confirm the dotted alias is accepted upstream.
        this.model = (config === null || config === void 0 ? void 0 : config.model) || 'claude-sonnet-4.5';
        this.apiKey = this.useProxy ? '' : ((config === null || config === void 0 ? void 0 : config.apiKey) || '');
        if (!this.useProxy && !this.apiKey) {
            throw new Error('Anthropic API key is required');
        }
    }
    /** Fills the form schema from a voice transcription (delegates to fillForm). */
    async fillFormFromTranscription(transcription, schema) {
        return this.fillForm(transcription, schema);
    }
    /** Fills the form schema from structured JSON input (delegates to fillForm). */
    async fillFormFromJson(json, schema) {
        return this.fillForm(json, schema);
    }
    /**
     * Sends the data + schema to the model and returns the parsed
     * {"fields": [...]} object.
     * @throws {Error} "Anthropic API failed: ..." on HTTP errors, or a
     *         SyntaxError when the reply is not valid JSON
     */
    async fillForm(data, schema) {
        var _a;
        const endpoint = this.useProxy ? `${this.proxyUrl}/complete-anthropic` : 'https://api.anthropic.com/v1/messages';
        const headers = { 'Content-Type': 'application/json' };
        if (!this.useProxy) {
            headers['x-api-key'] = this.apiKey;
            headers['anthropic-version'] = '2023-06-01';
        }
        // Accept either {fields: ...}, {schema: ...} or a bare schema object.
        const finalSchema = (schema === null || schema === void 0 ? void 0 : schema.fields) || (schema === null || schema === void 0 ? void 0 : schema.schema) || schema;
        const systemPrompt = this.buildSystemPrompt();
        const userPrompt = `Data: "${data}"

Form fields:
${JSON.stringify(finalSchema, null, 2)}

Respond with JSON: {"fields": [{"id": "field_id", "name": "field_name", "value": "extracted_value"}]}`;
        // Direct calls require max_tokens; the proxy supplies its own limit.
        const body = this.useProxy
            ? { model: this.model, messages: [{ role: 'user', content: userPrompt }], system: systemPrompt }
            : { model: this.model, max_tokens: 4096, system: systemPrompt, messages: [{ role: 'user', content: userPrompt }] };
        const response = await fetch(endpoint, { method: 'POST', headers, body: JSON.stringify(body) });
        if (!response.ok) {
            const err = await response.json().catch(() => ({ error: 'Unknown error' }));
            throw new Error(`Anthropic API failed: ${((_a = err.error) === null || _a === void 0 ? void 0 : _a.message) || response.statusText}`);
        }
        const result = await response.json();
        const content = this.useProxy ? result.choices[0].message.content : result.content[0].text;
        return this.extractJson(content);
    }
    /**
     * Parses the model reply as JSON. Anthropic has no JSON response_format,
     * so the model occasionally wraps its answer in a Markdown code fence
     * (```json ... ```); strip any such fence before parsing instead of
     * failing on it.
     * @param {string} content - raw text returned by the model
     * @returns {object} parsed JSON payload
     */
    extractJson(content) {
        const trimmed = String(content !== null && content !== void 0 ? content : '').trim();
        const fenced = trimmed.match(/^```(?:json)?\s*([\s\S]*?)\s*```$/);
        return JSON.parse(fenced ? fenced[1] : trimmed);
    }
    /** System prompt shared by both transports; mirrors the sibling LLM service rules. */
    buildSystemPrompt() {
        return `You are an expert form-filling assistant. Extract values from the input data and fill form fields.
Rules:
1. Only extract values that can be confidently determined
2. Respect field types (string, number, datetime, boolean, select)
3. For datetime fields, use ISO format (YYYY-MM-DDTHH:MM)
4. For date fields, use DD/MM/YYYY format
5. For select fields, use exact option values from the provided choices
6. Leave fields empty if no relevant information is found
7. Fields marked readonly MUST NOT receive values
8. DATE/DATETIME OUT OF RANGE: if a date falls outside min/max constraints, leave the field EMPTY
9. PATTERN VALIDATION: if a field has a pattern, the value MUST match it exactly, otherwise leave empty
10. UNKNOWN VALUES: if the user says "Unknown" and it exists as an option, select it; otherwise leave empty
Respond ONLY with valid JSON.`;
    }
}
762
/**
 * LLM form-filling service backed by the Mistral chat-completions API.
 * Requests strict JSON output via response_format and fills form fields
 * from free-form input, either directly or through the configured proxy.
 */
class MistralLLMService {
    /**
     * @param {{useProxy?: boolean, proxyUrl?: string, model?: string, apiKey?: string}} [config]
     * @throws {Error} when proxy mode is off and no API key is provided
     */
    constructor(config) {
        const cfg = config || {};
        this.useProxy = cfg.useProxy || false;
        this.proxyUrl = cfg.proxyUrl || 'http://localhost:8492';
        this.model = cfg.model || 'mistral-medium-latest';
        this.apiKey = this.useProxy ? '' : (cfg.apiKey || '');
        if (!this.useProxy && !this.apiKey) {
            throw new Error('Mistral API key is required');
        }
    }
    /** Fills the form schema from a voice transcription (delegates to fillForm). */
    async fillFormFromTranscription(transcription, schema) {
        return this.fillForm(transcription, schema);
    }
    /** Fills the form schema from structured JSON input (delegates to fillForm). */
    async fillFormFromJson(json, schema) {
        return this.fillForm(json, schema);
    }
    /**
     * Sends the data + schema to the model and returns the parsed
     * {"fields": [...]} object.
     * @throws {Error} "Mistral API failed: ..." on HTTP errors
     */
    async fillForm(data, schema) {
        const url = this.useProxy
            ? `${this.proxyUrl}/complete-mistral`
            : 'https://api.mistral.ai/v1/chat/completions';
        const requestHeaders = { 'Content-Type': 'application/json' };
        if (!this.useProxy) {
            requestHeaders['Authorization'] = `Bearer ${this.apiKey}`;
        }
        // Accept either {fields: ...}, {schema: ...} or a bare schema object.
        const finalSchema = (schema && (schema.fields || schema.schema)) || schema;
        const userPrompt = `Data: "${data}"

Form fields:
${JSON.stringify(finalSchema, null, 2)}

Respond with JSON: {"fields": [{"id": "field_id", "name": "field_name", "value": "extracted_value"}]}`;
        const requestBody = {
            model: this.model,
            messages: [
                { role: 'system', content: this.buildSystemPrompt() },
                { role: 'user', content: userPrompt },
            ],
            // Forces the API to return a syntactically valid JSON document.
            response_format: { type: 'json_object' },
        };
        const response = await fetch(url, {
            method: 'POST',
            headers: requestHeaders,
            body: JSON.stringify(requestBody),
        });
        if (!response.ok) {
            const failure = await response.json().catch(() => ({ error: 'Unknown error' }));
            const reason = (failure.error && failure.error.message) || response.statusText;
            throw new Error(`Mistral API failed: ${reason}`);
        }
        const parsed = await response.json();
        return JSON.parse(parsed.choices[0].message.content);
    }
    /** System prompt shared by both transports; mirrors the sibling LLM service rules. */
    buildSystemPrompt() {
        return `You are an expert form-filling assistant. Extract values from the input data and fill form fields.
Rules:
1. Only extract values that can be confidently determined
2. Respect field types (string, number, datetime, boolean, select)
3. For datetime fields, use ISO format (YYYY-MM-DDTHH:MM)
4. For date fields, use DD/MM/YYYY format
5. For select fields, use exact option values from the provided choices
6. Leave fields empty if no relevant information is found
7. Fields marked readonly MUST NOT receive values
8. DATE/DATETIME OUT OF RANGE: if a date falls outside min/max constraints, leave the field EMPTY
9. PATTERN VALIDATION: if a field has a pattern, the value MUST match it exactly, otherwise leave empty
10. UNKNOWN VALUES: if the user says "Unknown" and it exists as an option, select it; otherwise leave empty
Respond ONLY with valid JSON.`;
    }
}
635
825
/**
 * Creates the LLM service matching the configured provider.
 * Defaults to the OpenAI implementation when no provider is set.
 */
class LLMServiceFactory {
    /**
     * @param {object} config - full service config; reads config.llm
     * @returns {object} an LLM service instance
     * @throws {Error} for unknown providers
     */
    static create(config) {
        const llmConfig = config.llm;
        const provider = (llmConfig && llmConfig.provider) || 'openai';
        if (provider === 'openai') {
            return new OpenAILLMService(llmConfig);
        }
        if (provider === 'anthropic') {
            return new AnthropicLLMService(llmConfig);
        }
        if (provider === 'mistral') {
            return new MistralLLMService(llmConfig);
        }
        throw new Error(`Unsupported LLM provider: ${provider}`);
    }
}
@@ -4162,6 +4352,8 @@ const VoiceFormRecorder = /*@__PURE__*/ proxyCustomElement(class VoiceFormRecord
4162
4352
  this.apiProxyUrl = 'http://localhost:8492';
4163
4353
  this.transcriptionModel = 'gpt-4o-transcribe';
4164
4354
  this.completionModel = 'gpt-5-mini';
4355
+ this.transcriptionProvider = 'openai';
4356
+ this.completionProvider = 'openai';
4165
4357
  this.context = undefined;
4166
4358
  this.classificationRootUrl = 'http://localhost';
4167
4359
  this.language = 'en';
@@ -4209,17 +4401,33 @@ const VoiceFormRecorder = /*@__PURE__*/ proxyCustomElement(class VoiceFormRecord
4209
4401
  this.updateDebugInfo(errorMessage, { error: errorMessage });
4210
4402
  }
4211
4403
  else {
4404
+ // Validate transcription provider/model
4405
+ const allowedTranscriptionModels = TRANSCRIPTION_MODELS[this.transcriptionProvider];
4406
+ if (!allowedTranscriptionModels) {
4407
+ throw new Error(`Unsupported transcription provider: '${this.transcriptionProvider}'. Allowed: openai, mistral`);
4408
+ }
4409
+ if (!allowedTranscriptionModels.includes(this.transcriptionModel)) {
4410
+ throw new Error(`Model '${this.transcriptionModel}' is not allowed for transcription provider '${this.transcriptionProvider}'. Allowed: ${allowedTranscriptionModels.join(', ')}`);
4411
+ }
4412
+ // Validate completion provider/model
4413
+ const allowedCompletionModels = COMPLETION_MODELS[this.completionProvider];
4414
+ if (!allowedCompletionModels) {
4415
+ throw new Error(`Unsupported completion provider: '${this.completionProvider}'. Allowed: openai, anthropic, mistral`);
4416
+ }
4417
+ if (!allowedCompletionModels.includes(this.completionModel)) {
4418
+ throw new Error(`Model '${this.completionModel}' is not allowed for completion provider '${this.completionProvider}'. Allowed: ${allowedCompletionModels.join(', ')}`);
4419
+ }
4212
4420
  // Parse form schema
4213
4421
  this.parsedSchema = JSON.parse(this.formJson || '{}');
4214
4422
  // Parse service configuration
4215
4423
  this.parsedConfig = JSON.parse(this.serviceConfig || '{}');
4216
4424
  // Add API key to config if provided via prop
4217
4425
  if (this.apiKey) {
4218
- this.parsedConfig = Object.assign(Object.assign({}, this.parsedConfig), { speechToText: Object.assign(Object.assign({}, this.parsedConfig.speechToText), { apiKey: this.apiKey, model: this.transcriptionModel }), llm: Object.assign(Object.assign({}, this.parsedConfig.llm), { apiKey: this.apiKey, model: this.completionModel }) });
4426
+ this.parsedConfig = Object.assign(Object.assign({}, this.parsedConfig), { speechToText: Object.assign(Object.assign({}, this.parsedConfig.speechToText), { provider: this.transcriptionProvider, apiKey: this.apiKey, model: this.transcriptionModel }), llm: Object.assign(Object.assign({}, this.parsedConfig.llm), { provider: this.completionProvider, apiKey: this.apiKey, model: this.completionModel }) });
4219
4427
  }
4220
4428
  else {
4221
4429
  // Use proxy API if no API key provided
4222
- this.parsedConfig = Object.assign(Object.assign({}, this.parsedConfig), { speechToText: Object.assign(Object.assign({}, this.parsedConfig.speechToText), { useProxy: true, proxyUrl: this.apiProxyUrl, model: this.transcriptionModel }), llm: Object.assign(Object.assign({}, this.parsedConfig.llm), { useProxy: true, proxyUrl: this.apiProxyUrl, model: this.completionModel }) });
4430
+ this.parsedConfig = Object.assign(Object.assign({}, this.parsedConfig), { speechToText: Object.assign(Object.assign({}, this.parsedConfig.speechToText), { provider: this.transcriptionProvider, useProxy: true, proxyUrl: this.apiProxyUrl, model: this.transcriptionModel }), llm: Object.assign(Object.assign({}, this.parsedConfig.llm), { provider: this.completionProvider, useProxy: true, proxyUrl: this.apiProxyUrl, model: this.completionModel }) });
4223
4431
  }
4224
4432
  // Initialize services
4225
4433
  this.speechToTextService = SpeechToTextServiceFactory.create(this.parsedConfig);
@@ -4385,7 +4593,7 @@ const VoiceFormRecorder = /*@__PURE__*/ proxyCustomElement(class VoiceFormRecord
4385
4593
  });
4386
4594
  // Stop recording and get audio blob
4387
4595
  const audioBlob = await this.audioRecorder.stopRecording();
4388
- const audioContent = new File([audioBlob], 'audio.webm', { type: 'audio/webm' });
4596
+ const audioContent = new File([audioBlob], 'audio.wav', { type: 'audio/wav' });
4389
4597
  this.processAudioContent(audioContent);
4390
4598
  }
4391
4599
  catch (error) {
@@ -4890,12 +5098,16 @@ const VoiceFormRecorder = /*@__PURE__*/ proxyCustomElement(class VoiceFormRecord
4890
5098
  render() {
4891
5099
  const containerStyle = this.getContainerStyle();
4892
5100
  const statusStyle = this.getStatusStyle();
4893
- return (h$1("div", { key: '8503450dbd4de2cb91172bd2bdbdf16a0c4cdce0' }, h$1("div", { key: '8b3f553574a64754312f68f2abe0d64f57c56d3e', class: "voice-recorder-container" + (this.debug || this.renderForm ? "-debug" : ""), style: containerStyle }, h$1("div", { key: '3d08a37d611bc91dbe11c065c633f0d36c8b362e', class: "row-audio-area" }, this.renderRecordButton(), this.renderUploadRecordButton(), this.renderUploadButton()), this.displayStatus ? h$1("div", { class: "status-text", style: statusStyle }, this.statusMessage) : "", this.renderForm ? this.renderFormPreview() : "", this.debug ? this.renderDebugPanel() : "")));
5101
+ return (h$1("div", { key: '8378e98f02e5b7482929d255c5ec12ff8c2731e4' }, h$1("div", { key: 'b8a0d873bd4e1b4c8936747c0919ac8c4c15301b', class: "voice-recorder-container" + (this.debug || this.renderForm ? "-debug" : ""), style: containerStyle }, h$1("div", { key: '86e7783e3686db378ee16aa91a640f33c3255923', class: "row-audio-area" }, this.renderRecordButton(), this.renderUploadRecordButton(), this.renderUploadButton()), this.displayStatus ? h$1("div", { class: "status-text", style: statusStyle }, this.statusMessage) : "", this.renderForm ? this.renderFormPreview() : "", this.debug ? this.renderDebugPanel() : "")));
4894
5102
  }
4895
5103
  static get watchers() { return {
4896
5104
  "formJson": ["initializeServices"],
4897
5105
  "serviceConfig": ["initializeServices"],
4898
- "theme": ["initializeServices"]
5106
+ "theme": ["initializeServices"],
5107
+ "transcriptionProvider": ["initializeServices"],
5108
+ "completionProvider": ["initializeServices"],
5109
+ "transcriptionModel": ["initializeServices"],
5110
+ "completionModel": ["initializeServices"]
4899
5111
  }; }
4900
5112
  static get style() { return voiceInputModuleCss; }
4901
5113
  }, [257, "voice-input-module", {
@@ -4905,6 +5117,8 @@ const VoiceFormRecorder = /*@__PURE__*/ proxyCustomElement(class VoiceFormRecord
4905
5117
  "apiProxyUrl": [1, "api-proxy-url"],
4906
5118
  "transcriptionModel": [1, "transcription-model"],
4907
5119
  "completionModel": [1, "completion-model"],
5120
+ "transcriptionProvider": [1, "transcription-provider"],
5121
+ "completionProvider": [1, "completion-provider"],
4908
5122
  "context": [1],
4909
5123
  "classificationRootUrl": [1, "classification-root-url"],
4910
5124
  "language": [1],
@@ -4928,7 +5142,11 @@ const VoiceFormRecorder = /*@__PURE__*/ proxyCustomElement(class VoiceFormRecord
4928
5142
  }, undefined, {
4929
5143
  "formJson": ["initializeServices"],
4930
5144
  "serviceConfig": ["initializeServices"],
4931
- "theme": ["initializeServices"]
5145
+ "theme": ["initializeServices"],
5146
+ "transcriptionProvider": ["initializeServices"],
5147
+ "completionProvider": ["initializeServices"],
5148
+ "transcriptionModel": ["initializeServices"],
5149
+ "completionModel": ["initializeServices"]
4932
5150
  }]);
4933
5151
  function defineCustomElement$1() {
4934
5152
  if (typeof customElements === "undefined") {