reneco-advanced-input-module 0.0.1-beta.1 → 0.0.1-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/loader/cdn.js +1 -0
  2. package/loader/index.cjs.js +1 -0
  3. package/loader/index.d.ts +24 -0
  4. package/loader/index.es2017.js +1 -0
  5. package/loader/index.js +2 -0
  6. package/package.json +6 -2
  7. package/www/build/index.esm.js +2 -0
  8. package/www/build/index.esm.js.map +1 -0
  9. package/www/build/loader.esm.js.map +1 -0
  10. package/www/build/ocr-file-uploader.voice-input-module.entry.esm.js.map +1 -0
  11. package/www/build/p-52e59129.entry.js +2 -0
  12. package/www/build/p-52e59129.entry.js.map +1 -0
  13. package/www/build/p-DQuL1Twl.js +2 -0
  14. package/www/build/p-DQuL1Twl.js.map +1 -0
  15. package/www/build/p-jmc2yzBp.js +3 -0
  16. package/www/build/p-jmc2yzBp.js.map +1 -0
  17. package/www/build/voice-input-module.esm.js +2 -0
  18. package/www/build/voice-input-module.esm.js.map +1 -0
  19. package/www/build/voice-input-module.js +33 -0
  20. package/www/host.config.json +15 -0
  21. package/www/index.html +922 -0
  22. package/.editorconfig +0 -15
  23. package/.prettierrc.json +0 -13
  24. package/api-key-inject.js +0 -46
  25. package/env-config.js +0 -4
  26. package/inject-env.js +0 -20
  27. package/src/components/ocr-file-uploader/ocr-file-uploader.css +0 -26
  28. package/src/components/ocr-file-uploader/ocr-file-uploader.tsx +0 -100
  29. package/src/components/ocr-file-uploader/readme.md +0 -31
  30. package/src/components/voice-input-module/readme.md +0 -114
  31. package/src/components/voice-input-module/voice-input-module.css +0 -286
  32. package/src/components/voice-input-module/voice-input-module.tsx +0 -778
  33. package/src/components.d.ts +0 -158
  34. package/src/index.html +0 -1015
  35. package/src/index.ts +0 -12
  36. package/src/services/audio-recorder.service.ts +0 -74
  37. package/src/services/llm.service.ts +0 -221
  38. package/src/services/speech-to-text.service.ts +0 -70
  39. package/src/types/form-schema.types.ts +0 -78
  40. package/src/types/service-providers.types.ts +0 -22
  41. package/src/utils/schema-converter.ts +0 -494
  42. package/stencil.config.ts +0 -24
  43. package/tsconfig.json +0 -30
package/src/index.ts DELETED
@@ -1,12 +0,0 @@
- /**
-  * @fileoverview entry point for your component library
-  *
-  * This is the entry point for your component library. Use this file to export utilities,
-  * constants or data structures that accompany your components.
-  *
-  * DO NOT use this file to export your components. Instead, use the recommended approaches
-  * to consume components of this package as outlined in the `README.md`.
-  */
-
- // export { format } from './utils/utils';
- // export type * from './components.d.ts';
package/src/services/audio-recorder.service.ts DELETED
@@ -1,74 +0,0 @@
- export class AudioRecorderService {
-   private mediaRecorder: MediaRecorder | null = null;
-   private audioChunks: Blob[] = [];
-   private stream: MediaStream | null = null;
-
-   async startRecording(): Promise<void> {
-     try {
-       // Check if the API exists before calling
-       if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
-         console.error(
-           'Failed to start recording:', 'Microphone access is not supported in this browser or the page is not served over HTTPS/localhost.'
-         );
-         return; // Exit gracefully instead of throwing
-       }
-
-       this.stream = await navigator.mediaDevices.getUserMedia({
-         audio: {
-           echoCancellation: true,
-           noiseSuppression: true,
-           autoGainControl: true
-         }
-       });
-
-       this.audioChunks = [];
-       this.mediaRecorder = new MediaRecorder(this.stream, {
-         mimeType: 'audio/webm;codecs=opus'
-       });
-
-       this.mediaRecorder.ondataavailable = (event) => {
-         if (event.data && event.data.size > 0) {
-           this.audioChunks.push(event.data);
-         }
-       };
-
-       this.mediaRecorder.start(100); // Collect data every 100ms
-     } catch (error: any) {
-       console.error('Failed to start recording:', error);
-     }
-   }
-
-   async stopRecording(): Promise<Blob> {
-     return new Promise((resolve, reject) => {
-       if (!this.mediaRecorder) {
-         reject(new Error('No active recording found'));
-         return;
-       }
-
-       this.mediaRecorder.onstop = () => {
-         const audioBlob = new Blob(this.audioChunks, { type: 'audio/webm' });
-         this.cleanup();
-         resolve(audioBlob);
-       };
-
-       this.mediaRecorder.onerror = (event) => {
-         reject(new Error(`Recording error: ${event}`));
-       };
-
-       this.mediaRecorder.stop();
-     });
-   }
-
-   isRecording(): boolean {
-     return this.mediaRecorder?.state === 'recording';
-   }
-
-   private cleanup(): void {
-     if (this.stream) {
-       this.stream.getTracks().forEach(track => track.stop());
-       this.stream = null;
-     }
-     this.mediaRecorder = null;
-     this.audioChunks = [];
-   }
- }
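For orientation, here is a minimal sketch of how the deleted AudioRecorderService could be driven from a host page. The import path mirrors the beta.1 source layout; the push-to-talk wiring below is hypothetical and not part of the package.

import { AudioRecorderService } from './services/audio-recorder.service';

const recorder = new AudioRecorderService();

// Press: request the microphone and start buffering 100 ms chunks.
async function onRecordPressed(): Promise<void> {
  await recorder.startRecording();
}

// Release: stop and receive the assembled 'audio/webm' Blob, or null if recording never started
// (startRecording logs and returns instead of throwing when getUserMedia is unavailable).
async function onRecordReleased(): Promise<Blob | null> {
  if (!recorder.isRecording()) {
    return null;
  }
  return recorder.stopRecording();
}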
package/src/services/llm.service.ts DELETED
@@ -1,221 +0,0 @@
- import { LLMProvider, ServiceProviderConfig } from '../types/service-providers.types';
- import { FormSchema } from '../types/form-schema.types';
-
- export class OpenAILLMService implements LLMProvider {
-   private apiKey: string;
-   private model: string;
-   private baseUrl: string;
-
-   constructor(config: ServiceProviderConfig['llm']) {
-     // Get API key from config or environment
-     this.apiKey = config?.apiKey || this.getEnvironmentVariable('OPENAI_API_KEY') || '';
-     // the newest OpenAI model is "gpt-4.1-mini". do not change this unless explicitly requested by the user
-     // this.model = config?.model || 'gpt-4.1-mini';
-     this.model = config?.model || 'gpt-4.1';
-     this.baseUrl = config?.baseUrl || 'https://api.openai.com/v1';
-
-     if (!this.apiKey) {
-       throw new Error('OpenAI API key is required for LLM service');
-     }
-   }
-
-   private getEnvironmentVariable(name: string): string | undefined {
-     // In browser environment, we might get env vars through other means
-     if (typeof process !== 'undefined' && process.env) {
-       return process.env[name];
-     }
-     // Check if it's available as a global variable or through other means
-     return (window as any)[name] || undefined;
-   }
-
-   private getOptimizeFieldsDescription(schema: any) {
-     return Object.values(schema).map((field: any) =>
-       `- ${field.name ?? field.title} ` +
-       `(${field.type}` +
-       `${field.required ? ', required' : ''}` +
-       `${field.readonly ? ', readonly' : ''}` +
-       `${field.min && field.min !== "" ? ', min='+field.min : ''}` +
-       `${field.max && field.max !== "" ? ', max='+field.max : ''}` +
-       `)` +
-       `${field.options ? ` - options: ${field.options.join(', ')}` : ''}`
-     ).join('\n')
-   }
-
-   async fillFormFromTranscription(transcription: string, schema: any): Promise<any> {
-     return this.fillForm(transcription, schema, true);
-   }
-
-   async fillFormFromJson(json: string, schema: any): Promise<any> {
-     return this.fillForm(json, schema, false);
-   }
-
-   private async fillForm(data: string, schema: any, dataIsTranscription: boolean = true): Promise<any> {
-     try {
-       // Handle complex schema format with fields array
-       if (schema?.fields || schema?.schema) {
-         const finalSchema = schema?.fields || schema?.schema;
-         const systemPrompt = `You are an expert form-filling assistant. You will receive a voice transcription and form field definitions. Your task is to extract values from the spoken content.
- Rules:
- 1. Only extract values that can be confidently determined from the transcription
- 2. Respect field types (string, number, datetime, boolean, select)
- 3. For datetime fields, use ISO format (YYYY-MM-DDTHH:MM)
- 4. For date fields, use the following format: DD/MM/YYYY
- 5. For boolean fields, interpret yes/no, true/false, positive/negative responses
- 6. For select fields, use exact option values from the provided choices
- 7. Only include fields where relevant information is found
- 8. The current GMT datetime is ${(new Date() as any).toGMTString()}
- 9. Respect the constraints written between parentheses for readonly status (readonly fields must not be filled with values), min and max values, whatever the transcription says
-
- Respond with JSON in this exact format: {"fields": [{"name": "field_name", "value": "extracted_value"}]}`;
-
-         let userPrompt = `
- Voice transcription: "${data}"
-
- Form fields:
- ${this.getOptimizeFieldsDescription(finalSchema)}
-
- Please extract values from the Transcription for these fields.
- `;
-
-         //TODO
-
-         // const userPrompt = (
-         // dataIsTranscription
-         // ?
-         // `Voice transcription: "${data}"`
-         // :
-         // `JSON data: "${JSON.stringify(data)}"`
-         // )+`
-
-         // Form fields:
-         // ${this.getOptimizeFieldsDescription(finalSchema)}
-
-         // Please extract values from the ` +
-         // (
-         // dataIsTranscription
-         // ?
-         // `Transcription`
-         // :
-         // `Json generated file`
-         // )+` for these fields.`;
-
-         const response = await fetch(`${this.baseUrl}/chat/completions`, {
-           method: 'POST',
-           headers: {
-             'Content-Type': 'application/json',
-             'Authorization': `Bearer ${this.apiKey}`,
-           },
-           body: JSON.stringify({
-             model: this.model,
-             messages: [
-               { role: 'system', content: systemPrompt },
-               { role: 'user', content: userPrompt }
-             ],
-             response_format: { type: 'json_object' },
-             temperature: 0.1,
-           }),
-         });
-
-         if (!response.ok) {
-           const errorData = await response.json().catch(() => ({ error: 'Unknown error' }));
-           throw new Error(`LLM API failed: ${errorData.error?.message || response.statusText}`);
-         }
-
-         const result = await response.json();
-         return JSON.parse(result.choices[0].message.content);
-       }
-
-       // Handle simple schema format (backward compatibility)
-       const systemPrompt = `You are an expert form-filling assistant. You will receive a voice transcription and a JSON form schema. Your task is to intelligently fill the form fields based on the spoken content.
- Rules:
- 1. Only fill fields that can be confidently determined from the transcription
- 2. Respect field types (string, number, date, boolean, select)
- 3. For datetime fields, use ISO format (YYYY-MM-DDTHH:MM)
- 4. For date fields, use the following format: DD/MM/YYYY
- 5. For boolean fields, interpret yes/no, true/false, positive/negative responses
- 6. For select fields, match the closest option from the provided choices
- 7. Leave fields empty if no relevant information is found
- 8. Return the same schema structure with 'default' values filled
- 9. The current GMT datetime is ${(new Date() as any).toGMTString()}
- 10. Respect the constraints written between parentheses for readonly status (readonly fields must not be filled with values), min and max values, whatever the transcription says
-
- Respond with JSON in this exact format: {"schema": {...}}`;
-
-       let userPrompt = `
- Voice transcription: "${data}"
-
- Form schema to fill:
- ${JSON.stringify(schema, null, 2)}
-
- Please fill the form fields based on the Transcription and return the schema with default values populated.
- `;
-
-       // const userPrompt = (
-       // dataIsTranscription
-       // ?
-       // `Voice transcription: "${data}"`
-       // :
-       // `JSON data: "${JSON.stringify(data)}"`
-       // )+`
-
-       // Form schema to fill:
-       // ${JSON.stringify(schema, null, 2)}
-
-       // Please fill the form fields based on the ` +
-       // (
-       // dataIsTranscription
-       // ?
-       // `Transcription`
-       // :
-       // `Json generated file`
-       // )+` and return the schema with default values populated.`;
-
-       const response = await fetch(`${this.baseUrl}/chat/completions`, {
-         method: 'POST',
-         headers: {
-           'Content-Type': 'application/json',
-           'Authorization': `Bearer ${this.apiKey}`,
-         },
-         body: JSON.stringify({
-           model: this.model,
-           messages: [
-             { role: 'system', content: systemPrompt },
-             { role: 'user', content: userPrompt }
-           ],
-           response_format: { type: 'json_object' },
-           temperature: 0.1, // Low temperature for consistency
-         }),
-       });
-
-       if (!response.ok) {
-         const errorData = await response.json().catch(() => ({ error: 'Unknown error' }));
-         throw new Error(`LLM API failed: ${errorData.error?.message || response.statusText}`);
-       }
-
-       const result = await response.json();
-       const filledSchema = JSON.parse(result.choices[0].message.content);
-
-       // Validate that the response has the correct structure
-       if (!filledSchema.schema) {
-         throw new Error('Invalid response format from LLM service');
-       }
-
-       return filledSchema;
-     } catch (error) {
-       throw new Error(`Form filling failed: ${error.message}`);
-     }
-   }
- }
-
- export class LLMServiceFactory {
-   static create(config: ServiceProviderConfig): LLMProvider {
-     const provider = config.llm?.provider || 'openai';
-
-     switch (provider) {
-       case 'openai':
-         return new OpenAILLMService(config.llm);
-       default:
-         throw new Error(`Unsupported LLM provider: ${provider}`);
-     }
-   }
- }
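As a rough usage sketch of the factory and service removed above (import paths follow the beta.1 layout; the API key placeholder and the call site are illustrative only):

import { LLMServiceFactory } from './services/llm.service';
import { ServiceProviderConfig } from './types/service-providers.types';

async function fillFromSpeech(transcription: string, formSchema: any): Promise<any> {
  // Assumes the key is supplied by the host application at runtime, never hard-coded.
  const config: ServiceProviderConfig = {
    llm: { provider: 'openai', apiKey: '<injected-at-runtime>', model: 'gpt-4.1' },
  };
  const llm = LLMServiceFactory.create(config);
  // With a { fields: ... } schema this resolves to {"fields": [{"name": ..., "value": ...}]};
  // with the legacy { schema: ... } format it resolves to {"schema": {...}} instead.
  return llm.fillFormFromTranscription(transcription, formSchema);
}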
package/src/services/speech-to-text.service.ts DELETED
@@ -1,70 +0,0 @@
- import { SpeechToTextProvider, ServiceProviderConfig } from '../types/service-providers.types';
-
- export class WhisperSpeechToTextService implements SpeechToTextProvider {
-   private apiKey: string;
-   private baseUrl: string;
-
-   constructor(config: ServiceProviderConfig['speechToText']) {
-     // Get API key from config or environment
-     this.apiKey = config?.apiKey || this.getEnvironmentVariable('OPENAI_API_KEY') || '';
-     this.baseUrl = config?.baseUrl || 'https://api.openai.com/v1';
-
-     if (!this.apiKey) {
-       throw new Error('OpenAI API key is required for Whisper service');
-     }
-   }
-
-   private getEnvironmentVariable(name: string): string | undefined {
-     // In browser environment, we might get env vars through other means
-     if (typeof process !== 'undefined' && process.env) {
-       return process.env[name];
-     }
-     // Check if it's available as a global variable or through other means
-     return (window as any)[name] || undefined;
-   }
-
-   async transcribe(audioContent: File, lang: string = 'en'): Promise<string> {
-     try {
-       const formData = new FormData();
-
-       formData.append('file', audioContent);
-       formData.append('model', 'gpt-4o-transcribe'); // >>> truncates the text?
-       // formData.append('model', 'gpt-4o-mini-transcribe'); // >>> truncates the text?
-       // formData.append('model', 'whisper-1');
-       formData.append('language', lang);
-       formData.append('response_format', 'json');
-       formData.append('max_output_tokens', '2000');
-
-       const response = await fetch(`${this.baseUrl}/audio/transcriptions`, {
-         method: 'POST',
-         headers: {
-           'Authorization': `Bearer ${this.apiKey}`,
-         },
-         body: formData,
-       });
-
-       if (!response.ok) {
-         const errorData = await response.json().catch(() => ({ error: 'Unknown error' }));
-         throw new Error(`Transcription failed: ${errorData.error?.message || response.statusText}`);
-       }
-
-       const result = await response.json();
-       return result.text || '';
-     } catch (error) {
-       throw new Error(`Speech-to-text transcription failed: ${error.message}`);
-     }
-   }
- }
-
- export class SpeechToTextServiceFactory {
-   static create(config: ServiceProviderConfig): SpeechToTextProvider {
-     const provider = config.speechToText?.provider || 'whisper';
-
-     switch (provider) {
-       case 'whisper':
-         return new WhisperSpeechToTextService(config.speechToText);
-       default:
-         throw new Error(`Unsupported speech-to-text provider: ${provider}`);
-     }
-   }
- }
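A short sketch of the intended chain from recorder output to transcription (the filename and language code are illustrative; the Whisper implementation expects a File, which also satisfies the Blob parameter of the interface):

import { SpeechToTextServiceFactory } from './services/speech-to-text.service';
import { ServiceProviderConfig } from './types/service-providers.types';

async function transcribeRecording(audioBlob: Blob, config: ServiceProviderConfig): Promise<string> {
  const stt = SpeechToTextServiceFactory.create(config);
  // Wrap the recorded Blob in a File so the multipart upload carries a filename.
  const file = new File([audioBlob], 'recording.webm', { type: 'audio/webm' });
  return stt.transcribe(file, 'en');
}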
package/src/types/form-schema.types.ts DELETED
@@ -1,78 +0,0 @@
- export interface FormField {
-   type: 'string' | 'number' | 'date' | 'boolean' | 'select' | 'header' | 'dbpicker';
-   title: string;
-   description?: string;
-   required?: boolean;
-   default?: any;
-   options?: string[]; // For select fields
-   pattern?: string; // For validation
-   readonly?: boolean;
-   min?: number;
-   max?: number;
- }
- export interface FormFieldExtended extends FormField {
-   realType?: string;
-   pickerOptions?: any[];
-
-   ID?: string;
-   TFie_PK_ID?: string;
-   TVal_PK_ID?: string;
-   TFIn_PK_ID?: string;
-   Enabled?: boolean;
-   isForm?: string;
-   LabelText?: string;
-   SystemName?: string;
-   HelpText?: string;
-   CssStyle?: string;
-   ControlType?: string;
-   ValidationRequired?: string;
-   ValidationMin?: string;
-   ValidationMax?: string;
-   Mask?: string;
-   TypeId?: string;
-   Unit?: string;
-   TFie_Fullpath?: string;
-   TVal_FK_Parent_ID?: string;
-   value?: any;
- }
-
- export interface FormSchema {
-   schema: {
-     [fieldName: string]: FormField;
-   };
-   title?: string;
-   description?: string;
- }
-
- export interface FormSchemaFieldsOnly {
-   fields: {
-     [fieldName: string]: FormField;
-   };
- }
-
- export interface FormSchemaExtended {
-   schema: {
-     [fieldName: string]: FormFieldExtended;
-   };
-   title?: string;
-   description?: string;
- }
-
-
- export interface FormSchemaFieldsOnlyExtended {
-   fields: {
-     [fieldName: string]: FormFieldExtended;
-   };
- }
-
- export interface FilledFormData {
-   [fieldName: string]: any;
- }
-
- export interface VoiceFormRecorderResult {
-   success: boolean;
-   data?: FilledFormData;
-   error?: string;
-   transcription?: string;
-   jsonForm?: string;
- }
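For illustration, a minimal FormSchema instance that satisfies these interfaces (the field names, titles, and options are invented for the example):

import { FormSchema } from './types/form-schema.types';

const exampleSchema: FormSchema = {
  title: 'Field observation',
  schema: {
    species: { type: 'select', title: 'Species', required: true, options: ['Houbara bustard', 'Falcon'] },
    count: { type: 'number', title: 'Individuals counted', min: 0, max: 500 },
    ringed: { type: 'boolean', title: 'Ringed individual observed' },
    notes: { type: 'string', title: 'Notes', description: 'Free-text remarks' },
  },
};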
package/src/types/service-providers.types.ts DELETED
@@ -1,22 +0,0 @@
- export interface SpeechToTextProvider {
-   transcribe(audioBlob: Blob, lang: string): Promise<string>;
- }
-
- export interface LLMProvider {
-   fillFormFromTranscription(transcription: string, schema: any): Promise<any>;
-   fillFormFromJson(json: string, schema: any): Promise<any>;
- }
-
- export interface ServiceProviderConfig {
-   speechToText?: {
-     provider: 'whisper' | 'custom';
-     apiKey?: string;
-     baseUrl?: string;
-   };
-   llm?: {
-     provider: 'openai' | 'custom';
-     apiKey?: string;
-     model?: string;
-     baseUrl?: string;
-   };
- }
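And a combined ServiceProviderConfig showing how both providers would be configured together (the values are placeholders; both services fall back to https://api.openai.com/v1 when baseUrl is omitted):

import { ServiceProviderConfig } from './types/service-providers.types';

const providerConfig: ServiceProviderConfig = {
  speechToText: { provider: 'whisper', apiKey: '<injected-at-runtime>' },
  llm: { provider: 'openai', apiKey: '<injected-at-runtime>', model: 'gpt-4.1' },
};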