web-speech-cognitive-services 8.0.0-main.5903868 → 8.0.0-main.6cbf0fb
This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- package/dist/web-speech-cognitive-services.d.mts +217 -65
- package/dist/web-speech-cognitive-services.d.ts +217 -65
- package/dist/web-speech-cognitive-services.development.js +240 -199
- package/dist/web-speech-cognitive-services.development.js.map +1 -1
- package/dist/web-speech-cognitive-services.js +238 -194
- package/dist/web-speech-cognitive-services.js.map +1 -1
- package/dist/web-speech-cognitive-services.mjs +238 -194
- package/dist/web-speech-cognitive-services.mjs.map +1 -1
- package/dist/web-speech-cognitive-services.production.min.js +12 -12
- package/dist/web-speech-cognitive-services.production.min.js.map +1 -1
- package/package.json +2 -2

@@ -1,4 +1,5 @@
 import * as memoize_one from 'memoize-one';
+import { AudioConfig, SpeechRecognizer } from 'microsoft-cognitiveservices-speech-sdk';
 
 declare class SpeechSynthesisUtterance {
     constructor(text: any);
@@ -47,7 +48,7 @@ declare class SpeechSynthesisEvent {
     constructor(type: any);
 }
 
-declare class _default$5 {
+declare class _default$2 {
     constructor(audioContext: any);
     audioContext: any;
     pause(): void;
@@ -57,12 +58,12 @@ declare class _default$5 {
     stop(): void;
 }
 
-declare class _default$4 {
+declare class _default$1 {
     constructor({ audioContext, ponyfill }: {
         audioContext: any;
         ponyfill: any;
     });
-    consumer: _default$
+    consumer: _default$2 | null;
     paused: boolean;
     queue: any[];
     getAudioContext: memoize_one.MemoizedFn<() => any>;
@@ -74,14 +75,7 @@ declare class _default$4 {
     stop(): void;
 }
 
-
-    #private;
-    constructor(eventTarget: EventTarget);
-    getProperty(name: T): ((event: EventMap[typeof name]) => void) | undefined;
-    setProperty(name: T, value: ((event: EventMap[typeof name]) => void) | undefined): void;
-}
-
-type SpeechRecognitionErrorType = 'aborted' | 'audio-capture' | 'bad-grammar' | 'language-not-supported' | 'network' | 'no-speech' | 'not-allowed' | 'service-not-allowed';
+type SpeechRecognitionErrorType = 'aborted' | 'audio-capture' | 'bad-grammar' | 'language-not-supported' | 'network' | 'no-speech' | 'not-allowed' | 'service-not-allowed' | 'unknown';
 type SpeechRecognitionErrorEventInit = {
     error: SpeechRecognitionErrorType;
     message?: string | undefined;
@@ -91,6 +85,7 @@ declare class SpeechRecognitionErrorEvent extends Event {
     constructor(type: 'error', { error, message }: SpeechRecognitionErrorEventInit);
     get error(): SpeechRecognitionErrorType;
     get message(): string | undefined;
+    get type(): 'error';
 }
 
 interface FakeArrayInterface<T> {
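
The error union now includes an 'unknown' member, and the error event declares a literal type getter. SpeechRecognitionErrorType itself is not exported, so the sketch below restates the union locally; the helper name is illustrative only.

    // A local restatement of the widened union declared above; 'unknown' is the new member.
    type RecognitionErrorType =
        | 'aborted' | 'audio-capture' | 'bad-grammar' | 'language-not-supported'
        | 'network' | 'no-speech' | 'not-allowed' | 'service-not-allowed'
        | 'unknown';

    // Exhaustive handling of recognition errors now needs an extra branch.
    function describeRecognitionError(error: RecognitionErrorType, message?: string): string {
        return error === 'unknown'
            ? (message ?? 'Unspecified Speech Services failure')
            : `Speech recognition failed: ${error}`;
    }
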
@@ -143,6 +138,16 @@ declare class SpeechRecognitionEvent<T extends 'audioend' | 'audiostart' | 'cogn
     get data(): unknown;
     get resultIndex(): number | undefined;
     get results(): SpeechRecognitionResultList;
+    get type(): T;
+}
+
+declare class EventListenerMap<T extends string, EventMap extends {
+    [Name in T]: unknown;
+}> {
+    #private;
+    constructor(eventTarget: EventTarget);
+    getProperty<U extends T>(name: U): ((event: EventMap[U]) => void) | undefined;
+    setProperty<U extends T>(name: U, value: ((event: EventMap[U]) => void) | undefined): void;
 }
 
 type SpeechRecognitionEventListenerMap = EventListenerMap<'audioend' | 'audiostart' | 'cognitiveservices' | 'end' | 'error' | 'result' | 'soundend' | 'soundstart' | 'speechend' | 'speechstart' | 'start', {
@@ -159,73 +164,142 @@ type SpeechRecognitionEventListenerMap = EventListenerMap<'audioend' | 'audiosta
     start: SpeechRecognitionEvent<'start'>;
 }>;
 
-declare class
-
+declare class SpeechGrammarList {
+    #private;
+    constructor();
     addFromString(): void;
-
-
+    get phrases(): readonly string[];
+    set phrases(value: readonly string[]);
 }
 
-
-
-
-
-
-
-})
-
+type Credentials = Readonly<({
+    authorizationToken: string;
+    subscriptionKey?: undefined;
+} | {
+    authorizationToken?: undefined;
+    subscriptionKey: string;
+}) & ({
+    customVoiceHostname?: undefined;
+    region: string;
+    speechRecognitionHostname?: undefined;
+    speechSynthesisHostname?: undefined;
+} | {
+    customVoiceHostname: string;
+    region?: undefined;
+    speechRecognitionHostname: string;
+    speechSynthesisHostname: string;
+})>;
+type PatchOptionsInit = {
+    audioConfig: AudioConfig;
+    credentials?: (() => Credentials | Promise<Credentials>) | Credentials | Promise<Credentials>;
+    enableTelemetry: boolean;
+    looseEvent?: boolean | undefined;
+    looseEvents?: boolean | undefined;
+    referenceGrammars?: readonly string[] | undefined;
+    region?: string | undefined;
+    speechRecognitionEndpointId: string;
+    textNormalization: 'display' | 'itn' | 'lexical' | 'maskeditn';
+} & ({
+    authorizationToken: string;
+    subscriptionKey?: undefined;
+} | {
+    authorizationToken?: undefined;
+    subscriptionKey: string;
+});
+
+declare function createSpeechRecognitionPonyfill(options: PatchOptionsInit): {
+    SpeechGrammarList: typeof SpeechGrammarList;
     SpeechRecognition: {
         new (): {
-
-
-
-
-
-
-
-
+            "__#8@#continuous": boolean;
+            "__#8@#eventListenerMap": SpeechRecognitionEventListenerMap;
+            "__#8@#grammars": SpeechGrammarList;
+            "__#8@#interimResults": boolean;
+            "__#8@#lang": string;
+            "__#8@#maxAlternatives": number;
+            emitCognitiveServices<T extends {
+                type: string;
+            }>(type: string, event: T): void;
             continuous: boolean;
-            grammars:
+            grammars: SpeechGrammarList;
+            interimResults: boolean;
+            maxAlternatives: number;
+            lang: string;
+            get onaudioend(): ((event: SpeechRecognitionEvent<"audioend">) => void) | undefined;
+            set onaudioend(value: ((event: SpeechRecognitionEvent<"audioend">) => void) | undefined);
+            onaudiostart: ((event: SpeechRecognitionEvent<"audiostart">) => void) | undefined;
+            oncognitiveservices: ((event: SpeechRecognitionEvent<"cognitiveservices">) => void) | undefined;
+            onend: ((event: SpeechRecognitionEvent<"end">) => void) | undefined;
+            onerror: ((event: SpeechRecognitionErrorEvent) => void) | undefined;
+            onresult: ((event: SpeechRecognitionEvent<"result">) => void) | undefined;
+            onsoundend: ((event: SpeechRecognitionEvent<"soundend">) => void) | undefined;
+            onsoundstart: ((event: SpeechRecognitionEvent<"soundstart">) => void) | undefined;
+            onspeechend: ((event: SpeechRecognitionEvent<"speechend">) => void) | undefined;
+            onspeechstart: ((event: SpeechRecognitionEvent<"speechstart">) => void) | undefined;
+            onstart: ((event: SpeechRecognitionEvent<"start">) => void) | undefined;
+            abort: (() => void) | undefined;
+            stop: (() => void) | undefined;
+            start(): void;
+            _startOnce(): Promise<void>;
+            addEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions | undefined): void;
+            dispatchEvent(event: Event): boolean;
+            removeEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | EventListenerOptions | undefined): void;
+        };
+    };
+    SpeechRecognitionEvent: typeof SpeechRecognitionEvent;
+};
+
+type CreateSpeechRecognitionPonyfillFromRecognizerInit = {
+    createRecognizer: (lang: string) => Promise<SpeechRecognizer>;
+    enableTelemetry: boolean;
+    looseEvents: boolean;
+    referenceGrammars?: readonly string[] | undefined;
+    textNormalization: 'display' | 'itn' | 'lexical' | 'maskeditn';
+};
+declare function createSpeechRecognitionPonyfillFromRecognizer({ createRecognizer, enableTelemetry, looseEvents, referenceGrammars, textNormalization }: CreateSpeechRecognitionPonyfillFromRecognizerInit): {
+    SpeechGrammarList: typeof SpeechGrammarList;
+    SpeechRecognition: {
+        new (): {
+            "__#8@#continuous": boolean;
+            "__#8@#eventListenerMap": SpeechRecognitionEventListenerMap;
+            "__#8@#grammars": SpeechGrammarList;
+            "__#8@#interimResults": boolean;
+            "__#8@#lang": string;
+            "__#8@#maxAlternatives": number;
+            emitCognitiveServices<T extends {
+                type: string;
+            }>(type: string, event: T): void;
+            continuous: boolean;
+            grammars: SpeechGrammarList;
             interimResults: boolean;
             maxAlternatives: number;
             lang: string;
-            /** @type { ((event: SpeechRecognitionEvent<'audioend'>) => void) | undefined } */
             get onaudioend(): ((event: SpeechRecognitionEvent<'audioend'>) => void) | undefined;
             set onaudioend(value: ((event: SpeechRecognitionEvent<'audioend'>) => void) | undefined);
             /** @type { ((event: SpeechRecognitionEvent<'audiostart'>) => void) | undefined } */
-
-            set onaudiostart(value: ((event: SpeechRecognitionEvent<'audiostart'>) => void) | undefined);
+            onaudiostart: ((event: SpeechRecognitionEvent<"audiostart">) => void) | undefined;
             /** @type { ((event: SpeechRecognitionEvent<'cognitiveservices'>) => void) | undefined } */
-
-            set oncognitiveservices(value: ((event: SpeechRecognitionEvent<'cognitiveservices'>) => void) | undefined);
+            oncognitiveservices: ((event: SpeechRecognitionEvent<"cognitiveservices">) => void) | undefined;
             /** @type { ((event: SpeechRecognitionEvent<'end'>) => void) | undefined } */
-
-
-
-            get onerror(): ((event: SpeechRecognitionEvent<'error'>) => void) | undefined;
-            set onerror(value: ((event: SpeechRecognitionEvent<'error'>) => void) | undefined);
+            onend: ((event: SpeechRecognitionEvent<"end">) => void) | undefined;
+            /** @type { ((event: SpeechRecognitionErrorEvent) => void) | undefined } */
+            onerror: ((event: SpeechRecognitionErrorEvent) => void) | undefined;
             /** @type { ((event: SpeechRecognitionEvent<'result'>) => void) | undefined } */
-
-            set onresult(value: ((event: SpeechRecognitionEvent<'result'>) => void) | undefined);
+            onresult: ((event: SpeechRecognitionEvent<"result">) => void) | undefined;
             /** @type { ((event: SpeechRecognitionEvent<'soundend'>) => void) | undefined } */
-
-            set onsoundend(value: ((event: SpeechRecognitionEvent<'soundend'>) => void) | undefined);
+            onsoundend: ((event: SpeechRecognitionEvent<"soundend">) => void) | undefined;
             /** @type { ((event: SpeechRecognitionEvent<'soundstart'>) => void) | undefined } */
-
-            set onsoundstart(value: ((event: SpeechRecognitionEvent<'soundstart'>) => void) | undefined);
+            onsoundstart: ((event: SpeechRecognitionEvent<"soundstart">) => void) | undefined;
             /** @type { ((event: SpeechRecognitionEvent<'speechend'>) => void) | undefined } */
-
-            set onspeechend(value: ((event: SpeechRecognitionEvent<'speechend'>) => void) | undefined);
+            onspeechend: ((event: SpeechRecognitionEvent<"speechend">) => void) | undefined;
             /** @type { ((event: SpeechRecognitionEvent<'speechstart'>) => void) | undefined } */
-
-            set onspeechstart(value: ((event: SpeechRecognitionEvent<'speechstart'>) => void) | undefined);
+            onspeechstart: ((event: SpeechRecognitionEvent<"speechstart">) => void) | undefined;
             /** @type { ((event: SpeechRecognitionEvent<'start'>) => void) | undefined } */
-
-            set onstart(value: ((event: SpeechRecognitionEvent<'start'>) => void) | undefined);
-            start(): void;
-            _startOnce(): Promise<void>;
+            onstart: ((event: SpeechRecognitionEvent<"start">) => void) | undefined;
             abort: (() => void) | undefined;
             stop: (() => void) | undefined;
+            start(): void;
+            _startOnce(): Promise<void>;
             addEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions | undefined): void;
             dispatchEvent(event: Event): boolean;
             removeEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | EventListenerOptions | undefined): void;
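
createSpeechRecognitionPonyfillFromRecognizer and its init object are now part of the typed surface, and SpeechGrammarList gains a phrases accessor pair. Below is a usage sketch written against these declarations only; the Speech SDK calls inside createRecognizer, the region, the key, and the chosen option values are placeholders, not requirements of this release.

    import { AudioConfig, SpeechConfig, SpeechRecognizer } from 'microsoft-cognitiveservices-speech-sdk';
    import { createSpeechRecognitionPonyfillFromRecognizer } from 'web-speech-cognitive-services';

    const { SpeechRecognition } = createSpeechRecognitionPonyfillFromRecognizer({
        // Build a Speech SDK recognizer for the requested language.
        createRecognizer: async (lang: string) => {
            const speechConfig = SpeechConfig.fromSubscription('YOUR_SUBSCRIPTION_KEY', 'westus');

            speechConfig.speechRecognitionLanguage = lang;

            return new SpeechRecognizer(speechConfig, AudioConfig.fromDefaultMicrophoneInput());
        },
        enableTelemetry: false,
        looseEvents: false,
        textNormalization: 'display'
    });

    const recognition = new SpeechRecognition();

    recognition.lang = 'en-US';
    recognition.grammars.phrases = ['Bellevue', 'Kirkland', 'Redmond']; // the new phrases setter
    recognition.onresult = event => console.log(event.results);
    recognition.onerror = event => console.error(event.error, event.message);
    recognition.start();
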
@@ -233,9 +307,8 @@ declare function createSpeechRecognitionPonyfillFromRecognize
     };
     SpeechRecognitionEvent: typeof SpeechRecognitionEvent;
 };
-declare function _default$2(options: any): {};
 
-declare function _default$1(options: any): {
+declare function _default(options: any): {
     speechSynthesis?: never;
     SpeechSynthesisEvent?: never;
     SpeechSynthesisUtterance?: never;
@@ -255,18 +328,58 @@ declare function _default$1(options: any): {
     SpeechSynthesisUtterance: typeof SpeechSynthesisUtterance;
 };
 
-
-    region:
-    subscriptionKey:
-}
+type FetchAuthorizationTokenInit = {
+    region: string;
+    subscriptionKey: string;
+};
+declare function fetchAuthorizationToken({ region, subscriptionKey }: FetchAuthorizationTokenInit): Promise<string>;
 
-declare function createSpeechServicesPonyfill(options?:
+declare function createSpeechServicesPonyfill(options?: any): {
     speechSynthesis?: never;
     SpeechSynthesisEvent?: never;
     SpeechSynthesisUtterance?: never;
+    SpeechGrammarList: typeof SpeechGrammarList;
+    SpeechRecognition: {
+        new (): {
+            "__#8@#continuous": boolean;
+            "__#8@#eventListenerMap": SpeechRecognitionEventListenerMap;
+            "__#8@#grammars": SpeechGrammarList;
+            "__#8@#interimResults": boolean;
+            "__#8@#lang": string;
+            "__#8@#maxAlternatives": number;
+            emitCognitiveServices<T extends {
+                type: string;
+            }>(type: string, event: T): void;
+            continuous: boolean;
+            grammars: SpeechGrammarList;
+            interimResults: boolean;
+            maxAlternatives: number;
+            lang: string;
+            get onaudioend(): ((event: SpeechRecognitionEvent<"audioend">) => void) | undefined;
+            set onaudioend(value: ((event: SpeechRecognitionEvent<"audioend">) => void) | undefined);
+            onaudiostart: ((event: SpeechRecognitionEvent<"audiostart">) => void) | undefined;
+            oncognitiveservices: ((event: SpeechRecognitionEvent<"cognitiveservices">) => void) | undefined;
+            onend: ((event: SpeechRecognitionEvent<"end">) => void) | undefined;
+            onerror: ((event: SpeechRecognitionErrorEvent) => void) | undefined;
+            onresult: ((event: SpeechRecognitionEvent<"result">) => void) | undefined;
+            onsoundend: ((event: SpeechRecognitionEvent<"soundend">) => void) | undefined;
+            onsoundstart: ((event: SpeechRecognitionEvent<"soundstart">) => void) | undefined;
+            onspeechend: ((event: SpeechRecognitionEvent<"speechend">) => void) | undefined;
+            onspeechstart: ((event: SpeechRecognitionEvent<"speechstart">) => void) | undefined;
+            onstart: ((event: SpeechRecognitionEvent<"start">) => void) | undefined;
+            abort: (() => void) | undefined;
+            stop: (() => void) | undefined;
+            start(): void;
+            _startOnce(): Promise<void>;
+            addEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions | undefined): void;
+            dispatchEvent(event: Event): boolean;
+            removeEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | EventListenerOptions | undefined): void;
+        };
+    };
+    SpeechRecognitionEvent: typeof SpeechRecognitionEvent;
 } | {
     speechSynthesis: {
-        queue: _default$
+        queue: _default$1;
         cancel(): void;
         getVoices(): any[];
         onvoiceschanged: any;
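
fetchAuthorizationToken now takes a typed init object and resolves to the token string, which pairs naturally with the credentials callback accepted by createSpeechServicesPonyfill (whose options parameter remains typed as any). A minimal sketch follows; the region and subscription key are placeholders, and exchanging the key for a token server-side is an assumption about typical usage, not something this diff prescribes.

    import { createSpeechServicesPonyfill, fetchAuthorizationToken } from 'web-speech-cognitive-services';

    const { SpeechRecognition } = createSpeechServicesPonyfill({
        // Called by the ponyfill when it needs credentials.
        credentials: async () => ({
            region: 'westus',
            authorizationToken: await fetchAuthorizationToken({
                region: 'westus',
                subscriptionKey: 'YOUR_SUBSCRIPTION_KEY'
            })
        })
    });

    const recognition = new SpeechRecognition();

    recognition.onend = () => console.log('Recognition ended.');
    recognition.start();
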
@@ -278,6 +391,45 @@ declare function createSpeechServicesPonyfill(options?: {}, ...args: any[]): {
     };
     SpeechSynthesisEvent: typeof SpeechSynthesisEvent;
     SpeechSynthesisUtterance: typeof SpeechSynthesisUtterance;
+    SpeechGrammarList: typeof SpeechGrammarList;
+    SpeechRecognition: {
+        new (): {
+            "__#8@#continuous": boolean;
+            "__#8@#eventListenerMap": SpeechRecognitionEventListenerMap;
+            "__#8@#grammars": SpeechGrammarList;
+            "__#8@#interimResults": boolean;
+            "__#8@#lang": string;
+            "__#8@#maxAlternatives": number;
+            emitCognitiveServices<T extends {
+                type: string;
+            }>(type: string, event: T): void;
+            continuous: boolean;
+            grammars: SpeechGrammarList;
+            interimResults: boolean;
+            maxAlternatives: number;
+            lang: string;
+            get onaudioend(): ((event: SpeechRecognitionEvent<"audioend">) => void) | undefined;
+            set onaudioend(value: ((event: SpeechRecognitionEvent<"audioend">) => void) | undefined);
+            onaudiostart: ((event: SpeechRecognitionEvent<"audiostart">) => void) | undefined;
+            oncognitiveservices: ((event: SpeechRecognitionEvent<"cognitiveservices">) => void) | undefined;
+            onend: ((event: SpeechRecognitionEvent<"end">) => void) | undefined;
+            onerror: ((event: SpeechRecognitionErrorEvent) => void) | undefined;
+            onresult: ((event: SpeechRecognitionEvent<"result">) => void) | undefined;
+            onsoundend: ((event: SpeechRecognitionEvent<"soundend">) => void) | undefined;
+            onsoundstart: ((event: SpeechRecognitionEvent<"soundstart">) => void) | undefined;
+            onspeechend: ((event: SpeechRecognitionEvent<"speechend">) => void) | undefined;
+            onspeechstart: ((event: SpeechRecognitionEvent<"speechstart">) => void) | undefined;
+            onstart: ((event: SpeechRecognitionEvent<"start">) => void) | undefined;
+            abort: (() => void) | undefined;
+            stop: (() => void) | undefined;
+            start(): void;
+            _startOnce(): Promise<void>;
+            addEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions | undefined): void;
+            dispatchEvent(event: Event): boolean;
+            removeEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | EventListenerOptions | undefined): void;
+        };
+    };
+    SpeechRecognitionEvent: typeof SpeechRecognitionEvent;
 };
 
-export {
+export { createSpeechRecognitionPonyfill, createSpeechRecognitionPonyfillFromRecognizer, createSpeechServicesPonyfill, _default as createSpeechSynthesisPonyfill, fetchAuthorizationToken };
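
The bundle now lists its entry points as named exports, with the internal _default aliased to createSpeechSynthesisPonyfill. A short sketch of importing the synthesis ponyfill under its exported name; the region and key are placeholders, and speak() lives on the part of the speechSynthesis surface not excerpted in the hunks above.

    import { createSpeechSynthesisPonyfill } from 'web-speech-cognitive-services';

    const { speechSynthesis, SpeechSynthesisUtterance } = createSpeechSynthesisPonyfill({
        credentials: { region: 'westus', subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' }
    });

    // The declared return type is a union whose other branch types these members
    // as `never`, so narrow before use.
    if (speechSynthesis && SpeechSynthesisUtterance) {
        console.log(speechSynthesis.getVoices());
        speechSynthesis.speak(new SpeechSynthesisUtterance('Hello, world!'));
    }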