web-speech-cognitive-services 8.0.0-main.d9ef940 → 8.0.0
This diff compares the publicly released contents of the two package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/web-speech-cognitive-services.d.mts +151 -139
- package/dist/web-speech-cognitive-services.d.ts +151 -139
- package/dist/web-speech-cognitive-services.development.js +16 -11
- package/dist/web-speech-cognitive-services.development.js.map +1 -1
- package/dist/web-speech-cognitive-services.js +17 -12
- package/dist/web-speech-cognitive-services.js.map +1 -1
- package/dist/web-speech-cognitive-services.mjs +17 -12
- package/dist/web-speech-cognitive-services.mjs.map +1 -1
- package/dist/web-speech-cognitive-services.production.min.js +11 -11
- package/dist/web-speech-cognitive-services.production.min.js.map +1 -1
- package/package.json +2 -2
--- package/dist/web-speech-cognitive-services.d.mts (8.0.0-main.d9ef940)
+++ package/dist/web-speech-cognitive-services.d.mts (8.0.0)

@@ -1,125 +1,5 @@
-import * as memoize_one from 'memoize-one';
 import { AudioConfig, SpeechRecognizer } from 'microsoft-cognitiveservices-speech-sdk';
 
-declare class SpeechSynthesisUtterance {
-    constructor(text: any);
-    _lang: any;
-    _pitch: number;
-    _rate: number;
-    _voice: any;
-    _volume: number;
-    text: any;
-    set onboundary(value: any);
-    get onboundary(): any;
-    set onend(value: any);
-    get onend(): any;
-    set onerror(value: any);
-    get onerror(): any;
-    set onmark(value: any);
-    get onmark(): any;
-    set onpause(value: any);
-    get onpause(): any;
-    set onresume(value: any);
-    get onresume(): any;
-    set onstart(value: any);
-    get onstart(): any;
-    set lang(value: any);
-    get lang(): any;
-    set pitch(value: number);
-    get pitch(): number;
-    set rate(value: number);
-    get rate(): number;
-    set voice(value: any);
-    get voice(): any;
-    set volume(value: number);
-    get volume(): number;
-    preload({ deploymentId, fetchCredentials, outputFormat }: {
-        deploymentId: any;
-        fetchCredentials: any;
-        outputFormat: any;
-    }): void;
-    arrayBufferPromise: Promise<ArrayBuffer> | undefined;
-    play(audioContext: any): Promise<void>;
-    _playingSource: any;
-    stop(): void;
-}
-
-declare class SpeechSynthesisEvent {
-    constructor(type: any);
-}
-
-declare class _default$2 {
-    constructor(audioContext: any);
-    audioContext: any;
-    pause(): void;
-    resume(): void;
-    start(queue: any): Promise<void>;
-    playingUtterance: any;
-    stop(): void;
-}
-
-declare class _default$1 {
-    constructor({ audioContext, ponyfill }: {
-        audioContext: any;
-        ponyfill: any;
-    });
-    consumer: _default$2 | null;
-    paused: boolean;
-    queue: any[];
-    getAudioContext: memoize_one.MemoizedFn<() => any>;
-    pause(): void;
-    push(utterance: any): void;
-    resume(): void;
-    get speaking(): boolean;
-    startConsumer(): Promise<void>;
-    stop(): void;
-}
-
-type Credentials = Readonly<({
-    authorizationToken: string;
-    subscriptionKey?: undefined;
-} | {
-    authorizationToken?: undefined;
-    subscriptionKey: string;
-}) & ({
-    customVoiceHostname?: undefined;
-    region: string;
-    speechRecognitionHostname?: undefined;
-    speechSynthesisHostname?: undefined;
-} | {
-    customVoiceHostname: string;
-    region?: undefined;
-    speechRecognitionHostname: string;
-    speechSynthesisHostname: string;
-})>;
-type PatchOptionsInit = {
-    audioConfig: AudioConfig;
-    credentials?: (() => Credentials | Promise<Credentials>) | Credentials | Promise<Credentials>;
-    enableTelemetry: boolean;
-    looseEvent?: boolean | undefined;
-    looseEvents?: boolean | undefined;
-    referenceGrammars?: readonly string[] | undefined;
-    region?: string | undefined;
-    speechRecognitionEndpointId: string;
-    textNormalization: 'display' | 'itn' | 'lexical' | 'maskeditn';
-} & ({
-    authorizationToken: string;
-    subscriptionKey?: undefined;
-} | {
-    authorizationToken?: undefined;
-    subscriptionKey: string;
-});
-
-declare function createSpeechRecognitionPonyfill(options: PatchOptionsInit): {};
-
-declare class SpeechGrammarList {
-    #private;
-    constructor();
-    addFromString(): void;
-    get phrases(): readonly string[];
-    set phrases(value: readonly string[]);
-}
-
 type SpeechRecognitionErrorType = 'aborted' | 'audio-capture' | 'bad-grammar' | 'language-not-supported' | 'network' | 'no-speech' | 'not-allowed' | 'service-not-allowed' | 'unknown';
 type SpeechRecognitionErrorEventInit = {
     error: SpeechRecognitionErrorType;
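The declarations removed above are not dropped from the package; they are re-emitted further down the rolled-up file in the hunks that follow. For orientation, the `Credentials` union pairs one credential kind (authorization token or subscription key) with either a region or a full set of hostnames. A minimal sketch of the two accepted shapes; every value below is a placeholder:

```ts
// Region-based credentials: a region plus exactly one credential kind.
const regionBased = {
  region: 'westus2',
  subscriptionKey: 'YOUR_SUBSCRIPTION_KEY'
};

// Hostname-based credentials: all three hostnames and no region.
const hostnameBased = {
  customVoiceHostname: 'example.voice.speech.microsoft.com',
  speechRecognitionHostname: 'example.stt.speech.microsoft.com',
  speechSynthesisHostname: 'example.tts.speech.microsoft.com',
  authorizationToken: 'YOUR_AUTHORIZATION_TOKEN'
};
```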
@@ -209,6 +89,109 @@ type SpeechRecognitionEventListenerMap = EventListenerMap<'audioend' | 'audiosta
     start: SpeechRecognitionEvent<'start'>;
 }>;
 
+interface W3CSpeechGrammar {
+    src: string;
+    weight: number;
+}
+interface W3CSpeechGrammarList {
+    readonly length: number;
+    addFromString(string: string, weight?: number): void;
+    addFromURI(src: string, weight?: number): void;
+    item(index: number): W3CSpeechGrammar;
+    [index: number]: W3CSpeechGrammar;
+}
+declare class SpeechGrammarList implements W3CSpeechGrammarList {
+    #private;
+    constructor();
+    addFromString(): void;
+    addFromURI(): void;
+    item(): W3CSpeechGrammar;
+    get length(): number;
+    [index: number]: {
+        src: string;
+        weight: number;
+    };
+    get phrases(): readonly string[];
+    set phrases(value: readonly string[]);
+}
+
+type Credentials = Readonly<({
+    authorizationToken: string;
+    subscriptionKey?: undefined;
+} | {
+    authorizationToken?: undefined;
+    subscriptionKey: string;
+}) & ({
+    customVoiceHostname?: undefined;
+    region: string;
+    speechRecognitionHostname?: undefined;
+    speechSynthesisHostname?: undefined;
+} | {
+    customVoiceHostname: string;
+    region?: undefined;
+    speechRecognitionHostname: string;
+    speechSynthesisHostname: string;
+})>;
+type PatchOptionsInit = {
+    audioConfig: AudioConfig;
+    credentials?: (() => Credentials | Promise<Credentials>) | Credentials | Promise<Credentials>;
+    enableTelemetry: boolean;
+    looseEvent?: boolean | undefined;
+    looseEvents?: boolean | undefined;
+    referenceGrammars?: readonly string[] | undefined;
+    region?: string | undefined;
+    speechRecognitionEndpointId: string;
+    textNormalization: 'display' | 'itn' | 'lexical' | 'maskeditn';
+} & ({
+    authorizationToken: string;
+    subscriptionKey?: undefined;
+} | {
+    authorizationToken?: undefined;
+    subscriptionKey: string;
+});
+
+declare function createSpeechRecognitionPonyfill(options: PatchOptionsInit): {
+    SpeechGrammarList: typeof SpeechGrammarList;
+    SpeechRecognition: {
+        new (): {
+            "__#8@#continuous": boolean;
+            "__#8@#eventListenerMap": SpeechRecognitionEventListenerMap;
+            "__#8@#grammars": SpeechGrammarList;
+            "__#8@#interimResults": boolean;
+            "__#8@#lang": string;
+            "__#8@#maxAlternatives": number;
+            emitCognitiveServices<T extends {
+                type: string;
+            }>(type: string, event: T): void;
+            continuous: boolean;
+            grammars: SpeechGrammarList;
+            interimResults: boolean;
+            maxAlternatives: number;
+            lang: string;
+            get onaudioend(): ((event: SpeechRecognitionEvent<"audioend">) => void) | undefined;
+            set onaudioend(value: ((event: SpeechRecognitionEvent<"audioend">) => void) | undefined);
+            onaudiostart: ((event: SpeechRecognitionEvent<"audiostart">) => void) | undefined;
+            oncognitiveservices: ((event: SpeechRecognitionEvent<"cognitiveservices">) => void) | undefined;
+            onend: ((event: SpeechRecognitionEvent<"end">) => void) | undefined;
+            onerror: ((event: SpeechRecognitionErrorEvent) => void) | undefined;
+            onresult: ((event: SpeechRecognitionEvent<"result">) => void) | undefined;
+            onsoundend: ((event: SpeechRecognitionEvent<"soundend">) => void) | undefined;
+            onsoundstart: ((event: SpeechRecognitionEvent<"soundstart">) => void) | undefined;
+            onspeechend: ((event: SpeechRecognitionEvent<"speechend">) => void) | undefined;
+            onspeechstart: ((event: SpeechRecognitionEvent<"speechstart">) => void) | undefined;
+            onstart: ((event: SpeechRecognitionEvent<"start">) => void) | undefined;
+            abort: (() => void) | undefined;
+            stop: (() => void) | undefined;
+            start(): void;
+            _startOnce(): Promise<void>;
+            addEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions | undefined): void;
+            dispatchEvent(event: Event): boolean;
+            removeEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | EventListenerOptions | undefined): void;
+        };
+    };
+    SpeechRecognitionEvent: typeof SpeechRecognitionEvent;
+};
+
 type CreateSpeechRecognitionPonyfillFromRecognizerInit = {
     createRecognizer: (lang: string) => Promise<SpeechRecognizer>;
     enableTelemetry: boolean;
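The practical effect of this hunk: `createSpeechRecognitionPonyfill` was previously typed as returning `{}`, so every member access needed a cast; it now returns a fully typed ponyfill. A hedged usage sketch against these typings, with placeholder region, key, and endpoint values:

```ts
import { AudioConfig } from 'microsoft-cognitiveservices-speech-sdk';
import { createSpeechRecognitionPonyfill } from 'web-speech-cognitive-services';

const { SpeechRecognition } = createSpeechRecognitionPonyfill({
  audioConfig: AudioConfig.fromDefaultMicrophoneInput(),
  credentials: { region: 'westus2', subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' },
  enableTelemetry: false,
  speechRecognitionEndpointId: 'YOUR_ENDPOINT_ID',
  subscriptionKey: 'YOUR_SUBSCRIPTION_KEY',
  textNormalization: 'display'
});

const recognition = new SpeechRecognition();

recognition.lang = 'en-US';
recognition.interimResults = true;

// `onresult` is now checked against SpeechRecognitionEvent<'result'>.
recognition.onresult = event => console.log(event);

recognition.start();
```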
@@ -268,6 +251,53 @@ declare function createSpeechRecognitionPonyfillFromRecognizer({ createRecognize
     SpeechRecognitionEvent: typeof SpeechRecognitionEvent;
 };
 
+declare class SpeechSynthesisEvent {
+    constructor(type: any);
+}
+
+declare class SpeechSynthesisUtterance {
+    constructor(text: any);
+    _lang: any;
+    _pitch: number;
+    _rate: number;
+    _voice: any;
+    _volume: number;
+    text: any;
+    set onboundary(value: any);
+    get onboundary(): any;
+    set onend(value: any);
+    get onend(): any;
+    set onerror(value: any);
+    get onerror(): any;
+    set onmark(value: any);
+    get onmark(): any;
+    set onpause(value: any);
+    get onpause(): any;
+    set onresume(value: any);
+    get onresume(): any;
+    set onstart(value: any);
+    get onstart(): any;
+    set lang(value: any);
+    get lang(): any;
+    set pitch(value: number);
+    get pitch(): number;
+    set rate(value: number);
+    get rate(): number;
+    set voice(value: any);
+    get voice(): any;
+    set volume(value: number);
+    get volume(): number;
+    preload({ deploymentId, fetchCredentials, outputFormat }: {
+        deploymentId: any;
+        fetchCredentials: any;
+        outputFormat: any;
+    }): void;
+    arrayBufferPromise: Promise<ArrayBuffer> | undefined;
+    play(audioContext: any): Promise<void>;
+    _playingSource: any;
+    stop(): void;
+}
+
 declare function _default(options: any): {
     speechSynthesis?: never;
     SpeechSynthesisEvent?: never;
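These are the same speech synthesis declarations that hunk 1 removed from the top of the file, re-emitted here ahead of `_default` (exported below as `createSpeechSynthesisPonyfill`). A minimal consumption sketch with placeholder credentials; since the declared return type is a union whose first branch has no `speechSynthesis`, a runtime check is prudent:

```ts
import { createSpeechSynthesisPonyfill } from 'web-speech-cognitive-services';

const ponyfill = createSpeechSynthesisPonyfill({
  credentials: { region: 'westus2', subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' }
});

if (ponyfill.speechSynthesis) {
  const { speechSynthesis, SpeechSynthesisUtterance } = ponyfill;
  const utterance = new SpeechSynthesisUtterance('Hello, world!');

  utterance.onend = () => console.log('Done speaking.');
  speechSynthesis.speak(utterance);
}
```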
@@ -294,24 +324,6 @@ type FetchAuthorizationTokenInit = {
 };
 declare function fetchAuthorizationToken({ region, subscriptionKey }: FetchAuthorizationTokenInit): Promise<string>;
 
-declare function createSpeechServicesPonyfill(options?: any): {
-    speechSynthesis?: never;
-    SpeechSynthesisEvent?: never;
-    SpeechSynthesisUtterance?: never;
-} | {
-    speechSynthesis: {
-        queue: _default$1;
-        cancel(): void;
-        getVoices(): any[];
-        onvoiceschanged: any;
-        pause(): void;
-        resume(): void;
-        speak(utterance: any): Promise<any>;
-        readonly speaking: boolean;
-        updateVoices(): Promise<void>;
-    };
-    SpeechSynthesisEvent: typeof SpeechSynthesisEvent;
-    SpeechSynthesisUtterance: typeof SpeechSynthesisUtterance;
-};
+declare function createSpeechServicesPonyfill(options?: any): any;
 
 export { createSpeechRecognitionPonyfill, createSpeechRecognitionPonyfillFromRecognizer, createSpeechServicesPonyfill, _default as createSpeechSynthesisPonyfill, fetchAuthorizationToken };
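`createSpeechServicesPonyfill` trades its detailed (but speech-synthesis-only) return type for `any`; at runtime it merges the recognition and synthesis ponyfills, as the bundle diff below shows. A sketch pairing it with `fetchAuthorizationToken`, where region and key are placeholders:

```ts
import {
  createSpeechServicesPonyfill,
  fetchAuthorizationToken
} from 'web-speech-cognitive-services';

async function createPonyfill(region: string, subscriptionKey: string) {
  // Exchange the subscription key for a short-lived authorization token.
  const authorizationToken = await fetchAuthorizationToken({ region, subscriptionKey });

  // The merged ponyfill exposes both recognition and synthesis members.
  return createSpeechServicesPonyfill({
    credentials: { authorizationToken, region }
  });
}
```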
--- package/dist/web-speech-cognitive-services.d.ts (8.0.0-main.d9ef940)
+++ package/dist/web-speech-cognitive-services.d.ts (8.0.0)

The CommonJS typings diff is identical, hunk for hunk, to the .d.mts diff above (+151 -139).
--- package/dist/web-speech-cognitive-services.development.js (8.0.0-main.d9ef940)
+++ package/dist/web-speech-cognitive-services.development.js (8.0.0)

@@ -23755,6 +23755,15 @@
   addFromString() {
     throw new Error("JSGF is not supported");
   }
+  addFromURI() {
+    throw new Error("JSGF is not supported");
+  }
+  item() {
+    throw new Error("JSGF is not supported");
+  }
+  get length() {
+    throw new Error("JSGF is not supported");
+  }
   #phrases;
   get phrases() {
     return this.#phrases;
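With these stubs, the runtime `SpeechGrammarList` now covers every member of the W3C interface it declares; like `addFromString`, the new members throw rather than being absent. A sketch of what a caller should expect (credentials are placeholders and the options are cast loosely for brevity):

```ts
import { createSpeechRecognitionPonyfill } from 'web-speech-cognitive-services';

const { SpeechGrammarList } = createSpeechRecognitionPonyfill({
  credentials: { region: 'westus2', subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' }
} as any);

const grammars = new SpeechGrammarList();

try {
  // JSGF grammars are not supported by Cognitive Services Speech Services.
  grammars.addFromURI();
} catch (error) {
  console.log((error as Error).message); // "JSGF is not supported"
}
```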
@@ -24216,10 +24225,9 @@
     textNormalization = "display"
   } = patchOptions(options);
   if (!audioConfig && (!window.navigator.mediaDevices || !window.navigator.mediaDevices.getUserMedia)) {
-    console.warn(
-      "web-speech-cognitive-services: This browser does not support WebRTC and it will not work with Cognitive Services Speech Services."
+    throw new Error(
+      "web-speech-cognitive-services: This browser does not support Media Capture and Streams API and it will not work with Cognitive Services Speech Services."
     );
-    return {};
   }
   const createRecognizer = async (lang) => {
     const credentials = await fetchCredentials();
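An unsupported browser now throws synchronously instead of logging a warning and returning `{}`, so callers that previously probed for an empty result should feature-detect up front or catch. A hedged sketch of such a guard, mirroring the condition the library itself tests:

```ts
import { createSpeechRecognitionPonyfill } from 'web-speech-cognitive-services';

function tryCreatePonyfill(options: any) {
  // Same check as the library: Media Capture and Streams API must be present.
  if (!window.navigator.mediaDevices?.getUserMedia) {
    return undefined;
  }

  return createSpeechRecognitionPonyfill(options);
}
```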
@@ -24252,9 +24260,6 @@
   });
 }
 
-// src/SpeechServices/SpeechToText.ts
-var SpeechToText_default = createSpeechRecognitionPonyfill;
-
 // ../../node_modules/event-target-shim/index.mjs
 function assertType(condition, message, ...args) {
   if (!condition) {
@@ -25720,16 +25725,16 @@
 // src/SpeechServices/TextToSpeech.js
 var TextToSpeech_default = createSpeechSynthesisPonyfill_default;
 
-// src/SpeechServices.js
-function createSpeechServicesPonyfill(options = {}) {
+// src/SpeechServices.ts
+function createSpeechServicesPonyfill(options = {}) {
   return {
-    ...SpeechToText_default(options),
-    ...TextToSpeech_default(options)
+    ...createSpeechRecognitionPonyfill(options),
+    ...TextToSpeech_default(options)
   };
 }
 var meta = document.createElement("meta");
 meta.setAttribute("name", "web-speech-cognitive-services");
-meta.setAttribute("content", `version=${"8.0.0-main.d9ef940"}`);
+meta.setAttribute("content", `version=${"8.0.0"}`);
 document.head.appendChild(meta);
 
 // src/index.umd.js