web-speech-cognitive-services 7.1.4-master.151bc9b → 8.0.0-main.428d2a8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. package/dist/web-speech-cognitive-services.d.mts +186 -0
  2. package/dist/web-speech-cognitive-services.d.ts +186 -0
  3. package/dist/web-speech-cognitive-services.development.js +25608 -0
  4. package/dist/web-speech-cognitive-services.development.js.map +1 -0
  5. package/dist/web-speech-cognitive-services.js +1271 -0
  6. package/dist/web-speech-cognitive-services.js.map +1 -0
  7. package/dist/web-speech-cognitive-services.mjs +1236 -0
  8. package/dist/web-speech-cognitive-services.mjs.map +1 -0
  9. package/dist/web-speech-cognitive-services.production.min.js +31 -0
  10. package/dist/web-speech-cognitive-services.production.min.js.map +1 -0
  11. package/package.json +67 -47
  12. package/CHANGELOG.md +0 -372
  13. package/lib/BingSpeech/SpeechToText/SpeechGrammarList.js +0 -94
  14. package/lib/BingSpeech/SpeechToText/SpeechGrammarList.js.map +0 -1
  15. package/lib/BingSpeech/SpeechToText/createSpeechRecognitionPonyfill.js +0 -483
  16. package/lib/BingSpeech/SpeechToText/createSpeechRecognitionPonyfill.js.map +0 -1
  17. package/lib/BingSpeech/SpeechToText.js +0 -14
  18. package/lib/BingSpeech/SpeechToText.js.map +0 -1
  19. package/lib/BingSpeech/TextToSpeech/AudioContextConsumer.js +0 -122
  20. package/lib/BingSpeech/TextToSpeech/AudioContextConsumer.js.map +0 -1
  21. package/lib/BingSpeech/TextToSpeech/AudioContextQueue.js +0 -104
  22. package/lib/BingSpeech/TextToSpeech/AudioContextQueue.js.map +0 -1
  23. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisUtterance.js +0 -264
  24. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisUtterance.js.map +0 -1
  25. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisVoice.js +0 -61
  26. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisVoice.js.map +0 -1
  27. package/lib/BingSpeech/TextToSpeech/buildSSML.js +0 -32
  28. package/lib/BingSpeech/TextToSpeech/buildSSML.js.map +0 -1
  29. package/lib/BingSpeech/TextToSpeech/createSpeechSynthesisPonyfill.js +0 -220
  30. package/lib/BingSpeech/TextToSpeech/createSpeechSynthesisPonyfill.js.map +0 -1
  31. package/lib/BingSpeech/TextToSpeech/fetchSpeechData.js +0 -74
  32. package/lib/BingSpeech/TextToSpeech/fetchSpeechData.js.map +0 -1
  33. package/lib/BingSpeech/TextToSpeech/fetchVoices.js +0 -335
  34. package/lib/BingSpeech/TextToSpeech/fetchVoices.js.map +0 -1
  35. package/lib/BingSpeech/TextToSpeech/isSSML.js +0 -13
  36. package/lib/BingSpeech/TextToSpeech/isSSML.js.map +0 -1
  37. package/lib/BingSpeech/TextToSpeech/subscribeEvent.js +0 -14
  38. package/lib/BingSpeech/TextToSpeech/subscribeEvent.js.map +0 -1
  39. package/lib/BingSpeech/TextToSpeech.js +0 -14
  40. package/lib/BingSpeech/TextToSpeech.js.map +0 -1
  41. package/lib/BingSpeech/Util/DOMEventEmitter.js +0 -61
  42. package/lib/BingSpeech/Util/DOMEventEmitter.js.map +0 -1
  43. package/lib/BingSpeech/Util/createFetchTokenUsingSubscriptionKey.js +0 -41
  44. package/lib/BingSpeech/Util/createFetchTokenUsingSubscriptionKey.js.map +0 -1
  45. package/lib/BingSpeech/fetchAuthorizationToken.js +0 -57
  46. package/lib/BingSpeech/fetchAuthorizationToken.js.map +0 -1
  47. package/lib/BingSpeech/index.js +0 -84
  48. package/lib/BingSpeech/index.js.map +0 -1
  49. package/lib/SpeechServices/SpeechSDK.js +0 -19
  50. package/lib/SpeechServices/SpeechSDK.js.map +0 -1
  51. package/lib/SpeechServices/SpeechToText/SpeechGrammarList.js +0 -45
  52. package/lib/SpeechServices/SpeechToText/SpeechGrammarList.js.map +0 -1
  53. package/lib/SpeechServices/SpeechToText/cognitiveServiceEventResultToWebSpeechRecognitionResultList.js +0 -56
  54. package/lib/SpeechServices/SpeechToText/cognitiveServiceEventResultToWebSpeechRecognitionResultList.js.map +0 -1
  55. package/lib/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js +0 -984
  56. package/lib/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js.map +0 -1
  57. package/lib/SpeechServices/SpeechToText.js +0 -24
  58. package/lib/SpeechServices/SpeechToText.js.map +0 -1
  59. package/lib/SpeechServices/TextToSpeech/AudioContextConsumer.js +0 -92
  60. package/lib/SpeechServices/TextToSpeech/AudioContextConsumer.js.map +0 -1
  61. package/lib/SpeechServices/TextToSpeech/AudioContextQueue.js +0 -111
  62. package/lib/SpeechServices/TextToSpeech/AudioContextQueue.js.map +0 -1
  63. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisEvent.js +0 -40
  64. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisEvent.js.map +0 -1
  65. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisUtterance.js +0 -283
  66. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisUtterance.js.map +0 -1
  67. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisVoice.js +0 -63
  68. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisVoice.js.map +0 -1
  69. package/lib/SpeechServices/TextToSpeech/buildSSML.js +0 -32
  70. package/lib/SpeechServices/TextToSpeech/buildSSML.js.map +0 -1
  71. package/lib/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js +0 -282
  72. package/lib/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js.map +0 -1
  73. package/lib/SpeechServices/TextToSpeech/fetchCustomVoices.js +0 -110
  74. package/lib/SpeechServices/TextToSpeech/fetchCustomVoices.js.map +0 -1
  75. package/lib/SpeechServices/TextToSpeech/fetchSpeechData.js +0 -127
  76. package/lib/SpeechServices/TextToSpeech/fetchSpeechData.js.map +0 -1
  77. package/lib/SpeechServices/TextToSpeech/fetchVoices.js +0 -87
  78. package/lib/SpeechServices/TextToSpeech/fetchVoices.js.map +0 -1
  79. package/lib/SpeechServices/TextToSpeech/isSSML.js +0 -13
  80. package/lib/SpeechServices/TextToSpeech/isSSML.js.map +0 -1
  81. package/lib/SpeechServices/TextToSpeech/subscribeEvent.js +0 -14
  82. package/lib/SpeechServices/TextToSpeech/subscribeEvent.js.map +0 -1
  83. package/lib/SpeechServices/TextToSpeech.js +0 -14
  84. package/lib/SpeechServices/TextToSpeech.js.map +0 -1
  85. package/lib/SpeechServices/fetchAuthorizationToken.js +0 -58
  86. package/lib/SpeechServices/fetchAuthorizationToken.js.map +0 -1
  87. package/lib/SpeechServices/patchOptions.js +0 -213
  88. package/lib/SpeechServices/patchOptions.js.map +0 -1
  89. package/lib/SpeechServices/resolveFunctionOrReturnValue.js +0 -11
  90. package/lib/SpeechServices/resolveFunctionOrReturnValue.js.map +0 -1
  91. package/lib/SpeechServices.js +0 -73
  92. package/lib/SpeechServices.js.map +0 -1
  93. package/lib/Util/arrayToMap.js +0 -28
  94. package/lib/Util/arrayToMap.js.map +0 -1
  95. package/lib/Util/createPromiseQueue.js +0 -40
  96. package/lib/Util/createPromiseQueue.js.map +0 -1
  97. package/lib/index.js +0 -14
  98. package/lib/index.js.map +0 -1
  99. package/umd/web-speech-cognitive-services.development.js +0 -4740
  100. package/umd/web-speech-cognitive-services.production.min.js +0 -2
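The list above reflects the packaging change in 8.0.0: the transpiled lib/ tree and the umd/ bundles are removed, and the published output moves to a single dist/ folder with CommonJS (.js), ESM (.mjs) and development builds plus generated .d.ts/.d.mts typings. As a hedged illustration (the old deep path below comes from the removed file list; whether such paths were ever officially supported is not stated in this diff), any consumer reaching into lib/ would switch to a root import:

// 7.x shipped individual CommonJS files, so deep paths like this could resolve:
//   require('web-speech-cognitive-services/lib/SpeechServices');
// In 8.0.0 those files are gone; import the public API from the package root instead:
import { createSpeechServicesPonyfill } from 'web-speech-cognitive-services';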
package/dist/web-speech-cognitive-services.d.mts
@@ -0,0 +1,186 @@
+ import * as memoize_one from 'memoize-one';
+
+ declare class SpeechSynthesisUtterance {
+     constructor(text: any);
+     _lang: any;
+     _pitch: number;
+     _rate: number;
+     _voice: any;
+     _volume: number;
+     text: any;
+     set onboundary(value: any);
+     get onboundary(): any;
+     set onend(value: any);
+     get onend(): any;
+     set onerror(value: any);
+     get onerror(): any;
+     set onmark(value: any);
+     get onmark(): any;
+     set onpause(value: any);
+     get onpause(): any;
+     set onresume(value: any);
+     get onresume(): any;
+     set onstart(value: any);
+     get onstart(): any;
+     set lang(value: any);
+     get lang(): any;
+     set pitch(value: number);
+     get pitch(): number;
+     set rate(value: number);
+     get rate(): number;
+     set voice(value: any);
+     get voice(): any;
+     set volume(value: number);
+     get volume(): number;
+     preload({ deploymentId, fetchCredentials, outputFormat }: {
+         deploymentId: any;
+         fetchCredentials: any;
+         outputFormat: any;
+     }): void;
+     arrayBufferPromise: Promise<ArrayBuffer> | undefined;
+     play(audioContext: any): Promise<void>;
+     _playingSource: any;
+     stop(): void;
+ }
+
+ declare class SpeechSynthesisEvent {
+     constructor(type: any);
+ }
+
+ declare class _default$5 {
+     constructor(audioContext: any);
+     audioContext: any;
+     pause(): void;
+     resume(): void;
+     start(queue: any): Promise<void>;
+     playingUtterance: any;
+     stop(): void;
+ }
+
+ declare class _default$4 {
+     constructor({ audioContext, ponyfill }: {
+         audioContext: any;
+         ponyfill: any;
+     });
+     consumer: _default$5 | null;
+     paused: boolean;
+     queue: any[];
+     getAudioContext: memoize_one.MemoizedFn<() => any>;
+     pause(): void;
+     push(utterance: any): void;
+     resume(): void;
+     get speaking(): boolean;
+     startConsumer(): Promise<void>;
+     stop(): void;
+ }
+
+ declare class _default$3 {
+     _phrases: any[];
+     addFromString(): void;
+     set phrases(value: any[]);
+     get phrases(): any[];
+ }
+
+ declare function createSpeechRecognitionPonyfillFromRecognizer({ createRecognizer, enableTelemetry, looseEvents, referenceGrammars, textNormalization }: {
+     createRecognizer: any;
+     enableTelemetry: any;
+     looseEvents: any;
+     referenceGrammars: any;
+     textNormalization: any;
+ }): {
+     SpeechGrammarList: typeof _default$3;
+     SpeechRecognition: {
+         new (): {
+             _continuous: boolean;
+             _interimResults: boolean;
+             _lang: string;
+             _grammars: _default$3;
+             _maxAlternatives: number;
+             emitCognitiveServices(type: any, event: any): void;
+             continuous: boolean;
+             grammars: _default$3;
+             interimResults: boolean;
+             maxAlternatives: number;
+             lang: string;
+             onaudioend: any;
+             onaudiostart: any;
+             oncognitiveservices: any;
+             onend: any;
+             onerror: any;
+             onresult: any;
+             onsoundend: any;
+             onsoundstart: any;
+             onspeechend: any;
+             onspeechstart: any;
+             onstart: any;
+             start(): void;
+             _startOnce(): Promise<void>;
+             abort: (() => void) | undefined;
+             stop: (() => void) | undefined;
+         };
+     };
+     SpeechRecognitionEvent: typeof SpeechRecognitionEvent;
+ };
+ declare function _default$2(options: any): {};
+
+ declare class SpeechRecognitionEvent {
+     constructor(type: any, { data, emma, interpretation, resultIndex, results }?: {
+         data: any;
+         emma: any;
+         interpretation: any;
+         resultIndex: any;
+         results: any;
+     });
+     data: any;
+     emma: any;
+     interpretation: any;
+     resultIndex: any;
+     results: any;
+ }
+
+ declare function _default$1(options: any): {
+     speechSynthesis?: never;
+     SpeechSynthesisEvent?: never;
+     SpeechSynthesisUtterance?: never;
+ } | {
+     speechSynthesis: {
+         queue: AudioContextQueue;
+         cancel(): void;
+         getVoices(): any[];
+         onvoiceschanged: any;
+         pause(): void;
+         resume(): void;
+         speak(utterance: any): Promise<any>;
+         readonly speaking: boolean;
+         updateVoices(): Promise<void>;
+     };
+     SpeechSynthesisEvent: typeof SpeechSynthesisEvent;
+     SpeechSynthesisUtterance: typeof SpeechSynthesisUtterance;
+ };
+
+ declare function _default({ region, subscriptionKey }: {
+     region: any;
+     subscriptionKey: any;
+ }): Promise<string>;
+
+ declare function createSpeechServicesPonyfill(options?: {}, ...args: any[]): {
+     speechSynthesis?: never;
+     SpeechSynthesisEvent?: never;
+     SpeechSynthesisUtterance?: never;
+ } | {
+     speechSynthesis: {
+         queue: _default$4;
+         cancel(): void;
+         getVoices(): any[];
+         onvoiceschanged: any;
+         pause(): void;
+         resume(): void;
+         speak(utterance: any): Promise<any>;
+         readonly speaking: boolean;
+         updateVoices(): Promise<void>;
+     };
+     SpeechSynthesisEvent: typeof SpeechSynthesisEvent;
+     SpeechSynthesisUtterance: typeof SpeechSynthesisUtterance;
+ };
+
+ export { _default$2 as createSpeechRecognitionPonyfill, createSpeechRecognitionPonyfillFromRecognizer, createSpeechServicesPonyfill, _default$1 as createSpeechSynthesisPonyfill, _default as fetchAuthorizationToken };
package/dist/web-speech-cognitive-services.d.ts
@@ -0,0 +1,186 @@
(The 186 added lines are identical to those in the .d.mts declaration file diff above.)
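For orientation, here is a hedged TypeScript sketch of how the API surface declared above might be consumed. The typings declare the factory options only as any/{}, so the credentials object shape, the 'westus' region, and the subscription-key value below are placeholder assumptions of this sketch, not values taken from the diff.

// Hedged usage sketch based only on the declarations above; the credentials
// shape and the 'westus' / subscription-key values are placeholder assumptions.
import {
  createSpeechServicesPonyfill,
  fetchAuthorizationToken
} from 'web-speech-cognitive-services';

async function demo(): Promise<void> {
  // Declared above as ({ region, subscriptionKey }) => Promise<string>.
  const authorizationToken = await fetchAuthorizationToken({
    region: 'westus',                        // placeholder region
    subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' // placeholder key
  });

  // The declared return type is a union, so the synthesis members may be absent.
  const { speechSynthesis, SpeechSynthesisUtterance } = createSpeechServicesPonyfill({
    credentials: { authorizationToken, region: 'westus' } // assumed options shape
  });

  if (speechSynthesis && SpeechSynthesisUtterance) {
    console.log(speechSynthesis.getVoices()); // getVoices(): any[] per the typings
    // speak(utterance) returns Promise<any> per the typings.
    await speechSynthesis.speak(new SpeechSynthesisUtterance('Hello, world!'));
  }
}

demo().catch(console.error);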