@memori.ai/memori-api-client 0.11.0 → 1.0.0

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
package/package.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "version": "0.11.0",
+  "version": "1.0.0",
   "name": "@memori.ai/memori-api-client",
   "description": "React library to integrate a Memori in your app or website",
   "license": "Apache-2.0",
@@ -126,7 +126,6 @@
     "typescript": "4.7.4"
   },
   "dependencies": {
-    "cross-fetch": "^3.1.5",
-    "microsoft-cognitiveservices-speech-sdk": "1.20.0"
+    "cross-fetch": "^3.1.5"
   }
 }
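
Note: dropping microsoft-cognitiveservices-speech-sdk from dependencies means it is no longer installed transitively, which is breaking for consumers that used the bundled speech helper removed below. Such apps must now depend on the SDK directly (e.g. npm install microsoft-cognitiveservices-speech-sdk) and wire it up themselves; see the migration sketch after the index.ts diff.
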
package/src/index.ts CHANGED
@@ -2,7 +2,6 @@ import { getApiUrl } from './helpers/getApiUrl';
 import backend from './backend';
 import engine from './engine';
 import * as constants from './constants';
-import speech from './speech';
 import asset from './helpers/asset';
 
 const api = (hostname?: string) => {
@@ -11,7 +10,6 @@ const api = (hostname?: string) => {
   return {
     backend: backend(`${apiUrl}/api/v2`),
     ...engine(`${apiUrl}/memori/v2`),
-    speech,
     constants,
     asset: asset(`${apiUrl}/api/v2`),
   };
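
A hedged migration sketch for client consumers follows. The default-export import name and the replacement wiring are illustrative assumptions (this diff only confirms that the speech member was removed from the returned object); the 0.11.0 call shape matches the deleted speech.ts below.

    import memoriApiClient from '@memori.ai/memori-api-client';
    import * as speechSdk from 'microsoft-cognitiveservices-speech-sdk';

    const client = memoriApiClient();

    // 0.11.0 (removed in 1.0.0): the raw speech factory was exposed as-is
    // const tts = client.speech('<AZURE_TTS_KEY>')('it', 'FEMALE');
    // tts.speak('Ciao!');

    // 1.0.0: build your own speech layer on the Azure SDK instead
    const speechConfig = speechSdk.SpeechConfig.fromSubscription(
      '<AZURE_TTS_KEY>',
      '<AZURE_REGION>'
    );
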
package/dist/speech.d.ts DELETED
@@ -1,13 +0,0 @@
-import * as speechSdk from 'microsoft-cognitiveservices-speech-sdk';
-/**
- * EXPERIMENTAL
- */
-declare const speech: (AZURE_COGNITIVE_SERVICES_TTS_KEY: string, DEBUG?: boolean) => (lang: string, voiceType: 'FEMALE' | 'MALE') => {
-    speak: (text: string, onAudioEnd?: ((sender: speechSdk.IPlayer) => void) | undefined) => void;
-    isSpeaking: () => boolean;
-    stopSpeaking: () => void;
-    recognize: (onRecognized: (transcript: string) => void) => void;
-    isRecognizing: () => boolean;
-    stopRecognizing: (onStop?: (() => void) | undefined) => void;
-};
-export default speech;
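
For context, the removed surface (marked EXPERIMENTAL above) was consumed roughly like this in 0.11.0; the snippet continues the sketch above and uses placeholder values:

    // per the deleted declaration: speech(key, DEBUG?) => (lang, voiceType) => {...}
    const tts = client.speech('<AZURE_TTS_KEY>', false)('EN', 'FEMALE');

    tts.speak('Hello', () => console.log('audio finished')); // TTS
    tts.recognize(transcript => console.log(transcript));    // continuous STT
    tts.stopRecognizing(() => console.log('stopped'));
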
package/src/speech.ts DELETED
@@ -1,243 +0,0 @@
1
- import * as speechSdk from 'microsoft-cognitiveservices-speech-sdk';
2
-
3
- const getTTSVoice = (lang: string, voiceType: 'MALE' | 'FEMALE'): string => {
4
- let voice = '';
5
- let voiceLang = lang.toUpperCase();
6
- switch (voiceLang) {
7
- case 'IT':
8
- voice = `${
9
- voiceType === 'MALE' ? 'it-IT-DiegoNeural' : 'it-IT-ElsaNeural'
10
- }`;
11
- break;
12
- case 'DE':
13
- voice = `${
14
- voiceType === 'MALE' ? 'de-DE-ConradNeural' : 'de-DE-KatjaNeural'
15
- }`;
16
- break;
17
- case 'EN':
18
- voice = `${
19
- voiceType === 'MALE' ? 'en-GB-RyanNeural' : 'en-GB-SoniaNeural'
20
- }`;
21
- break;
22
- case 'ES':
23
- voice = `${
24
- voiceType === 'MALE' ? 'es-ES-AlvaroNeural' : 'es-ES-ElviraNeural'
25
- }`;
26
- break;
27
- case 'FR':
28
- voice = `${
29
- voiceType === 'MALE' ? 'fr-FR-HenriNeural' : 'fr-FR-DeniseNeural'
30
- }`;
31
- break;
32
- case 'PT':
33
- voice = `${
34
- voiceType === 'MALE' ? 'pt-PT-DuarteNeural' : 'pt-PT-RaquelNeural'
35
- }`;
36
- break;
37
- default:
38
- voice = `${
39
- voiceType === 'MALE' ? 'it-IT-DiegoNeural' : 'it-IT-IsabellaNeural'
40
- }`;
41
- break;
42
- }
43
- return voice;
44
- };
45
-
46
- const getCultureCodeByLanguage = (lang: string): string => {
47
- let voice = '';
48
- let voiceLang = lang.toUpperCase();
49
- switch (voiceLang) {
50
- case 'IT':
51
- voice = 'it-IT';
52
- break;
53
- case 'DE':
54
- voice = 'de-DE';
55
- break;
56
- case 'EN':
57
- voice = 'en-US';
58
- break;
59
- case 'ES':
60
- voice = 'es-ES';
61
- break;
62
- case 'FR':
63
- voice = 'fr-FR';
64
- break;
65
- case 'PT':
66
- voice = 'pt-PT';
67
- break;
68
- default:
69
- voice = 'it-IT';
70
- break;
71
- }
72
- return voice;
73
- };
74
-
75
- /**
76
- * EXPERIMENTAL
77
- */
78
- const speech = (AZURE_COGNITIVE_SERVICES_TTS_KEY: string, DEBUG = false) => (
79
- lang: string,
80
- voiceType: 'FEMALE' | 'MALE'
81
- ) => {
82
- let speechConfig: speechSdk.SpeechConfig = speechSdk.SpeechConfig.fromSubscription(
83
- AZURE_COGNITIVE_SERVICES_TTS_KEY,
84
- 'eastus'
85
- );
86
- let speechSynthesizer: speechSdk.SpeechSynthesizer | null;
87
- let audioDestination: speechSdk.SpeakerAudioDestination;
88
-
89
- audioDestination = new speechSdk.SpeakerAudioDestination();
90
- let audioOutputConfig = speechSdk.AudioConfig.fromSpeakerOutput(
91
- audioDestination
92
- );
93
-
94
- // https://docs.microsoft.com/it-it/azure/cognitive-services/speech-service/language-support#text-to-speech
95
- speechConfig.speechSynthesisVoiceName = getTTSVoice(lang, voiceType);
96
-
97
- let langCultureCode = getCultureCodeByLanguage(lang);
98
- speechConfig.speechSynthesisLanguage = langCultureCode;
99
- speechConfig.speechRecognitionLanguage = langCultureCode;
100
-
101
- /**
102
- * speak
103
- * @description Speaks the text using the speech synthesizer. (TTS)
104
- * @param {string} text - The text to be synthesized.
105
- * @param {func=} onAudioEnd - The callback to be invoked when the synthesized audio is finished.
106
- */
107
- const speak = (
108
- text: string,
109
- onAudioEnd?: (sender: speechSdk.IPlayer) => void
110
- ) => {
111
- stopSpeaking();
112
-
113
- speechSynthesizer = new speechSdk.SpeechSynthesizer(
114
- speechConfig,
115
- audioOutputConfig
116
- );
117
-
118
- if (onAudioEnd) audioDestination.onAudioEnd = onAudioEnd;
119
-
120
- speechSynthesizer.speakTextAsync(
121
- text,
122
- result => {
123
- if (result) {
124
- try {
125
- if (DEBUG) console.log('speak result', result);
126
- if (speechSynthesizer) {
127
- speechSynthesizer.close();
128
- speechSynthesizer = null;
129
- }
130
- } catch (e) {
131
- console.error('speak error: ', e);
132
- window.speechSynthesis.speak(new SpeechSynthesisUtterance(text));
133
- }
134
- } else if (DEBUG) {
135
- console.log('speak no result', result);
136
- }
137
- },
138
- error => {
139
- console.error('speak:', error);
140
- window.speechSynthesis.speak(new SpeechSynthesisUtterance(text));
141
- }
142
- );
143
- };
144
-
145
- /**
146
- * isSpeaking
147
- * @description Returns true if the synthesizer is speaking.
148
- * @returns {boolean}
149
- */
150
- const isSpeaking = (): boolean => {
151
- return !!speechSynthesizer;
152
- };
153
-
154
- /**
155
- * stopSpeaking
156
- * @description Stops the speech synthesizer if it is synthesizing.
157
- */
158
- const stopSpeaking = () => {
159
- if (audioDestination) audioDestination.pause();
160
- if (speechSynthesizer) {
161
- speechSynthesizer.close();
162
- speechSynthesizer = null;
163
- }
164
- };
165
-
166
- const audioInputConfig = speechSdk.AudioConfig.fromDefaultMicrophoneInput();
167
- let recognizer: speechSdk.SpeechRecognizer | null;
168
-
169
- /**
170
- * recognize
171
- * @description Starts the speech recognition.
172
- * @param {func=} onRecognized - Callback method invoked when the speech is recognized with the text.
173
- */
174
- const recognize = (onRecognized: (transcript: string) => void) => {
175
- recognizer = new speechSdk.SpeechRecognizer(speechConfig, audioInputConfig);
176
-
177
- recognizer.recognizing = (_s, e) => {
178
- if (DEBUG) console.log(`RECOGNIZING: Text=${e.result.text}`);
179
- };
180
- recognizer.recognized = (_s, e) => {
181
- if (e.result.reason === speechSdk.ResultReason.RecognizedSpeech) {
182
- if (DEBUG) console.log(`RECOGNIZED: Text=${e.result.text}`);
183
- onRecognized(e.result.text ?? '');
184
- } else if (e.result.reason === speechSdk.ResultReason.NoMatch && DEBUG) {
185
- console.log('NOMATCH: Speech could not be recognized.');
186
- }
187
- };
188
- recognizer.canceled = (_s, e) => {
189
- if (DEBUG) console.log(`CANCELED: Reason=${e.reason}`);
190
-
191
- if (e.reason === speechSdk.CancellationReason.Error && DEBUG) {
192
- console.log(`"CANCELED: ErrorCode=${e.errorCode}`);
193
- console.log(`"CANCELED: ErrorDetails=${e.errorDetails}`);
194
- console.log(
195
- 'CANCELED: Did you set the speech resource key and region values?'
196
- );
197
- }
198
-
199
- stopRecognizing();
200
- };
201
-
202
- recognizer.sessionStopped = (_s, _e) => {
203
- if (DEBUG) console.log('\n Session stopped event.');
204
- if (recognizer) recognizer.stopContinuousRecognitionAsync();
205
- };
206
- recognizer.startContinuousRecognitionAsync();
207
- };
208
-
209
- /**
210
- * isRecognizing
211
- * @description Returns true if the recognizer is recognizing.
212
- * @returns {boolean}
213
- */
214
- const isRecognizing = (): boolean => {
215
- return !!recognizer;
216
- };
217
-
218
- /**
219
- * stopRecognizing
220
- * @description Stops the speech recognizer if it is recognizing.
221
- * @param {func=} onStop - (optional) The callback to be invoked when the speech recognition is stopped.
222
- */
223
- const stopRecognizing = (onStop?: () => void) => {
224
- if (recognizer) {
225
- recognizer.stopContinuousRecognitionAsync();
226
- recognizer.close();
227
- recognizer = null;
228
-
229
- if (onStop) onStop();
230
- }
231
- };
232
-
233
- return {
234
- speak,
235
- isSpeaking,
236
- stopSpeaking,
237
- recognize,
238
- isRecognizing,
239
- stopRecognizing,
240
- };
241
- };
242
-
243
- export default speech;