@memori.ai/memori-api-client 0.11.0 → 1.0.0

@@ -1,5 +1,4 @@
  import fetch$1 from 'cross-fetch';
- import { SpeechConfig, SpeakerAudioDestination, AudioConfig, SpeechSynthesizer, SpeechRecognizer, ResultReason, CancellationReason } from 'microsoft-cognitiveservices-speech-sdk';

  function _regeneratorRuntime() {
    /*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */
@@ -4102,239 +4101,6 @@ var constants = {
    anonTag: anonTag
  };

- var getTTSVoice = function getTTSVoice(lang, voiceType) {
-   var voice = '';
-   var voiceLang = lang.toUpperCase();
-
-   switch (voiceLang) {
-     case 'IT':
-       voice = "" + (voiceType === 'MALE' ? 'it-IT-DiegoNeural' : 'it-IT-ElsaNeural');
-       break;
-
-     case 'DE':
-       voice = "" + (voiceType === 'MALE' ? 'de-DE-ConradNeural' : 'de-DE-KatjaNeural');
-       break;
-
-     case 'EN':
-       voice = "" + (voiceType === 'MALE' ? 'en-GB-RyanNeural' : 'en-GB-SoniaNeural');
-       break;
-
-     case 'ES':
-       voice = "" + (voiceType === 'MALE' ? 'es-ES-AlvaroNeural' : 'es-ES-ElviraNeural');
-       break;
-
-     case 'FR':
-       voice = "" + (voiceType === 'MALE' ? 'fr-FR-HenriNeural' : 'fr-FR-DeniseNeural');
-       break;
-
-     case 'PT':
-       voice = "" + (voiceType === 'MALE' ? 'pt-PT-DuarteNeural' : 'pt-PT-RaquelNeural');
-       break;
-
-     default:
-       voice = "" + (voiceType === 'MALE' ? 'it-IT-DiegoNeural' : 'it-IT-IsabellaNeural');
-       break;
-   }
-
-   return voice;
- };
-
- var getCultureCodeByLanguage = function getCultureCodeByLanguage(lang) {
-   var voice = '';
-   var voiceLang = lang.toUpperCase();
-
-   switch (voiceLang) {
-     case 'IT':
-       voice = 'it-IT';
-       break;
-
-     case 'DE':
-       voice = 'de-DE';
-       break;
-
-     case 'EN':
-       voice = 'en-US';
-       break;
-
-     case 'ES':
-       voice = 'es-ES';
-       break;
-
-     case 'FR':
-       voice = 'fr-FR';
-       break;
-
-     case 'PT':
-       voice = 'pt-PT';
-       break;
-
-     default:
-       voice = 'it-IT';
-       break;
-   }
-
-   return voice;
- };
- /**
-  * EXPERIMENTAL
-  */
-
-
- var speech = function speech(AZURE_COGNITIVE_SERVICES_TTS_KEY, DEBUG) {
-   if (DEBUG === void 0) {
-     DEBUG = false;
-   }
-
-   return function (lang, voiceType) {
-     var speechConfig = SpeechConfig.fromSubscription(AZURE_COGNITIVE_SERVICES_TTS_KEY, 'eastus');
-     var speechSynthesizer;
-     var audioDestination;
-     audioDestination = new SpeakerAudioDestination();
-     var audioOutputConfig = AudioConfig.fromSpeakerOutput(audioDestination); // https://docs.microsoft.com/it-it/azure/cognitive-services/speech-service/language-support#text-to-speech
-
-     speechConfig.speechSynthesisVoiceName = getTTSVoice(lang, voiceType);
-     var langCultureCode = getCultureCodeByLanguage(lang);
-     speechConfig.speechSynthesisLanguage = langCultureCode;
-     speechConfig.speechRecognitionLanguage = langCultureCode;
-     /**
-      * speak
-      * @description Speaks the text using the speech synthesizer. (TTS)
-      * @param {string} text - The text to be synthesized.
-      * @param {func=} onAudioEnd - The callback to be invoked when the synthesized audio is finished.
-      */
-
-     var speak = function speak(text, onAudioEnd) {
-       stopSpeaking();
-       speechSynthesizer = new SpeechSynthesizer(speechConfig, audioOutputConfig);
-       if (onAudioEnd) audioDestination.onAudioEnd = onAudioEnd;
-       speechSynthesizer.speakTextAsync(text, function (result) {
-         if (result) {
-           try {
-             if (DEBUG) console.log('speak result', result);
-
-             if (speechSynthesizer) {
-               speechSynthesizer.close();
-               speechSynthesizer = null;
-             }
-           } catch (e) {
-             console.error('speak error: ', e);
-             window.speechSynthesis.speak(new SpeechSynthesisUtterance(text));
-           }
-         } else if (DEBUG) {
-           console.log('speak no result', result);
-         }
-       }, function (error) {
-         console.error('speak:', error);
-         window.speechSynthesis.speak(new SpeechSynthesisUtterance(text));
-       });
-     };
-     /**
-      * isSpeaking
-      * @description Returns true if the synthesizer is speaking.
-      * @returns {boolean}
-      */
-
-
-     var isSpeaking = function isSpeaking() {
-       return !!speechSynthesizer;
-     };
-     /**
-      * stopSpeaking
-      * @description Stops the speech synthesizer if it is synthesizing.
-      */
-
-
-     var stopSpeaking = function stopSpeaking() {
-       if (audioDestination) audioDestination.pause();
-
-       if (speechSynthesizer) {
-         speechSynthesizer.close();
-         speechSynthesizer = null;
-       }
-     };
-
-     var audioInputConfig = AudioConfig.fromDefaultMicrophoneInput();
-     var recognizer;
-     /**
-      * recognize
-      * @description Starts the speech recognition.
-      * @param {func=} onRecognized - Callback method invoked when the speech is recognized with the text.
-      */
-
-     var recognize = function recognize(onRecognized) {
-       recognizer = new SpeechRecognizer(speechConfig, audioInputConfig);
-
-       recognizer.recognizing = function (_s, e) {
-         if (DEBUG) console.log("RECOGNIZING: Text=" + e.result.text);
-       };
-
-       recognizer.recognized = function (_s, e) {
-         if (e.result.reason === ResultReason.RecognizedSpeech) {
-           var _e$result$text;
-
-           if (DEBUG) console.log("RECOGNIZED: Text=" + e.result.text);
-           onRecognized((_e$result$text = e.result.text) != null ? _e$result$text : '');
-         } else if (e.result.reason === ResultReason.NoMatch && DEBUG) {
-           console.log('NOMATCH: Speech could not be recognized.');
-         }
-       };
-
-       recognizer.canceled = function (_s, e) {
-         if (DEBUG) console.log("CANCELED: Reason=" + e.reason);
-
-         if (e.reason === CancellationReason.Error && DEBUG) {
-           console.log("\"CANCELED: ErrorCode=" + e.errorCode);
-           console.log("\"CANCELED: ErrorDetails=" + e.errorDetails);
-           console.log('CANCELED: Did you set the speech resource key and region values?');
-         }
-
-         stopRecognizing();
-       };
-
-       recognizer.sessionStopped = function (_s, _e) {
-         if (DEBUG) console.log('\n Session stopped event.');
-         if (recognizer) recognizer.stopContinuousRecognitionAsync();
-       };
-
-       recognizer.startContinuousRecognitionAsync();
-     };
-     /**
-      * isRecognizing
-      * @description Returns true if the recognizer is recognizing.
-      * @returns {boolean}
-      */
-
-
-     var isRecognizing = function isRecognizing() {
-       return !!recognizer;
-     };
-     /**
-      * stopRecognizing
-      * @description Stops the speech recognizer if it is recognizing.
-      * @param {func=} onStop - (optional) The callback to be invoked when the speech recognition is stopped.
-      */
-
-
-     var stopRecognizing = function stopRecognizing(onStop) {
-       if (recognizer) {
-         recognizer.stopContinuousRecognitionAsync();
-         recognizer.close();
-         recognizer = null;
-         if (onStop) onStop();
-       }
-     };
-
-     return {
-       speak: speak,
-       isSpeaking: isSpeaking,
-       stopSpeaking: stopSpeaking,
-       recognize: recognize,
-       isRecognizing: isRecognizing,
-       stopRecognizing: stopRecognizing
-     };
-   };
- };
-
  var asset$1 = (function (apiUrl) {
    return {
      /**
@@ -4377,7 +4143,6 @@ var api = function api(hostname) {
    return _extends({
      backend: backendAPI(apiUrl + "/api/v2")
    }, engine(apiUrl + "/memori/v2"), {
-     speech: speech,
      constants: constants,
      asset: asset$1(apiUrl + "/api/v2")
    });
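
Migration note: as the hunks above show, 1.0.0 drops the bundled Azure speech helpers (getTTSVoice, getCultureCodeByLanguage and the experimental speech factory) together with the direct dependency on microsoft-cognitiveservices-speech-sdk, and speech is no longer part of the object returned by api(). An application that relied on client.speech in 0.11.0 now has to call the Azure SDK itself. The sketch below is illustrative only and is not shipped by either version: it assumes the app adds microsoft-cognitiveservices-speech-sdk as its own dependency, and names such as createSpeaker, as well as the hard-coded 'eastus' region, simply mirror the removed helper.

import {
  SpeechConfig,
  SpeakerAudioDestination,
  AudioConfig,
  SpeechSynthesizer,
} from 'microsoft-cognitiveservices-speech-sdk';

// Hypothetical stand-in for the removed speech(key)(lang, voiceType).speak(text) helper.
// The caller now picks the voice explicitly (e.g. 'it-IT-ElsaNeural') instead of relying on
// the lang/voiceType lookup that getTTSVoice used to perform.
export const createSpeaker = (subscriptionKey: string, voiceName: string) => {
  // 'eastus' mirrors the region hard-coded in the old helper; adjust to your Azure resource.
  const speechConfig = SpeechConfig.fromSubscription(subscriptionKey, 'eastus');
  speechConfig.speechSynthesisVoiceName = voiceName;

  return (text: string, onAudioEnd?: () => void) => {
    const audioDestination = new SpeakerAudioDestination();
    if (onAudioEnd) audioDestination.onAudioEnd = onAudioEnd;

    const synthesizer = new SpeechSynthesizer(
      speechConfig,
      AudioConfig.fromSpeakerOutput(audioDestination)
    );

    synthesizer.speakTextAsync(
      text,
      () => synthesizer.close(), // release the synthesizer once the request completes
      (error) => {
        console.error('speak:', error);
        synthesizer.close();
      }
    );
  };
};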