@websolutespa/bom-llm 0.0.37 → 0.0.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
1
1
  # @websolutespa/bom-llm
2
2
 
3
+ ## 0.0.38
4
+
5
+ ### Patch Changes
6
+
7
+ - Modified: `cancel` on TextToSpeech implementations now discards queued text chunks before stopping the current utterance
8
+
3
9
  ## 0.0.37
4
10
 
5
11
  ### Patch Changes
package/dist/umd/index.js CHANGED
@@ -22063,90 +22063,6 @@ ${Object.entries(vars).map(_ref2 => {
22063
22063
  }
22064
22064
  }
22065
22065
  }
22066
- class ElevenlabsTextToSpeech {
22067
- chunks_ = [];
22068
- lang = 'it';
22069
- speaking = false;
22070
- get hasSupport() {
22071
- return true;
22072
- }
22073
- constructor(apiKey, voiceId) {
22074
- this.apiKey = apiKey;
22075
- this.voiceId = voiceId;
22076
- }
22077
- speak(text) {
22078
- // console.log('ElevenlabsTextToSpeech.speak', text, this.enabled, this.hasSupport);
22079
-
22080
- if (!this.apiKey) {
22081
- console.error('ElevenlabsTextToSpeech.error: missing apiKey!');
22082
- return;
22083
- }
22084
- if (!this.voiceId) {
22085
- console.error('ElevenlabsTextToSpeech.error: missing voiceId!');
22086
- return;
22087
- }
22088
- if (this.speaking) {
22089
- // console.warn('speak already speaking');
22090
- this.chunks_.push(text);
22091
- return;
22092
- }
22093
- this.speaking = true;
22094
- const ctx = new AudioContext();
22095
- const options = {
22096
- method: 'POST',
22097
- headers: {
22098
- 'xi-api-key': this.apiKey,
22099
- 'Content-Type': 'application/json'
22100
- },
22101
- body: JSON.stringify({
22102
- voice_settings: {
22103
- stability: 1,
22104
- similarity_boost: 1
22105
- },
22106
- text: text,
22107
- model_id: 'eleven_multilingual_v2'
22108
- })
22109
- };
22110
- fetch(`https://api.elevenlabs.io/v1/text-to-speech/${this.voiceId}?output_format=mp3_22050_32`, options).then(data => data.arrayBuffer()).then(arrayBuffer => ctx.decodeAudioData(arrayBuffer)).then(audioBuffer => {
22111
- return new Promise((resolve, reject) => {
22112
- const audioSource = this.audioSource_ = ctx.createBufferSource();
22113
- audioSource.buffer = audioBuffer;
22114
- audioSource.connect(ctx.destination);
22115
- audioSource.start(ctx.currentTime);
22116
- audioSource.addEventListener('ended', () => {
22117
- this.audioSource_ = undefined;
22118
- resolve(audioSource);
22119
- });
22120
- audioSource.addEventListener('error', error => {
22121
- this.audioSource_ = undefined;
22122
- reject(error);
22123
- });
22124
- });
22125
- }).then(audioSource => {
22126
- console.log('ElevenlabsTextToSpeech.speak.success', audioSource);
22127
- }).catch(error => {
22128
- console.error('ElevenlabsTextToSpeech.speak.error', error);
22129
- }).finally(() => {
22130
- this.speaking = false;
22131
- if (this.chunks_.length > 0) {
22132
- const text = this.chunks_.shift();
22133
- if (text) {
22134
- this.speak(text);
22135
- }
22136
- }
22137
- });
22138
- }
22139
- cancel() {
22140
- if (this.speaking) {
22141
- // cancel
22142
- if (this.audioSource_) {
22143
- this.audioSource_.stop();
22144
- this.audioSource_ = undefined;
22145
- }
22146
- this.speaking = false;
22147
- }
22148
- }
22149
- }
22150
22066
  class DefaultTextToSpeech {
22151
22067
  chunks_ = [];
22152
22068
  lang = 'it';
@@ -22252,6 +22168,7 @@ ${Object.entries(vars).map(_ref2 => {
22252
22168
  synthesis.speak(utterance);
22253
22169
  }
22254
22170
  cancel() {
22171
+ this.chunks_ = [];
22255
22172
  if (!this.hasSupport) {
22256
22173
  return;
22257
22174
  }
@@ -22271,6 +22188,90 @@ ${Object.entries(vars).map(_ref2 => {
22271
22188
  console.log('DefaultTextToSpeech', ...rest);
22272
22189
  }
22273
22190
  }
22191
class ElevenlabsTextToSpeech {
  // Text chunks received while a previous utterance is still playing;
  // drained one at a time from the `finally` handler in speak().
  chunks_ = [];
  lang = 'it';
  speaking = false;
  get hasSupport() {
    // Playback relies on fetch + Web Audio, not SpeechSynthesis, so it is
    // considered always available.
    return true;
  }
  /**
   * @param {string} apiKey - ElevenLabs API key, sent as the `xi-api-key` header.
   * @param {string} voiceId - ElevenLabs voice identifier, used in the request URL.
   */
  constructor(apiKey, voiceId) {
    this.apiKey = apiKey;
    this.voiceId = voiceId;
  }
  /**
   * Synthesizes `text` via the ElevenLabs text-to-speech API and plays the
   * resulting audio through a Web Audio AudioContext. If an utterance is
   * already playing, the text is queued and spoken when the current one ends.
   * @param {string} text - The text to synthesize and play.
   */
  speak(text) {
    if (!this.apiKey) {
      console.error('ElevenlabsTextToSpeech.error: missing apiKey!');
      return;
    }
    if (!this.voiceId) {
      console.error('ElevenlabsTextToSpeech.error: missing voiceId!');
      return;
    }
    if (this.speaking) {
      // Already speaking: queue the chunk for the finally-handler to drain.
      this.chunks_.push(text);
      return;
    }
    this.speaking = true;
    const ctx = new AudioContext();
    const options = {
      method: 'POST',
      headers: {
        'xi-api-key': this.apiKey,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        voice_settings: {
          stability: 1,
          similarity_boost: 1
        },
        text: text,
        model_id: 'eleven_multilingual_v2'
      })
    };
    fetch(`https://api.elevenlabs.io/v1/text-to-speech/${this.voiceId}?output_format=mp3_22050_32`, options).then(response => {
      // Fix: surface HTTP errors instead of handing an error payload to
      // decodeAudioData, which would fail with a misleading decode error.
      if (!response.ok) {
        throw new Error(`ElevenlabsTextToSpeech: HTTP ${response.status}`);
      }
      return response.arrayBuffer();
    }).then(arrayBuffer => ctx.decodeAudioData(arrayBuffer)).then(audioBuffer => {
      return new Promise((resolve, reject) => {
        const audioSource = this.audioSource_ = ctx.createBufferSource();
        audioSource.buffer = audioBuffer;
        audioSource.connect(ctx.destination);
        audioSource.start(ctx.currentTime);
        // 'ended' also fires when cancel() calls stop(), so this promise
        // always settles and the finally-handler always runs.
        audioSource.addEventListener('ended', () => {
          this.audioSource_ = undefined;
          resolve(audioSource);
        });
        audioSource.addEventListener('error', error => {
          this.audioSource_ = undefined;
          reject(error);
        });
      });
    }).then(audioSource => {
      console.log('ElevenlabsTextToSpeech.speak.success', audioSource);
    }).catch(error => {
      console.error('ElevenlabsTextToSpeech.speak.error', error);
    }).finally(() => {
      // Fix: release the AudioContext. Browsers cap the number of live
      // contexts; the original leaked one per spoken chunk.
      ctx.close().catch(() => {});
      this.speaking = false;
      if (this.chunks_.length > 0) {
        const text = this.chunks_.shift();
        if (text) {
          this.speak(text);
        }
      }
    });
  }
  /**
   * Stops the current utterance (if any) and discards all queued chunks.
   */
  cancel() {
    this.chunks_ = [];
    if (this.speaking) {
      if (this.audioSource_) {
        // stop() triggers the 'ended' listener above, which resolves the
        // playback promise and lets the finally-handler clean up.
        this.audioSource_.stop();
        this.audioSource_ = undefined;
      }
      this.speaking = false;
    }
  }
}
22274
22275
  class DefaultSpeechToText {
22275
22276
  lang = 'it';
22276
22277
  enabled = false;