@unith-ai/core-client 2.0.63 → 2.0.65

This diff shows the content changes between publicly released versions of this package as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions exactly as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -504,6 +504,7 @@ export declare class Conversation {
504
504
  getUserId(): string;
505
505
  private handleEndSession;
506
506
  private updateStatus;
507
+ generateSpeechRecognitionToken(): Promise<void>;
507
508
  /**
508
509
  * To stop streaming response, we'll send a stop message to the BE. The BE will then stop sending audio and video frames, which will naturally end the response. This is more efficient and leads to a better user experience as it allows for a smoother transition when stopping the response.
509
510
  *
@@ -512,11 +513,6 @@ export declare class Conversation {
512
513
  stopResponse(): Promise<void>;
513
514
  toggleMute(): Promise<number>;
514
515
  startSession(): Promise<Connection>;
515
- /**
516
- * Returns token for speech recognition.
517
- * @param provider
518
- */
519
- private getSpeechRecognitionToken;
520
516
  toggleMicrophone(): Promise<void>;
521
517
  getMicrophoneStatus(): MicrophoneStatus;
522
518
  endSession(): Promise<void>;
package/dist/lib.js CHANGED
@@ -11426,6 +11426,7 @@ function asEncryptablePacket(packet) {
11426
11426
  var _a, _b, _c, _d, _e;
11427
11427
  if (((_a = packet.value) === null || _a === void 0 ? void 0 : _a.case) !== "sipDtmf" && ((_b = packet.value) === null || _b === void 0 ? void 0 : _b.case) !== "metrics" && ((_c = packet.value) === null || _c === void 0 ? void 0 : _c.case) !== "speaker" && ((_d = packet.value) === null || _d === void 0 ? void 0 : _d.case) !== "transcription" && ((_e = packet.value) === null || _e === void 0 ? void 0 : _e.case) !== "encryptedPacket") return new EncryptedPacketPayload({ value: packet.value });
11428
11428
  }
11429
+ eventsExports.EventEmitter;
11429
11430
  var CryptorErrorReason;
11430
11431
  (function(CryptorErrorReason) {
11431
11432
  CryptorErrorReason[CryptorErrorReason["InvalidKey"] = 0] = "InvalidKey";
@@ -22273,6 +22274,8 @@ var CheckStatus;
22273
22274
  CheckStatus[CheckStatus["SUCCESS"] = 3] = "SUCCESS";
22274
22275
  CheckStatus[CheckStatus["FAILED"] = 4] = "FAILED";
22275
22276
  })(CheckStatus || (CheckStatus = {}));
22277
+ eventsExports.EventEmitter;
22278
+ eventsExports.EventEmitter;
22276
22279
  function _defineProperty(e, r, t) {
22277
22280
  return (r = _toPropertyKey(r)) in e ? Object.defineProperty(e, r, {
22278
22281
  value: t,
@@ -22440,7 +22443,8 @@ var JWSSignatureVerificationFailed = class extends JOSEError {
22440
22443
  }
22441
22444
  };
22442
22445
  _defineProperty(JWSSignatureVerificationFailed, "code", "ERR_JWS_SIGNATURE_VERIFICATION_FAILED");
22443
- new Uint8Array(0);
22446
+ //#endregion
22447
+ //#region ../../node_modules/.pnpm/@elevenlabs+client@0.13.1_@types+dom-mediacapture-record@1.0.22/node_modules/@elevenlabs/client/dist/lib.modern.js
22444
22448
  const _ = /* @__PURE__ */ new Map();
22445
22449
  function w(e, t) {
22446
22450
  return async (n, o) => {
@@ -24118,6 +24122,12 @@ var Conversation = class Conversation {
24118
24122
  this.options.onStatusChange({ status });
24119
24123
  }
24120
24124
  }
24125
+ async generateSpeechRecognitionToken() {
24126
+ if (this.options.microphoneProvider === "custom") {
24127
+ const response = await this.user.getAsrToken("elevenlabs");
24128
+ this.options.onSpeechRecognitionToken(response.token);
24129
+ } else throw new Error("Speech recognition token generation is only applicable for custom microphone provider.");
24130
+ }
24121
24131
  /**
24122
24132
  * To stop streaming response, we'll send a stop message to the BE. The BE will then stop sending audio and video frames, which will naturally end the response. This is more efficient and leads to a better user experience as it allows for a smoother transition when stopping the response.
24123
24133
  *
@@ -24177,18 +24187,11 @@ var Conversation = class Conversation {
24177
24187
  }
24178
24188
  if (this.options.microphoneProvider !== "custom") this.microphone = await Microphone.initializeMicrophone(this.options.microphoneOptions, this.options.microphoneProvider, this.options.elevenLabsOptions ?? defaultElevenLabsOptions, this.user, this.headInfo, this.microphoneAccess, this.options.voiceInterruptions ?? false);
24179
24189
  else {
24180
- const token = await this.getSpeechRecognitionToken("eleven_labs");
24181
- this.options.onSpeechRecognitionToken(token);
24190
+ const response = await this.user.getAsrToken("elevenlabs");
24191
+ this.options.onSpeechRecognitionToken(response.token);
24182
24192
  }
24183
24193
  return this.connection;
24184
24194
  }
24185
- /**
24186
- * Returns token for speech recognition.
24187
- * @param provider
24188
- */
24189
- async getSpeechRecognitionToken(provider) {
24190
- return (await this.user.getAsrToken(provider === "eleven_labs" ? "elevenlabs" : "azure")).token;
24191
- }
24192
24195
  async toggleMicrophone() {
24193
24196
  if (this.options.microphoneProvider === "custom") throw new Error("Cannot toggle microphone for custom provider.");
24194
24197
  if (!this.microphone) this.microphone = await Microphone.initializeMicrophone(this.options.microphoneOptions, this.options.microphoneProvider, this.options.elevenLabsOptions ?? defaultElevenLabsOptions, this.user, this.headInfo, this.microphoneAccess, this.options.voiceInterruptions ?? false);