@lobehub/ui 1.111.2 → 1.113.0

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -5,7 +5,8 @@ import { ChatMessage } from "../types/chatMessage";
  import { LLMRoleType } from "../types/llm";
  import { type ActionsBarProps } from './ActionsBar';
  export type OnMessageChange = (id: string, content: string) => void;
- export type OnActionClick = (action: ActionEvent, message: ChatMessage) => void;
+ export type OnActionsClick = (action: ActionEvent, message: ChatMessage) => void;
+ export type OnAvatatsClick = (role: RenderRole) => ChatItemProps['onAvatarClick'];
  export type RenderRole = LLMRoleType | 'default' | string;
  export type RenderItem = FC<{
  key: string;

@@ -22,7 +23,8 @@ export interface ListItemProps {
  /**
  * @description 点击操作按钮的回调函数
  */
- onActionsClick?: OnActionClick;
+ onActionsClick?: OnActionsClick;
+ onAvatarsClick?: OnAvatatsClick;
  /**
  * @description 消息变化的回调函数
  */

@@ -1,7 +1,7 @@
  import _defineProperty from "@babel/runtime/helpers/esm/defineProperty";
  import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
  import _objectWithoutProperties from "@babel/runtime/helpers/esm/objectWithoutProperties";
- var _excluded = ["renderMessagesExtra", "showTitle", "onActionsClick", "onMessageChange", "type", "text", "renderMessages", "renderErrorMessages", "renderActions", "loading", "groupNav", "renderItems"];
+ var _excluded = ["renderMessagesExtra", "showTitle", "onActionsClick", "onAvatarsClick", "onMessageChange", "type", "text", "renderMessages", "renderErrorMessages", "renderActions", "loading", "groupNav", "renderItems"];
  function ownKeys(e, r) { var t = Object.keys(e); if (Object.getOwnPropertySymbols) { var o = Object.getOwnPropertySymbols(e); r && (o = o.filter(function (r) { return Object.getOwnPropertyDescriptor(e, r).enumerable; })), t.push.apply(t, o); } return t; }
  function _objectSpread(e) { for (var r = 1; r < arguments.length; r++) { var t = null != arguments[r] ? arguments[r] : {}; r % 2 ? ownKeys(Object(t), !0).forEach(function (r) { _defineProperty(e, r, t[r]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(e, Object.getOwnPropertyDescriptors(t)) : ownKeys(Object(t)).forEach(function (r) { Object.defineProperty(e, r, Object.getOwnPropertyDescriptor(t, r)); }); } return e; }
  import { App } from 'antd';

@@ -15,6 +15,7 @@ var Item = /*#__PURE__*/memo(function (props) {
  var renderMessagesExtra = props.renderMessagesExtra,
  showTitle = props.showTitle,
  onActionsClick = props.onActionsClick,
+ onAvatarsClick = props.onAvatarsClick,
  onMessageChange = props.onMessageChange,
  type = props.type,
  text = props.text,

@@ -123,6 +124,7 @@ var Item = /*#__PURE__*/memo(function (props) {
  messageExtra: /*#__PURE__*/_jsx(MessageExtra, {
  data: item
  }),
+ onAvatarClick: onAvatarsClick === null || onAvatarsClick === void 0 ? void 0 : onAvatarsClick(item.role),
  onChange: function onChange(value) {
  return onMessageChange === null || onMessageChange === void 0 ? void 0 : onMessageChange(item.id, value);
  },

@@ -10,6 +10,6 @@ export interface ChatListProps extends DivProps, ListItemProps {
  historyCount?: number;
  loadingId?: string;
  }
- export type { OnActionClick, OnMessageChange, RenderAction, RenderErrorMessage, RenderItem, RenderMessage, RenderMessageExtra, } from './Item';
+ export type { OnActionsClick, OnAvatatsClick, OnMessageChange, RenderAction, RenderErrorMessage, RenderItem, RenderMessage, RenderMessageExtra, } from './Item';
  declare const ChatList: import("react").NamedExoticComponent<ChatListProps>;
  export default ChatList;

package/es/index.d.ts CHANGED
@@ -7,7 +7,7 @@ export { default as ChatHeader, type ChatHeaderProps } from './ChatHeader';
  export { default as ChatHeaderTitle, type ChatHeaderTitleProps, } from './ChatHeader/ChatHeaderTitle';
  export { default as ChatInputArea, type ChatInputAreaProps } from './ChatInputArea';
  export { default as ChatItem, type ChatItemProps } from './ChatItem';
- export type { ChatListProps, OnActionClick, OnMessageChange, RenderAction, RenderErrorMessage, RenderItem, RenderMessage, RenderMessageExtra, } from './ChatList';
+ export type { ChatListProps, OnActionsClick, OnAvatatsClick, OnMessageChange, RenderAction, RenderErrorMessage, RenderItem, RenderMessage, RenderMessageExtra, } from './ChatList';
  export { default as ChatList } from './ChatList';
  export { default as ActionsBar, type ActionsBarProps } from './ChatList/ActionsBar';
  export { default as CodeEditor, type CodeEditorProps } from './CodeEditor';

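The `OnActionClick` handler type is renamed to `OnActionsClick`, and a new `onAvatarsClick` prop is threaded through to each item's `onAvatarClick` (the exported type name is spelled `OnAvatatsClick` in these declarations). A minimal usage sketch based only on the callback shapes above; the JSX wiring in the final comment is illustrative, and it assumes `ChatItemProps['onAvatarClick']` is a plain click handler:

    import type { OnActionsClick, OnAvatatsClick } from '@lobehub/ui';

    // Called when an item's action button is clicked; `message.id` exists per this diff.
    const handleActions: OnActionsClick = (action, message) => {
      console.log('action on message', message.id, action);
    };

    // Factory shape: given the item's render role, return that item's onAvatarClick handler.
    const handleAvatars: OnAvatatsClick = (role) => () => {
      console.log('avatar clicked for role', role);
    };

    // <ChatList onActionsClick={handleActions} onAvatarsClick={handleAvatars} ... />
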
@@ -1,10 +1,9 @@
- /// <reference types="node" />
  /// <reference types="react" />
- import { AzureSpeechEnv } from '../services/postAzureSpeech';
- import { SsmlOptions } from '../utils/genSSML';
- export declare const useAzureSpeech: (defaultText: string, options: SsmlOptions, env: AzureSpeechEnv) => {
- data: Buffer | undefined;
+ import { AzureSpeechOptions } from '../services/postAzureSpeech';
+ export declare const useAzureSpeech: (defaultText: string, options: AzureSpeechOptions) => {
+ data: AudioBufferSourceNode | undefined;
  isLoading: boolean;
+ isPlaying: boolean;
  setText: import("react").Dispatch<import("react").SetStateAction<string>>;
  start: () => void;
  stop: () => void;

@@ -2,37 +2,62 @@ import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
  import { useState } from 'react';
  import useSWR from 'swr';
  import { postAzureSpeech } from "../services/postAzureSpeech";
- export var useAzureSpeech = function useAzureSpeech(defaultText, options, env) {
- var _useState = useState(defaultText),
+ export var useAzureSpeech = function useAzureSpeech(defaultText, options) {
+ var _useState = useState(),
  _useState2 = _slicedToArray(_useState, 2),
- text = _useState2[0],
- setText = _useState2[1];
- var _useState3 = useState(false),
+ data = _useState2[0],
+ setDate = _useState2[1];
+ var _useState3 = useState(defaultText),
  _useState4 = _slicedToArray(_useState3, 2),
- shouldFetch = _useState4[0],
- setShouldFetch = _useState4[1];
+ text = _useState4[0],
+ setText = _useState4[1];
+ var _useState5 = useState(false),
+ _useState6 = _slicedToArray(_useState5, 2),
+ shouldFetch = _useState6[0],
+ setShouldFetch = _useState6[1];
+ var _useState7 = useState(false),
+ _useState8 = _slicedToArray(_useState7, 2),
+ isPlaying = _useState8[0],
+ setIsPlaying = _useState8[1];
  var _useSWR = useSWR(shouldFetch ? [options.name, text].join('-') : null, function () {
- return postAzureSpeech(text, options, env);
+ return postAzureSpeech(text, options);
  }, {
- onError: function onError(error) {
- setShouldFetch(false);
- console.error(error);
+ onError: function onError() {
+ return setShouldFetch(false);
  },
- onSuccess: function onSuccess() {
+ onSuccess: function onSuccess(audioBufferSource) {
  setShouldFetch(false);
+ setIsPlaying(true);
+ setDate(audioBufferSource);
+ audioBufferSource.start();
+ audioBufferSource.addEventListener('ended', function () {
+ setShouldFetch(false);
+ setIsPlaying(false);
+ });
  }
  }),
- isLoading = _useSWR.isLoading,
- data = _useSWR.data;
+ isLoading = _useSWR.isLoading;
  return {
  data: data,
  isLoading: isLoading,
+ isPlaying: isPlaying,
  setText: setText,
  start: function start() {
+ if (isPlaying || shouldFetch) return;
  setShouldFetch(true);
+ if (!data) return;
+ try {
+ setIsPlaying(true);
+ data === null || data === void 0 || data.start();
+ } catch (_unused) {}
  },
  stop: function stop() {
+ if (!isPlaying) return;
  setShouldFetch(false);
+ setIsPlaying(false);
+ try {
+ data === null || data === void 0 || data.stop();
+ } catch (_unused2) {}
  }
  };
  };

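`useAzureSpeech` no longer takes a separate `env` argument: the Azure credentials move into `options.api`, the returned `data` is now a playable `AudioBufferSourceNode`, and an `isPlaying` flag is exposed. A minimal sketch of the new call shape; the import path, voice name, and component wiring are assumptions, only the option and return shapes come from this diff:

    import { useAzureSpeech } from '@lobehub/ui'; // import path assumed

    export const AzureTTS = () => {
      const { isLoading, isPlaying, setText, start, stop } = useAzureSpeech('Hello world', {
        api: { key: 'my-azure-key', region: 'eastus' }, // shape per AzureSpeechOptions below
        name: 'en-US-JennyNeural', // SsmlOptions voice name, also used in the SWR cache key
      });

      return (
        <>
          <input onChange={(e) => setText(e.target.value)} />
          <button disabled={isLoading || isPlaying} onClick={start}>Play</button>
          <button disabled={!isPlaying} onClick={stop}>Stop</button>
        </>
      );
    };
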
@@ -1,8 +1,9 @@
  /// <reference types="react" />
- import { SsmlOptions } from '../utils/genSSML';
- export declare const useMicrosoftSpeech: (defaultText: string, options: SsmlOptions) => {
- data: ArrayBuffer | undefined;
+ import { type MicrosoftSpeechOptions } from '../services/postMicrosoftSpeech';
+ export declare const useMicrosoftSpeech: (defaultText: string, options: MicrosoftSpeechOptions) => {
+ data: AudioBufferSourceNode | undefined;
  isLoading: boolean;
+ isPlaying: boolean;
  setText: import("react").Dispatch<import("react").SetStateAction<string>>;
  start: () => void;
  stop: () => void;

@@ -3,36 +3,61 @@ import { useState } from 'react';
  import useSWR from 'swr';
  import { postMicrosoftSpeech } from "../services/postMicrosoftSpeech";
  export var useMicrosoftSpeech = function useMicrosoftSpeech(defaultText, options) {
- var _useState = useState(defaultText),
+ var _useState = useState(),
  _useState2 = _slicedToArray(_useState, 2),
- text = _useState2[0],
- setText = _useState2[1];
- var _useState3 = useState(false),
+ data = _useState2[0],
+ setDate = _useState2[1];
+ var _useState3 = useState(defaultText),
  _useState4 = _slicedToArray(_useState3, 2),
- shouldFetch = _useState4[0],
- setShouldFetch = _useState4[1];
+ text = _useState4[0],
+ setText = _useState4[1];
+ var _useState5 = useState(false),
+ _useState6 = _slicedToArray(_useState5, 2),
+ shouldFetch = _useState6[0],
+ setShouldFetch = _useState6[1];
+ var _useState7 = useState(false),
+ _useState8 = _slicedToArray(_useState7, 2),
+ isPlaying = _useState8[0],
+ setIsPlaying = _useState8[1];
  var _useSWR = useSWR(shouldFetch ? [options.name, text].join('-') : null, function () {
  return postMicrosoftSpeech(text, options);
  }, {
- onError: function onError(error) {
- setShouldFetch(false);
- console.error(error);
+ onError: function onError() {
+ return setShouldFetch(false);
  },
- onSuccess: function onSuccess() {
+ onSuccess: function onSuccess(audioBufferSource) {
  setShouldFetch(false);
+ setIsPlaying(true);
+ setDate(audioBufferSource);
+ audioBufferSource.start();
+ audioBufferSource.addEventListener('ended', function () {
+ setShouldFetch(false);
+ setIsPlaying(false);
+ });
  }
  }),
- isLoading = _useSWR.isLoading,
- data = _useSWR.data;
+ isLoading = _useSWR.isLoading;
  return {
  data: data,
  isLoading: isLoading,
+ isPlaying: isPlaying,
  setText: setText,
  start: function start() {
+ if (isPlaying || shouldFetch) return;
  setShouldFetch(true);
+ if (!data) return;
+ try {
+ setIsPlaying(true);
+ data === null || data === void 0 || data.start();
+ } catch (_unused) {}
  },
  stop: function stop() {
+ if (!isPlaying) return;
  setShouldFetch(false);
+ setIsPlaying(false);
+ try {
+ data === null || data === void 0 || data.stop();
+ } catch (_unused2) {}
  }
  };
  };

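The Microsoft hook gets the same restructuring, but its options take an optional `api` string pointing at a speech proxy endpoint (see `MicrosoftSpeechOptions` and `postMicrosoftSpeech` further down). A compact sketch; the endpoint URL and voice name are illustrative:

    // Inside a component:
    const tts = useMicrosoftSpeech('Hello world', {
      api: 'https://example.com/api/microsoft-speech', // optional; falls back to MICROSOFT_SPEECH_PROXY_URL
      name: 'zh-CN-XiaoxiaoNeural',
    });
    // tts.start() / tts.stop() / tts.isPlaying behave as in the Azure hook above.
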
@@ -0,0 +1,6 @@
+ export declare const usePersistedSpeechRecognition: (locale: string) => {
+ isLoading: boolean;
+ start: () => void;
+ stop: () => void;
+ text: string;
+ };

@@ -0,0 +1,38 @@
+ import _toConsumableArray from "@babel/runtime/helpers/esm/toConsumableArray";
+ import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
+ import { useEffect, useState } from 'react';
+ import { useSpeechRecognition } from "./useSpeechRecognition";
+ export var usePersistedSpeechRecognition = function usePersistedSpeechRecognition(locale) {
+ var _useState = useState([]),
+ _useState2 = _slicedToArray(_useState, 2),
+ texts = _useState2[0],
+ setTexts = _useState2[1];
+ var _useState3 = useState(false),
+ _useState4 = _slicedToArray(_useState3, 2),
+ isGLobalLoading = _useState4[0],
+ setIsGlobalLoading = _useState4[1];
+ var _useSpeechRecognition = useSpeechRecognition(locale),
+ text = _useSpeechRecognition.text,
+ _stop = _useSpeechRecognition.stop,
+ _start = _useSpeechRecognition.start,
+ isLoading = _useSpeechRecognition.isLoading;
+ useEffect(function () {
+ if (isGLobalLoading && !isLoading) {
+ if (text) setTexts([].concat(_toConsumableArray(texts), [text]));
+ _start();
+ }
+ }, [isLoading, texts, text, _start, isGLobalLoading]);
+ return {
+ isLoading: isGLobalLoading,
+ start: function start() {
+ setTexts([]);
+ setIsGlobalLoading(true);
+ _start();
+ },
+ stop: function stop() {
+ _stop();
+ setIsGlobalLoading(false);
+ },
+ text: [].concat(_toConsumableArray(texts), [text]).filter(Boolean).join(',')
+ };
+ };

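The new `usePersistedSpeechRecognition` hook wraps `useSpeechRecognition` and keeps restarting it while active, accumulating each recognized segment and joining the pieces with commas. A usage sketch; the import path and locale value are assumptions:

    import { usePersistedSpeechRecognition } from '@lobehub/ui'; // import path assumed

    export const Dictation = () => {
      const { text, isLoading, start, stop } = usePersistedSpeechRecognition('en-US');

      return (
        <>
          <button onClick={isLoading ? stop : start}>{isLoading ? 'Stop' : 'Record'}</button>
          <p>{text}</p>
        </>
      );
    };
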
@@ -1,6 +1,6 @@
  export declare const useSpeechRecognition: (locale: string) => {
  isLoading: boolean;
- start: () => any;
+ start: () => void;
  stop: () => void;
  text: string;
  };

@@ -1,49 +1,70 @@
  import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
  var _window;
- import { useState } from 'react';
+ import { useEffect, useState } from 'react';
  var SpeechRecognition = (globalThis === null || globalThis === void 0 ? void 0 : globalThis.SpeechRecognition) || ((_window = window) === null || _window === void 0 ? void 0 : _window.webkitSpeechRecognition);
  export var useSpeechRecognition = function useSpeechRecognition(locale) {
- var _useState = useState(''),
+ var _useState = useState(null),
  _useState2 = _slicedToArray(_useState, 2),
- text = _useState2[0],
- setText = _useState2[1];
- var _useState3 = useState(false),
+ recognition = _useState2[0],
+ setRecognition = _useState2[1];
+ var _useState3 = useState(''),
  _useState4 = _slicedToArray(_useState3, 2),
- isLoading = _useState4[0],
- setIsLoading = _useState4[1];
+ text = _useState4[0],
+ setText = _useState4[1];
  var _useState5 = useState(false),
  _useState6 = _slicedToArray(_useState5, 2),
- finalStop = _useState6[0],
- setFinalStop = _useState6[1];
- var recognition = new SpeechRecognition();
- recognition.lang = locale;
- recognition.interimResults = true;
- recognition.continuous = true;
+ isLoading = _useState6[0],
+ setIsLoading = _useState6[1];
+ var _useState7 = useState(false),
+ _useState8 = _slicedToArray(_useState7, 2),
+ finalStop = _useState8[0],
+ setFinalStop = _useState8[1];
+ useEffect(function () {
+ if (recognition) return;
+ try {
+ var speechRecognition = new SpeechRecognition();
+ speechRecognition.interimResults = true;
+ speechRecognition.continuous = true;
+ speechRecognition.onstart = function () {
+ setFinalStop(false);
+ setIsLoading(true);
+ };
+ speechRecognition.onend = function () {
+ setIsLoading(false);
+ setFinalStop(true);
+ };
+ speechRecognition.onresult = function (_ref) {
+ var _result$;
+ var results = _ref.results;
+ if (!results) return;
+ var result = results[0];
+ if (!finalStop && result !== null && result !== void 0 && (_result$ = result[0]) !== null && _result$ !== void 0 && _result$.transcript) setText(result[0].transcript);
+ if (result.isFinal) {
+ speechRecognition.abort();
+ setIsLoading(false);
+ }
+ };
+ setRecognition(speechRecognition);
+ } catch (error) {
+ console.error(error);
+ }
+ }, []);
+ useEffect(function () {
+ if (recognition) recognition.lang = locale;
+ }, [locale, recognition]);
  var handleStop = function handleStop() {
- recognition.abort();
+ try {
+ recognition.abort();
+ } catch (_unused) {}
  setIsLoading(false);
  };
- recognition.onstart = function () {
- setFinalStop(false);
- setIsLoading(true);
- setText('');
- };
- recognition.onend = function () {
- setIsLoading(false);
- setFinalStop(true);
- };
- recognition.onresult = function (_ref) {
- var _result$;
- var results = _ref.results;
- if (!results) return;
- var result = results[0];
- if (!finalStop && result !== null && result !== void 0 && (_result$ = result[0]) !== null && _result$ !== void 0 && _result$.transcript) setText(result[0].transcript);
- if (result.isFinal) handleStop();
- };
  return {
  isLoading: isLoading,
  start: function start() {
- return recognition.start();
+ try {
+ setText('');
+ recognition.start();
+ } catch (_unused2) {}
  },
  stop: function stop() {
  return handleStop();

@@ -18,8 +18,8 @@ export var useSpeechSynthes = function useSpeechSynthes(defaultText, options) {
  utterance.voice = voiceList.find(function (item) {
  return item.name === options.name;
  });
- if (options.pitch) utterance.pitch = options.pitch;
- if (options.rate) utterance.rate = options.rate;
+ if (options.pitch) utterance.pitch = options.pitch * 10;
+ if (options.rate) utterance.rate = options.rate * 10;
  return utterance;
  }, [text, voiceList, options]);
  speechSynthesis.onvoiceschanged = function () {

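`useSpeechSynthes` now scales the `pitch` and `rate` options by 10 before applying them to the utterance, presumably so option values can stay in a small normalized range while the Web Speech API receives its native ranges (pitch 0 to 2, rate 0.1 to 10). A worked illustration with assumed option values:

    // Worked example of the new ×10 scaling:
    const options = { pitch: 0.1, rate: 0.15 };
    const utterance = new SpeechSynthesisUtterance('Hello');
    if (options.pitch) utterance.pitch = options.pitch * 10; // 1 — Web Speech API pitch range is 0–2
    if (options.rate) utterance.rate = options.rate * 10;    // 1.5 — rate range is 0.1–10
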
@@ -1,6 +1,7 @@
  export { useAzureSpeech } from './hooks/useAzureSpeech';
  export { useEdgeSpeech } from './hooks/useEdgeSpeech';
  export { useMicrosoftSpeech } from './hooks/useMicrosoftSpeech';
+ export { usePersistedSpeechRecognition } from './hooks/usePersistedSpeechRecognition';
  export { useSpeechRecognition } from './hooks/useSpeechRecognition';
  export { useSpeechSynthes } from './hooks/useSpeechSynthes';
  export { getAzureVoiceList, getEdgeVoiceList, getSpeechSynthesVoiceList, } from './utils/getVoiceList';

@@ -1,6 +1,7 @@
  export { useAzureSpeech } from "./hooks/useAzureSpeech";
  export { useEdgeSpeech } from "./hooks/useEdgeSpeech";
  export { useMicrosoftSpeech } from "./hooks/useMicrosoftSpeech";
+ export { usePersistedSpeechRecognition } from "./hooks/usePersistedSpeechRecognition";
  export { useSpeechRecognition } from "./hooks/useSpeechRecognition";
  export { useSpeechSynthes } from "./hooks/useSpeechSynthes";
  export { getAzureVoiceList, getEdgeVoiceList, getSpeechSynthesVoiceList } from "./utils/getVoiceList";

@@ -1,7 +1,8 @@
- /// <reference types="node" />
  import { type SsmlOptions } from '../utils/genSSML';
- export interface AzureSpeechEnv {
- SPEECH_KEY: string;
- SPEECH_REGION: string;
+ export interface AzureSpeechOptions extends SsmlOptions {
+ api: {
+ key: string;
+ region: string;
+ };
  }
- export declare const postAzureSpeech: (text: string, options: SsmlOptions, env: AzureSpeechEnv) => Promise<Buffer>;
+ export declare const postAzureSpeech: (text: string, { api, ...options }: AzureSpeechOptions) => Promise<AudioBufferSourceNode>;

@@ -1,45 +1,73 @@
+ import _objectWithoutProperties from "@babel/runtime/helpers/esm/objectWithoutProperties";
  import _asyncToGenerator from "@babel/runtime/helpers/esm/asyncToGenerator";
+ var _excluded = ["api"];
  import _regeneratorRuntime from "@babel/runtime/regenerator";
  import { AudioConfig, PropertyId, ResultReason, SpeechConfig, SpeechSynthesisOutputFormat, SpeechSynthesizer } from 'microsoft-cognitiveservices-speech-sdk';
  import { genSSML } from "../utils/genSSML";
  // 纯文本生成语音
  export var postAzureSpeech = /*#__PURE__*/function () {
- var _ref = _asyncToGenerator( /*#__PURE__*/_regeneratorRuntime.mark(function _callee(text, options, env) {
- var speechConfig, audioConfig, synthesizer, completeCb, errCb;
- return _regeneratorRuntime.wrap(function _callee$(_context) {
- while (1) switch (_context.prev = _context.next) {
+ var _ref2 = _asyncToGenerator( /*#__PURE__*/_regeneratorRuntime.mark(function _callee2(text, _ref) {
+ var api, options, key, region, speechConfig, audioConfig, synthesizer, completeCb, errCb;
+ return _regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) switch (_context2.prev = _context2.next) {
  case 0:
- speechConfig = SpeechConfig.fromSubscription(env.SPEECH_KEY, env.SPEECH_REGION);
+ api = _ref.api, options = _objectWithoutProperties(_ref, _excluded);
+ key = api.key || process.env.AZURE_SPEECH_KEY || '';
+ region = api.key || process.env.AZURE_SPEECH_REGION || '';
+ speechConfig = SpeechConfig.fromSubscription(key, region);
  speechConfig.setProperty(PropertyId.SpeechServiceResponse_RequestSentenceBoundary, 'true');
  speechConfig.speechSynthesisOutputFormat = SpeechSynthesisOutputFormat.Webm24Khz16BitMonoOpus;
  audioConfig = AudioConfig.fromDefaultSpeakerOutput();
  synthesizer = new SpeechSynthesizer(speechConfig, audioConfig);
- completeCb = function completeCb(result, resolve, reject) {
- if (result.reason === ResultReason.SynthesizingAudioCompleted) {
- resolve(Buffer.from(result.audioData));
- } else {
- reject(result);
- }
- synthesizer.close();
- };
+ completeCb = /*#__PURE__*/function () {
+ var _ref3 = _asyncToGenerator( /*#__PURE__*/_regeneratorRuntime.mark(function _callee(result, resolve) {
+ var audioData, audioContext, audioBufferSource;
+ return _regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) switch (_context.prev = _context.next) {
+ case 0:
+ if (!(result.reason === ResultReason.SynthesizingAudioCompleted)) {
+ _context.next = 9;
+ break;
+ }
+ audioData = result.audioData;
+ audioContext = new AudioContext();
+ audioBufferSource = audioContext.createBufferSource();
+ _context.next = 6;
+ return audioContext.decodeAudioData(audioData);
+ case 6:
+ audioBufferSource.buffer = _context.sent;
+ audioBufferSource.connect(audioContext.destination);
+ resolve(audioBufferSource);
+ case 9:
+ synthesizer.close();
+ case 10:
+ case "end":
+ return _context.stop();
+ }
+ }, _callee);
+ }));
+ return function completeCb(_x3, _x4) {
+ return _ref3.apply(this, arguments);
+ };
+ }();
  errCb = function errCb(err, reject) {
  reject(err);
  synthesizer.close();
  };
- return _context.abrupt("return", new Promise(function (resolve, reject) {
+ return _context2.abrupt("return", new Promise(function (resolve, reject) {
  synthesizer.speakSsmlAsync(genSSML(text, options), function (result) {
- return completeCb(result, resolve, reject);
+ return completeCb(result, resolve);
  }, function (err) {
  return errCb(err, reject);
  });
  }));
- case 8:
+ case 11:
  case "end":
- return _context.stop();
+ return _context2.stop();
  }
- }, _callee);
+ }, _callee2);
  }));
- return function postAzureSpeech(_x, _x2, _x3) {
- return _ref.apply(this, arguments);
+ return function postAzureSpeech(_x, _x2) {
+ return _ref2.apply(this, arguments);
  };
  }();

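Instead of resolving to a Node `Buffer`, `postAzureSpeech` now decodes the synthesized audio with the Web Audio API and resolves to an `AudioBufferSourceNode` already connected to an `AudioContext` destination. A direct-call sketch using the declaration above; the credential and voice values are illustrative, and the import path mirrors how the hooks consume this service:

    import { postAzureSpeech } from '../services/postAzureSpeech';

    const speak = async () => {
      const source = await postAzureSpeech('Hello world', {
        api: { key: 'my-azure-key', region: 'eastus' }, // AZURE_SPEECH_KEY / AZURE_SPEECH_REGION env vars also exist as fallbacks
        name: 'en-US-JennyNeural',
      });
      source.start(); // the node is already wired to the AudioContext destination
    };
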
@@ -1,2 +1,5 @@
  import { type SsmlOptions } from '../utils/genSSML';
- export declare const postMicrosoftSpeech: (text: string, options: SsmlOptions) => Promise<ArrayBuffer>;
+ export interface MicrosoftSpeechOptions extends SsmlOptions {
+ api?: string;
+ }
+ export declare const postMicrosoftSpeech: (text: string, { api, ...options }: MicrosoftSpeechOptions) => Promise<AudioBufferSourceNode>;

@@ -1,64 +1,46 @@
+ import _defineProperty from "@babel/runtime/helpers/esm/defineProperty";
+ import _objectWithoutProperties from "@babel/runtime/helpers/esm/objectWithoutProperties";
  import _asyncToGenerator from "@babel/runtime/helpers/esm/asyncToGenerator";
+ var _excluded = ["api"];
  import _regeneratorRuntime from "@babel/runtime/regenerator";
- import { v4 as uuidv4 } from 'uuid';
- import { genSSML } from "../utils/genSSML";
- var API = 'https://southeastasia.api.speech.microsoft.com/accfreetrial/texttospeech/acc/v3.0-beta1/vcg/speak';
+ function ownKeys(e, r) { var t = Object.keys(e); if (Object.getOwnPropertySymbols) { var o = Object.getOwnPropertySymbols(e); r && (o = o.filter(function (r) { return Object.getOwnPropertyDescriptor(e, r).enumerable; })), t.push.apply(t, o); } return t; }
+ function _objectSpread(e) { for (var r = 1; r < arguments.length; r++) { var t = null != arguments[r] ? arguments[r] : {}; r % 2 ? ownKeys(Object(t), !0).forEach(function (r) { _defineProperty(e, r, t[r]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(e, Object.getOwnPropertyDescriptors(t)) : ownKeys(Object(t)).forEach(function (r) { Object.defineProperty(e, r, Object.getOwnPropertyDescriptor(t, r)); }); } return e; }
+ import qs from 'query-string';
  export var postMicrosoftSpeech = /*#__PURE__*/function () {
- var _ref = _asyncToGenerator( /*#__PURE__*/_regeneratorRuntime.mark(function _callee(text, options) {
- var data, DEFAULT_HEADERS, response;
+ var _ref2 = _asyncToGenerator( /*#__PURE__*/_regeneratorRuntime.mark(function _callee(text, _ref) {
+ var api, options, response, audioData, audioContext, audioBufferSource;
  return _regeneratorRuntime.wrap(function _callee$(_context) {
  while (1) switch (_context.prev = _context.next) {
  case 0:
- data = JSON.stringify({
- offsetInPlainText: 0,
- properties: {
- SpeakTriggerSource: 'AccTuningPagePlayButton'
- },
- ssml: genSSML(text, options),
- ttsAudioFormat: 'audio-24khz-160kbitrate-mono-mp3'
- });
- DEFAULT_HEADERS = {
- 'accept': '*/*',
- 'accept-language': 'zh-CN,zh;q=0.9',
- 'authority': 'southeastasia.api.speech.microsoft.com',
- 'content-type': 'application/json',
- 'customvoiceconnectionid': uuidv4(),
- 'origin': 'https://speech.microsoft.com',
- 'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-site',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
- };
- _context.prev = 2;
- _context.next = 5;
- return fetch(API, {
- body: data,
- headers: DEFAULT_HEADERS,
- method: 'POST',
- // @ts-ignore
- responseType: 'arraybuffer'
- });
- case 5:
+ api = _ref.api, options = _objectWithoutProperties(_ref, _excluded);
+ _context.next = 3;
+ return fetch(qs.stringifyUrl({
+ query: _objectSpread({
+ text: text
+ }, options),
+ url: api || process.env.MICROSOFT_SPEECH_PROXY_URL || ''
+ }));
+ case 3:
  response = _context.sent;
- _context.next = 8;
+ _context.next = 6;
  return response.arrayBuffer();
- case 8:
- return _context.abrupt("return", _context.sent);
+ case 6:
+ audioData = _context.sent;
+ audioContext = new AudioContext();
+ audioBufferSource = audioContext.createBufferSource();
+ _context.next = 11;
+ return audioContext.decodeAudioData(audioData);
  case 11:
- _context.prev = 11;
- _context.t0 = _context["catch"](2);
- console.error(_context.t0);
- throw _context.t0;
- case 15:
+ audioBufferSource.buffer = _context.sent;
+ audioBufferSource.connect(audioContext.destination);
+ return _context.abrupt("return", audioBufferSource);
+ case 14:
  case "end":
  return _context.stop();
  }
- }, _callee, null, [[2, 11]]);
+ }, _callee);
  }));
  return function postMicrosoftSpeech(_x, _x2) {
- return _ref.apply(this, arguments);
+ return _ref2.apply(this, arguments);
  };
  }();

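The Microsoft service no longer POSTs to the `southeastasia` demo endpoint with spoofed browser headers; it now issues a plain GET against the configured proxy, encoding the text and SSML options as query parameters via `query-string`, then decodes the response into an `AudioBufferSourceNode`. A sketch of the URL the new code builds; the proxy URL and parameter values are illustrative:

    import qs from 'query-string';

    // Mirrors the fetch above: query = { text, ...options }, url = api || MICROSOFT_SPEECH_PROXY_URL
    const url = qs.stringifyUrl({
      query: { name: 'zh-CN-XiaoxiaoNeural', text: 'Hello world' },
      url: 'https://example.com/api/microsoft-speech',
    });
    // -> https://example.com/api/microsoft-speech?name=zh-CN-XiaoxiaoNeural&text=Hello%20world
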
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/ui",
- "version": "1.111.2",
+ "version": "1.113.0",
  "description": "Lobe UI is an open-source UI component library for building AIGC web apps",
  "keywords": [
  "lobehub",