@lobehub/ui 1.112.0 → 1.113.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/es/ChatList/Item.d.ts +4 -2
- package/es/ChatList/Item.js +3 -1
- package/es/ChatList/index.d.ts +1 -1
- package/es/index.d.ts +1 -1
- package/es/useTTS/hooks/{usePressSpeechRecognition.d.ts → usePersistedSpeechRecognition.d.ts} +1 -1
- package/es/useTTS/hooks/{usePressSpeechRecognition.js → usePersistedSpeechRecognition.js} +4 -5
- package/es/useTTS/hooks/useSpeechRecognition.d.ts +1 -1
- package/es/useTTS/hooks/useSpeechRecognition.js +53 -32
- package/es/useTTS/index.d.ts +1 -1
- package/es/useTTS/index.js +1 -1
- package/package.json +1 -1
package/es/ChatList/Item.d.ts
CHANGED

```diff
@@ -5,7 +5,8 @@ import { ChatMessage } from "../types/chatMessage";
 import { LLMRoleType } from "../types/llm";
 import { type ActionsBarProps } from './ActionsBar';
 export type OnMessageChange = (id: string, content: string) => void;
-export type
+export type OnActionsClick = (action: ActionEvent, message: ChatMessage) => void;
+export type OnAvatatsClick = (role: RenderRole) => ChatItemProps['onAvatarClick'];
 export type RenderRole = LLMRoleType | 'default' | string;
 export type RenderItem = FC<{
     key: string;
@@ -22,7 +23,8 @@ export interface ListItemProps {
     /**
     * @description Callback for clicking an action button
     */
-    onActionsClick?:
+    onActionsClick?: OnActionsClick;
+    onAvatarsClick?: OnAvatatsClick;
    /**
     * @description Callback for message changes
     */
```
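The new `onAvatarsClick` prop is a factory: given a message role, it returns the `onAvatarClick` handler the underlying `ChatItem` receives, or `undefined` to leave that avatar inert. Below is a minimal sketch of how a consumer might wire it up, assuming `ChatList`'s usual `data` prop of `ChatMessage[]`; the role check and logging are illustrative, not part of the diff. Note the published type name is `OnAvatatsClick` (sic) while the prop itself is spelled `onAvatarsClick`.

```tsx
import { ChatList, type OnAvatatsClick } from '@lobehub/ui';

// Only assistant avatars get a click handler; every other role returns undefined,
// so those avatars stay non-interactive. (Illustrative handler, not from the diff.)
const onAvatarsClick: OnAvatatsClick = (role) =>
  role === 'assistant' ? () => console.log('assistant avatar clicked') : undefined;

export default () => <ChatList data={[]} onAvatarsClick={onAvatarsClick} />;
```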
package/es/ChatList/Item.js
CHANGED

```diff
@@ -1,7 +1,7 @@
 import _defineProperty from "@babel/runtime/helpers/esm/defineProperty";
 import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
 import _objectWithoutProperties from "@babel/runtime/helpers/esm/objectWithoutProperties";
-var _excluded = ["renderMessagesExtra", "showTitle", "onActionsClick", "onMessageChange", "type", "text", "renderMessages", "renderErrorMessages", "renderActions", "loading", "groupNav", "renderItems"];
+var _excluded = ["renderMessagesExtra", "showTitle", "onActionsClick", "onAvatarsClick", "onMessageChange", "type", "text", "renderMessages", "renderErrorMessages", "renderActions", "loading", "groupNav", "renderItems"];
 function ownKeys(e, r) { var t = Object.keys(e); if (Object.getOwnPropertySymbols) { var o = Object.getOwnPropertySymbols(e); r && (o = o.filter(function (r) { return Object.getOwnPropertyDescriptor(e, r).enumerable; })), t.push.apply(t, o); } return t; }
 function _objectSpread(e) { for (var r = 1; r < arguments.length; r++) { var t = null != arguments[r] ? arguments[r] : {}; r % 2 ? ownKeys(Object(t), !0).forEach(function (r) { _defineProperty(e, r, t[r]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(e, Object.getOwnPropertyDescriptors(t)) : ownKeys(Object(t)).forEach(function (r) { Object.defineProperty(e, r, Object.getOwnPropertyDescriptor(t, r)); }); } return e; }
 import { App } from 'antd';
@@ -15,6 +15,7 @@ var Item = /*#__PURE__*/memo(function (props) {
   var renderMessagesExtra = props.renderMessagesExtra,
     showTitle = props.showTitle,
     onActionsClick = props.onActionsClick,
+    onAvatarsClick = props.onAvatarsClick,
     onMessageChange = props.onMessageChange,
     type = props.type,
     text = props.text,
@@ -123,6 +124,7 @@ var Item = /*#__PURE__*/memo(function (props) {
     messageExtra: /*#__PURE__*/_jsx(MessageExtra, {
       data: item
     }),
+    onAvatarClick: onAvatarsClick === null || onAvatarsClick === void 0 ? void 0 : onAvatarsClick(item.role),
     onChange: function onChange(value) {
       return onMessageChange === null || onMessageChange === void 0 ? void 0 : onMessageChange(item.id, value);
     },
```
package/es/ChatList/index.d.ts
CHANGED

```diff
@@ -10,6 +10,6 @@ export interface ChatListProps extends DivProps, ListItemProps {
     historyCount?: number;
     loadingId?: string;
 }
-export type {
+export type { OnActionsClick, OnAvatatsClick, OnMessageChange, RenderAction, RenderErrorMessage, RenderItem, RenderMessage, RenderMessageExtra, } from './Item';
 declare const ChatList: import("react").NamedExoticComponent<ChatListProps>;
 export default ChatList;
```
package/es/index.d.ts
CHANGED

```diff
@@ -7,7 +7,7 @@ export { default as ChatHeader, type ChatHeaderProps } from './ChatHeader';
 export { default as ChatHeaderTitle, type ChatHeaderTitleProps, } from './ChatHeader/ChatHeaderTitle';
 export { default as ChatInputArea, type ChatInputAreaProps } from './ChatInputArea';
 export { default as ChatItem, type ChatItemProps } from './ChatItem';
-export type { ChatListProps,
+export type { ChatListProps, OnActionsClick, OnAvatatsClick, OnMessageChange, RenderAction, RenderErrorMessage, RenderItem, RenderMessage, RenderMessageExtra, } from './ChatList';
 export { default as ChatList } from './ChatList';
 export { default as ActionsBar, type ActionsBarProps } from './ChatList/ActionsBar';
 export { default as CodeEditor, type CodeEditorProps } from './CodeEditor';
```
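With this re-export, the new callback types are reachable from the package root. A short sketch, assuming only that `ChatMessage` carries the `id` field already implied by `OnMessageChange`:

```ts
import type { OnActionsClick, OnMessageChange } from '@lobehub/ui';

// Typing handlers against the published aliases keeps them in sync with ChatList's props.
const handleAction: OnActionsClick = (action, message) => console.log(action, message.id);
const handleChange: OnMessageChange = (id, content) => console.log(id, content.length);
```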
|
@@ -2,7 +2,7 @@ import _toConsumableArray from "@babel/runtime/helpers/esm/toConsumableArray";
|
|
|
2
2
|
import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
|
|
3
3
|
import { useEffect, useState } from 'react';
|
|
4
4
|
import { useSpeechRecognition } from "./useSpeechRecognition";
|
|
5
|
-
export var
|
|
5
|
+
export var usePersistedSpeechRecognition = function usePersistedSpeechRecognition(locale) {
|
|
6
6
|
var _useState = useState([]),
|
|
7
7
|
_useState2 = _slicedToArray(_useState, 2),
|
|
8
8
|
texts = _useState2[0],
|
|
@@ -17,12 +17,11 @@ export var usePressSpeechRecognition = function usePressSpeechRecognition(locale
|
|
|
17
17
|
_start = _useSpeechRecognition.start,
|
|
18
18
|
isLoading = _useSpeechRecognition.isLoading;
|
|
19
19
|
useEffect(function () {
|
|
20
|
-
if (
|
|
21
|
-
setTexts([].concat(_toConsumableArray(texts), [text]));
|
|
22
|
-
_stop();
|
|
20
|
+
if (isGLobalLoading && !isLoading) {
|
|
21
|
+
if (text) setTexts([].concat(_toConsumableArray(texts), [text]));
|
|
23
22
|
_start();
|
|
24
23
|
}
|
|
25
|
-
}, [isLoading, texts, text]);
|
|
24
|
+
}, [isLoading, texts, text, _start, isGLobalLoading]);
|
|
26
25
|
return {
|
|
27
26
|
isLoading: isGLobalLoading,
|
|
28
27
|
start: function start() {
|
|
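Behaviourally, the renamed hook appends each finished transcript to its `texts` state and immediately restarts the underlying recognition while its own flag (`isGLobalLoading`) is still set, so listening persists across the browser's automatic session ends. A usage sketch under stated assumptions: only `isLoading` and `start` are visible in this hunk, and the import from the package root is presumed rather than shown here.

```tsx
import { usePersistedSpeechRecognition } from '@lobehub/ui';

// Hedged sketch: uses only the `isLoading` and `start` fields visible in this diff;
// the root-level import path is an assumption.
const DictationButton = ({ locale = 'en-US' }: { locale?: string }) => {
  const { isLoading, start } = usePersistedSpeechRecognition(locale);
  return (
    <button onClick={start} disabled={isLoading}>
      {isLoading ? 'Listening…' : 'Start dictation'}
    </button>
  );
};

export default DictationButton;
```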
package/es/useTTS/hooks/useSpeechRecognition.js
CHANGED

```diff
@@ -1,49 +1,70 @@
 import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
 var _window;
-import { useState } from 'react';
+import { useEffect, useState } from 'react';
 var SpeechRecognition = (globalThis === null || globalThis === void 0 ? void 0 : globalThis.SpeechRecognition) || ((_window = window) === null || _window === void 0 ? void 0 : _window.webkitSpeechRecognition);
 export var useSpeechRecognition = function useSpeechRecognition(locale) {
-  var _useState = useState(
+  var _useState = useState(null),
     _useState2 = _slicedToArray(_useState, 2),
-
-
-  var _useState3 = useState(
+    recognition = _useState2[0],
+    setRecognition = _useState2[1];
+  var _useState3 = useState(''),
     _useState4 = _slicedToArray(_useState3, 2),
-
-
+    text = _useState4[0],
+    setText = _useState4[1];
   var _useState5 = useState(false),
     _useState6 = _slicedToArray(_useState5, 2),
-
-
-  var
-
-
-
+    isLoading = _useState6[0],
+    setIsLoading = _useState6[1];
+  var _useState7 = useState(false),
+    _useState8 = _slicedToArray(_useState7, 2),
+    finalStop = _useState8[0],
+    setFinalStop = _useState8[1];
+  useEffect(function () {
+    if (recognition) return;
+    try {
+      var speechRecognition = new SpeechRecognition();
+      speechRecognition.interimResults = true;
+      speechRecognition.continuous = true;
+      speechRecognition.onstart = function () {
+        setFinalStop(false);
+        setIsLoading(true);
+      };
+      speechRecognition.onend = function () {
+        setIsLoading(false);
+        setFinalStop(true);
+      };
+      speechRecognition.onresult = function (_ref) {
+        var _result$;
+        var results = _ref.results;
+        if (!results) return;
+        var result = results[0];
+        if (!finalStop && result !== null && result !== void 0 && (_result$ = result[0]) !== null && _result$ !== void 0 && _result$.transcript) setText(result[0].transcript);
+        if (result.isFinal) {
+          speechRecognition.abort();
+          setIsLoading(false);
+        }
+      };
+      setRecognition(speechRecognition);
+    } catch (error) {
+      console.error(error);
+    }
+  }, []);
+  useEffect(function () {
+    if (recognition) recognition.lang = locale;
+  }, [locale, recognition]);
   var handleStop = function handleStop() {
-
+    try {
+      recognition.abort();
+    } catch (_unused) {}
     setIsLoading(false);
   };
-  recognition.onstart = function () {
-    setFinalStop(false);
-    setIsLoading(true);
-    setText('');
-  };
-  recognition.onend = function () {
-    setIsLoading(false);
-    setFinalStop(true);
-  };
-  recognition.onresult = function (_ref) {
-    var _result$;
-    var results = _ref.results;
-    if (!results) return;
-    var result = results[0];
-    if (!finalStop && result !== null && result !== void 0 && (_result$ = result[0]) !== null && _result$ !== void 0 && _result$.transcript) setText(result[0].transcript);
-    if (result.isFinal) handleStop();
-  };
   return {
     isLoading: isLoading,
     start: function start() {
-
+      try {
+        setText('');
+        recognition.start();
+      } catch (_unused2) {}
     },
     stop: function stop() {
       return handleStop();
```
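The rewrite constructs the `SpeechRecognition` instance once in a mount-time effect, keeps it in state, configures `interimResults`/`continuous`, and syncs `recognition.lang` to `locale` in a second effect; `start` and `stop` are now wrapped in try/catch so calling them where the Web Speech API is unavailable no longer throws. A hedged consumer sketch follows: `isLoading`, `start`, and `stop` appear in this hunk's return object, while the `text` field is inferred from how `usePersistedSpeechRecognition` consumes the hook, and the root-level import is assumed as in the previous sketch.

```tsx
import { useSpeechRecognition } from '@lobehub/ui';

// Hedged sketch: `text` is an inferred field; the locale and rendering are illustrative.
const Recognizer = () => {
  const { text, isLoading, start, stop } = useSpeechRecognition('en-US');
  return (
    <div>
      <button onClick={isLoading ? stop : start}>{isLoading ? 'Stop' : 'Start'}</button>
      <p>{text}</p>
    </div>
  );
};

export default Recognizer;
```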
package/es/useTTS/index.d.ts
CHANGED

```diff
@@ -1,7 +1,7 @@
 export { useAzureSpeech } from './hooks/useAzureSpeech';
 export { useEdgeSpeech } from './hooks/useEdgeSpeech';
 export { useMicrosoftSpeech } from './hooks/useMicrosoftSpeech';
-export {
+export { usePersistedSpeechRecognition } from './hooks/usePersistedSpeechRecognition';
 export { useSpeechRecognition } from './hooks/useSpeechRecognition';
 export { useSpeechSynthes } from './hooks/useSpeechSynthes';
 export { getAzureVoiceList, getEdgeVoiceList, getSpeechSynthesVoiceList, } from './utils/getVoiceList';
```
package/es/useTTS/index.js
CHANGED

```diff
@@ -1,7 +1,7 @@
 export { useAzureSpeech } from "./hooks/useAzureSpeech";
 export { useEdgeSpeech } from "./hooks/useEdgeSpeech";
 export { useMicrosoftSpeech } from "./hooks/useMicrosoftSpeech";
-export {
+export { usePersistedSpeechRecognition } from "./hooks/usePersistedSpeechRecognition";
 export { useSpeechRecognition } from "./hooks/useSpeechRecognition";
 export { useSpeechSynthes } from "./hooks/useSpeechSynthes";
 export { getAzureVoiceList, getEdgeVoiceList, getSpeechSynthesVoiceList } from "./utils/getVoiceList";
```
|