@cognizant-ai-lab/ui-common 1.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/components/AgentChat/ChatCommon.d.ts +94 -0
- package/dist/components/AgentChat/ChatCommon.js +581 -0
- package/dist/components/AgentChat/ControlButtons.d.ts +16 -0
- package/dist/components/AgentChat/ControlButtons.js +24 -0
- package/dist/components/AgentChat/FormattedMarkdown.d.ts +32 -0
- package/dist/components/AgentChat/FormattedMarkdown.js +82 -0
- package/dist/components/AgentChat/Greetings.d.ts +1 -0
- package/dist/components/AgentChat/Greetings.js +38 -0
- package/dist/components/AgentChat/LlmChatButton.d.ts +12 -0
- package/dist/components/AgentChat/LlmChatButton.js +33 -0
- package/dist/components/AgentChat/SendButton.d.ts +12 -0
- package/dist/components/AgentChat/SendButton.js +28 -0
- package/dist/components/AgentChat/SyntaxHighlighterThemes.d.ts +14 -0
- package/dist/components/AgentChat/SyntaxHighlighterThemes.js +27 -0
- package/dist/components/AgentChat/Types.d.ts +17 -0
- package/dist/components/AgentChat/Types.js +26 -0
- package/dist/components/AgentChat/UserQueryDisplay.d.ts +5 -0
- package/dist/components/AgentChat/UserQueryDisplay.js +33 -0
- package/dist/components/AgentChat/Utils.d.ts +11 -0
- package/dist/components/AgentChat/Utils.js +64 -0
- package/dist/components/AgentChat/VoiceChat/MicrophoneButton.d.ts +29 -0
- package/dist/components/AgentChat/VoiceChat/MicrophoneButton.js +55 -0
- package/dist/components/AgentChat/VoiceChat/VoiceChat.d.ts +33 -0
- package/dist/components/AgentChat/VoiceChat/VoiceChat.js +180 -0
- package/dist/components/Authentication/Auth.d.ts +14 -0
- package/dist/components/Authentication/Auth.js +58 -0
- package/dist/components/ChatBot/ChatBot.d.ts +20 -0
- package/dist/components/ChatBot/ChatBot.js +75 -0
- package/dist/components/Common/Breadcrumbs.d.ts +6 -0
- package/dist/components/Common/Breadcrumbs.js +36 -0
- package/dist/components/Common/LlmChatOptionsButton.d.ts +9 -0
- package/dist/components/Common/LlmChatOptionsButton.js +31 -0
- package/dist/components/Common/LoadingSpinner.d.ts +10 -0
- package/dist/components/Common/LoadingSpinner.js +24 -0
- package/dist/components/Common/MUIAccordion.d.ts +17 -0
- package/dist/components/Common/MUIAccordion.js +76 -0
- package/dist/components/Common/MUIAlert.d.ts +11 -0
- package/dist/components/Common/MUIAlert.js +41 -0
- package/dist/components/Common/MUIDialog.d.ts +16 -0
- package/dist/components/Common/MUIDialog.js +40 -0
- package/dist/components/Common/Navbar.d.ts +15 -0
- package/dist/components/Common/Navbar.js +137 -0
- package/dist/components/Common/PageLoader.d.ts +5 -0
- package/dist/components/Common/PageLoader.js +26 -0
- package/dist/components/Common/Snackbar.d.ts +5 -0
- package/dist/components/Common/Snackbar.js +84 -0
- package/dist/components/Common/confirmationModal.d.ts +14 -0
- package/dist/components/Common/confirmationModal.js +65 -0
- package/dist/components/Common/notification.d.ts +18 -0
- package/dist/components/Common/notification.js +79 -0
- package/dist/components/ErrorPage/ErrorBoundary.d.ts +38 -0
- package/dist/components/ErrorPage/ErrorBoundary.js +77 -0
- package/dist/components/ErrorPage/ErrorPage.d.ts +12 -0
- package/dist/components/ErrorPage/ErrorPage.js +46 -0
- package/dist/components/MultiAgentAccelerator/AgentFlow.d.ts +21 -0
- package/dist/components/MultiAgentAccelerator/AgentFlow.js +394 -0
- package/dist/components/MultiAgentAccelerator/AgentNode.d.ts +18 -0
- package/dist/components/MultiAgentAccelerator/AgentNode.js +129 -0
- package/dist/components/MultiAgentAccelerator/GraphLayouts.d.ts +33 -0
- package/dist/components/MultiAgentAccelerator/GraphLayouts.js +297 -0
- package/dist/components/MultiAgentAccelerator/MultiAgentAccelerator.d.ts +17 -0
- package/dist/components/MultiAgentAccelerator/MultiAgentAccelerator.js +208 -0
- package/dist/components/MultiAgentAccelerator/PlasmaEdge.d.ts +3 -0
- package/dist/components/MultiAgentAccelerator/PlasmaEdge.js +124 -0
- package/dist/components/MultiAgentAccelerator/Sidebar.d.ts +12 -0
- package/dist/components/MultiAgentAccelerator/Sidebar.js +204 -0
- package/dist/components/MultiAgentAccelerator/ThoughtBubbleEdge.d.ts +12 -0
- package/dist/components/MultiAgentAccelerator/ThoughtBubbleEdge.js +15 -0
- package/dist/components/MultiAgentAccelerator/ThoughtBubbleOverlay.d.ts +11 -0
- package/dist/components/MultiAgentAccelerator/ThoughtBubbleOverlay.js +466 -0
- package/dist/components/MultiAgentAccelerator/const.d.ts +7 -0
- package/dist/components/MultiAgentAccelerator/const.js +39 -0
- package/dist/const.d.ts +10 -0
- package/dist/const.js +30 -0
- package/dist/controller/agent/Agent.d.ts +56 -0
- package/dist/controller/agent/Agent.js +162 -0
- package/dist/controller/llm/LlmChat.d.ts +18 -0
- package/dist/controller/llm/LlmChat.js +65 -0
- package/dist/controller/llm/endpoints.d.ts +1 -0
- package/dist/controller/llm/endpoints.js +17 -0
- package/dist/generated/neuro-san/NeuroSanClient.d.ts +413 -0
- package/dist/generated/neuro-san/NeuroSanClient.js +28 -0
- package/dist/index.d.ts +37 -0
- package/dist/index.js +52 -0
- package/dist/state/UserInfo.d.ts +16 -0
- package/dist/state/UserInfo.js +27 -0
- package/dist/state/environment.d.ts +18 -0
- package/dist/state/environment.js +33 -0
- package/dist/tsconfig.build.tsbuildinfo +1 -0
- package/dist/utils/Authentication.d.ts +31 -0
- package/dist/utils/Authentication.js +94 -0
- package/dist/utils/BrowserNavigation.d.ts +5 -0
- package/dist/utils/BrowserNavigation.js +22 -0
- package/dist/utils/Theme.d.ts +7 -0
- package/dist/utils/Theme.js +7 -0
- package/dist/utils/agentConversations.d.ts +24 -0
- package/dist/utils/agentConversations.js +113 -0
- package/dist/utils/text.d.ts +28 -0
- package/dist/utils/text.js +64 -0
- package/dist/utils/title.d.ts +1 -0
- package/dist/utils/title.js +20 -0
- package/dist/utils/types.d.ts +17 -0
- package/dist/utils/types.js +16 -0
- package/dist/utils/useLocalStorage.d.ts +1 -0
- package/dist/utils/useLocalStorage.js +55 -0
- package/dist/utils/zIndexLayers.d.ts +2 -0
- package/dist/utils/zIndexLayers.js +29 -0
- package/package.json +69 -0

package/dist/components/AgentChat/ChatCommon.js
@@ -0,0 +1,581 @@
+import { jsx as _jsx, jsxs as _jsxs, Fragment as _Fragment } from "react/jsx-runtime";
+/*
+Copyright 2025 Cognizant Technology Solutions Corp, www.cognizant.com.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+/**
+ * See main function description.
+ */
+import { AIMessage, HumanMessage } from "@langchain/core/messages";
+import AccountTreeIcon from "@mui/icons-material/AccountTree";
+import ClearIcon from "@mui/icons-material/Clear";
+import CloseIcon from "@mui/icons-material/Close";
+import VerticalAlignBottomIcon from "@mui/icons-material/VerticalAlignBottom";
+import WrapTextIcon from "@mui/icons-material/WrapText";
+import { Box, Input, useColorScheme } from "@mui/material";
+import CircularProgress from "@mui/material/CircularProgress";
+import IconButton from "@mui/material/IconButton";
+import InputAdornment from "@mui/material/InputAdornment";
+import Tooltip from "@mui/material/Tooltip";
+import Typography from "@mui/material/Typography";
+import { jsonrepair } from "jsonrepair";
+import { cloneElement, forwardRef, isValidElement, useEffect, useImperativeHandle, useRef, useState, } from "react";
+import ReactMarkdown from "react-markdown";
+import SyntaxHighlighter from "react-syntax-highlighter";
+import { ControlButtons } from "./ControlButtons.js";
+import { FormattedMarkdown } from "./FormattedMarkdown.js";
+import { AGENT_GREETINGS } from "./Greetings.js";
+import { SendButton } from "./SendButton.js";
+import { HLJS_THEMES } from "./SyntaxHighlighterThemes.js";
+import { isLegacyAgentType } from "./Types.js";
+import { UserQueryDisplay } from "./UserQueryDisplay.js";
+import { chatMessageFromChunk, checkError, cleanUpAgentName } from "./Utils.js";
+import { MicrophoneButton } from "./VoiceChat/MicrophoneButton.js";
+import { cleanupAndStopSpeechRecognition, setupSpeechRecognition } from "./VoiceChat/VoiceChat.js";
+import { getAgentFunction, getConnectivity, sendChatQuery } from "../../controller/agent/Agent.js";
+import { sendLlmRequest } from "../../controller/llm/LlmChat.js";
+import { ChatMessageType, } from "../../generated/neuro-san/NeuroSanClient.js";
+import { hashString, hasOnlyWhitespace } from "../../utils/text.js";
+import { isDarkMode } from "../../utils/Theme.js";
+import { LlmChatOptionsButton } from "../Common/LlmChatOptionsButton.js";
+import { MUIAccordion } from "../Common/MUIAccordion.js";
+import { MUIAlert } from "../Common/MUIAlert.js";
+import { NotificationType, sendNotification } from "../Common/notification.js";
+// Define fancy EMPTY constant to avoid linter error about using object literals as default props
+const EMPTY = {};
+// Avatar to use for agents in chat
+const AGENT_IMAGE = "/agent.svg";
+// How many times to retry the entire agent interaction process. Some networks have a well-defined success condition.
+// For others, it's just "whenever the stream is done".
+const MAX_AGENT_RETRIES = 3;
+/**
+ * Common chat component for agent chat. This component is used by all agent chat components to provide a consistent
+ * experience for users when chatting with agents. It handles user input as well as displaying and nicely formatting
+ * agent responses. Customization for inputs and outputs is provided via event handlers-like props.
+ */
+export const ChatCommon = forwardRef((props, ref) => {
+    const slyData = useRef({});
+    const { id, currentUser, userImage, setIsAwaitingLlm, isAwaitingLlm, onChunkReceived, onStreamingStarted, onStreamingComplete, onSend, setPreviousResponse, targetAgent, legacyAgentEndpoint, agentPlaceholders = EMPTY, clearChatOnNewAgent = false, extraParams, backgroundColor, title, onClose, neuroSanURL, } = props;
+    // Expose the handleStop method to parent components via ref for external control (e.g., to cancel chat requests)
+    useImperativeHandle(ref, () => ({
+        handleStop,
+    }));
+    // User LLM chat input
+    const [chatInput, setChatInput] = useState("");
+    // Previous user query (for "regenerate" feature)
+    const [previousUserQuery, setPreviousUserQuery] = useState("");
+    // Chat output window contents
+    const [chatOutput, setChatOutput] = useState([]);
+    // To accumulate current response, which will be different from the contents of the output window if there is a
+    // chat session
+    const currentResponse = useRef("");
+    // Ref for output text area, so we can auto scroll it
+    const chatOutputRef = useRef(null);
+    // Ref for user input text area, so we can handle shift-enter
+    const chatInputRef = useRef(null);
+    // Controller for cancelling fetch request
+    const controller = useRef(null);
+    // For tracking if we're autoscrolling. A button allows the user to enable or disable autoscrolling.
+    const [autoScrollEnabled, setAutoScrollEnabled] = useState(true);
+    // ref for same
+    const autoScrollEnabledRef = useRef(autoScrollEnabled);
+    // Whether to wrap output text
+    const [shouldWrapOutput, setShouldWrapOutput] = useState(true);
+    // Keeps a copy of the last AI message so we can highlight it as "final answer"
+    const lastAIMessage = useRef("");
+    // Ref for the final answer key, so we can highlight the accordion
+    const finalAnswerKey = useRef("");
+    // Use useRef here since we don't want changes in the chat history to trigger a re-render
+    const chatHistory = useRef([]);
+    /* Use useRef here since we don't want changes in the chat context to trigger a re-render
+    Note on ChatContext vs ChatHistory:
+    "Legacy" (not Neuro-san) agents use ChatHistory, which is a collection of messages of various types, Human, AI,
+    System etc. It mimics the langchain field of the same name.
+    Neuro-san agents deal in ChatContext, which is a more complex collection of chat histories, since more agents
+    are involved.
+    Both fields fulfill the same purpose: to maintain conversation state across multiple messages.
+    */
+    const chatContext = useRef(null);
+    const finalAnswerRef = useRef(null);
+    const [showThinking, setShowThinking] = useState(false);
+    // Microphone state for voice input
+    const [isMicOn, setIsMicOn] = useState(false);
+    // Ref for speech recognition
+    const speechRecognitionRef = useRef(null);
+    // Voice state for speech recognition
+    const [voiceInputState, setVoiceInputState] = useState({
+        currentTranscript: "",
+        finalTranscript: "",
+        isListening: false,
+        isProcessingSpeech: false,
+    });
+    // Define styles based on user options (wrap setting)
+    const divStyle = shouldWrapOutput
+        ? {
+            whiteSpace: "normal",
+            overflow: "visible",
+            textOverflow: "clip",
+            overflowX: "visible",
+        }
+        : {
+            whiteSpace: "nowrap",
+            overflow: "hidden",
+            textOverflow: "ellipsis",
+            overflowX: "auto",
+        };
+    // Keeps track of whether the agent completed its task
+    const succeeded = useRef(false);
+    const { mode, systemMode } = useColorScheme();
+    const darkMode = isDarkMode(mode, systemMode);
+    const { atelierDuneDark, a11yLight } = HLJS_THEMES;
+    useEffect(() => {
+        // Set up speech recognition
+        const handlers = setupSpeechRecognition(setChatInput, setVoiceInputState, speechRecognitionRef);
+        // Clean up function
+        return () => cleanupAndStopSpeechRecognition(speechRecognitionRef, handlers);
+    }, []);
+    // Hide/show existing accordions based on showThinking state
+    useEffect(() => {
+        setChatOutput((currentOutput) => currentOutput.map((item) => {
+            if (isValidElement(item) && item.type === MUIAccordion) {
+                const itemAsAccordion = item;
+                return cloneElement(itemAsAccordion, {
+                    sx: {
+                        ...item.props.sx,
+                        display: showThinking || item.key === finalAnswerKey?.current ? "block" : "none",
+                    },
+                });
+            }
+            return item;
+        }));
+    }, [showThinking, darkMode]);
+    // Sync ref with state variable for use within timer etc.
+    useEffect(() => {
+        autoScrollEnabledRef.current = autoScrollEnabled;
+    }, [autoScrollEnabled]);
+    useEffect(() => {
+        // Delay for a second before focusing on the input area; gets around ChatBot stealing focus.
+        setTimeout(() => chatInputRef?.current?.focus(), 1000);
+    }, []);
+    // Auto scroll chat output window when new content is added
+    useEffect(() => {
+        // Scroll the final answer into view
+        if (finalAnswerRef.current && !isAwaitingLlm) {
+            const offset = 50;
+            chatOutputRef.current.scrollTop = finalAnswerRef.current.offsetTop - offset;
+            return;
+        }
+        if (autoScrollEnabledRef.current && chatOutputRef?.current) {
+            chatOutputRef.current.scrollTop = chatOutputRef.current.scrollHeight;
+        }
+    }, [chatOutput]);
+    useEffect(() => {
+        // Clear chat output on change of neuro-san URL
+        // TODO: We want to revise this in the future to not need a useEffect
+        setChatOutput([]);
+        currentResponse.current = "";
+        setShowThinking(false);
+    }, [neuroSanURL]);
+    /**
+     * Process a log line from the agent and format it nicely using the syntax highlighter and Accordion components.
+     * By the time we get to here, it's assumed things like errors and termination conditions have already been handled.
+     *
+     * @param logLine The log line to process
+     * @param messageType The type of the message (AI, LEGACY_LOGS etc.). Used for displaying certain message types
+     * differently
+     * @param isFinalAnswer If true, the log line is the final answer from the agent. This will be highlighted in some
+     * way to draw the user's attention to it.
+     * @param summary Used as the "title" for the accordion block. Something like an agent name or "Final Answer"
+     * @returns A React component representing the log line (agent message)
+     */
+    const processLogLine = (logLine, summary, messageType, isFinalAnswer) => {
+        // extract the parts of the line
+        let repairedJson;
+        try {
+            // Attempt to parse as JSON
+            // First, repair it. Also replace "escaped newlines" with actual newlines for better display.
+            repairedJson = jsonrepair(logLine);
+            // Now try to parse it. We don't care about the result, only if it throws on parsing.
+            JSON.parse(repairedJson);
+            repairedJson = repairedJson.replace(/\\n/gu, "\n").replace(/\\"/gu, "'");
+        }
+        catch {
+            // Not valid JSON
+            repairedJson = null;
+        }
+        const hashedSummary = hashString(summary);
+        const isAIMessage = messageType === ChatMessageType.AI;
+        if (isAIMessage && !isFinalAnswer) {
+            lastAIMessage.current = logLine;
+        }
+        if (isFinalAnswer) {
+            // Save key of final answer for highlighting
+            finalAnswerKey.current = hashedSummary;
+        }
+        return (_jsx(MUIAccordion, { id: `${hashedSummary}-panel`, defaultExpandedPanelKey: isFinalAnswer ? 1 : null, items: [
+                {
+                    title: summary,
+                    content: (_jsx("div", { id: `${summary}-details`, children: repairedJson ? (_jsx(SyntaxHighlighter, { id: "syntax-highlighter", language: "json", style: darkMode ? atelierDuneDark : a11yLight, showLineNumbers: false, wrapLongLines: shouldWrapOutput, children: repairedJson })) : (_jsx(ReactMarkdown, { children: logLine || "No further details" }, hashString(logLine))) })),
+                },
+            ], sx: {
+                fontSize: "large",
+                marginBottom: "1rem",
+                display: showThinking || isFinalAnswer ? "block" : "none",
+                boxShadow: isFinalAnswer
+                    ? `0 6px 16px 0 rgba(0, 0, 0, 0.08), 0 3px 6px -4px rgba(0, 0, 0, 0.12),
+                    0 9px 28px 8px rgba(0, 0, 0, 0.05)`
+                    : "none",
+            } }, hashedSummary));
+    };
+    /**
+     * Introduce the agent to the user with a friendly greeting
+     */
+    const introduceAgent = () => {
+        updateOutput(_jsx(UserQueryDisplay, { userQuery: cleanUpAgentName(targetAgent), title: targetAgent, userImage: AGENT_IMAGE }));
+        // Random greeting
+        const greeting = AGENT_GREETINGS[Math.floor(Math.random() * AGENT_GREETINGS.length)];
+        updateOutput(greeting);
+    };
+    /**
+     * Render the connectivity info as a list of origins and their tools
+     * @param connectivityInfo The connectivity info to render
+     * @returns A ReactNode representing the connectivity info with agents and their tools
+     */
+    const renderConnectivityInfo = (connectivityInfo) => (_jsx(_Fragment, { children: connectivityInfo
+            // Don't show connection to self
+            .filter((info) => info.origin.toLowerCase() !== targetAgent.toLowerCase())
+            // Sort by origin name
+            .sort((a, b) => a.origin.localeCompare(b.origin))
+            // Render each origin and its tools
+            .map((info) => (_jsxs("li", { id: info.origin, children: [_jsx("b", { id: info.origin, children: info.origin }), _jsx("ul", { id: `${info.origin}-tools`, style: { marginLeft: "8px" }, children: info?.tools?.map((tool) => (_jsx("li", { id: tool, children: tool }, tool))) })] }, info.origin))) }));
+    useEffect(() => {
+        const newAgent = async () => {
+            if (clearChatOnNewAgent) {
+                // New agent, so clear chat context if desired
+                chatContext.current = null;
+                currentResponse.current = "";
+                slyData.current = null;
+                setChatOutput([]);
+            }
+            // Introduce the agent to the user
+            introduceAgent();
+            // if not neuro san agent return since we won't get connectivity info
+            if (isLegacyAgentType(targetAgent)) {
+                return;
+            }
+            let agentFunction;
+            // It is a Neuro-san agent, so get the function and connectivity info
+            try {
+                agentFunction = await getAgentFunction(neuroSanURL, targetAgent, currentUser);
+            }
+            catch {
+                // For now, just return. May be a legacy agent without a functional description in Neuro-san.
+                return;
+            }
+            try {
+                const connectivity = await getConnectivity(neuroSanURL, targetAgent, currentUser);
+                updateOutput(_jsx(MUIAccordion, { id: `${id}-agent-details`, sx: {
+                        marginTop: "1rem",
+                        marginBottom: "1rem",
+                        borderColor: darkMode ? "var(--bs-white)" : "var(--bs-border-color)",
+                    }, items: [
+                        {
+                            title: "Agent Details",
+                            content: [
+                                `My description is: "${agentFunction?.function?.description}"`,
+                                _jsx("h6", { id: "connectivity-header", style: { marginTop: "1rem" }, children: "I can connect you to the following agents" }, "item-1"),
+                                _jsx("ul", { id: "connectivity-list", "aria-labelledby": "connectivity-header", style: { marginTop: "1rem" }, children: renderConnectivityInfo(connectivity?.connectivity_info.concat()) }, "item-2"),
+                            ],
+                        },
+                    ] }));
+            }
+            catch (e) {
+                sendNotification(NotificationType.error, `Failed to get connectivity info for ${cleanUpAgentName(targetAgent)}. Error: ${e}`);
+            }
+        };
+        if (targetAgent) {
+            void newAgent();
+        }
+    }, [targetAgent]);
+    /**
+     * Handles adding content to the output window.
+     * @param node A ReactNode to add to the output window -- text, spinner, etc. but could also be simple string
+     * @returns Nothing, but updates the output window with the new content. Updates currentResponse as a side effect.
+     */
+    const updateOutput = (node) => {
+        currentResponse.current += node;
+        setChatOutput((currentOutput) => [...currentOutput, node]);
+    };
+    /**
+     * Reset the state of the component. This is called after a request is completed, regardless of success or failure.
+     */
+    const resetState = () => {
+        // Reset state, whatever happened during request
+        setIsAwaitingLlm(false);
+        setChatInput("");
+        lastAIMessage.current = "";
+        finalAnswerRef.current = null;
+        // Get agent name, either from the enum (Neuro-san) or from the targetAgent string directly (legacy)
+        setPreviousResponse?.(targetAgent, currentResponse.current);
+        currentResponse.current = "";
+    };
+    const handleStop = () => {
+        try {
+            controller?.current?.abort();
+            controller.current = null;
+            updateOutput(_jsx(MUIAlert, { id: "opp-finder-error-occurred-alert", severity: "warning", children: "Request cancelled." }));
+        }
+        finally {
+            resetState();
+        }
+    };
+    // Regex to check if user has typed anything besides whitespace
+    const userInputEmpty = !chatInput || chatInput.length === 0 || hasOnlyWhitespace(chatInput);
+    // Enable Send button when there is user input and not awaiting a response
+    const shouldEnableSendButton = !userInputEmpty && !isAwaitingLlm;
+    // Enable regenerate button when there is a previous query to resent, and we're not awaiting a response
+    const shouldEnableRegenerateButton = previousUserQuery && !isAwaitingLlm;
+    // Enable Clear Chat button if not awaiting response and there is chat output to clear
+    const enableClearChatButton = !isAwaitingLlm && chatOutput.length > 0;
+    /**
+     * Extract the final answer from the response from a legacy agent
+     * @param response The response from the legacy agent
+     * @returns The final answer from the agent, if it exists or null if it doesn't
+     */
+    const extractFinalAnswer = (response) => /Final Answer: (?<finalAnswerText>.*)/su.exec(response)?.groups?.["finalAnswerText"];
+    const handleChunk = (chunk) => {
+        // Give container a chance to process the chunk first
+        const onChunkReceivedResult = onChunkReceived?.(chunk) ?? true;
+        succeeded.current = succeeded.current || onChunkReceivedResult;
+        // For legacy agents, we either get plain text or markdown. Just output it as-is.
+        if (isLegacyAgentType(targetAgent)) {
+            // Display output as-is
+            updateOutput(chunk);
+            // Check for Final Answer from legacy agent
+            const finalAnswerMatch = extractFinalAnswer(currentResponse.current);
+            if (finalAnswerMatch) {
+                lastAIMessage.current = finalAnswerMatch;
+            }
+            return;
+        }
+        // For Neuro-san agents, we expect a ChatMessage structure in the chunk.
+        const chatMessage = chatMessageFromChunk(chunk);
+        if (!chatMessage) {
+            // This is an error since Neuro-san agents should send us ChatMessage structures.
+            // But don't want to spam output by logging errors for every bad message.
+            return;
+        }
+        // It's a ChatMessage. Does it have chat context? Only AGENT_FRAMEWORK messages can have chat context.
+        if (chatMessage.type === ChatMessageType.AGENT_FRAMEWORK && chatMessage.chat_context) {
+            // Save the chat context, potentially overwriting any previous ones we received during this session.
+            // We only care about the last one received.
+            chatContext.current = chatMessage.chat_context;
+            // Nothing more to do with this message. It's just a message to give us the chat context, so return
+            return;
+        }
+        // Merge slyData.current with incoming chatMessage.sly_data
+        if (chatMessage.sly_data) {
+            slyData.current = { ...slyData.current, ...chatMessage.sly_data };
+        }
+        // Check if there is an error block in the "structure" field of the chat message.
+        if (chatMessage.structure) {
+            // If there is an error block, we should display it as an alert.
+            const errorMessage = checkError(chatMessage.structure);
+            if (errorMessage) {
+                updateOutput(_jsx(MUIAlert, { id: "retry-message-alert", severity: "warning", children: errorMessage }));
+                succeeded.current = false;
+            }
+        }
+        else if (chatMessage?.text?.trim() !== "") {
+            // Not an error, so output it if it has text. The backend sometimes sends messages with no text content,
+            // and we don't want to display those to the user.
+            // Agent name is the last tool in the origin array. If it's not there, use a default name.
+            const agentName = chatMessage.origin?.length > 0
+                ? cleanUpAgentName(chatMessage.origin[chatMessage.origin.length - 1].tool)
+                : "Agent message";
+            updateOutput(processLogLine(chatMessage.text, agentName, chatMessage.type));
+        }
+    };
+    async function doQueryLoop(query) {
+        succeeded.current = false;
+        let attemptNumber = 0;
+        let wasAborted = false;
+        do {
+            try {
+                // Increment the attempt number and set the state to indicate we're awaiting a response
+                attemptNumber += 1;
+                // Check which agent type we are dealing with
+                if (isLegacyAgentType(targetAgent)) {
+                    // It's a legacy agent (these go directly to the LLM and are different from the Neuro-san agents).
+                    // Send the chat query to the server. This will block until the stream ends from the server
+                    await sendLlmRequest(handleChunk, controller?.current.signal, legacyAgentEndpoint, extraParams, query, chatHistory.current);
+                }
+                else {
+                    // It's a Neuro-san agent.
+                    // Some coded tools (data generator...) expect the username provided in slyData.
+                    const slyDataWithUserName = { ...slyData.current, login: currentUser };
+                    await sendChatQuery(neuroSanURL, controller?.current.signal, query, targetAgent, handleChunk, chatContext.current, slyDataWithUserName, currentUser);
+                }
+            }
+            catch (error) {
+                // Was it due to user aborting the request?
+                wasAborted = error instanceof Error && error.name === "AbortError";
+                if (wasAborted) {
+                    // AbortErrors are handled elsewhere. We also want to stop retries here.
+                    break;
+                }
+                if (!wasAborted) {
+                    if (error instanceof Error) {
+                        console.error(error, error.stack);
+                    }
+                    updateOutput(_jsx(MUIAlert, { id: "opp-finder-error-occurred-alert", severity: "error", children: `Error occurred: ${error}` }));
+                }
+            }
+        } while (attemptNumber < MAX_AGENT_RETRIES && !succeeded.current);
+        return { wasAborted };
+    }
+    const handleSend = async (query) => {
+        // Record user query in chat history
+        chatHistory.current = [...chatHistory.current, new HumanMessage(previousUserQuery)];
+        // Allow parent to intercept and modify the query before sending if needed
+        const queryToSend = onSend?.(query) ?? query;
+        // Save query for "regenerate" use. Again we save the real user input, not the modified query. It will again
+        // get intercepted and re-modified (if applicable) on "regenerate".
+        setPreviousUserQuery(query);
+        setIsAwaitingLlm(true);
+        // Always start output by echoing user query.
+        // Note: we display the original user query, not the modified one. The modified one could be a monstrosity
+        // that we generated behind their back. Ultimately, we shouldn't need to generate a fake query on behalf of the
+        // user, but currently we do for orchestration.
+        updateOutput(_jsx(UserQueryDisplay, { userQuery: query, title: currentUser, userImage: userImage }));
+        // Add ID block for agent
+        updateOutput(_jsx(UserQueryDisplay, { userQuery: cleanUpAgentName(targetAgent), title: targetAgent, userImage: AGENT_IMAGE }));
+        // Allow clients to do something when streaming starts
+        onStreamingStarted?.();
+        // Set up the abort controller
+        controller.current = new AbortController();
+        setIsAwaitingLlm(true);
+        if (showThinking) {
+            updateOutput(_jsx(MUIAccordion, { id: "initiating-orchestration-accordion", items: [
+                    {
+                        title: `Contacting ${cleanUpAgentName(targetAgent)}...`,
+                        content: `Query: ${queryToSend}`,
+                    },
+                ], sx: { marginBottom: "1rem" } }));
+        }
+        try {
+            const { wasAborted } = await doQueryLoop(queryToSend);
+            if (!wasAborted && !succeeded.current) {
+                updateOutput(_jsx(MUIAlert, { id: "opp-finder-max-retries-exceeded-alert", severity: "error", children: `Gave up after ${MAX_AGENT_RETRIES} attempts.` }));
+            }
+            // Display prominent "Final Answer" message if we have one
+            if (lastAIMessage.current) {
+                // Legacy agents text is a bit messy and doesn't add a blank line, so we add it here
+                if (isLegacyAgentType(targetAgent)) {
+                    updateOutput(" \n\n");
+                }
+                updateOutput(_jsx("div", { id: "final-answer-div", ref: finalAnswerRef, style: { marginBottom: "1rem" }, children: processLogLine(lastAIMessage.current, "Final Answer", ChatMessageType.AI, true) }));
+            }
+            // Add a blank line after response
+            updateOutput("\n");
+            // Record bot answer in history.
+            if (currentResponse?.current?.length > 0) {
+                chatHistory.current = [...chatHistory.current, new AIMessage(currentResponse.current)];
+            }
+        }
+        finally {
+            resetState();
+            // Allow parent components to do something when streaming is complete
+            onStreamingComplete?.();
+        }
+    };
+    return (_jsxs(Box, { id: `llm-chat-${id}`, sx: {
+            display: "flex",
+            flexDirection: "column",
+            flexGrow: 1,
+            height: "100%",
+        }, children: [title && (_jsxs(Box, { id: `llm-chat-title-container-${id}`, sx: {
+                alignItems: "center",
+                borderTopLeftRadius: "var(--bs-border-radius)",
+                borderTopRightRadius: "var(--bs-border-radius)",
+                display: "flex",
+                justifyContent: "space-between",
+                paddingLeft: "1rem",
+                paddingRight: "0.5rem",
+                paddingTop: "0.25rem",
+                paddingBottom: "0.25rem",
+            }, children: [_jsx(Typography, { id: `llm-chat-title-${id}-text`, sx: { fontSize: "0.9rem" }, children: title }), onClose && (_jsx(IconButton, { "data-testid": `close-button-${id}`, id: `close-button-${id}`, onClick: onClose, children: _jsx(CloseIcon, { id: `close-icon-${id}` }) }))] })), _jsxs(Box, { id: "llm-response-div", sx: {
+                ...divStyle,
+                border: "var(--bs-border-width) var(--bs-border-style)",
+                borderRadius: "var(--bs-border-radius)",
+                display: "flex",
+                flexGrow: 1,
+                height: "100%",
+                margin: "10px",
+                position: "relative",
+                overflowY: "auto",
+            }, children: [_jsx(Tooltip, { id: "show-thinking", title: showThinking ? "Displaying agent thinking" : "Hiding agent thinking", children: _jsx("span", { id: "show-thinking-span", children: _jsx(LlmChatOptionsButton, { enabled: showThinking, id: "show-thinking-button", onClick: () => setShowThinking(!showThinking), posRight: 150, disabled: isAwaitingLlm, children: _jsx(AccountTreeIcon, { id: "show-thinking-icon", sx: { color: "var(--bs-white)", fontSize: "0.85rem" } }) }) }) }), _jsx(Tooltip, { id: "enable-autoscroll", title: autoScrollEnabled ? "Autoscroll enabled" : "Autoscroll disabled", children: _jsx(LlmChatOptionsButton, { enabled: autoScrollEnabled, id: "autoscroll-button", onClick: () => setAutoScrollEnabled(!autoScrollEnabled), posRight: 80, children: _jsx(VerticalAlignBottomIcon, { id: "autoscroll-icon", sx: { color: "var(--bs-white)", fontSize: "0.85rem" } }) }) }), _jsx(Tooltip, { id: "wrap-tooltip", title: shouldWrapOutput ? "Text wrapping enabled" : "Text wrapping disabled", children: _jsx(LlmChatOptionsButton, { enabled: shouldWrapOutput, id: "wrap-button", onClick: () => setShouldWrapOutput(!shouldWrapOutput), posRight: 10, children: _jsx(WrapTextIcon, { id: "wrap-icon", sx: { color: "var(--bs-white)", fontSize: "0.85rem" } }) }) }), _jsxs(Box, { id: "llm-responses", ref: chatOutputRef, sx: {
+                    backgroundColor: backgroundColor || "var(--bs-secondary-blue)",
+                    borderWidth: "1px",
+                    borderRadius: "0.5rem",
+                    fontSize: "smaller",
+                    resize: "none",
+                    overflowY: "auto", // Enable vertical scrollbar
+                    paddingBottom: "60px",
+                    paddingTop: "7.5px",
+                    paddingLeft: "15px",
+                    paddingRight: "15px",
+                    width: "100%",
+                }, tabIndex: -1, children: [_jsx(FormattedMarkdown, { id: `${id}-formatted-markdown`, nodesList: chatOutput, style: darkMode ? atelierDuneDark : a11yLight, wrapLongLines: shouldWrapOutput }), isAwaitingLlm && (_jsxs(Box, { id: "awaitingOutputContainer", sx: { display: "flex", alignItems: "center", fontSize: "smaller" }, children: [_jsx("span", { id: "working-span", style: { marginRight: "1rem" }, children: "Working..." }), _jsx(CircularProgress, { id: "awaitingOutputSpinner", sx: {
+                                color: "var(--bs-primary)",
+                            }, size: "1rem" })] }))] }), _jsx(ControlButtons, { clearChatOnClickCallback: () => {
+                    setChatOutput([]);
+                    chatHistory.current = [];
+                    chatContext.current = null;
+                    setPreviousUserQuery("");
+                    currentResponse.current = "";
+                    lastAIMessage.current = "";
+                    introduceAgent();
+                }, enableClearChatButton: enableClearChatButton, isAwaitingLlm: isAwaitingLlm, handleSend: handleSend, handleStop: handleStop, previousUserQuery: previousUserQuery, shouldEnableRegenerateButton: shouldEnableRegenerateButton })] }), _jsxs(Box, { id: "user-input-div", style: { ...divStyle, display: "flex", margin: "10px", alignItems: "flex-end", position: "relative" }, children: [_jsx(Input, { autoComplete: "off", id: "user-input", multiline: true, placeholder: agentPlaceholders[targetAgent] || `Chat with ${cleanUpAgentName(targetAgent)}`, ref: chatInputRef, sx: {
+                    border: "var(--bs-border-style) var(--bs-border-width) var(--bs-gray-light)",
+                    borderRadius: "var(--bs-border-radius)",
+                    display: "flex",
+                    flexGrow: 1,
+                    fontSize: "smaller",
+                    marginRight: "0.75rem",
+                    paddingBottom: "0.5rem",
+                    paddingTop: "0.5rem",
+                    paddingLeft: "1rem",
+                    paddingRight: "1rem",
+                    transition: "margin-right 0.2s",
+                }, onChange: (event) => {
+                    setChatInput(event.target.value);
+                }, onKeyDown: async (event) => {
+                    if (event.key === "Enter" && !event.shiftKey) {
+                        event.preventDefault();
+                        await handleSend(chatInput);
+                    }
+                }, value: chatInput, endAdornment: _jsxs(InputAdornment, { id: "input-adornments", position: "end", disableTypography: true, children: [voiceInputState.isProcessingSpeech && (_jsx(CircularProgress, { size: 16, sx: {
+                                color: "var(--bs-primary)",
+                                marginRight: "0.5rem",
+                            } })), _jsx(IconButton, { id: "clear-input-button", onClick: () => {
+                                setChatInput("");
+                            }, sx: {
+                                color: "var(--bs-primary)",
+                                opacity: userInputEmpty ? "25%" : "100%",
+                            }, disabled: userInputEmpty, tabIndex: -1, edge: "end", children: _jsx(ClearIcon, { id: "clear-input-icon" }) })] }) }), _jsx(MicrophoneButton, { isMicOn: isMicOn, onMicToggle: setIsMicOn, speechRecognitionRef: speechRecognitionRef, voiceInputState: voiceInputState, setVoiceInputState: setVoiceInputState }), _jsx(SendButton, { enableSendButton: shouldEnableSendButton, id: "submit-query-button", onClickCallback: () => handleSend(chatInput) })] })] }));
+});
+// Set a useful display name for the component for debugging purposes. We have to do it here because we're using
+// forwardRef in the main definition.
+ChatCommon.displayName = "ChatCommon";
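
For orientation, the following is a minimal, hypothetical usage sketch of the ChatCommon component added above. It is based only on the props destructured in this file and on the handleStop method exposed through the forwarded ref; the import path, the exact prop types, and all concrete values (user name, agent name, server URL, avatar path) are illustrative assumptions, not part of this diff.

    import { useRef, useState } from "react";
    // Assumption: ChatCommon is re-exported from the package root (the contents of package/dist/index.d.ts are not shown here).
    import { ChatCommon } from "@cognizant-ai-lab/ui-common";

    export const SupportChatPanel = () => {
        // ChatCommon reports busy/idle through this state pair.
        const [isAwaitingLlm, setIsAwaitingLlm] = useState(false);
        // The forwarded ref exposes handleStop for cancelling an in-flight request.
        const chatRef = useRef<{ handleStop: () => void }>(null);
        return (
            <>
                <button disabled={!isAwaitingLlm} onClick={() => chatRef.current?.handleStop()}>
                    Cancel request
                </button>
                {/* All string values below are hypothetical placeholders. */}
                <ChatCommon
                    ref={chatRef}
                    id="support-chat"
                    title="Support chat"
                    currentUser="jane.doe"
                    userImage="/user.svg"
                    targetAgent="hello_world"
                    neuroSanURL="https://neuro-san.example.com"
                    isAwaitingLlm={isAwaitingLlm}
                    setIsAwaitingLlm={setIsAwaitingLlm}
                    onClose={() => console.log("chat closed")}
                />
            </>
        );
    };

In this sketch, currentUser is also what ChatCommon merges into sly_data as "login" when talking to a Neuro-san agent, and id is used to build DOM ids such as llm-chat-${id}.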

package/dist/components/AgentChat/ControlButtons.d.ts
@@ -0,0 +1,16 @@
+import { FC } from "react";
+interface ControlButtonsProps {
+    clearChatOnClickCallback: () => void;
+    enableClearChatButton: boolean;
+    isAwaitingLlm: boolean;
+    handleSend: (query: string) => void;
+    handleStop: () => void;
+    previousUserQuery: string;
+    shouldEnableRegenerateButton: boolean;
+}
+/**
+ * Generate the Control Buttons for a chat window.
+ * @returns A fragment containing the Control Buttons.
+ */
+export declare const ControlButtons: FC<ControlButtonsProps>;
+export {};

package/dist/components/AgentChat/ControlButtons.js
@@ -0,0 +1,24 @@
+import { jsx as _jsx, Fragment as _Fragment, jsxs as _jsxs } from "react/jsx-runtime";
+/*
+Copyright 2025 Cognizant Technology Solutions Corp, www.cognizant.com.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+import { DeleteOutline, Loop, StopCircle } from "@mui/icons-material";
+import { SmallLlmChatButton } from "./LlmChatButton.js";
+// #endregion: Types
+/**
+ * Generate the Control Buttons for a chat window.
+ * @returns A fragment containing the Control Buttons.
+ */
+export const ControlButtons = ({ clearChatOnClickCallback, enableClearChatButton, isAwaitingLlm, handleSend, handleStop, previousUserQuery, shouldEnableRegenerateButton, }) => (_jsxs(_Fragment, { children: [!isAwaitingLlm && (_jsx(SmallLlmChatButton, { "aria-label": "Clear Chat", disabled: !enableClearChatButton, id: "clear-chat-button", onClick: clearChatOnClickCallback, posBottom: 8, posRight: 65, children: _jsx(DeleteOutline, { fontSize: "small", id: "stop-button-icon", sx: { color: "var(--bs-white)" } }) })), isAwaitingLlm && (_jsx(SmallLlmChatButton, { "aria-label": "Stop", disabled: !isAwaitingLlm, id: "stop-output-button", onClick: () => handleStop(), posBottom: 8, posRight: 23, children: _jsx(StopCircle, { fontSize: "small", id: "stop-button-icon", sx: { color: "var(--bs-white)" } }) })), !isAwaitingLlm && (_jsx(SmallLlmChatButton, { "aria-label": "Regenerate", disabled: !shouldEnableRegenerateButton, id: "regenerate-output-button", onClick: () => handleSend(previousUserQuery), posBottom: 8, posRight: 23, children: _jsx(Loop, { fontSize: "small", id: "generate-icon", sx: { color: "var(--bs-white)" } }) }))] }));
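
To make the prop contract from ControlButtons.d.ts concrete, a hypothetical standalone render might look like the sketch below. Within this package the component is consumed internally by ChatCommon; the import path is assumed and the handlers are stubs.

    import { ControlButtons } from "@cognizant-ai-lab/ui-common"; // assumed export path

    export const ControlButtonsExample = () => (
        <ControlButtons
            clearChatOnClickCallback={() => console.log("clear chat")}
            enableClearChatButton={true}
            isAwaitingLlm={false}
            handleSend={(query: string) => console.log("resend:", query)}
            handleStop={() => console.log("stop streaming")}
            previousUserQuery="What can you do?"
            shouldEnableRegenerateButton={true}
        />
    );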

package/dist/components/AgentChat/FormattedMarkdown.d.ts
@@ -0,0 +1,32 @@
+import { JSX as ReactJSX, ReactNode } from "react";
+import { SyntaxHighlighterProps } from "react-syntax-highlighter";
+/**
+ * The props for the FormattedMarkdown component.
+ */
+interface FormattedMarkdownProps {
+    /**
+     * The id for the div that will contain the formatted markdown.
+     */
+    readonly id: string;
+    /**
+     * The list of nodes to format. Each node can be a string or a React node.
+     */
+    readonly nodesList: ReactNode[];
+    /**
+     * The style to use for the syntax highlighter. @see SyntaxHighlighterThemes
+     */
+    readonly style: SyntaxHighlighterProps["style"];
+    /**
+     * Whether to wrap long lines in the markdown.
+     */
+    readonly wrapLongLines: boolean;
+}
+/**
+ * Format the output to ensure that text nodes are formatted as markdown but other nodes are passed along as-is.
+ *
+ * @param props The props for the component. @see FormattedMarkdownProps
+ * @returns The formatted output. Consecutive string nodes will be aggregated and wrapped in a markdown component,
+ * while other nodes will be passed along as-is.
+ */
+export declare const FormattedMarkdown: ({ id, nodesList, style, wrapLongLines, }: FormattedMarkdownProps) => ReactJSX.Element;
+export {};
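
As a quick illustration of the declaration above, a hypothetical use of FormattedMarkdown with a mixed nodesList might look like this; the import path is an assumption (the component is consumed internally by ChatCommon in this package), and the style is one of the highlight.js themes bundled with react-syntax-highlighter rather than the package's own HLJS_THEMES.

    import { FormattedMarkdown } from "@cognizant-ai-lab/ui-common"; // assumed export path
    import { a11yLight } from "react-syntax-highlighter/dist/esm/styles/hljs";

    export const FormattedMarkdownExample = () => (
        <FormattedMarkdown
            id="example-formatted-markdown"
            // The two leading strings are aggregated into a single markdown block;
            // the React element passes through unchanged.
            nodesList={["# Status", "All agents are **online**.", <hr key="divider" />]}
            style={a11yLight}
            wrapLongLines={true}
        />
    );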