@cognizant-ai-lab/ui-common 1.4.0 → 1.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/components/AgentChat/ChatCommon.js +3 -3
- package/dist/components/Common/Navbar.js +6 -5
- package/dist/components/MultiAgentAccelerator/AgentNode.js +3 -3
- package/dist/controller/agent/Agent.d.ts +6 -4
- package/dist/controller/agent/Agent.js +7 -6
- package/dist/controller/llm/LlmChat.d.ts +11 -1
- package/dist/controller/llm/LlmChat.js +60 -13
- package/dist/generated/neuro-san/NeuroSanClient.d.ts +3 -1
- package/dist/tsconfig.build.tsbuildinfo +1 -1
- package/package.json +2 -2

package/dist/components/AgentChat/ChatCommon.js

```diff
@@ -47,7 +47,7 @@ import { chatMessageFromChunk, checkError, cleanUpAgentName, removeTrailingUuid
 import { MicrophoneButton } from "./VoiceChat/MicrophoneButton.js";
 import { cleanupAndStopSpeechRecognition, setupSpeechRecognition } from "./VoiceChat/VoiceChat.js";
 import { getAgentFunction, getConnectivity, sendChatQuery } from "../../controller/agent/Agent.js";
-import { sendLlmRequest } from "../../controller/llm/LlmChat.js";
+import { sendLlmRequest, StreamingUnit } from "../../controller/llm/LlmChat.js";
 import { ChatMessageType, } from "../../generated/neuro-san/NeuroSanClient.js";
 import { hashString, hasOnlyWhitespace } from "../../utils/text.js";
 import { LlmChatOptionsButton } from "../Common/LlmChatOptionsButton.js";
@@ -433,13 +433,13 @@ export const ChatCommon = ({ ref, ...props }) => {
         if (isLegacyAgentType(targetAgent)) {
             // It's a legacy agent (these go directly to the LLM and are different from the Neuro-san agents).
             // Send the chat query to the server. This will block until the stream ends from the server
-            await sendLlmRequest(handleChunk, controller?.current.signal, legacyAgentEndpoint, extraParams, query, chatHistory.current);
+            await sendLlmRequest(handleChunk, controller?.current.signal, legacyAgentEndpoint, extraParams, query, chatHistory.current, null, StreamingUnit.Chunk);
         }
         else {
             // It's a Neuro-san agent.
             // Some coded tools (data generator...) expect the username provided in slyData.
             const slyDataWithUserName = { ...slyData.current, login: currentUser };
-            await sendChatQuery(neuroSanURL, controller?.current.signal, query, targetAgent, handleChunk, chatContext.current, slyDataWithUserName, currentUser);
+            await sendChatQuery(neuroSanURL, controller?.current.signal, query, targetAgent, handleChunk, chatContext.current, slyDataWithUserName, currentUser, StreamingUnit.Line);
         }
     }
     catch (error) {
```

package/dist/components/Common/Navbar.js

```diff
@@ -87,17 +87,18 @@ export const Navbar = ({ authenticationType, id, logo, logoServiceToken, pathnam
     // Customer for branding
     const customer = useSettingsStore((state) => state.settings.branding.customer);
     const primary = useSettingsStore((state) => state.settings.branding.primary);
+    const hasCustomer = customer?.trim().length > 0;
     return hydrated ? (_jsxs(Grid, { id: "nav-bar-container", container: true, alignItems: "center", sx: {
             ...MENU_ITEM_TEXT_PROPS,
             padding: "0.5rem",
-        }, children: [settingsDialogOpen && (_jsx(SettingsDialog, { id: "settings-dialog", isOpen: settingsDialogOpen, logoServiceToken: logoServiceToken, onClose: () => setSettingsDialogOpen(false) })), _jsxs(Box, { sx: { display: "flex", alignItems: "center", gap: 2 }, children: [_jsx(CustomerLogo, { logoServiceToken: logoServiceToken }), _jsx(Typography, {
+        }, children: [settingsDialogOpen && (_jsx(SettingsDialog, { id: "settings-dialog", isOpen: settingsDialogOpen, logoServiceToken: logoServiceToken, onClose: () => setSettingsDialogOpen(false) })), _jsxs(Box, { sx: { display: "flex", alignItems: "center", gap: 2 }, children: [_jsx(CustomerLogo, { logoServiceToken: logoServiceToken }), hasCustomer && (_jsx(Typography, { "data-testid": "customer-branding", sx: {
             fontSize: "20px",
             fontWeight: "600",
             paddingLeft: "0.15rem",
             width: "200px",
             display: "flex",
             alignItems: "center",
-        }, children: customer })] }), _jsxs(Grid, { id: id, sx: { display: "flex", alignItems: "center" }, children: [
+        }, children: customer }))] }), _jsxs(Grid, { id: id, sx: { display: "flex", alignItems: "center" }, children: [hasCustomer ? getCognizantLogoImage() : null, _jsx(Typography, { id: "nav-bar-brand", sx: {
             ...MENU_ITEM_TEXT_PROPS,
             marginLeft: "0.85rem",
             fontSize: "16px",
@@ -142,15 +143,15 @@ export const Navbar = ({ authenticationType, id, logo, logoServiceToken, pathnam
             whiteSpace: "normal",
             wordWrap: "break-word",
             fontSize: "smaller",
-        }, children: userInfo.name }), _jsx(MenuItem, { id: "auth-type-title", disabled: true, sx: { fontWeight: "bold" }, children: "Authentication" }), _jsx(MenuItem, { id: "authentication-type-menu-item", disabled: true, sx: { fontSize: "smaller" }, children: authenticationType }), authenticationEnabled() && (_jsx(MenuItem, { id: "user-sign-out", sx: { ...DISABLE_OUTLINE_PROPS, fontWeight: "bold" }, onClick: signOut, children: "Sign out" }))] })] })) : null, _jsx(Tooltip, { id: "dark-mode-toggle", title:
+        }, children: userInfo.name }), _jsx(MenuItem, { id: "auth-type-title", disabled: true, sx: { fontWeight: "bold" }, children: "Authentication" }), _jsx(MenuItem, { id: "authentication-type-menu-item", disabled: true, sx: { fontSize: "smaller" }, children: authenticationType }), authenticationEnabled() && (_jsx(MenuItem, { id: "user-sign-out", sx: { ...DISABLE_OUTLINE_PROPS, fontWeight: "bold" }, onClick: signOut, children: "Sign out" }))] })] })) : null, _jsx(Tooltip, { id: "dark-mode-toggle", title: hasCustomer
             ? "Dark mode toggle is not available when customer branding is active. Reset via Settings menu."
             : "Toggle dark mode", children: _jsx(DarkModeIcon, { id: "dark-mode-icon", sx: {
             marginRight: "1rem",
             fontSize: "1rem",
-            cursor:
+            cursor: hasCustomer ? "not-allowed" : "pointer",
             color: darkMode ? "var(--bs-yellow)" : "var(--bs-gray-dark)",
         }, onClick: () => {
-            !
+            !hasCustomer && setMode(darkMode ? "light" : "dark");
         } }) }), showSettingsButton && (_jsx(Tooltip, { title: "Settings", children: _jsx(SettingsIcon, { sx: {
             ...MENU_ITEM_TEXT_PROPS,
             marginRight: "1rem",
```

package/dist/components/MultiAgentAccelerator/AgentNode.js

```diff
@@ -113,8 +113,8 @@ export const AgentNode = (props) => {
         const colorIndex = depth % palette.length;
         backgroundColor = palette[colorIndex];
     }
-    // Hide handles when awaiting LLM response ("zen mode")
-    const handleVisibility = isAwaitingLlm ? "
+    // Hide handles when awaiting LLM response ("zen mode").
+    const handleVisibility = isAwaitingLlm ? "hidden" : "visible";
     // Determine which icon to display based on the agent type whether it is Frontman or not
     const getDisplayAsIcon = () => {
         const id = `${agentId}-icon`;
@@ -156,7 +156,7 @@ export const AgentNode = (props) => {
                     height: NODE_HEIGHT * (isFrontman ? 1.25 : 1.0),
                     width: NODE_WIDTH * (isFrontman ? 1.25 : 1.0),
                     zIndex: getZIndex(1, theme),
-                }, children: [getDisplayAsIcon(), _jsx(Handle, { id: `${agentId}-left-handle`, position: Position.Left, type: "source", style: {
+                }, children: [getDisplayAsIcon(), _jsx(Handle, { id: `${agentId}-left-handle`, position: Position.Left, type: "source", style: { visibility: handleVisibility } }), _jsx(Handle, { id: `${agentId}-right-handle`, position: Position.Right, type: "source", style: { visibility: handleVisibility } }), _jsx(Handle, { id: `${agentId}-top-handle`, position: Position.Top, type: "source", style: { visibility: handleVisibility } }), _jsx(Handle, { id: `${agentId}-bottom-handle`, position: Position.Bottom, type: "source", style: { visibility: handleVisibility } })] }), _jsx(Tooltip, { id: `${agentId}-tooltip`, title: agentName, placement: "top", disableInteractive: true, children: _jsx(Typography, { id: `${agentId}-name`, sx: {
                     display: "-webkit-box",
                     fontSize: "18px",
                     fontWeight: "bold",
```

package/dist/controller/agent/Agent.d.ts

```diff
@@ -1,4 +1,5 @@
 import { AgentInfo, ChatContext, ChatResponse, ConnectivityResponse, FunctionResponse } from "../../generated/neuro-san/NeuroSanClient.js";
+import { StreamingUnit } from "../llm/LlmChat.js";
 import { AgentIconSuggestions } from "../Types/AgentIconSuggestions.js";
 import { BrandingSuggestions } from "../Types/Branding.js";
 import { NetworkIconSuggestions } from "../Types/NetworkIconSuggestions.js";
@@ -44,17 +45,18 @@ export declare const getAgentNetworks: (url: string) => Promise<readonly AgentIn
  * @param url The neuro-san server URL
  * @param signal The AbortSignal to use for the request. Used to cancel the request on user demand
  * @param userInput The user input to send to the agent.
- *
- *
- * @param targetAgent The target agent to send the request to. See CombinedAgentType for the list of available agents.
+ * @param targetAgent The target agent to send the request to. See CombinedAgentType for some available agents, or
+ * could be a string with an arbitrary agent name.
  * @param callback The callback function to be called when a chunk of data is received from the server.
  * @param chatContext "Opaque" conversation context for maintaining conversation state with the server. Neuro-san
  * agents do not use ChatHistory directly, but rather, ChatContext, which is a collection of ChatHistory objects.
  * @param slyData Data items that should not be sent to the LLM. Generated by the server.
  * @param userId Current user ID in the session.
+ * @param streamingUnit Determines whether to send data to the callback as soon as it's received (Chunk)
+ * or to accumulate it until a newline is received (Line).
  * @returns The response from the agent network.
  */
-export declare const sendChatQuery: (url: string, signal: AbortSignal, userInput: string, targetAgent: string, callback: (chunk: string) => void, chatContext: ChatContext, slyData: Record<string, unknown>, userId: string) => Promise<ChatResponse>;
+export declare const sendChatQuery: (url: string, signal: AbortSignal, userInput: string, targetAgent: string, callback: (chunk: string) => void, chatContext: ChatContext, slyData: Record<string, unknown>, userId: string, streamingUnit?: StreamingUnit) => Promise<ChatResponse>;
 /**
  * Gets information on the agent and tool connections within a network
  * @param url The neuro-san server URL
```

package/dist/controller/agent/Agent.js

```diff
@@ -20,7 +20,7 @@ import { TEMPORARY_NETWORK_FOLDER } from "../../components/MultiAgentAccelerator
 import { ApiPaths,
 // eslint-disable-next-line camelcase
 ChatFilterChat_filter_type, ChatMessageType, } from "../../generated/neuro-san/NeuroSanClient.js";
-import { sendLlmRequest } from "../llm/LlmChat.js";
+import { sendLlmRequest, StreamingUnit } from "../llm/LlmChat.js";
 /**
  * Insert the target agent name into the path. The paths Api enum contains values like:
  * <code>"/api/v1/{agent_name}/connectivity"</code> so unfortunately we need to do a `replace()` to insert the target
@@ -139,17 +139,18 @@ const handleJsonLines = (chunk, callback) => {
  * @param url The neuro-san server URL
  * @param signal The AbortSignal to use for the request. Used to cancel the request on user demand
  * @param userInput The user input to send to the agent.
- *
- *
- * @param targetAgent The target agent to send the request to. See CombinedAgentType for the list of available agents.
+ * @param targetAgent The target agent to send the request to. See CombinedAgentType for some available agents, or
+ * could be a string with an arbitrary agent name.
  * @param callback The callback function to be called when a chunk of data is received from the server.
  * @param chatContext "Opaque" conversation context for maintaining conversation state with the server. Neuro-san
  * agents do not use ChatHistory directly, but rather, ChatContext, which is a collection of ChatHistory objects.
  * @param slyData Data items that should not be sent to the LLM. Generated by the server.
  * @param userId Current user ID in the session.
+ * @param streamingUnit Determines whether to send data to the callback as soon as it's received (Chunk)
+ * or to accumulate it until a newline is received (Line).
  * @returns The response from the agent network.
  */
-export const sendChatQuery = async (url, signal, userInput, targetAgent, callback, chatContext, slyData, userId) => {
+export const sendChatQuery = async (url, signal, userInput, targetAgent, callback, chatContext, slyData, userId, streamingUnit = StreamingUnit.Chunk) => {
     // Create request
     const userMessage = {
         type: ChatMessageType.HUMAN,
@@ -164,7 +165,7 @@ export const sendChatQuery = async (url, signal, userInput, targetAgent, callbac
     };
     const fetchUrl = `${url}${insertTargetAgent(targetAgent, ApiPaths.AgentService_StreamingChat)}`;
     const requestRecord = Object.entries(agentChatRequest).reduce((acc, [key, value]) => (value ? { ...acc, [key]: value } : acc), {});
-    return sendLlmRequest((chunk) => handleJsonLines(chunk, callback), signal, fetchUrl, requestRecord, null, null, userId);
+    return sendLlmRequest((chunk) => handleJsonLines(chunk, callback), signal, fetchUrl, requestRecord, null, null, userId, streamingUnit);
 };
 /**
  * Gets information on the agent and tool connections within a network
```
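
The call sites in ChatCommon.js above pass `StreamingUnit.Line` here, so the `handleJsonLines` wrapper only ever sees complete newline-delimited JSON records. A minimal consumer-side sketch of the updated signature follows; the server URL, agent name, input, and user ID are placeholders, and the import paths are the dist files from this diff (the package's public entry point may differ):

```ts
import { sendChatQuery } from "@cognizant-ai-lab/ui-common/dist/controller/agent/Agent.js";
import { StreamingUnit } from "@cognizant-ai-lab/ui-common/dist/controller/llm/LlmChat.js";

const controller = new AbortController();
const response = await sendChatQuery(
    "http://localhost:8080",                  // neuro-san server URL (placeholder)
    controller.signal,                        // lets the UI cancel the stream
    "Summarize the latest report",            // userInput (placeholder)
    "my_agent_network",                       // targetAgent: an arbitrary agent name is now allowed
    (line) => console.log("received:", line), // with Line mode, one complete JSON line per call
    {},                                       // chatContext: empty for a new conversation
    { login: "jdoe" },                        // slyData (placeholder)
    "jdoe",                                   // userId (placeholder)
    StreamingUnit.Line,                       // buffer until each newline; omit for the Chunk default
);
```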

package/dist/controller/llm/LlmChat.d.ts

```diff
@@ -3,6 +3,14 @@
  * Allows streaming callback for a more interactive experience.
  */
 import { BaseMessage } from "@langchain/core/messages";
+/**
+ * Determines whether to send data to the callback as soon as it's received (Chunk) or to accumulate it
+ * until a newline is received (Line).
+ */
+export declare enum StreamingUnit {
+    Chunk = 0,
+    Line = 1
+}
 /**
  * Send a request to an LLM and stream the response to a callback.
  * @param callback The callback function to be called when a chunk of data is received from the server.
@@ -12,7 +20,9 @@ import { BaseMessage } from "@langchain/core/messages";
  * @param userQuery The user query to send to the server (sometimes part of chat history instead).
  * @param chatHistory The chat history to be sent to the server. Contains user requests and server responses.
  * @param userId Current user ID in the session.
+ * @param streamingUnit Determines whether to send data to the callback as soon as it's received (Chunk)
+ * or to accumulate it until a newline is received (Line). Default is Chunk.
  * @returns Either the JSON result of the call, or, if a callback is provided, nothing, but tokens are streamed
  * to the callback as they are received from the server.
  */
-export declare const sendLlmRequest: (callback: (token: string) => void, signal: AbortSignal, fetchUrl: string, params: Record<string, unknown>, userQuery?: string, chatHistory?: BaseMessage[], userId?: string) => Promise<any>;
+export declare const sendLlmRequest: (callback: (token: string) => void, signal: AbortSignal, fetchUrl: string, params: Record<string, unknown>, userQuery?: string, chatHistory?: BaseMessage[], userId?: string, streamingUnit?: StreamingUnit) => Promise<any>;
```
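
Based on the declaration above, a hedged sketch of choosing between the two modes; the endpoint and request params are placeholders:

```ts
import { sendLlmRequest, StreamingUnit } from "@cognizant-ai-lab/ui-common/dist/controller/llm/LlmChat.js";

const ac = new AbortController();

// Chunk (the default): the callback fires with each raw network chunk as it
// arrives, which suits token-by-token rendering in a chat UI.
let streamed = "";
await sendLlmRequest((token) => { streamed += token; }, ac.signal,
    "/api/llm/chat",        // placeholder endpoint
    { temperature: 0 },     // placeholder request params
    "Hello!");

// Line: chunks are buffered until a newline, so the callback sees whole lines.
// This suits servers that stream newline-delimited JSON.
await sendLlmRequest((line) => console.log(JSON.parse(line)), ac.signal,
    "/api/llm/chat", { temperature: 0 }, "Hello!",
    undefined,              // chatHistory
    undefined,              // userId
    StreamingUnit.Line);
```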

package/dist/controller/llm/LlmChat.js

```diff
@@ -13,6 +13,61 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
+/**
+ * Determines whether to send data to the callback as soon as it's received (Chunk) or to accumulate it
+ * until a newline is received (Line).
+ */
+export var StreamingUnit;
+(function (StreamingUnit) {
+    StreamingUnit[StreamingUnit["Chunk"] = 0] = "Chunk";
+    StreamingUnit[StreamingUnit["Line"] = 1] = "Line";
+})(StreamingUnit || (StreamingUnit = {}));
+const handleStreamingCallback = async (res, callback, streamingUnit) => {
+    const reader = res.body.getReader();
+    const utf8decoder = new TextDecoder("utf8");
+    let buffer = "";
+    while (true) {
+        const { done, value } = await reader.read();
+        // If the caller wants to process chunk by chunk, send it to the callback immediately.
+        if (streamingUnit === StreamingUnit.Chunk) {
+            if (done) {
+                break; // End of stream
+            }
+            // Decode chunk from server and send to callback
+            const chunk = utf8decoder.decode(value);
+            callback(chunk);
+        }
+        else {
+            // Otherwise, accumulate in buffer until we have a full line (delimited by newline character)
+            // to send to the callback.
+            if (done) {
+                // Handle any remaining data in buffer (last line without newline)
+                if (buffer.trim().length > 0) {
+                    callback(buffer);
+                }
+                break; // End of stream
+            }
+            // Decode chunk from server. Note: pass stream: true to handle multibyte characters that may be split
+            // across chunks
+            const chunk = utf8decoder.decode(value, { stream: true });
+            // Append chunk to buffer
+            buffer += chunk;
+            // Process all complete lines in the buffer
+            let newlineIndex;
+            while ((newlineIndex = buffer.indexOf("\n")) !== -1) {
+                // Extract the complete line (without the newline)
+                const line = buffer.substring(0, newlineIndex).trim();
+                // Keep the rest for next iteration
+                buffer = buffer.substring(newlineIndex + 1);
+                // Skip empty lines
+                if (line.length > 0) {
+                    // Send the current line
+                    callback(line);
+                }
+            }
+        }
+    }
+};
 /**
  * Send a request to an LLM and stream the response to a callback.
  * @param callback The callback function to be called when a chunk of data is received from the server.
@@ -22,10 +77,12 @@ limitations under the License.
  * @param userQuery The user query to send to the server (sometimes part of chat history instead).
  * @param chatHistory The chat history to be sent to the server. Contains user requests and server responses.
  * @param userId Current user ID in the session.
+ * @param streamingUnit Determines whether to send data to the callback as soon as it's received (Chunk)
+ * or to accumulate it until a newline is received (Line). Default is Chunk.
  * @returns Either the JSON result of the call, or, if a callback is provided, nothing, but tokens are streamed
 * to the callback as they are received from the server.
 */
-export const sendLlmRequest = async (callback, signal, fetchUrl, params, userQuery, chatHistory, userId) => {
+export const sendLlmRequest = async (callback, signal, fetchUrl, params, userQuery, chatHistory, userId, streamingUnit = StreamingUnit.Chunk) => {
     const res = await fetch(fetchUrl, {
         method: "POST",
         headers: {
@@ -45,18 +102,8 @@ export const sendLlmRequest = async (callback, signal, fetchUrl, params, userQue
         throw new Error(`Failed to fetch: ${res.statusText} error code ${res.status}`);
     }
     if (callback) {
-        const reader = res.body.getReader();
-        const utf8decoder = new TextDecoder("utf8");
-        while (true) {
-            const { done, value } = await reader.read();
-            if (done) {
-                break; // End of stream
-            }
-            // Decode chunk from server
-            const chunk = utf8decoder.decode(value);
-            // Send current chunk to callback
-            callback(chunk);
-        }
+        // If a callback was provided, we assume the response is a stream and handle it accordingly
+        await handleStreamingCallback(res, callback, streamingUnit);
         return null;
     }
     else {
```
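
One detail worth noting in the new `handleStreamingCallback`: Line mode decodes with `{ stream: true }`, so a multibyte UTF-8 character split across two network chunks is reassembled rather than corrupted, while Chunk mode decodes each chunk independently. A self-contained sketch of that buffering technique (not an exported API of this package):

```ts
// Minimal sketch: split a byte stream into lines, tolerating multibyte
// characters that straddle chunk boundaries.
const splitStreamIntoLines = (onLine: (line: string) => void) => {
    const decoder = new TextDecoder("utf8");
    let buffer = "";
    return {
        push(bytes: Uint8Array) {
            // stream: true keeps an incomplete UTF-8 sequence pending in the decoder
            buffer += decoder.decode(bytes, { stream: true });
            let i: number;
            while ((i = buffer.indexOf("\n")) !== -1) {
                const line = buffer.substring(0, i).trim();
                buffer = buffer.substring(i + 1); // keep the rest for the next iteration
                if (line.length > 0) {
                    onLine(line);
                }
            }
        },
        end() {
            buffer += decoder.decode(); // drain any pending bytes
            if (buffer.trim().length > 0) {
                onLine(buffer); // flush a trailing line that had no final newline
            }
        },
    };
};

// Usage: "é" is 0xC3 0xA9 in UTF-8; split it across two chunks.
const splitter = splitStreamIntoLines((line) => console.log(line));
splitter.push(new Uint8Array([0x63, 0x61, 0x66, 0xc3])); // "caf" + first byte of "é"
splitter.push(new Uint8Array([0xa9, 0x0a]));             // second byte of "é" + "\n"
splitter.end();                                          // prints "café"
```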

package/dist/generated/neuro-san/NeuroSanClient.d.ts

```diff
@@ -193,8 +193,10 @@ export interface components {
             readonly description?: string;
             /** @description Optional map of parameters passed in via the natural-language chat text channel that the agent needs in order to work. This is really a pydantic/OpenAI function description, which is a bit too complex to specify directly in protobuf. */
             readonly parameters?: Record<string, unknown>;
-            /** @description Optional map of parameters passed in via the sly_data dictionary private data channel that the agent needs in order to work. Just like the parameters above, this is really a pydantic/OpenAI function description, which is a bit too complex to specify directly in protobuf. */
+            /** @description This is a description of what data is expected to come *in* via the sly_data channel. Optional map of parameters passed in via the sly_data dictionary private data channel that the agent needs in order to work. Just like the parameters above, this is really a pydantic/OpenAI function description, which is a bit too complex to specify directly in protobuf. */
             readonly sly_data_schema?: Record<string, unknown>;
+            /** @description This is a description of what data is expected to come *out* via the sly_data channel. Optional map of parameters returned via the sly_data dictionary private data channel that the agent will return after its work. Just like the parameters above, this is really a pydantic/OpenAI function description, which is a bit too complex to specify directly in protobuf. */
+            readonly sly_data_output_schema?: Record<string, unknown>;
         };
         /** @description Response structure for Function gRPC method */
         readonly FunctionResponse: {
```