@xalia/agent 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.prettierrc.json +11 -0
- package/README.md +56 -0
- package/dist/agent.js +238 -0
- package/dist/agentUtils.js +106 -0
- package/dist/chat.js +296 -0
- package/dist/dummyLLM.js +38 -0
- package/dist/files.js +115 -0
- package/dist/iplatform.js +2 -0
- package/dist/llm.js +2 -0
- package/dist/main.js +147 -0
- package/dist/mcpServerManager.js +278 -0
- package/dist/nodePlatform.js +61 -0
- package/dist/openAILLM.js +38 -0
- package/dist/openAILLMStreaming.js +431 -0
- package/dist/options.js +79 -0
- package/dist/prompt.js +83 -0
- package/dist/sudoMcpServerManager.js +183 -0
- package/dist/test/imageLoad.test.js +14 -0
- package/dist/test/mcpServerManager.test.js +71 -0
- package/dist/test/prompt.test.js +26 -0
- package/dist/test/sudoMcpServerManager.test.js +49 -0
- package/dist/tokenAuth.js +39 -0
- package/dist/tools.js +44 -0
- package/eslint.config.mjs +25 -0
- package/frog.png +0 -0
- package/package.json +42 -0
- package/scripts/git_message +31 -0
- package/scripts/git_wip +21 -0
- package/scripts/pr_message +18 -0
- package/scripts/pr_review +16 -0
- package/scripts/sudomcp_import +23 -0
- package/scripts/test_script +60 -0
- package/src/agent.ts +283 -0
- package/src/agentUtils.ts +198 -0
- package/src/chat.ts +346 -0
- package/src/dummyLLM.ts +50 -0
- package/src/files.ts +95 -0
- package/src/iplatform.ts +17 -0
- package/src/llm.ts +15 -0
- package/src/main.ts +187 -0
- package/src/mcpServerManager.ts +371 -0
- package/src/nodePlatform.ts +24 -0
- package/src/openAILLM.ts +51 -0
- package/src/openAILLMStreaming.ts +528 -0
- package/src/options.ts +103 -0
- package/src/prompt.ts +93 -0
- package/src/sudoMcpServerManager.ts +278 -0
- package/src/test/imageLoad.test.ts +14 -0
- package/src/test/mcpServerManager.test.ts +98 -0
- package/src/test/prompt.test.src +0 -0
- package/src/test/prompt.test.ts +26 -0
- package/src/test/sudoMcpServerManager.test.ts +65 -0
- package/src/tokenAuth.ts +50 -0
- package/src/tools.ts +57 -0
- package/test_data/background_test_profile.json +6 -0
- package/test_data/background_test_script.json +11 -0
- package/test_data/dummyllm_script_simplecalc.json +28 -0
- package/test_data/git_message_profile.json +4 -0
- package/test_data/git_wip_system.txt +5 -0
- package/test_data/pr_message_profile.json +4 -0
- package/test_data/pr_review_profile.json +4 -0
- package/test_data/prompt_simplecalc.txt +1 -0
- package/test_data/simplecalc_profile.json +4 -0
- package/test_data/sudomcp_import_profile.json +4 -0
- package/test_data/test_script_profile.json +8 -0
- package/tsconfig.json +13 -0
package/src/agent.ts
ADDED
@@ -0,0 +1,283 @@
|
|
1
|
+
import * as dotenv from "dotenv";
import { OpenAI } from "openai";
import { McpServerManager } from "./mcpServerManager";
import {
  ChatCompletionContentPart,
  ChatCompletionUserMessageParam,
} from "openai/resources.mjs";
import { strict as assert } from "assert";
import { ILLM } from "./llm";
import { AgentProfile, getLogger } from "@xalia/xmcp/sdk";
export { AgentProfile } from "@xalia/xmcp/sdk";

// Handler for a tool registered directly on the agent (as opposed to an MCP
// tool): receives the parsed JSON arguments, returns the result as a string.
export type ToolHandler = (args: unknown) => string;

// Maps an MCP server name to its URL.
export type McpServerUrls = (name: string) => string;

// Role: If content, give it to UI
// Called with streamed/complete message text; `msgEnd` marks the final chunk.
export type OnMessageCB = {
  (msg: string, msgEnd: boolean): Promise<void>;
};

// Role: If tool calls, prompt for permission to handle them
// Resolves to true when the user approves execution of the tool call.
export type OnToolCallCB = {
  (msg: OpenAI.ChatCompletionMessageToolCall): Promise<boolean>;
};

// Module-load side effect: pull environment variables (e.g. API keys) from a
// .env file if present.
dotenv.config();
const logger = getLogger();
|
29
|
+
|
30
|
+
export class Agent {
|
31
|
+
private toolHandlers: { [toolName: string]: ToolHandler } = {};
|
32
|
+
|
33
|
+
private constructor(
|
34
|
+
public onMessage: OnMessageCB,
|
35
|
+
public onToolCall: OnToolCallCB,
|
36
|
+
private messages: OpenAI.ChatCompletionMessageParam[],
|
37
|
+
private mcpServerManager: McpServerManager,
|
38
|
+
private tools: OpenAI.ChatCompletionTool[],
|
39
|
+
private llm: ILLM
|
40
|
+
) {}
|
41
|
+
|
42
|
+
public static async initializeWithLLM(
|
43
|
+
onMessage: OnMessageCB,
|
44
|
+
onToolCall: OnToolCallCB,
|
45
|
+
systemPrompt: string | undefined,
|
46
|
+
llm: ILLM
|
47
|
+
): Promise<Agent> {
|
48
|
+
// Initialize messages with system prompt
|
49
|
+
const messages = [
|
50
|
+
{
|
51
|
+
role: "system",
|
52
|
+
content: systemPrompt ?? "You are a helpful assistant",
|
53
|
+
} as OpenAI.ChatCompletionMessageParam,
|
54
|
+
];
|
55
|
+
|
56
|
+
// Create the server manager
|
57
|
+
const mcpServerManager = new McpServerManager();
|
58
|
+
|
59
|
+
return new Agent(
|
60
|
+
onMessage,
|
61
|
+
onToolCall,
|
62
|
+
messages,
|
63
|
+
mcpServerManager,
|
64
|
+
[],
|
65
|
+
llm
|
66
|
+
);
|
67
|
+
}
|
68
|
+
|
69
|
+
public async shutdown(): Promise<void> {
|
70
|
+
return this.mcpServerManager.shutdown();
|
71
|
+
}
|
72
|
+
|
73
|
+
public getAgentProfile(): AgentProfile {
|
74
|
+
return new AgentProfile(
|
75
|
+
this.llm.getModel(),
|
76
|
+
this.getSystemMessage(),
|
77
|
+
this.mcpServerManager.getMcpServerSettings()
|
78
|
+
);
|
79
|
+
}
|
80
|
+
|
81
|
+
public getConversation(): OpenAI.ChatCompletionMessageParam[] {
|
82
|
+
assert(
|
83
|
+
this.messages[0].role == "system",
|
84
|
+
"first message must have system role"
|
85
|
+
);
|
86
|
+
// Return a copy so future modifications to `this.messages` don't impact
|
87
|
+
// the callers copy.
|
88
|
+
return structuredClone(this.messages.slice(1));
|
89
|
+
}
|
90
|
+
|
91
|
+
public setConversation(messages: OpenAI.ChatCompletionMessageParam[]) {
|
92
|
+
assert(this.messages[0].role == "system");
|
93
|
+
assert(messages[0].role != "system", "conversation contains system msg");
|
94
|
+
|
95
|
+
const newMessages: OpenAI.ChatCompletionMessageParam[] = [this.messages[0]];
|
96
|
+
this.messages = newMessages.concat(structuredClone(messages));
|
97
|
+
}
|
98
|
+
|
99
|
+
public getMcpServerManager(): McpServerManager {
|
100
|
+
return this.mcpServerManager;
|
101
|
+
}
|
102
|
+
|
103
|
+
public async userMessage(
|
104
|
+
msg?: string,
|
105
|
+
imageB64?: string
|
106
|
+
): Promise<OpenAI.ChatCompletionMessageParam | undefined> {
|
107
|
+
const userMessage = createUserMessage(msg, imageB64);
|
108
|
+
if (!userMessage) {
|
109
|
+
return undefined;
|
110
|
+
}
|
111
|
+
|
112
|
+
this.messages.push(userMessage);
|
113
|
+
let completion = await this.chatCompletion();
|
114
|
+
|
115
|
+
let message = completion.choices[0].message;
|
116
|
+
this.messages.push(message);
|
117
|
+
|
118
|
+
// While there are tool calls to make, make them and loop
|
119
|
+
|
120
|
+
while (message.tool_calls && message.tool_calls.length > 0) {
|
121
|
+
for (const toolCall of message.tool_calls ?? []) {
|
122
|
+
const approval = await this.onToolCall(toolCall);
|
123
|
+
if (approval) {
|
124
|
+
try {
|
125
|
+
const result = await this.doToolCall(toolCall);
|
126
|
+
logger.debug(`tool call result ${JSON.stringify(result)}`);
|
127
|
+
this.messages.push(result);
|
128
|
+
} catch (e) {
|
129
|
+
logger.error(`tool call error: ${e}`);
|
130
|
+
this.messages.push({
|
131
|
+
role: "tool",
|
132
|
+
tool_call_id: toolCall.id,
|
133
|
+
content: "Tool call failed.",
|
134
|
+
});
|
135
|
+
}
|
136
|
+
} else {
|
137
|
+
this.messages.push({
|
138
|
+
role: "tool",
|
139
|
+
tool_call_id: toolCall.id,
|
140
|
+
content: "User denied tool use request.",
|
141
|
+
});
|
142
|
+
}
|
143
|
+
}
|
144
|
+
|
145
|
+
completion = await this.chatCompletion();
|
146
|
+
message = completion.choices[0].message;
|
147
|
+
this.messages.push(message);
|
148
|
+
}
|
149
|
+
|
150
|
+
return completion.choices[0].message;
|
151
|
+
}
|
152
|
+
|
153
|
+
public chooseModel(model: string) {
|
154
|
+
logger.debug(`Set model ${model}`);
|
155
|
+
this.llm.setModel(model);
|
156
|
+
}
|
157
|
+
|
158
|
+
/**
|
159
|
+
* Clear the conversation.
|
160
|
+
*/
|
161
|
+
public resetConversation() {
|
162
|
+
assert(this.messages.length > 0);
|
163
|
+
// Keep only the system message
|
164
|
+
this.messages.splice(1);
|
165
|
+
}
|
166
|
+
|
167
|
+
public getSystemMessage(): string {
|
168
|
+
assert(this.messages[0].role === "system");
|
169
|
+
return this.messages[0].content as string;
|
170
|
+
}
|
171
|
+
|
172
|
+
/**
|
173
|
+
* Set the system prompt
|
174
|
+
*/
|
175
|
+
public setSystemMessage(systemMsg: string) {
|
176
|
+
assert(this.messages[0].role === "system");
|
177
|
+
this.messages[0].content = systemMsg;
|
178
|
+
}
|
179
|
+
|
180
|
+
async chatCompletion(): Promise<OpenAI.Chat.Completions.ChatCompletion> {
|
181
|
+
let tools: OpenAI.ChatCompletionTool[] | undefined;
|
182
|
+
const enabledTools = this.tools.concat(
|
183
|
+
this.mcpServerManager.getOpenAITools()
|
184
|
+
);
|
185
|
+
if (enabledTools.length > 0) {
|
186
|
+
tools = enabledTools;
|
187
|
+
}
|
188
|
+
// logger.debug(
|
189
|
+
// `chatCompletion: tools: ${JSON.stringify(tools, undefined, 2)}`
|
190
|
+
// );
|
191
|
+
const completion = await this.llm.getConversationResponse(
|
192
|
+
this.messages,
|
193
|
+
tools,
|
194
|
+
this.onMessage
|
195
|
+
);
|
196
|
+
logger.debug(`Received chat completion ${JSON.stringify(completion)}`);
|
197
|
+
return completion;
|
198
|
+
}
|
199
|
+
|
200
|
+
public toolNames(): string[] {
|
201
|
+
return this.mcpServerManager
|
202
|
+
.getOpenAITools()
|
203
|
+
.map((tool) => tool.function.name);
|
204
|
+
}
|
205
|
+
|
206
|
+
public addTool(tool: OpenAI.ChatCompletionTool, handler: ToolHandler) {
|
207
|
+
const name = tool.function.name;
|
208
|
+
if (this.toolHandlers[name]) {
|
209
|
+
throw `tool ${name} already added`;
|
210
|
+
}
|
211
|
+
|
212
|
+
logger.debug(`Adding tool ${name}`);
|
213
|
+
|
214
|
+
this.tools.push(tool);
|
215
|
+
this.toolHandlers[name] = handler;
|
216
|
+
}
|
217
|
+
|
218
|
+
async doToolCall(
|
219
|
+
toolCall: OpenAI.ChatCompletionMessageToolCall
|
220
|
+
): Promise<OpenAI.ChatCompletionToolMessageParam> {
|
221
|
+
const name = toolCall.function.name;
|
222
|
+
const args = JSON.parse(toolCall.function.arguments);
|
223
|
+
|
224
|
+
let result: string | undefined = undefined;
|
225
|
+
const handler = this.toolHandlers[name];
|
226
|
+
if (handler) {
|
227
|
+
logger.debug(` found agent tool ${name} ...`);
|
228
|
+
result = handler(args);
|
229
|
+
} else {
|
230
|
+
result = await this.mcpServerManager.invoke(name, args);
|
231
|
+
}
|
232
|
+
return {
|
233
|
+
role: "tool",
|
234
|
+
tool_call_id: toolCall.id,
|
235
|
+
content: result.toString(),
|
236
|
+
};
|
237
|
+
}
|
238
|
+
}
|
239
|
+
|
240
|
+
/**
|
241
|
+
* Returns the ChatCompletionMessageParam constructed from (optional) text and
|
242
|
+
* (optional) image. If neither is given (null message), then undefined is
|
243
|
+
* returned.
|
244
|
+
**/
|
245
|
+
export function createUserMessage(
|
246
|
+
msg?: string,
|
247
|
+
imageB64?: string
|
248
|
+
): ChatCompletionUserMessageParam | undefined {
|
249
|
+
const content = (() => {
|
250
|
+
if (!imageB64) {
|
251
|
+
if (!msg) {
|
252
|
+
return undefined;
|
253
|
+
}
|
254
|
+
return msg;
|
255
|
+
}
|
256
|
+
|
257
|
+
const content: ChatCompletionContentPart[] = [];
|
258
|
+
if (msg) {
|
259
|
+
content.push({
|
260
|
+
type: "text",
|
261
|
+
text: msg,
|
262
|
+
});
|
263
|
+
}
|
264
|
+
if (imageB64) {
|
265
|
+
content.push({
|
266
|
+
type: "image_url",
|
267
|
+
image_url: {
|
268
|
+
url: imageB64,
|
269
|
+
},
|
270
|
+
});
|
271
|
+
}
|
272
|
+
return content;
|
273
|
+
})();
|
274
|
+
|
275
|
+
if (!content) {
|
276
|
+
return undefined;
|
277
|
+
}
|
278
|
+
|
279
|
+
return {
|
280
|
+
role: "user",
|
281
|
+
content,
|
282
|
+
};
|
283
|
+
}
|
@@ -0,0 +1,198 @@
|
|
1
|
+
import { getLogger } from "@xalia/xmcp/sdk";
import { Agent, AgentProfile, OnMessageCB, OnToolCallCB } from "./agent";
import { IPlatform } from "./iplatform";
import { SudoMcpServerManager } from "./sudoMcpServerManager";
import OpenAI from "openai";
import { Configuration as SudoMcpConfiguration } from "@xalia/xmcp/sdk";
import { OpenAILLM } from "./openAILLM";
import { OpenAILLMStreaming } from "./openAILLMStreaming";
import { DummyLLM } from "./dummyLLM";
import { ILLM } from "./llm";
import { strict as assert } from "assert";

const logger = getLogger();

// Default OpenAI-compatible endpoint, used when no LLM URL is configured.
export const DEFAULT_LLM_URL = "http://localhost:5001/v1";
|
16
|
+
|
17
|
+
/**
|
18
|
+
* Util function to create an Agent from some config information.
|
19
|
+
*/
|
20
|
+
async function createAgent(
|
21
|
+
llmUrl: string | undefined,
|
22
|
+
model: string | undefined,
|
23
|
+
systemPrompt: string,
|
24
|
+
onMessage: OnMessageCB,
|
25
|
+
onToolCall: OnToolCallCB,
|
26
|
+
platform: IPlatform,
|
27
|
+
openaiApiKey: string | undefined,
|
28
|
+
stream: boolean = false
|
29
|
+
): Promise<Agent> {
|
30
|
+
let llm: ILLM | undefined;
|
31
|
+
|
32
|
+
if (model && model.startsWith("dummy:")) {
|
33
|
+
// Dummy Agent
|
34
|
+
const llmUrl = model.slice(6);
|
35
|
+
if (llmUrl.length === 0) {
|
36
|
+
throw "malformed dummy:<script>";
|
37
|
+
}
|
38
|
+
const script = await platform.load(llmUrl);
|
39
|
+
logger.debug(` script: ${script}`);
|
40
|
+
const responses: OpenAI.ChatCompletion.Choice[] = JSON.parse(script);
|
41
|
+
logger.debug(`Initializing Dummy Agent: ${llmUrl}`);
|
42
|
+
llm = new DummyLLM(responses);
|
43
|
+
} else {
|
44
|
+
// Regular Agent
|
45
|
+
if (!openaiApiKey) {
|
46
|
+
throw "Missing OpenAI API Key";
|
47
|
+
}
|
48
|
+
|
49
|
+
logger.debug(`Initializing Agent: ${llmUrl} - ${model}`);
|
50
|
+
if (stream) {
|
51
|
+
llm = new OpenAILLMStreaming(openaiApiKey, llmUrl, model);
|
52
|
+
} else {
|
53
|
+
llm = new OpenAILLM(openaiApiKey, llmUrl, model);
|
54
|
+
}
|
55
|
+
}
|
56
|
+
|
57
|
+
assert(llm);
|
58
|
+
return Agent.initializeWithLLM(onMessage, onToolCall, systemPrompt, llm);
|
59
|
+
}
|
60
|
+
|
61
|
+
/**
|
62
|
+
* Util function to create and initialize an Agent given an AgentProfile.
|
63
|
+
*/
|
64
|
+
export async function createAgentAndSudoMcpServerManager(
|
65
|
+
url: string,
|
66
|
+
agentProfile: AgentProfile,
|
67
|
+
onMessage: OnMessageCB,
|
68
|
+
onToolCall: OnToolCallCB,
|
69
|
+
platform: IPlatform,
|
70
|
+
openaiApiKey: string | undefined,
|
71
|
+
sudomcpConfig: SudoMcpConfiguration,
|
72
|
+
authorizedUrl: string | undefined,
|
73
|
+
conversation: OpenAI.ChatCompletionMessageParam[] | undefined,
|
74
|
+
stream: boolean = false
|
75
|
+
): Promise<[Agent, SudoMcpServerManager]> {
|
76
|
+
// Create agent
|
77
|
+
logger.debug("[createAgentAndSudoMcpServerManager] creating agent ...");
|
78
|
+
const agent = await createAgent(
|
79
|
+
url,
|
80
|
+
agentProfile.model,
|
81
|
+
agentProfile.system_prompt,
|
82
|
+
onMessage,
|
83
|
+
onToolCall,
|
84
|
+
platform,
|
85
|
+
openaiApiKey,
|
86
|
+
stream
|
87
|
+
);
|
88
|
+
if (conversation) {
|
89
|
+
agent.setConversation(conversation);
|
90
|
+
}
|
91
|
+
|
92
|
+
// Init SudoMcpServerManager
|
93
|
+
logger.debug(
|
94
|
+
"[createAgentAndSudoMcpServerManager] creating SudoMcpServerManager."
|
95
|
+
);
|
96
|
+
const sudoMcpServerManager = await SudoMcpServerManager.initialize(
|
97
|
+
agent.getMcpServerManager(),
|
98
|
+
platform.openUrl,
|
99
|
+
sudomcpConfig.backend_url,
|
100
|
+
sudomcpConfig.api_key,
|
101
|
+
authorizedUrl
|
102
|
+
);
|
103
|
+
logger.debug(
|
104
|
+
"[createAgentAndSudoMcpServerManager] restore mcp settings:" +
|
105
|
+
JSON.stringify(agentProfile.mcp_settings)
|
106
|
+
);
|
107
|
+
await sudoMcpServerManager.restoreMcpSettings(agentProfile.mcp_settings);
|
108
|
+
|
109
|
+
logger.debug("[createAgentAndSudoMcpServerManager] done");
|
110
|
+
return [agent, sudoMcpServerManager];
|
111
|
+
}
|
112
|
+
|
113
|
+
/**
|
114
|
+
* An "non-interactive" agent is one which is not intended to be used
|
115
|
+
* interactively (settings cannot be dyanmically adjusted, intermediate
|
116
|
+
* messages are not used by the caller, the user does not need to approve tool
|
117
|
+
* calls, etc).
|
118
|
+
*/
|
119
|
+
export async function createNonInteractiveAgent(
|
120
|
+
url: string,
|
121
|
+
agentProfile: AgentProfile,
|
122
|
+
conversation: OpenAI.ChatCompletionMessageParam[] | undefined,
|
123
|
+
platform: IPlatform,
|
124
|
+
openaiApiKey: string | undefined,
|
125
|
+
sudomcpConfig: SudoMcpConfiguration,
|
126
|
+
approveToolsUpTo: number
|
127
|
+
): Promise<Agent> {
|
128
|
+
let remainingToolCalls = approveToolsUpTo;
|
129
|
+
const onMessage = async () => {};
|
130
|
+
const onToolCall = async () => {
|
131
|
+
if (remainingToolCalls !== 0) {
|
132
|
+
--remainingToolCalls;
|
133
|
+
return true;
|
134
|
+
}
|
135
|
+
return false;
|
136
|
+
};
|
137
|
+
|
138
|
+
const [agent, _] = await createAgentAndSudoMcpServerManager(
|
139
|
+
url,
|
140
|
+
agentProfile,
|
141
|
+
onMessage,
|
142
|
+
onToolCall,
|
143
|
+
platform,
|
144
|
+
openaiApiKey,
|
145
|
+
sudomcpConfig,
|
146
|
+
undefined,
|
147
|
+
conversation
|
148
|
+
);
|
149
|
+
|
150
|
+
return agent;
|
151
|
+
}
|
152
|
+
|
153
|
+
/**
|
154
|
+
* Create an Agent (from the AgentProfile), pass it a single prompt and output
|
155
|
+
* the response.
|
156
|
+
*/
|
157
|
+
export async function runOneShot(
|
158
|
+
url: string,
|
159
|
+
agentProfile: AgentProfile,
|
160
|
+
conversation: OpenAI.ChatCompletionMessageParam[] | undefined,
|
161
|
+
platform: IPlatform,
|
162
|
+
prompt: string,
|
163
|
+
image: string | undefined,
|
164
|
+
llmApiKey: string | undefined,
|
165
|
+
sudomcpConfig: SudoMcpConfiguration,
|
166
|
+
approveToolsUpTo: number
|
167
|
+
): Promise<{
|
168
|
+
response: string;
|
169
|
+
conversation: OpenAI.ChatCompletionMessageParam[];
|
170
|
+
}> {
|
171
|
+
logger.debug("[runOneShot]: start");
|
172
|
+
|
173
|
+
// Create a non-interactive agent and pass any prompt/ image to it. Return
|
174
|
+
// the first answer.
|
175
|
+
|
176
|
+
const agent = await createNonInteractiveAgent(
|
177
|
+
url,
|
178
|
+
agentProfile,
|
179
|
+
conversation,
|
180
|
+
platform,
|
181
|
+
llmApiKey,
|
182
|
+
sudomcpConfig,
|
183
|
+
approveToolsUpTo
|
184
|
+
);
|
185
|
+
|
186
|
+
const response = await agent.userMessage(prompt, image);
|
187
|
+
await agent.shutdown();
|
188
|
+
logger.debug("[runOneShot]: shutdown done");
|
189
|
+
|
190
|
+
if (!response) {
|
191
|
+
throw "No message returned from agent";
|
192
|
+
}
|
193
|
+
|
194
|
+
return {
|
195
|
+
response: "" + response.content,
|
196
|
+
conversation: agent.getConversation(),
|
197
|
+
};
|
198
|
+
}
|