@xalia/agent 0.5.4 → 0.5.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/src/agent/agent.js +16 -9
- package/dist/agent/src/agent/agentUtils.js +24 -4
- package/dist/agent/src/agent/mcpServerManager.js +19 -9
- package/dist/agent/src/agent/openAILLM.js +3 -1
- package/dist/agent/src/agent/openAILLMStreaming.js +24 -25
- package/dist/agent/src/agent/repeatLLM.js +43 -0
- package/dist/agent/src/agent/sudoMcpServerManager.js +12 -6
- package/dist/agent/src/chat/client.js +259 -36
- package/dist/agent/src/chat/conversationManager.js +243 -24
- package/dist/agent/src/chat/db.js +24 -1
- package/dist/agent/src/chat/frontendClient.js +74 -0
- package/dist/agent/src/chat/server.js +3 -3
- package/dist/agent/src/test/db.test.js +25 -2
- package/dist/agent/src/test/openaiStreaming.test.js +133 -0
- package/dist/agent/src/test/prompt.test.js +2 -2
- package/dist/agent/src/test/sudoMcpServerManager.test.js +1 -1
- package/dist/agent/src/tool/agentChat.js +7 -197
- package/dist/agent/src/tool/chatMain.js +18 -23
- package/dist/agent/src/tool/commandPrompt.js +248 -0
- package/dist/agent/src/tool/prompt.js +27 -31
- package/package.json +1 -1
- package/scripts/test_chat +17 -1
- package/src/agent/agent.ts +34 -11
- package/src/agent/agentUtils.ts +52 -3
- package/src/agent/mcpServerManager.ts +43 -13
- package/src/agent/openAILLM.ts +3 -1
- package/src/agent/openAILLMStreaming.ts +28 -27
- package/src/agent/repeatLLM.ts +51 -0
- package/src/agent/sudoMcpServerManager.ts +41 -12
- package/src/chat/client.ts +353 -40
- package/src/chat/conversationManager.ts +345 -33
- package/src/chat/db.ts +28 -2
- package/src/chat/frontendClient.ts +123 -0
- package/src/chat/messages.ts +146 -2
- package/src/chat/server.ts +3 -3
- package/src/test/db.test.ts +35 -2
- package/src/test/openaiStreaming.test.ts +142 -0
- package/src/test/prompt.test.ts +1 -1
- package/src/test/sudoMcpServerManager.test.ts +1 -1
- package/src/tool/agentChat.ts +13 -211
- package/src/tool/chatMain.ts +28 -43
- package/src/tool/commandPrompt.ts +252 -0
- package/src/tool/prompt.ts +33 -32
package/dist/agent/src/agent/agent.js
@@ -53,7 +53,7 @@ class Agent {
         this.llm = llm;
         this.toolHandlers = {};
     }
-    static async initializeWithLLM(onMessage, onToolCall, systemPrompt, llm) {
+    static async initializeWithLLM(onMessage, onToolCall, systemPrompt, llm, mcpServerManager) {
         // Initialize messages with system prompt
         const messages = [
             {
@@ -61,15 +61,13 @@ class Agent {
                 content: systemPrompt ?? "You are a helpful assistant",
             },
         ];
-
-        const mcpServerManager = new mcpServerManager_1.McpServerManager();
-        return new Agent(onMessage, onToolCall, messages, mcpServerManager, [], llm);
+        return new Agent(onMessage, onToolCall, messages, mcpServerManager ?? new mcpServerManager_1.McpServerManager(), [], llm);
     }
     async shutdown() {
         return this.mcpServerManager.shutdown();
     }
     getAgentProfile() {
-        return new sdk_1.AgentProfile(this.llm.getModel(), this.
+        return new sdk_1.AgentProfile(this.llm.getModel(), this.getSystemPrompt(), this.mcpServerManager.getMcpServerSettings());
     }
     getConversation() {
         (0, assert_1.strict)(this.messages[0].role == "system", "first message must have system role");
@@ -86,7 +84,10 @@ class Agent {
     getMcpServerManager() {
         return this.mcpServerManager;
     }
-
+    /**
+     * Like `userMessage`, but can be awaited, and accepts the user name.
+     */
+    async userMessageEx(msg, imageB64, name) {
         const userMessage = createUserMessage(msg, imageB64, name);
         if (!userMessage) {
             return undefined;
@@ -128,7 +129,13 @@ class Agent {
         }
         return completion.choices[0].message;
     }
-
+    userMessage(msg, imageB64) {
+        this.userMessageEx(msg, imageB64);
+    }
+    getModel() {
+        return this.llm.getModel();
+    }
+    setModel(model) {
         logger.debug(`Set model ${model}`);
         this.llm.setModel(model);
     }
@@ -140,14 +147,14 @@ class Agent {
         // Keep only the system message
         this.messages.splice(1);
     }
-
+    getSystemPrompt() {
         (0, assert_1.strict)(this.messages[0].role === "system");
         return this.messages[0].content;
     }
     /**
     * Set the system prompt
     */
-
+    setSystemPrompt(systemMsg) {
         (0, assert_1.strict)(this.messages[0].role === "system");
         this.messages[0].content = systemMsg;
     }
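Taken together, the agent.js hunks make the McpServerManager injectable and split the old fire-and-forget userMessage into an awaitable userMessageEx. A minimal sketch of the injection pattern, with everything except the names from the diff reduced to stubs (the stub types are assumptions, not the real classes):

```typescript
// Sketch only: McpServerManager and ILLM are stand-ins for the real classes.
class McpServerManager {}
interface ILLM {
  getModel(): string;
}

class Agent {
  private constructor(
    readonly mcpServerManager: McpServerManager,
    readonly llm: ILLM,
  ) {}

  // As in the diff: callers may inject a manager; absent one, the agent
  // creates its own, preserving the 0.5.4 behaviour for existing callers.
  static async initializeWithLLM(
    llm: ILLM,
    mcpServerManager?: McpServerManager,
  ): Promise<Agent> {
    return new Agent(mcpServerManager ?? new McpServerManager(), llm);
  }
}
```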
package/dist/agent/src/agent/agentUtils.js
@@ -1,7 +1,8 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.DEFAULT_LLM_MODEL = exports.DEFAULT_LLM_URL = void 0;
+exports.XALIA_APP_HEADER = exports.DEFAULT_LLM_MODEL = exports.DEFAULT_LLM_URL = void 0;
 exports.createAgentWithSkills = createAgentWithSkills;
+exports.createAgentFromSkillManager = createAgentFromSkillManager;
 exports.createNonInteractiveAgent = createNonInteractiveAgent;
 exports.runOneShot = runOneShot;
 const sdk_1 = require("@xalia/xmcp/sdk");
@@ -11,13 +12,18 @@ const openAILLM_1 = require("./openAILLM");
 const openAILLMStreaming_1 = require("./openAILLMStreaming");
 const dummyLLM_1 = require("./dummyLLM");
 const assert_1 = require("assert");
+const repeatLLM_1 = require("./repeatLLM");
 const logger = (0, sdk_1.getLogger)();
 exports.DEFAULT_LLM_URL = "http://localhost:5001/v1";
 exports.DEFAULT_LLM_MODEL = "gpt-4o";
+exports.XALIA_APP_HEADER = {
+    "HTTP-Referer": "xalia.ai",
+    "X-Title": "Xalia",
+};
 /**
  * Util function to create an Agent from some config information.
  */
-async function createAgent(llmUrl, model, systemPrompt, onMessage, onToolCall, platform, openaiApiKey, stream = false) {
+async function createAgent(llmUrl, model, systemPrompt, onMessage, onToolCall, platform, openaiApiKey, stream = false, mcpServerManager) {
     let llm;
     if (model && model.startsWith("dummy:")) {
         // Dummy Agent
@@ -31,6 +37,9 @@ async function createAgent(llmUrl, model, systemPrompt, onMessage, onToolCall, p
         logger.debug(`Initializing Dummy Agent: ${llmUrl}`);
         llm = new dummyLLM_1.DummyLLM(responses);
     }
+    else if (model === "repeat") {
+        llm = new repeatLLM_1.RepeatLLM();
+    }
     else {
         // Regular Agent
         if (!openaiApiKey) {
@@ -45,7 +54,7 @@ async function createAgent(llmUrl, model, systemPrompt, onMessage, onToolCall, p
         }
     }
     (0, assert_1.strict)(llm);
-    return agent_1.Agent.initializeWithLLM(onMessage, onToolCall, systemPrompt, llm);
+    return agent_1.Agent.initializeWithLLM(onMessage, onToolCall, systemPrompt, llm, mcpServerManager);
 }
 /**
  * Util function to create and initialize an Agent given an AgentProfile.
@@ -66,6 +75,17 @@ async function createAgentWithSkills(llmUrl, agentProfile, onMessage, onToolCall
     logger.debug("[createAgentWithSkills] done");
     return [agent, sudoMcpServerManager];
 }
+async function createAgentFromSkillManager(llmUrl, agentProfile, onMessage, onToolCall, platform, llmApiKey, skillManager, conversation, stream = false) {
+    // Create agent
+    logger.debug("[createAgentAndSudoMcpServerManager] creating agent ...");
+    const mcpServerManager = skillManager.getMcpServerManager();
+    const agent = await createAgent(llmUrl, agentProfile.model, agentProfile.system_prompt, onMessage, onToolCall, platform, llmApiKey, stream, mcpServerManager);
+    if (conversation) {
+        agent.setConversation(conversation);
+    }
+    logger.debug("[createAgentWithSkills] done");
+    return agent;
+}
 /**
  * An "non-interactive" agent is one which is not intended to be used
  * interactively (settings cannot be dyanmically adjusted, intermediate
@@ -94,7 +114,7 @@ async function runOneShot(url, agentProfile, conversation, platform, prompt, ima
     // Create a non-interactive agent and pass any prompt/ image to it. Return
     // the first answer.
     const agent = await createNonInteractiveAgent(url, agentProfile, conversation, platform, llmApiKey, sudomcpConfig, approveToolsUpTo);
-    const response = await agent.
+    const response = await agent.userMessageEx(prompt, image);
     await agent.shutdown();
     logger.debug("[runOneShot]: shutdown done");
     if (!response) {
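With userMessageEx returning a promise for the assistant reply, runOneShot can await the answer directly instead of observing it through callbacks. A sketch of that flow using a structural type in place of the real Agent class (the helper name and error handling are illustrative):

```typescript
// Structural type so the sketch stands alone; the real Agent has more members.
type OneShotAgent = {
  userMessageEx(
    msg: string,
    imageB64?: string,
  ): Promise<{ content: string | null } | undefined>;
  shutdown(): Promise<void>;
};

async function oneShot(
  agent: OneShotAgent,
  prompt: string,
  image?: string,
): Promise<string> {
  // New in 0.5.6: the reply can be awaited directly.
  const response = await agent.userMessageEx(prompt, image);
  await agent.shutdown();
  if (!response?.content) {
    throw new Error("no response from agent"); // illustrative error handling
  }
  return response.content;
}
```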
package/dist/agent/src/agent/mcpServerManager.js
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.McpServerManager = exports.McpServerInfo = void 0;
+exports.McpServerManager = exports.McpServerInfoRW = exports.McpServerInfo = void 0;
 exports.computeQualifiedName = computeQualifiedName;
 exports.splitQualifiedName = splitQualifiedName;
 exports.computeOpenAIToolList = computeOpenAIToolList;
@@ -38,12 +38,25 @@ class McpServerInfo {
     }
 }
 exports.McpServerInfo = McpServerInfo;
+/**
+ * Instance of McpServerInfo which supports setting tool state. Intended for
+ * IMcpServerManager implementations, not for client code.
+ */
+class McpServerInfoRW extends McpServerInfo {
+    enableTool(toolName) {
+        this.enabledToolsMap[toolName] = true;
+    }
+    disableTool(toolName) {
+        delete this.enabledToolsMap[toolName];
+    }
+}
+exports.McpServerInfoRW = McpServerInfoRW;
 /**
  * The internal class holds server info and allows it to be updated. Managed
  * by McpServerManager. Do not access these methods except via the
  * McpServerManager.
  */
-class McpServerInfoInternal extends
+class McpServerInfoInternal extends McpServerInfoRW {
     constructor(client, tools) {
         super(tools);
         const callbacks = {};
@@ -77,12 +90,6 @@ class McpServerInfoInternal extends McpServerInfo {
     async shutdown() {
         await this.client.close();
     }
-    enableTool(toolName) {
-        this.enabledToolsMap[toolName] = true;
-    }
-    disableTool(toolName) {
-        delete this.enabledToolsMap[toolName];
-    }
     getCallback(toolName) {
         return this.callbacks[toolName];
     }
@@ -105,6 +112,9 @@ class McpServerManager {
         }));
         this.mcpServers = {};
     }
+    hasMcpServer(mcpServerName) {
+        return !!this.mcpServers[mcpServerName];
+    }
     getMcpServerNames() {
         return Object.keys(this.mcpServers);
     }
@@ -214,7 +224,7 @@ class McpServerManager {
      */
     getMcpServerSettings() {
         const config = {};
-        // NOTE: on load,
+        // NOTE: on load, entries of the form:
         //
         // <server>: []
         //
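The mcpServerManager.js hunks hoist enableTool/disableTool out of the internal class into a new McpServerInfoRW layer, so a plain McpServerInfo handed to client code stays read-only while manager implementations keep write access. A simplified sketch of the layering (the tool map is trimmed, and isToolEnabled is a hypothetical accessor, not from the diff):

```typescript
class McpServerInfo {
  protected enabledToolsMap: Record<string, boolean> = {};

  // Hypothetical read-only accessor for illustration; not part of the diff.
  isToolEnabled(toolName: string): boolean {
    return !!this.enabledToolsMap[toolName];
  }
}

// Mutation lives one layer down, for IMcpServerManager implementations only;
// client code is handed the base type and cannot flip tool state.
class McpServerInfoRW extends McpServerInfo {
  enableTool(toolName: string): void {
    this.enabledToolsMap[toolName] = true;
  }
  disableTool(toolName: string): void {
    delete this.enabledToolsMap[toolName];
  }
}
```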
package/dist/agent/src/agent/openAILLM.js
@@ -1,6 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAILLM = void 0;
+const agentUtils_1 = require("./agentUtils");
 const openai_1 = require("openai");
 class OpenAILLM {
     constructor(apiKey, apiUrl, model) {
@@ -8,8 +9,9 @@ class OpenAILLM {
             apiKey,
             baseURL: apiUrl,
             dangerouslyAllowBrowser: true,
+            defaultHeaders: agentUtils_1.XALIA_APP_HEADER,
         });
-        this.model = model ||
+        this.model = model || agentUtils_1.DEFAULT_LLM_MODEL;
     }
     setModel(model) {
         this.model = model;
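Both LLM clients now pass XALIA_APP_HEADER as defaultHeaders on the OpenAI client; HTTP-Referer and X-Title are the attribution headers OpenRouter-compatible gateways use to identify calling apps. defaultHeaders is a standard openai-node client option, so the wiring looks roughly like:

```typescript
import OpenAI from "openai";

// Header values copied from the agentUtils.js diff above; the baseURL is
// DEFAULT_LLM_URL from the same file.
const XALIA_APP_HEADER = {
  "HTTP-Referer": "xalia.ai",
  "X-Title": "Xalia",
};

const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: "http://localhost:5001/v1",
  dangerouslyAllowBrowser: true,
  defaultHeaders: XALIA_APP_HEADER, // attached to every outgoing request
});
```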
package/dist/agent/src/agent/openAILLMStreaming.js
@@ -1,9 +1,12 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAILLMStreaming = void 0;
+exports.initializeCompletion = initializeCompletion;
+exports.updateCompletion = updateCompletion;
 const sdk_1 = require("@xalia/xmcp/sdk");
 const openai_1 = require("openai");
 const assert_1 = require("assert");
+const agentUtils_1 = require("./agentUtils");
 const logger = (0, sdk_1.getLogger)();
 function initialToolCallFunction(deltaFn) {
     // export interface ChatCompletionChunk.Choice.Delta.ToolCall.Function {
@@ -222,7 +225,8 @@ function initializeCompletionChoice(chunkChoice) {
     return {
         choice: {
             message,
-
+            // We use `null` to signal that `finish_reason` is unset
+            finish_reason: chunkChoice.finish_reason || null,
             index: chunkChoice.index,
             logprobs: chunkChoice.logprobs || null,
         },
@@ -251,6 +255,7 @@ function updateCompletionChoice(completionChoice, chunkChoice) {
     (0, assert_1.strict)(completionChoice.index === chunkChoice.index);
     updateCompletionMessage(completionChoice.message, chunkChoice.delta);
     if (chunkChoice.finish_reason) {
+        (0, assert_1.strict)(completionChoice.finish_reason === null, `finish_reason already set: (${completionChoice.finish_reason})`);
         completionChoice.finish_reason = chunkChoice.finish_reason;
         return true;
     }
@@ -261,7 +266,7 @@ function initializeCompletionChoices(chunkChoices) {
     // content to stream. We keep it simple for now and assume only single
     // choices, which allows us to mark everything as done if any choice we hit is
     // done.
-    (0, assert_1.strict)(chunkChoices.length
+    (0, assert_1.strict)(chunkChoices.length < 2);
     let msgDone = false;
     const choices = [];
     for (const chunkChoice of chunkChoices) {
@@ -367,8 +372,9 @@ class OpenAILLMStreaming {
             apiKey,
             baseURL: apiUrl,
             dangerouslyAllowBrowser: true,
+            defaultHeaders: agentUtils_1.XALIA_APP_HEADER,
         });
-        this.model = model ||
+        this.model = model || agentUtils_1.DEFAULT_LLM_MODEL;
     }
     setModel(model) {
         this.model = model;
@@ -391,39 +397,32 @@ class OpenAILLMStreaming {
             throw "not a stream";
         }
         let aggregatedMessage;
-        let done = false;
         for await (const chunk of chunks) {
             logger.debug(`[stream] chunk: ${JSON.stringify(chunk)}`);
-            (0, assert_1.strict)(!done);
             if (chunk.object !== "chat.completion.chunk") {
                 // logger.warn("[stream]: unexpected message");
                 continue;
             }
-
-
-
-
-
-
-
-
-            return updateCompletion(aggregatedMessage, chunk);
-            }
-            })();
+            if (!aggregatedMessage) {
+                logger.debug(`[stream] first}`);
+                const { initMessage } = initializeCompletion(chunk);
+                aggregatedMessage = initMessage;
+            }
+            else {
+                updateCompletion(aggregatedMessage, chunk);
+            }
             if (onMessage) {
-                // Inform the call of a message fragment
-
-
-
-                await onMessage(delta.content, done);
-            }
-            else if (done) {
-                await onMessage("", true);
+                // Inform the call of a message fragment if it contains any text.
+                const delta = chunk.choices[0]?.delta;
+                if (delta?.content) {
+                    await onMessage(delta.content, false);
                 }
             }
         }
+        if (onMessage) {
+            await onMessage("", true);
+        }
         logger.debug(`[stream] final message: ${JSON.stringify(aggregatedMessage)}`);
-        (0, assert_1.strict)(done);
         (0, assert_1.strict)(aggregatedMessage);
         return aggregatedMessage;
     }
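The rewritten stream loop drops the done flag in favour of a simple initialize-then-fold shape, and moves the terminal onMessage("", true) after the loop so it fires exactly once even when a stream carries no text deltas. The control flow, reduced to its skeleton (types trimmed; the init/update parameters stand in for the newly exported initializeCompletion/updateCompletion helpers):

```typescript
type Chunk = {
  object: string;
  choices: { delta?: { content?: string } }[];
};
type OnMessage = (fragment: string, done: boolean) => Promise<void>;

async function aggregateStream<T>(
  chunks: AsyncIterable<Chunk>,
  init: (c: Chunk) => T, // seeds the aggregate from the first chunk
  update: (agg: T, c: Chunk) => void, // folds later chunks in
  onMessage?: OnMessage,
): Promise<T | undefined> {
  let aggregated: T | undefined;
  for await (const chunk of chunks) {
    if (chunk.object !== "chat.completion.chunk") continue;
    if (!aggregated) {
      aggregated = init(chunk);
    } else {
      update(aggregated, chunk);
    }
    const delta = chunk.choices[0]?.delta;
    if (onMessage && delta?.content) {
      await onMessage(delta.content, false); // emit fragments as they arrive
    }
  }
  // The terminal signal now fires exactly once, after the stream is drained.
  if (onMessage) {
    await onMessage("", true);
  }
  return aggregated;
}
```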
package/dist/agent/src/agent/repeatLLM.js
@@ -0,0 +1,43 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.RepeatLLM = void 0;
+const assert_1 = require("assert");
+class RepeatLLM {
+    constructor() {
+        this.idx = 0;
+    }
+    getModel() {
+        return "repeat";
+    }
+    getUrl() {
+        throw "cannot get url for RepeatLLM";
+    }
+    async getConversationResponse(_messages, _tools, onMessage) {
+        await new Promise((r) => setTimeout(r, 0));
+        const content = `Message number ${this.idx++}`;
+        const response = {
+            finish_reason: "stop",
+            index: 0,
+            logprobs: null,
+            message: {
+                content,
+                refusal: null,
+                role: "assistant",
+            },
+        };
+        if (onMessage) {
+            onMessage(content, true);
+        }
+        return {
+            id: "" + this.idx,
+            choices: [response],
+            created: Date.now(),
+            model: "dummyLlmModel",
+            object: "chat.completion",
+        };
+    }
+    setModel(_model) {
+        (0, assert_1.strict)(false, "unexpected call to setModel");
+    }
+}
+exports.RepeatLLM = RepeatLLM;
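repeatLLM.js is a new deterministic stub, selected by passing model === "repeat" to createAgent, that answers every turn with an incrementing "Message number N"; it is useful for exercising chat plumbing without a real backend. A quick check of that behaviour, assuming the class can be imported from the compiled output (the import path is a guess):

```typescript
// The deep import path is an assumption about the published layout; adjust
// to the package's real exports.
import { RepeatLLM } from "@xalia/agent/dist/agent/src/agent/repeatLLM";

async function demo(): Promise<void> {
  const llm = new RepeatLLM();
  for (let i = 0; i < 2; i++) {
    const completion = await llm.getConversationResponse([], [], undefined);
    console.log(completion.choices[0].message.content);
    // -> "Message number 0", then "Message number 1"
  }
}
void demo();
```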
package/dist/agent/src/agent/sudoMcpServerManager.js
@@ -62,7 +62,7 @@ class SkillManager {
         // Concurrently establish all server connections
         const addServer = async (serverName) => {
             logger.debug(`restoring ${serverName} ...`);
-            return this.addMcpServer(serverName);
+            return this.addMcpServer(serverName, false);
         };
         await Promise.all(Object.entries(mcpConfig).map((e) => addServer(e[0])));
         // Enable tools
@@ -93,6 +93,9 @@ class SkillManager {
         this.serverBriefsMap = mcpServersMap;
         this.toolCache = {};
     }
+    hasServer(serverName) {
+        return !!this.serverBriefsMap[serverName];
+    }
     getServerBriefs() {
         return this.serverBriefs;
     }
@@ -116,11 +119,10 @@ class SkillManager {
         return tools;
     }
     /**
-     * Add a server to the `McpServerManager`, using `ApiClient`
-     *
-     * schema, if applicable.
+     * Add a server to the `McpServerManager`, using `ApiClient` to produce the
+     * transport. Validates the server's config schema, if applicable.
      */
-    async addMcpServer(serverName) {
+    async addMcpServer(serverName, enableAll) {
         const tools = await this.getServerTools(serverName);
         const originalName = this.serverBriefsMap[serverName].originalName;
         const mcpserver = await this.apiClient.getDetails(originalName, "run");
@@ -129,7 +131,11 @@ class SkillManager {
             version: "1.0.0",
         });
         await connectServer(client, this.apiClient, mcpserver, this.openUrl, this.authorized_url);
-
+        const msm = this.mcpServerManager;
+        await msm.addMcpServerWithClient(client, serverName, tools);
+        if (enableAll) {
+            msm.enableAllTools(serverName);
+        }
     }
     getOriginalName(serverName) {
         return this.serverBriefsMap[serverName].name;
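Finally, SkillManager.addMcpServer gains an enableAll flag: the restore path above passes false so persisted per-tool selections survive a reload, while callers adding a fresh server can opt in to everything it exposes. A sketch of the two call sites (server names are illustrative; the structural type stands in for the real SkillManager):

```typescript
// Sketch only: the structural type mirrors the method from the diff.
type Skills = {
  addMcpServer(serverName: string, enableAll: boolean): Promise<void>;
};

async function addServers(skills: Skills): Promise<void> {
  // New server added interactively: enable every tool it exposes.
  await skills.addMcpServer("github", true);
  // Restoring from saved config: keep the persisted per-tool selection.
  await skills.addMcpServer("slack", false);
}
```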