@xalia/agent 1.0.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.prettierrc.json +11 -0
- package/README.md +57 -0
- package/dist/agent.js +278 -0
- package/dist/agentUtils.js +88 -0
- package/dist/chat.js +278 -0
- package/dist/dummyLLM.js +28 -0
- package/dist/files.js +115 -0
- package/dist/iplatform.js +2 -0
- package/dist/llm.js +2 -0
- package/dist/main.js +136 -0
- package/dist/mcpServerManager.js +269 -0
- package/dist/nodePlatform.js +61 -0
- package/dist/openAILLM.js +31 -0
- package/dist/options.js +79 -0
- package/dist/prompt.js +83 -0
- package/dist/sudoMcpServerManager.js +174 -0
- package/dist/test/imageLoad.test.js +14 -0
- package/dist/test/mcpServerManager.test.js +71 -0
- package/dist/test/prompt.test.js +26 -0
- package/dist/test/sudoMcpServerManager.test.js +49 -0
- package/dist/tokenAuth.js +39 -0
- package/dist/tools.js +44 -0
- package/eslint.config.mjs +25 -0
- package/frog.png +0 -0
- package/package.json +41 -0
- package/scripts/git_message +31 -0
- package/scripts/git_wip +21 -0
- package/scripts/pr_message +18 -0
- package/scripts/pr_review +16 -0
- package/scripts/sudomcp_import +23 -0
- package/scripts/test_script +60 -0
- package/src/agent.ts +357 -0
- package/src/agentUtils.ts +188 -0
- package/src/chat.ts +325 -0
- package/src/dummyLLM.ts +36 -0
- package/src/files.ts +95 -0
- package/src/iplatform.ts +11 -0
- package/src/llm.ts +12 -0
- package/src/main.ts +171 -0
- package/src/mcpServerManager.ts +365 -0
- package/src/nodePlatform.ts +24 -0
- package/src/openAILLM.ts +43 -0
- package/src/options.ts +103 -0
- package/src/prompt.ts +93 -0
- package/src/sudoMcpServerManager.ts +268 -0
- package/src/test/imageLoad.test.ts +14 -0
- package/src/test/mcpServerManager.test.ts +98 -0
- package/src/test/prompt.test.src +0 -0
- package/src/test/prompt.test.ts +26 -0
- package/src/test/sudoMcpServerManager.test.ts +63 -0
- package/src/tokenAuth.ts +50 -0
- package/src/tools.ts +57 -0
- package/test_data/background_test_profile.json +7 -0
- package/test_data/background_test_script.json +11 -0
- package/test_data/dummyllm_script_simplecalc.json +28 -0
- package/test_data/git_message_profile.json +4 -0
- package/test_data/git_wip_system.txt +5 -0
- package/test_data/pr_message_profile.json +4 -0
- package/test_data/pr_review_profile.json +4 -0
- package/test_data/prompt_simplecalc.txt +1 -0
- package/test_data/simplecalc_profile.json +4 -0
- package/test_data/sudomcp_import_profile.json +4 -0
- package/test_data/test_script_profile.json +9 -0
- package/tsconfig.json +13 -0
package/.prettierrc.json
ADDED
package/README.md
ADDED
@@ -0,0 +1,57 @@
|
|
1
|
+
# Sudobase Client WIP
|
2
|
+
|
3
|
+
## Setup
|
4
|
+
Create `.env` file with `OPENAI_API_KEY=xxxxx`, then
|
5
|
+
|
6
|
+
```sh
|
7
|
+
# In the root folder
|
8
|
+
yarn
|
9
|
+
yarn workspaces run build
|
10
|
+
```
|
11
|
+
|
12
|
+
Run a local backend server (follow instructions in `mcppro`) because authentication against deployed backend is WIP.
|
13
|
+
|
14
|
+
## Usage:
|
15
|
+
To enter a chat with no initial prompt and default system prompt:
|
16
|
+
```sh
|
17
|
+
node dist/main.js
|
18
|
+
```
|
19
|
+
|
20
|
+
Optional arguments are `--prompt` (first User message) and `--sysprompt` (system prompt)
|
21
|
+
```sh
|
22
|
+
node dist/main.js --prompt 'Who is the new pope?' --sysprompt 'You are extremely polite.'
|
23
|
+
```
|
24
|
+
|
25
|
+
## Features:
|
26
|
+
### Conversation:
|
27
|
+
CLI-mode is a conversation between user and LLM.
|
28
|
+
|
29
|
+
### Tool selection:
|
30
|
+
We now support MCP tool calls. Currently servers are enabled by editing the `mcpServerUrls.json` file, but this will be improved soon.
|
31
|
+
|
32
|
+
### Model selection:
|
33
|
+
The CLI uses the default model (`gpt-4o-mini`) but uncomment the `agent.chooseModel` line to switch to `gpt-4.1-2025-04-14`. Right now we can use any OpenAI model that supports tool calling.
|
34
|
+
|
35
|
+
Supporting inference providers like Together.ai is TODO.
|
36
|
+
|
37
|
+
### Callbacks
|
38
|
+
The CLI uses an `onMessage` callback to display the Agent's messages and an `onToolCall` callback to request authorization for tool calls.
|
39
|
+
|
40
|
+
## Development Notes
|
41
|
+
|
42
|
+
### Architecture
|
43
|
+
Frontend talks to
|
44
|
+
- Agent (for conversation, ChatCompletion)
|
45
|
+
- McpServerManager (to enable, disable tools that have been added)
|
46
|
+
- SudoMcpServerManager (to access catalog of SudoMCP servers, add to McpServerManager)
|
47
|
+
|
48
|
+
SudoMcpServerManager:
|
49
|
+
- track list of available mcp servers (via sdk/ApiClient)
|
50
|
+
- get the list of tools as required by UI (via sdk/ApiClient)
|
51
|
+
- add tools to McpServerManager
|
52
|
+
|
53
|
+
McpServerManager:
|
54
|
+
- manager (mcpServer, tool)
|
55
|
+
- enabling / disabling
|
56
|
+
- list of enabled / available tools per mcp server
|
57
|
+
- exposes tools to Agent
|
package/dist/agent.js
ADDED
@@ -0,0 +1,278 @@
|
|
1
|
+
"use strict";
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
3
|
+
if (k2 === undefined) k2 = k;
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
7
|
+
}
|
8
|
+
Object.defineProperty(o, k2, desc);
|
9
|
+
}) : (function(o, m, k, k2) {
|
10
|
+
if (k2 === undefined) k2 = k;
|
11
|
+
o[k2] = m[k];
|
12
|
+
}));
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
15
|
+
}) : function(o, v) {
|
16
|
+
o["default"] = v;
|
17
|
+
});
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
19
|
+
var ownKeys = function(o) {
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
21
|
+
var ar = [];
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
23
|
+
return ar;
|
24
|
+
};
|
25
|
+
return ownKeys(o);
|
26
|
+
};
|
27
|
+
return function (mod) {
|
28
|
+
if (mod && mod.__esModule) return mod;
|
29
|
+
var result = {};
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
31
|
+
__setModuleDefault(result, mod);
|
32
|
+
return result;
|
33
|
+
};
|
34
|
+
})();
|
35
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
36
|
+
exports.Agent = exports.AgentProfile = void 0;
|
37
|
+
exports.createUserMessage = createUserMessage;
|
38
|
+
const dotenv = __importStar(require("dotenv"));
|
39
|
+
const mcpServerManager_1 = require("./mcpServerManager");
|
40
|
+
const assert_1 = require("assert");
|
41
|
+
const openAILLM_1 = require("./openAILLM");
|
42
|
+
const dummyLLM_1 = require("./dummyLLM");
|
43
|
+
const sdk_1 = require("@xalia/xmcp/sdk");
|
44
|
+
dotenv.config();
|
45
|
+
const logger = (0, sdk_1.getLogger)();
|
46
|
+
/**
 * Serializable description of an Agent's configuration: which LLM endpoint
 * and model to use, the system prompt, and the MCP server settings.
 */
class AgentProfile {
    constructor(
    /// The llm provider endpoint, or dummy llm filename. `undefined` means
    /// openai.
    llm_url,
    /// "dummy" means use the dummy LLM, in which case llmUrl refers to the
    /// filename. `undefined` means default for the provider.
    model,
    /// System prompt
    system_prompt,
    /// MCP server settings.
    mcp_settings) {
        this.llm_url = llm_url;
        this.model = model;
        this.system_prompt = system_prompt;
        this.mcp_settings = mcp_settings;
    }
    /**
     * Validate a parsed-JSON object and build an AgentProfile from it.
     * Throws (via assert) when a field has an unexpected type.
     */
    static fromJSONObj(obj) {
        (0, assert_1.strict)(typeof obj === "object");
        const { llm_url, model, system_prompt, mcp_settings } = obj;
        // llm_url and model are optional strings.
        for (const field of [llm_url, model]) {
            (0, assert_1.strict)(typeof field === "string" || typeof field === "undefined");
        }
        (0, assert_1.strict)(typeof system_prompt === "string");
        (0, assert_1.strict)(typeof mcp_settings === "object");
        return new AgentProfile(llm_url, model, system_prompt, mcp_settings);
    }
}
exports.AgentProfile = AgentProfile;
/**
 * Conversational LLM agent. Maintains the message history (system prompt
 * first), forwards conversations to an LLM implementation, and executes
 * tool calls either via locally-registered handlers or via the
 * McpServerManager.
 */
class Agent {
    /**
     * @param onMessage  async callback invoked with each assistant message
     *                   that has non-empty content.
     * @param onToolCall async callback invoked per tool call; returns a
     *                   truthy value to approve execution.
     * @param messages   initial message list; index 0 must be the system msg.
     * @param mcpServerManager manager providing MCP-backed tools.
     * @param tools      locally-registered OpenAI tool definitions.
     * @param llm        LLM backend (OpenAILLM or DummyLLM).
     */
    constructor(onMessage, onToolCall, messages, mcpServerManager, tools, llm) {
        this.onMessage = onMessage;
        this.onToolCall = onToolCall;
        this.messages = messages;
        this.mcpServerManager = mcpServerManager;
        this.tools = tools;
        this.llm = llm;
        // Map from tool name -> handler for tools added via addTool().
        this.toolHandlers = {};
    }
    /**
     * Create an Agent around an existing LLM instance.
     * Falls back to a default system prompt when none is given.
     */
    static async initializeWithLLM(onMessage, onToolCall, systemPrompt, llm) {
        // Initialize messages with system prompt
        const messages = [
            {
                role: "system",
                content: systemPrompt ?? "You are a helpful assistant",
            },
        ];
        // Create the server manager
        const mcpServerManager = new mcpServerManager_1.McpServerManager();
        return new Agent(onMessage, onToolCall, messages, mcpServerManager, [], llm);
    }
    /**
     * Create an Agent backed by an OpenAI-compatible endpoint.
     */
    static async initialize(onMessage, onToolCall, systemPrompt, openaiApiUrl, openaiApiKey, model) {
        return Agent.initializeWithLLM(onMessage, onToolCall, systemPrompt, new openAILLM_1.OpenAILLM(openaiApiKey, openaiApiUrl, model));
    }
    /**
     * Create an Agent backed by the scripted DummyLLM (used in tests).
     */
    static async initializeDummy(onMessage, onToolCall, systemPrompt, responses) {
        return Agent.initializeWithLLM(onMessage, onToolCall, systemPrompt, new dummyLLM_1.DummyLLM(responses));
    }
    /** Shut down the MCP server manager (and its server connections). */
    async shutdown() {
        return this.mcpServerManager.shutdown();
    }
    /** Snapshot the current configuration as a serializable AgentProfile. */
    getAgentProfile() {
        return new AgentProfile(this.llm.getUrl(), this.llm.getModel(), this.getSystemMessage(), this.mcpServerManager.getMcpServerSettings());
    }
    /**
     * Return a deep copy of the conversation, excluding the system message.
     */
    getConversation() {
        (0, assert_1.strict)(this.messages[0].role == "system", "first message must have system role");
        // Return a copy so future modifications to `this.messages` don't impact
        // the caller's copy.
        return structuredClone(this.messages.slice(1));
    }
    /**
     * Replace the conversation (everything after the system message) with a
     * deep copy of `messages`. The incoming list must not contain a system
     * message of its own.
     */
    setConversation(messages) {
        (0, assert_1.strict)(this.messages[0].role == "system");
        (0, assert_1.strict)(messages[0].role != "system", "conversation contains system msg");
        const newMessages = [this.messages[0]];
        this.messages = newMessages.concat(structuredClone(messages));
    }
    /** Accessor for the underlying McpServerManager. */
    getMcpServerManager() {
        return this.mcpServerManager;
    }
    /**
     * Send a user message (text and/or base64 image URL) to the LLM and run
     * the tool-call loop until the model stops requesting tools.
     * Returns the final assistant message, or undefined when both msg and
     * imageB64 are empty (nothing is sent).
     */
    async userMessage(msg, imageB64) {
        const userMessage = createUserMessage(msg, imageB64);
        if (!userMessage) {
            return undefined;
        }
        this.messages.push(userMessage);
        let completion = await this.chatCompletion();
        let message = completion.choices[0].message;
        this.messages.push(message);
        if (message.content) {
            await this.onMessage(message, true);
        }
        // While there are tool calls to make, make them and loop
        while (message.tool_calls && message.tool_calls.length > 0) {
            for (const toolCall of message.tool_calls ?? []) {
                // Ask the caller to approve each tool call individually.
                const approval = await this.onToolCall(toolCall);
                if (approval) {
                    try {
                        const result = await this.doToolCall(toolCall);
                        logger.debug(`tool call result ${JSON.stringify(result)}`);
                        this.messages.push(result);
                    }
                    catch (e) {
                        // A failed tool call is reported back to the model as a
                        // tool message rather than aborting the conversation.
                        logger.error(`tool call error: ${e}`);
                        this.messages.push({
                            role: "tool",
                            tool_call_id: toolCall.id,
                            content: "Tool call failed.",
                        });
                    }
                }
                else {
                    // Denied: the model still needs a tool response for this id.
                    this.messages.push({
                        role: "tool",
                        tool_call_id: toolCall.id,
                        content: "User denied tool use request.",
                    });
                }
            }
            // Feed the tool results back and get the model's next message.
            completion = await this.chatCompletion();
            message = completion.choices[0].message;
            this.messages.push(message);
            if (message.content) {
                await this.onMessage(message, true);
            }
        }
        return completion.choices[0].message;
    }
    /**
     * Switch the model in use. Only valid for the OpenAI LLM backend.
     */
    chooseModel(model) {
        logger.debug(`Set model ${model}`);
        (0, assert_1.strict)(this.llm instanceof openAILLM_1.OpenAILLM);
        this.llm.setModel(model);
    }
    /**
     * Clear the conversation.
     */
    resetConversation() {
        (0, assert_1.strict)(this.messages.length > 0);
        // Keep only the system message
        this.messages.splice(1);
    }
    /** Return the current system prompt (content of the first message). */
    getSystemMessage() {
        (0, assert_1.strict)(this.messages[0].role === "system");
        return this.messages[0].content;
    }
    /**
     * Set the system prompt
     */
    setSystemMessage(systemMsg) {
        (0, assert_1.strict)(this.messages[0].role === "system");
        this.messages[0].content = systemMsg;
    }
    /**
     * Request one chat completion for the current message history, passing
     * the enabled tools (local + MCP) when there are any. The tools argument
     * is left undefined when no tools are enabled.
     */
    async chatCompletion() {
        let tools;
        const enabledTools = this.tools.concat(this.mcpServerManager.getOpenAITools());
        if (enabledTools.length > 0) {
            tools = enabledTools;
        }
        // logger.debug(
        //     `chatCompletion: tools: ${JSON.stringify(tools, undefined, 2)}`
        // );
        const completion = await this.llm.getConversationResponse(this.messages, tools);
        logger.debug(`Received chat completion ${JSON.stringify(completion)}`);
        return completion;
    }
    /**
     * Names of the MCP-provided tools.
     * NOTE(review): locally-registered tools (this.tools) are not included
     * here — confirm whether that is intentional.
     */
    toolNames() {
        return this.mcpServerManager
            .getOpenAITools()
            .map((tool) => tool.function.name);
    }
    /**
     * Register a local tool definition plus its handler. Throws when a tool
     * with the same name was already added.
     * NOTE(review): throws a plain string, not an Error — stack traces are
    // lost; confirm callers rely on the string before changing.
     */
    addTool(tool, handler) {
        const name = tool.function.name;
        if (this.toolHandlers[name]) {
            throw `tool ${name} already added`;
        }
        logger.debug(`Adding tool ${name}`);
        this.tools.push(tool);
        this.toolHandlers[name] = handler;
    }
    /**
     * Execute a single tool call: a local handler when one is registered for
     * the name, otherwise dispatch through the McpServerManager. Returns the
     * tool-role message to append to the conversation.
     */
    async doToolCall(toolCall) {
        const name = toolCall.function.name;
        // Arguments arrive as a JSON string per the OpenAI tool-call format.
        const args = JSON.parse(toolCall.function.arguments);
        let result = undefined;
        const handler = this.toolHandlers[name];
        if (handler) {
            logger.debug(`  found agent tool ${name} ...`);
            // NOTE(review): handler result is not awaited — if a handler
            // returns a Promise, result.toString() below yields
            // "[object Promise]". Confirm handlers are synchronous.
            result = handler(args);
        }
        else {
            result = await this.mcpServerManager.invoke(name, args);
        }
        return {
            role: "tool",
            tool_call_id: toolCall.id,
            // NOTE(review): non-string results are stringified via toString();
            // plain objects become "[object Object]" — confirm intended.
            content: result.toString(),
        };
    }
}
exports.Agent = Agent;
/**
 * Build the user ChatCompletionMessageParam from an optional text string and
 * an optional base64 image URL.
 *
 * - Neither given: returns undefined (no message to send).
 * - Text only: content is the plain string.
 * - Image present: content is an array of parts (text part first when text
 *   was supplied, then the image_url part).
 **/
function createUserMessage(msg, imageB64) {
    if (!imageB64) {
        // Text-only path: plain string content, or no message at all.
        return msg ? { role: "user", content: msg } : undefined;
    }
    // Multi-part path: always includes the image, optionally preceded by text.
    const parts = [];
    if (msg) {
        parts.push({
            type: "text",
            text: msg,
        });
    }
    parts.push({
        type: "image_url",
        image_url: {
            url: imageB64,
        },
    });
    return {
        role: "user",
        content: parts,
    };
}
@@ -0,0 +1,88 @@
|
|
1
|
+
"use strict";
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
3
|
+
exports.createAgentAndSudoMcpServerManager = createAgentAndSudoMcpServerManager;
|
4
|
+
exports.createNonInteractiveAgent = createNonInteractiveAgent;
|
5
|
+
exports.runOneShot = runOneShot;
|
6
|
+
const sdk_1 = require("@xalia/xmcp/sdk");
|
7
|
+
const agent_1 = require("./agent");
|
8
|
+
const sudoMcpServerManager_1 = require("./sudoMcpServerManager");
|
9
|
+
const logger = (0, sdk_1.getLogger)();
|
10
|
+
/**
 * Util function to create an Agent from some config information.
 *
 * When `model` is "dummy", `llmUrl` is interpreted as a filename (loaded via
 * `platform.load`) containing the scripted DummyLLM responses. Otherwise an
 * OpenAI-backed Agent is created and `openaiApiKey` is required.
 *
 * NOTE(review): throws plain strings, not Error objects — confirm callers
 * match on the string before changing.
 */
async function createAgent(llmUrl, model, systemPrompt, onMessage, onToolCall, platform, openaiApiKey) {
    if (model === "dummy") {
        // The dummy LLM replays a script file; llmUrl names that file.
        if (!llmUrl) {
            throw "AgentProfile.llmUrl must be set for dummy LLM";
        }
        logger.debug(`dummy model with script: ${llmUrl}`);
        const script = await platform.load(llmUrl);
        logger.debug(`  script: ${script}`);
        const responses = JSON.parse(script);
        logger.debug(`Initializing Dummy Agent: ${llmUrl}`);
        return agent_1.Agent.initializeDummy(onMessage, onToolCall, systemPrompt, responses);
    }
    if (!openaiApiKey) {
        throw "Missing OpenAI API Key";
    }
    logger.debug(`Initializing Agent: ${llmUrl} - ${model}`);
    return agent_1.Agent.initialize(onMessage, onToolCall, systemPrompt, llmUrl, openaiApiKey, model);
}
/**
 * Util function to create and initialize an Agent given an AgentProfile.
 *
 * Creates the Agent (optionally restoring a prior `conversation`), then
 * builds a SudoMcpServerManager around the Agent's McpServerManager and
 * restores the profile's MCP settings into it.
 *
 * @returns a pair [agent, sudoMcpServerManager].
 */
async function createAgentAndSudoMcpServerManager(agentProfile, onMessage, onToolCall, platform, openaiApiKey, sudomcpConfig, authorizedUrl, conversation) {
    // Create agent
    logger.debug("[createAgentAndSudoMcpServerManager] creating agent ...");
    const agent = await createAgent(agentProfile.llm_url, agentProfile.model, agentProfile.system_prompt, onMessage, onToolCall, platform, openaiApiKey);
    if (conversation) {
        agent.setConversation(conversation);
    }
    // Init SudoMcpServerManager
    logger.debug("[createAgentAndSudoMcpServerManager] creating SudoMcpServerManager.");
    const sudoMcpServerManager = await sudoMcpServerManager_1.SudoMcpServerManager.initialize(agent.getMcpServerManager(), platform.openUrl, sudomcpConfig.backend_url, sudomcpConfig.api_key, authorizedUrl);
    // Re-enable the MCP servers/tools recorded in the profile.
    logger.debug("[createAgentAndSudoMcpServerManager] restore mcp settings:" +
        JSON.stringify(agentProfile.mcp_settings));
    await sudoMcpServerManager.restoreMcpSettings(agentProfile.mcp_settings, sudomcpConfig.server_configs);
    logger.debug("[createAgentAndSudoMcpServerManager] done");
    return [agent, sudoMcpServerManager];
}
/**
 * A "non-interactive" agent is one which is not intended to be used
 * interactively (settings cannot be dynamically adjusted, intermediate
 * messages are not used by the caller, the user does not need to approve
 * tool calls, etc).
 *
 * @param approveToolsUpTo  number of tool calls to auto-approve before
 *                          denying; a value that never reaches 0 (e.g. a
 *                          negative number) approves without limit.
 */
async function createNonInteractiveAgent(agentProfile, conversation, platform, openaiApiKey, sudomcpConfig, approveToolsUpTo) {
    let approvalsLeft = approveToolsUpTo;
    // Intermediate assistant messages are discarded in non-interactive mode.
    const onMessage = async () => { };
    // Auto-approve tool calls until the budget hits zero.
    const onToolCall = async () => {
        if (approvalsLeft === 0) {
            return false;
        }
        --approvalsLeft;
        return true;
    };
    const [agent, _] = await createAgentAndSudoMcpServerManager(agentProfile, onMessage, onToolCall, platform, openaiApiKey, sudomcpConfig, undefined, conversation);
    return agent;
}
/**
 * Create an Agent (from the AgentProfile), pass it a single prompt and output
 * the response.
 *
 * @returns `{ response, conversation }` where `response` is the final
 *          assistant message content as a string and `conversation` is the
 *          full message history (minus the system message).
 */
async function runOneShot(agentProfile, conversation, platform, prompt, image, openaiApiKey, sudomcpConfig, approveToolsUpTo) {
    logger.debug("[runOneShot]: start");
    // Create a non-interactive agent and pass any prompt/ image to it. Return
    // the first answer.
    const agent = await createNonInteractiveAgent(agentProfile, conversation, platform, openaiApiKey, sudomcpConfig, approveToolsUpTo);
    const response = await agent.userMessage(prompt, image);
    // Shut down MCP connections before returning the result.
    await agent.shutdown();
    logger.debug("[runOneShot]: shutdown done");
    if (!response) {
        throw "No message returned from agent";
    }
    return {
        // NOTE(review): "" + content turns a null/undefined content into the
        // literal strings "null"/"undefined" — confirm this is intended.
        response: "" + response.content,
        conversation: agent.getConversation(),
    };
}