@xalia/agent 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/.prettierrc.json +11 -0
  2. package/README.md +56 -0
  3. package/dist/agent.js +238 -0
  4. package/dist/agentUtils.js +106 -0
  5. package/dist/chat.js +296 -0
  6. package/dist/dummyLLM.js +38 -0
  7. package/dist/files.js +115 -0
  8. package/dist/iplatform.js +2 -0
  9. package/dist/llm.js +2 -0
  10. package/dist/main.js +147 -0
  11. package/dist/mcpServerManager.js +278 -0
  12. package/dist/nodePlatform.js +61 -0
  13. package/dist/openAILLM.js +38 -0
  14. package/dist/openAILLMStreaming.js +431 -0
  15. package/dist/options.js +79 -0
  16. package/dist/prompt.js +83 -0
  17. package/dist/sudoMcpServerManager.js +183 -0
  18. package/dist/test/imageLoad.test.js +14 -0
  19. package/dist/test/mcpServerManager.test.js +71 -0
  20. package/dist/test/prompt.test.js +26 -0
  21. package/dist/test/sudoMcpServerManager.test.js +49 -0
  22. package/dist/tokenAuth.js +39 -0
  23. package/dist/tools.js +44 -0
  24. package/eslint.config.mjs +25 -0
  25. package/frog.png +0 -0
  26. package/package.json +42 -0
  27. package/scripts/git_message +31 -0
  28. package/scripts/git_wip +21 -0
  29. package/scripts/pr_message +18 -0
  30. package/scripts/pr_review +16 -0
  31. package/scripts/sudomcp_import +23 -0
  32. package/scripts/test_script +60 -0
  33. package/src/agent.ts +283 -0
  34. package/src/agentUtils.ts +198 -0
  35. package/src/chat.ts +346 -0
  36. package/src/dummyLLM.ts +50 -0
  37. package/src/files.ts +95 -0
  38. package/src/iplatform.ts +17 -0
  39. package/src/llm.ts +15 -0
  40. package/src/main.ts +187 -0
  41. package/src/mcpServerManager.ts +371 -0
  42. package/src/nodePlatform.ts +24 -0
  43. package/src/openAILLM.ts +51 -0
  44. package/src/openAILLMStreaming.ts +528 -0
  45. package/src/options.ts +103 -0
  46. package/src/prompt.ts +93 -0
  47. package/src/sudoMcpServerManager.ts +278 -0
  48. package/src/test/imageLoad.test.ts +14 -0
  49. package/src/test/mcpServerManager.test.ts +98 -0
  50. package/src/test/prompt.test.src +0 -0
  51. package/src/test/prompt.test.ts +26 -0
  52. package/src/test/sudoMcpServerManager.test.ts +65 -0
  53. package/src/tokenAuth.ts +50 -0
  54. package/src/tools.ts +57 -0
  55. package/test_data/background_test_profile.json +6 -0
  56. package/test_data/background_test_script.json +11 -0
  57. package/test_data/dummyllm_script_simplecalc.json +28 -0
  58. package/test_data/git_message_profile.json +4 -0
  59. package/test_data/git_wip_system.txt +5 -0
  60. package/test_data/pr_message_profile.json +4 -0
  61. package/test_data/pr_review_profile.json +4 -0
  62. package/test_data/prompt_simplecalc.txt +1 -0
  63. package/test_data/simplecalc_profile.json +4 -0
  64. package/test_data/sudomcp_import_profile.json +4 -0
  65. package/test_data/test_script_profile.json +8 -0
  66. package/tsconfig.json +13 -0
package/dist/openAILLMStreaming.js ADDED
@@ -0,0 +1,431 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OpenAILLMStreaming = void 0;
+ const sdk_1 = require("@xalia/xmcp/sdk");
+ const openai_1 = require("openai");
+ const assert_1 = require("assert");
+ const logger = (0, sdk_1.getLogger)();
+ function initialToolCallFunction(deltaFn) {
+     // export interface ChatCompletionChunk.Choice.Delta.ToolCall.Function {
+     //   arguments?: string;
+     //   name?: string;
+     // }
+     //
+     // ->
+     //
+     // export interface Function {
+     //   arguments: string;
+     //   name: string;
+     // }
+     return {
+         arguments: deltaFn?.arguments || "",
+         name: deltaFn?.name || "",
+     };
+ }
+ function updateToolCallFunction(existingFn, deltaFn // eslint-disable-line
+ ) {
+     // export interface ChatCompletionChunk.Choice.Delta.ToolCall.Function {
+     //   arguments?: string;
+     //   name?: string;
+     // }
+     // ->
+     // export interface Function {
+     //   arguments: string;
+     //   name: string;
+     // }
+     // The function can have either (or possibly both) field(s) empty.
+     // `arguments` has been observed to arrive in chunks. The same is
+     // probably true of `name`.
+     if (!deltaFn) {
+         return;
+     }
+     if (deltaFn.name) {
+         existingFn.name += deltaFn.name;
+     }
+     if (deltaFn.arguments) {
+         existingFn.arguments += deltaFn.arguments;
+     }
+ }
+ function initialToolCall(delta) {
+     return {
+         id: delta.id || "",
+         function: initialToolCallFunction(delta.function),
+         type: "function",
+     };
+ }
+ function updateToolCall(existing, delta) {
+     // export interface ChatCompletionChunk.Choice.Delta.ToolCall {
+     //   index: number;
+     //   id?: string;
+     //   function?: ToolCall.Function;
+     //   type?: 'function';
+     // }
+     //
+     // ->
+     //
+     // export interface ChatCompletionMessageToolCall {
+     //   id: string;
+     //   function: ChatCompletionMessageToolCall.Function;
+     //   type: 'function';
+     // }
+     if (delta.id) {
+         if (existing.id.length > 0) {
+             (0, assert_1.strict)(delta.id == existing.id);
+         }
+         else {
+             existing.id = delta.id;
+         }
+     }
+     if (delta.function) {
+         updateToolCallFunction(existing.function, delta.function);
+     }
+     if (delta.type) {
+         (0, assert_1.strict)(delta.type === "function");
+     }
+ }
+ function updateToolCalls(toolCalls, deltaToolCall // eslint-disable-line
+ ) {
+     // export interface ChatCompletionChunk.Choice.Delta.ToolCall {
+     //   index: number;
+     //   id?: string;
+     //   function?: ToolCall.Function;
+     //   type?: 'function';
+     // }
+     //
+     // ->
+     //
+     // export interface ChatCompletionMessageToolCall {
+     //   id: string;
+     //   function: ChatCompletionMessageToolCall.Function;
+     //   type: 'function';
+     // }
+     // The delta can arrive with any or none of the given fields. Only `index`
+     // can be relied upon.
+     if (typeof toolCalls === "undefined") {
+         toolCalls = [];
+     }
+     const existing = toolCalls[deltaToolCall.index];
+     if (!existing) {
+         toolCalls[deltaToolCall.index] = initialToolCall(deltaToolCall);
+     }
+     else {
+         updateToolCall(toolCalls[deltaToolCall.index], deltaToolCall);
+     }
+     return toolCalls;
+ }
+ function initializeCompletionMessage(delta) {
+     (0, assert_1.strict)(delta.role === undefined || delta.role == "assistant");
+     (0, assert_1.strict)(!delta.function_call);
+     // export interface ChatCompletionChunk.Choice.Delta {
+     //   content?: string | null;
+     //   function_call?: Delta.FunctionCall;
+     //   refusal?: string | null;
+     //   role?: 'developer' | 'system' | 'user' | 'assistant' | 'tool';
+     //   tool_calls?: Array<Delta.ToolCall>;
+     // }
+     //
+     // ->
+     //
+     // export interface ChatCompletionMessage {
+     //   content: string | null;
+     //   refusal: string | null;
+     //   role: 'assistant';
+     //   annotations?: Array<ChatCompletionMessage.Annotation>;
+     //   audio?: ChatCompletionAudio | null;
+     //   function_call?: ChatCompletionMessage.FunctionCall | null;
+     //   tool_calls?: Array<ChatCompletionMessageToolCall>;
+     // }
+     let toolCalls = undefined;
+     if (delta.tool_calls) {
+         for (const t of delta.tool_calls) {
+             toolCalls = updateToolCalls(toolCalls, t);
+         }
+     }
+     return {
+         content: delta.content || null,
+         refusal: delta.refusal || null,
+         role: "assistant",
+         // annotations?: Array<ChatCompletionMessage.Annotation>;
+         // audio?: ChatCompletionAudio | null;
+         // function_call: delta.function_call,
+         tool_calls: toolCalls,
+     };
+ }
+ function updateCompletionMessage(message, delta) {
+     (0, assert_1.strict)(message.role === "assistant");
+     (0, assert_1.strict)(!message.function_call);
+     (0, assert_1.strict)(!message.audio);
+     (0, assert_1.strict)(message.tool_calls instanceof Array ||
+         typeof message.tool_calls === "undefined");
+     // export interface ChatCompletionChunk.Choice.Delta {
+     //   content?: string | null;
+     //   function_call?: Delta.FunctionCall;
+     //   refusal?: string | null;
+     //   role?: 'developer' | 'system' | 'user' | 'assistant' | 'tool';
+     //   tool_calls?: Array<Delta.ToolCall>;
+     // }
+     //
+     // ->
+     //
+     // export interface ChatCompletionMessage {
+     //   content: string | null;
+     //   refusal: string | null;
+     //   role: 'assistant';
+     //   annotations?: Array<ChatCompletionMessage.Annotation>;
+     //   audio?: ChatCompletionAudio | null;
+     //   function_call?: ChatCompletionMessage.FunctionCall | null;
+     //   tool_calls?: Array<ChatCompletionMessageToolCall>;
+     // }
+     if (delta.content) {
+         if (message.content) {
+             message.content += delta.content;
+         }
+         else {
+             message.content = delta.content;
+         }
+     }
+     (0, assert_1.strict)(!delta.function_call);
+     if (delta.refusal) {
+         if (message.refusal) {
+             message.refusal += delta.refusal;
+         }
+         else {
+             message.refusal = delta.refusal;
+         }
+     }
+     (0, assert_1.strict)(delta.role === undefined || delta.role == "assistant");
+     if (delta.tool_calls) {
+         for (const t of delta.tool_calls) {
+             message.tool_calls = updateToolCalls(message.tool_calls, t);
+         }
+     }
+ }
+ function initializeCompletionChoice(chunkChoice) {
+     // export interface ChatCompletionChunk.Choice {
+     //   delta: Choice.Delta;
+     //   finish_reason:
+     //     'stop'|'length'|'tool_calls'|'content_filter'|'function_call'|null;
+     //   index: number;
+     //   logprobs?: Choice.Logprobs | null;
+     // }
+     //
+     // ->
+     //
+     // export interface ChatCompletion.Choice {
+     //   message: CompletionsCompletionsAPI.ChatCompletionMessage;
+     //   finish_reason:
+     //     'stop'|'length'|'tool_calls'|'content_filter'|'function_call';
+     //   index: number;
+     //   logprobs: Choice.Logprobs | null;
+     // }
+     const message = initializeCompletionMessage(chunkChoice.delta);
+     return {
+         choice: {
+             message,
+             finish_reason: chunkChoice.finish_reason || "stop",
+             index: chunkChoice.index,
+             logprobs: chunkChoice.logprobs || null,
+         },
+         done: !!chunkChoice.finish_reason,
+     };
+ }
+ function updateCompletionChoice(completionChoice, chunkChoice) {
+     // export interface ChatCompletionChunk.Choice {
+     //   delta: Choice.Delta;
+     //   finish_reason:
+     //     'stop'|'length'|'tool_calls'|'content_filter'|'function_call'|null;
+     //   index: number;
+     //   logprobs?: Choice.Logprobs | null;
+     // }
+     //
+     // ->
+     //
+     // export interface ChatCompletion.Choice {
+     //   message: CompletionsCompletionsAPI.ChatCompletionMessage;
+     //   finish_reason:
+     //     'stop'|'length'|'tool_calls'|'content_filter'|'function_call';
+     //   index: number;
+     //   logprobs: Choice.Logprobs | null;
+     // }
+     // TODO: logprobs
+     (0, assert_1.strict)(completionChoice.index === chunkChoice.index);
+     updateCompletionMessage(completionChoice.message, chunkChoice.delta);
+     if (chunkChoice.finish_reason) {
+         completionChoice.finish_reason = chunkChoice.finish_reason;
+         return true;
+     }
+     return false;
+ }
+ function initializeCompletionChoices(chunkChoices) {
+     // Technically, one choice could be done and the other still have some
+     // content to stream. We keep it simple for now and assume only single
+     // choices, which allows us to mark everything as done if any choice we hit is
+     // done.
+     (0, assert_1.strict)(chunkChoices.length === 1);
+     let msgDone = false;
+     const choices = [];
+     for (const chunkChoice of chunkChoices) {
+         const { choice, done } = initializeCompletionChoice(chunkChoice);
+         if (done) {
+             msgDone = true;
+         }
+         choices[chunkChoice.index] = choice;
+     }
+     return { choices, done: msgDone };
+ }
+ function updateCompletionChoices(completionChoices, chunkChoices) {
+     // Technically, one choice could be done and the other still have some
+     // content to stream. We keep it simple for now and assume only single
+     // choices, which allows us to mark everything as done if any choice we hit is
+     // done.
+     (0, assert_1.strict)(chunkChoices.length === 1);
+     (0, assert_1.strict)(completionChoices.length === 1);
+     let msgDone = false;
+     for (const chunkChoice of chunkChoices) {
+         const choiceIdx = chunkChoice.index;
+         const done = updateCompletionChoice(completionChoices[choiceIdx], chunkChoice);
+         if (done) {
+             msgDone = true;
+         }
+     }
+     return msgDone;
+ }
+ function initializeCompletion(chunk) {
+     // export interface ChatCompletionChunk {
+     //   id: string;
+     //   choices: Array<ChatCompletionChunk.Choice>;
+     //   created: number;
+     //   model: string;
+     //   object: 'chat.completion.chunk';
+     //   service_tier?: 'auto' | 'default' | 'flex' | null;
+     //   system_fingerprint?: string;
+     //   usage?: CompletionsAPI.CompletionUsage | null;
+     // }
+     //
+     // ->
+     //
+     // export interface ChatCompletion {
+     //   id: string;
+     //   choices: Array<ChatCompletion.Choice>;
+     //   created: number;
+     //   model: string;
+     //   object: 'chat.completion';
+     //   service_tier?: 'auto'|'default'|'flex'|null;
+     //   system_fingerprint?: string;
+     //   usage?: CompletionsAPI.CompletionUsage;
+     // }
+     const { choices, done } = initializeCompletionChoices(chunk.choices);
+     return {
+         initMessage: {
+             id: chunk.id,
+             choices,
+             created: chunk.created,
+             model: chunk.model,
+             object: "chat.completion",
+             service_tier: chunk.service_tier,
+             system_fingerprint: chunk.system_fingerprint,
+             usage: chunk.usage ?? undefined,
+         },
+         done,
+     };
+ }
+ function updateCompletion(completion, chunk) {
+     // export interface ChatCompletionChunk {
+     //   id: string;
+     //   choices: Array<ChatCompletionChunk.Choice>;
+     //   created: number;
+     //   model: string;
+     //   object: 'chat.completion.chunk';
+     //   service_tier?: 'auto' | 'default' | 'flex' | null;
+     //   system_fingerprint?: string;
+     //   usage?: CompletionsAPI.CompletionUsage | null;
+     // }
+     //
+     // ->
+     //
+     // export interface ChatCompletion {
+     //   id: string;
+     //   choices: Array<ChatCompletion.Choice>;
+     //   created: number;
+     //   model: string;
+     //   object: 'chat.completion';
+     //   service_tier?: 'auto'|'default'|'flex'|null;
+     //   system_fingerprint?: string;
+     //   usage?: CompletionsAPI.CompletionUsage;
+     // }
+     (0, assert_1.strict)(completion.id === chunk.id);
+     (0, assert_1.strict)(completion.model === chunk.model);
+     completion.service_tier = completion.service_tier || chunk.service_tier;
+     completion.system_fingerprint =
+         completion.system_fingerprint || chunk.system_fingerprint;
+     completion.usage = completion.usage || chunk.usage || undefined;
+     return updateCompletionChoices(completion.choices, chunk.choices);
+ }
+ class OpenAILLMStreaming {
+     constructor(apiKey, apiUrl, model) {
+         this.openai = new openai_1.OpenAI({
+             apiKey,
+             baseURL: apiUrl,
+             dangerouslyAllowBrowser: true,
+         });
+         this.model = model || "gpt-4o-mini";
+     }
+     setModel(model) {
+         this.model = model;
+     }
+     getModel() {
+         return this.model;
+     }
+     getUrl() {
+         return this.openai.baseURL;
+     }
+     async getConversationResponse(messages, tools, onMessage) {
+         const chunks = await this.openai.chat.completions.create({
+             model: this.model,
+             messages,
+             tools,
+             stream: true,
+         });
+         // Check the type casting above
+         if (!chunks.iterator) {
+             throw "not a stream";
+         }
+         let aggregatedMessage;
+         let done = false;
+         for await (const chunk of chunks) {
+             logger.debug(`[stream] chunk: ${JSON.stringify(chunk)}`);
+             (0, assert_1.strict)(!done);
+             if (chunk.object !== "chat.completion.chunk") {
+                 // logger.warn("[stream]: unexpected message");
+                 continue;
+             }
+             done = (() => {
+                 if (!aggregatedMessage) {
+                     logger.debug(`[stream] first`);
+                     const { initMessage, done } = initializeCompletion(chunk);
+                     aggregatedMessage = initMessage;
+                     return done;
+                 }
+                 else {
+                     return updateCompletion(aggregatedMessage, chunk);
+                 }
+             })();
+             if (onMessage) {
+                 // Inform the caller of a message fragment. Note that even if there is
+                 // no content, we must call `onMessage` once `done` is true.
+                 const delta = chunk.choices[0].delta;
+                 if (delta.content) {
+                     await onMessage(delta.content, done);
+                 }
+                 else if (done) {
+                     await onMessage("", true);
+                 }
+             }
+         }
+         logger.debug(`[stream] final message: ${JSON.stringify(aggregatedMessage)}`);
+         (0, assert_1.strict)(done);
+         (0, assert_1.strict)(aggregatedMessage);
+         return aggregatedMessage;
+     }
+ }
+ exports.OpenAILLMStreaming = OpenAILLMStreaming;
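
Usage sketch for the class added above. This example is not part of the package: the import path from the package root, the env variable names, and the absence of tools are assumptions; the constructor arguments and the onMessage(text, done) callback shape come from the code itself.

    // Hypothetical usage sketch; not from the package's own docs or tests.
    import { OpenAILLMStreaming } from "@xalia/agent"; // assumed export path

    async function main() {
        const llm = new OpenAILLMStreaming(
            process.env.LLM_API_KEY!, // apiKey
            process.env.LLM_URL!,     // OpenAI-compatible base URL (see the llmUrl option below)
            "gpt-4o-mini"             // same default model as the constructor
        );
        const messages = [{ role: "user" as const, content: "Say hello" }];
        // Fragments arrive via the callback; the aggregated completion is returned.
        const completion = await llm.getConversationResponse(
            messages,
            undefined, // no tools in this sketch
            async (text, done) => {
                process.stdout.write(text);
                if (done) process.stdout.write("\n");
            }
        );
        // Tool calls, if any, are aggregated under choices[0].message.tool_calls.
        console.log(completion.choices[0].finish_reason);
    }

    main();
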
package/dist/options.js ADDED
@@ -0,0 +1,79 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.llmUrl = exports.llmApiKey = exports.approveToolsUpTo = exports.approveTools = exports.oneShot = exports.llmModel = exports.systemPromptFile = exports.imageFile = exports.promptFile = void 0;
+ exports.secretOption = secretOption;
+ const cmd_ts_1 = require("cmd-ts");
+ const agentUtils_1 = require("./agentUtils");
+ /// Prevents env content from being displayed in the help text.
+ function secretOption({ long, short, env, description, }) {
+     if (env) {
+         return (0, cmd_ts_1.option)({
+             type: (0, cmd_ts_1.optional)(cmd_ts_1.string),
+             long,
+             short,
+             description: `${description} [env: ${env}]`,
+             defaultValue: () => process.env[env],
+             defaultValueIsSerializable: false, // hides the value from --help
+         });
+     }
+     return (0, cmd_ts_1.option)({
+         type: (0, cmd_ts_1.optional)(cmd_ts_1.string),
+         long,
+         short,
+         description,
+     });
+ }
+ exports.promptFile = (0, cmd_ts_1.option)({
+     type: (0, cmd_ts_1.optional)(cmd_ts_1.string),
+     long: "prompt",
+     short: "p",
+     description: "File containing user's first prompt to LLM",
+ });
+ exports.imageFile = (0, cmd_ts_1.option)({
+     type: (0, cmd_ts_1.optional)(cmd_ts_1.string),
+     long: "image",
+     short: "i",
+     description: "File containing image input",
+ });
+ exports.systemPromptFile = (0, cmd_ts_1.option)({
+     type: (0, cmd_ts_1.optional)(cmd_ts_1.string),
+     long: "sysprompt",
+     short: "s",
+     description: "File containing system prompt",
+ });
+ exports.llmModel = (0, cmd_ts_1.option)({
+     type: (0, cmd_ts_1.optional)(cmd_ts_1.string),
+     long: "model",
+     short: "m",
+     description: "LLM model",
+     env: "LLM_MODEL",
+ });
+ exports.oneShot = (0, cmd_ts_1.flag)({
+     type: cmd_ts_1.boolean,
+     long: "one-shot",
+     short: "1",
+     description: "Exit after first reply (implies --approve-tools)",
+ });
+ exports.approveTools = (0, cmd_ts_1.flag)({
+     type: cmd_ts_1.boolean,
+     long: "approve-tools",
+     short: "y",
+     description: "Automatically approve all tool calls",
+ });
+ exports.approveToolsUpTo = (0, cmd_ts_1.option)({
+     type: (0, cmd_ts_1.optional)(cmd_ts_1.number),
+     long: "approve-tools-up-to",
+     description: "Automatically approve all tool calls up to some number",
+ });
+ exports.llmApiKey = secretOption({
+     long: "llm-api-key",
+     description: "API Key for LLM provider",
+     env: "LLM_API_KEY",
+ });
+ exports.llmUrl = (0, cmd_ts_1.option)({
+     type: cmd_ts_1.string,
+     long: "llm-url",
+     description: "LLM provider URL (OpenAI compatible)",
+     env: "LLM_URL",
+     defaultValue: () => agentUtils_1.DEFAULT_LLM_URL,
+ });
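
Wiring sketch for the cmd-ts options defined above. The package's real entry point is dist/main.js, which is not shown in this excerpt, so the command name and the selection of options here are illustrative; secretOption's behaviour (an env-backed default that is kept out of --help via defaultValueIsSerializable: false) is taken from the code.

    // Hypothetical wiring sketch; not the package's actual main.js.
    import { command, run } from "cmd-ts";
    import { llmApiKey, llmUrl, llmModel, promptFile, oneShot } from "./options";

    const app = command({
        name: "agent",
        args: { llmApiKey, llmUrl, llmModel, promptFile, oneShot },
        handler: async (args) => {
            // llmApiKey comes from secretOption: its default is read from LLM_API_KEY,
            // but the value itself never appears in the generated --help text.
            console.log(`model=${args.llmModel ?? "(default)"} url=${args.llmUrl}`);
        },
    });

    run(app, process.argv.slice(2));
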
package/dist/prompt.js ADDED
@@ -0,0 +1,83 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.Prompt = void 0;
+ exports.parsePrompt = parsePrompt;
+ const readline_1 = __importDefault(require("readline"));
+ const DEFAULT_PROMPT = "USER: ";
+ class Prompt {
+     constructor() {
+         this.prompt = readline_1.default.createInterface({
+             input: process.stdin,
+             output: process.stdout,
+             prompt: DEFAULT_PROMPT,
+         });
+         this.prompt.on("line", (line) => {
+             this.line = line;
+             this.resolve();
+         });
+         this.prompt.on("close", () => {
+             this.line = undefined;
+             this.resolve();
+         });
+     }
+     async run(prompt) {
+         // Clear any line
+         this.line = "";
+         return new Promise((r) => {
+             this.online = r;
+             if (prompt) {
+                 this.prompt.setPrompt(prompt);
+             }
+             this.prompt.prompt();
+             if (prompt) {
+                 this.prompt.setPrompt(DEFAULT_PROMPT);
+             }
+         });
+     }
+     shutdown() {
+         this.prompt.close();
+     }
+     resolve() {
+         if (this.online) {
+             this.online(this.line);
+         }
+         this.online = undefined;
+     }
+ }
+ exports.Prompt = Prompt;
+ /**
+  * Supported prompts:
+  * - some text (msg: some text, cmds: undefined)
+  * - :i image.png some text (msg: some text, cmds: ["i", "image.png"])
+  * - :i image.png (msg: undefined, cmds: ["i", "image.png"])
+  * - :l (msg: undefined, cmds: ["l"])
+  * - :e toolName .. (msg: undefined, cmds: ["e", "toolName", ...])
+  * - :ea toolName .. (msg: undefined, cmds: ["ea", "toolName", ...])
+  */
+ function parsePrompt(prompt) {
+     prompt = prompt.trim();
+     let msg = undefined;
+     let cmds = undefined;
+     if (prompt.startsWith(":") || prompt.startsWith("/")) {
+         cmds = prompt.split(" ");
+         cmds[0] = cmds[0].slice(1);
+         if (cmds[0] == "i") {
+             // :i is special as it may have a trailing message
+             const fileDelim = prompt.indexOf(" ", 3);
+             if (fileDelim < 0) {
+                 cmds = [cmds[0], prompt.slice(3)];
+             }
+             else {
+                 msg = prompt.slice(fileDelim + 1);
+                 cmds = [cmds[0], prompt.slice(3, fileDelim)];
+             }
+         }
+     }
+     else {
+         msg = prompt;
+     }
83
+ }