@xalia/agent 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/.prettierrc.json +11 -0
  2. package/README.md +56 -0
  3. package/dist/agent.js +238 -0
  4. package/dist/agentUtils.js +106 -0
  5. package/dist/chat.js +296 -0
  6. package/dist/dummyLLM.js +38 -0
  7. package/dist/files.js +115 -0
  8. package/dist/iplatform.js +2 -0
  9. package/dist/llm.js +2 -0
  10. package/dist/main.js +147 -0
  11. package/dist/mcpServerManager.js +278 -0
  12. package/dist/nodePlatform.js +61 -0
  13. package/dist/openAILLM.js +38 -0
  14. package/dist/openAILLMStreaming.js +431 -0
  15. package/dist/options.js +79 -0
  16. package/dist/prompt.js +83 -0
  17. package/dist/sudoMcpServerManager.js +183 -0
  18. package/dist/test/imageLoad.test.js +14 -0
  19. package/dist/test/mcpServerManager.test.js +71 -0
  20. package/dist/test/prompt.test.js +26 -0
  21. package/dist/test/sudoMcpServerManager.test.js +49 -0
  22. package/dist/tokenAuth.js +39 -0
  23. package/dist/tools.js +44 -0
  24. package/eslint.config.mjs +25 -0
  25. package/frog.png +0 -0
  26. package/package.json +42 -0
  27. package/scripts/git_message +31 -0
  28. package/scripts/git_wip +21 -0
  29. package/scripts/pr_message +18 -0
  30. package/scripts/pr_review +16 -0
  31. package/scripts/sudomcp_import +23 -0
  32. package/scripts/test_script +60 -0
  33. package/src/agent.ts +283 -0
  34. package/src/agentUtils.ts +198 -0
  35. package/src/chat.ts +346 -0
  36. package/src/dummyLLM.ts +50 -0
  37. package/src/files.ts +95 -0
  38. package/src/iplatform.ts +17 -0
  39. package/src/llm.ts +15 -0
  40. package/src/main.ts +187 -0
  41. package/src/mcpServerManager.ts +371 -0
  42. package/src/nodePlatform.ts +24 -0
  43. package/src/openAILLM.ts +51 -0
  44. package/src/openAILLMStreaming.ts +528 -0
  45. package/src/options.ts +103 -0
  46. package/src/prompt.ts +93 -0
  47. package/src/sudoMcpServerManager.ts +278 -0
  48. package/src/test/imageLoad.test.ts +14 -0
  49. package/src/test/mcpServerManager.test.ts +98 -0
  50. package/src/test/prompt.test.src +0 -0
  51. package/src/test/prompt.test.ts +26 -0
  52. package/src/test/sudoMcpServerManager.test.ts +65 -0
  53. package/src/tokenAuth.ts +50 -0
  54. package/src/tools.ts +57 -0
  55. package/test_data/background_test_profile.json +6 -0
  56. package/test_data/background_test_script.json +11 -0
  57. package/test_data/dummyllm_script_simplecalc.json +28 -0
  58. package/test_data/git_message_profile.json +4 -0
  59. package/test_data/git_wip_system.txt +5 -0
  60. package/test_data/pr_message_profile.json +4 -0
  61. package/test_data/pr_review_profile.json +4 -0
  62. package/test_data/prompt_simplecalc.txt +1 -0
  63. package/test_data/simplecalc_profile.json +4 -0
  64. package/test_data/sudomcp_import_profile.json +4 -0
  65. package/test_data/test_script_profile.json +8 -0
  66. package/tsconfig.json +13 -0
package/src/openAILLMStreaming.ts ADDED
@@ -0,0 +1,528 @@
+ import { getLogger } from "@xalia/xmcp/sdk";
+ import { ILLM } from "./llm";
+ import { OpenAI } from "openai";
+ import { strict as assert } from "assert";
+
+ const logger = getLogger();
+
+ function initialToolCallFunction(
+   deltaFn:
+     | OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall.Function
+     | undefined
+ ): OpenAI.Chat.Completions.ChatCompletionMessageToolCall.Function {
+   // export interface ChatCompletionChunk.Choice.Delta.ToolCall.Function {
+   //   arguments?: string;
+   //   name?: string;
+   // }
+   //
+   // ->
+   //
+   // export interface Function {
+   //   arguments: string;
+   //   name: string;
+   // }
+
+   return {
+     arguments: deltaFn?.arguments || "",
+     name: deltaFn?.name || "",
+   };
+ }
+
+ function updateToolCallFunction(
+   existingFn: OpenAI.Chat.Completions.ChatCompletionMessageToolCall.Function,
+   deltaFn: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall.Function // eslint-disable-line
+ ) {
+   // export interface ChatCompletionChunk.Choice.Delta.ToolCall.Function {
+   //   arguments?: string;
+   //   name?: string;
+   // }
+   // ->
+   // export interface Function {
+   //   arguments: string;
+   //   name: string;
+   // }
+
+   // Either field (or possibly both) can be empty. `arguments` has been
+   // observed to arrive in chunks. The same is probably true of `name`.
+
+
+   if (!deltaFn) {
+     return;
+   }
+   if (deltaFn.name) {
+     existingFn.name += deltaFn.name;
+   }
+   if (deltaFn.arguments) {
+     existingFn.arguments += deltaFn.arguments;
+   }
+ }
+
+ function initialToolCall(
+   delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall
+ ): OpenAI.Chat.Completions.ChatCompletionMessageToolCall {
+   return {
+     id: delta.id || "",
+     function: initialToolCallFunction(delta.function),
+     type: "function",
+   };
+ }
+
+ function updateToolCall(
+   existing: OpenAI.Chat.Completions.ChatCompletionMessageToolCall,
+   delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall
+ ) {
+   // export interface ChatCompletionChunk.Choice.Delta.ToolCall {
+   //   index: number;
+   //   id?: string;
+   //   function?: ToolCall.Function;
+   //   type?: 'function';
+   // }
+   //
+   // ->
+   //
+   // export interface ChatCompletionMessageToolCall {
+   //   id: string;
+   //   function: ChatCompletionMessageToolCall.Function;
+   //   type: 'function';
+   // }
+
+   if (delta.id) {
+     if (existing.id.length > 0) {
+       assert(delta.id === existing.id);
+     } else {
+       existing.id = delta.id;
+     }
+   }
+
+   if (delta.function) {
+     updateToolCallFunction(existing.function, delta.function);
+   }
+
+   if (delta.type) {
+     assert(delta.type === "function");
+   }
+ }
+
+ function updateToolCalls(
+   toolCalls:
+     | OpenAI.Chat.Completions.ChatCompletionMessageToolCall[]
+     | undefined,
+   deltaToolCall: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall // eslint-disable-line
+ ): OpenAI.Chat.Completions.ChatCompletionMessageToolCall[] {
+   // export interface ChatCompletionChunk.Choice.Delta.ToolCall {
+   //   index: number;
+   //   id?: string;
+   //   function?: ToolCall.Function;
+   //   type?: 'function';
+   // }
+   //
+   // ->
+   //
+   // export interface ChatCompletionMessageToolCall {
+   //   id: string;
+   //   function: ChatCompletionMessageToolCall.Function;
+   //   type: 'function';
+   // }
+
+   // The delta can arrive with any or none of the given fields. Only `index`
+   // can be relied upon.
+
+   if (typeof toolCalls === "undefined") {
+     toolCalls = [];
+   }
+   const existing = toolCalls[deltaToolCall.index];
+   if (!existing) {
+     toolCalls[deltaToolCall.index] = initialToolCall(deltaToolCall);
+   } else {
+     updateToolCall(toolCalls[deltaToolCall.index], deltaToolCall);
+   }
+   return toolCalls;
+ }
+
+ function initializeCompletionMessage(
+   delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta
+ ): OpenAI.Chat.Completions.ChatCompletionMessage {
+   assert(delta.role === undefined || delta.role === "assistant");
+   assert(!delta.function_call);
+
+   // export interface ChatCompletionChunk.Choice.Delta {
+   //   content?: string | null;
+   //   function_call?: Delta.FunctionCall;
+   //   refusal?: string | null;
+   //   role?: 'developer' | 'system' | 'user' | 'assistant' | 'tool';
+   //   tool_calls?: Array<Delta.ToolCall>;
+   // }
+   //
+   // ->
+   //
+   // export interface ChatCompletionMessage {
+   //   content: string | null;
+   //   refusal: string | null;
+   //   role: 'assistant';
+   //   annotations?: Array<ChatCompletionMessage.Annotation>;
+   //   audio?: ChatCompletionAudio | null;
+   //   function_call?: ChatCompletionMessage.FunctionCall | null;
+   //   tool_calls?: Array<ChatCompletionMessageToolCall>;
+   // }
+
+   let toolCalls:
+     | OpenAI.Chat.Completions.ChatCompletionMessageToolCall[]
+     | undefined = undefined;
+   if (delta.tool_calls) {
+     for (const t of delta.tool_calls) {
+       toolCalls = updateToolCalls(toolCalls, t);
+     }
+   }
+
+   return {
+     content: delta.content || null,
+     refusal: delta.refusal || null,
+     role: "assistant",
+     // annotations?: Array<ChatCompletionMessage.Annotation>;
+     // audio?: ChatCompletionAudio | null;
+     // function_call: delta.function_call,
+     tool_calls: toolCalls,
+   };
+ }
+
+ function updateCompletionMessage(
+   message: OpenAI.Chat.Completions.ChatCompletionMessage,
+   delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta
+ ) {
+   assert(message.role === "assistant");
+   assert(!message.function_call);
+   assert(!message.audio);
+   assert(
+     message.tool_calls instanceof Array ||
+       typeof message.tool_calls === "undefined"
+   );
+
+   // export interface ChatCompletionChunk.Choice.Delta {
+   //   content?: string | null;
+   //   function_call?: Delta.FunctionCall;
+   //   refusal?: string | null;
+   //   role?: 'developer' | 'system' | 'user' | 'assistant' | 'tool';
+   //   tool_calls?: Array<Delta.ToolCall>;
+   // }
+   //
+   // ->
+   //
+   // export interface ChatCompletionMessage {
+   //   content: string | null;
+   //   refusal: string | null;
+   //   role: 'assistant';
+   //   annotations?: Array<ChatCompletionMessage.Annotation>;
+   //   audio?: ChatCompletionAudio | null;
+   //   function_call?: ChatCompletionMessage.FunctionCall | null;
+   //   tool_calls?: Array<ChatCompletionMessageToolCall>;
+   // }
+
+   if (delta.content) {
+     if (message.content) {
+       message.content += delta.content;
+     } else {
+       message.content = delta.content;
+     }
+   }
+   assert(!delta.function_call);
+   if (delta.refusal) {
+     if (message.refusal) {
+       message.refusal += delta.refusal;
+     } else {
+       message.refusal = delta.refusal;
+     }
+   }
+   assert(delta.role === undefined || delta.role === "assistant");
+   if (delta.tool_calls) {
+     for (const t of delta.tool_calls) {
+       message.tool_calls = updateToolCalls(message.tool_calls, t);
+     }
+   }
+ }
+
+ function initializeCompletionChoice(
+   chunkChoice: OpenAI.Chat.Completions.ChatCompletionChunk.Choice
+ ): { choice: OpenAI.Chat.Completions.ChatCompletion.Choice; done: boolean } {
+   // export interface ChatCompletionChunk.Choice {
+   //   delta: Choice.Delta;
+   //   finish_reason:
+   //     'stop'|'length'|'tool_calls'|'content_filter'|'function_call'|null;
+   //   index: number;
+   //   logprobs?: Choice.Logprobs | null;
+   // }
+   //
+   // ->
+   //
+   // export interface ChatCompletion.Choice {
+   //   message: CompletionsCompletionsAPI.ChatCompletionMessage;
+   //   finish_reason:
+   //     'stop'|'length'|'tool_calls'|'content_filter'|'function_call';
+   //   index: number;
+   //   logprobs: Choice.Logprobs | null;
+   // }
+
+   const message = initializeCompletionMessage(chunkChoice.delta);
+   return {
+     choice: {
+       message,
+       finish_reason: chunkChoice.finish_reason || "stop",
+       index: chunkChoice.index,
+       logprobs: chunkChoice.logprobs || null,
+     },
+     done: !!chunkChoice.finish_reason,
+   };
+ }
+
+ function updateCompletionChoice(
+   completionChoice: OpenAI.Chat.Completions.ChatCompletion.Choice,
+   chunkChoice: OpenAI.Chat.Completions.ChatCompletionChunk.Choice
+ ): boolean {
+   // export interface ChatCompletionChunk.Choice {
+   //   delta: Choice.Delta;
+   //   finish_reason:
+   //     'stop'|'length'|'tool_calls'|'content_filter'|'function_call'|null;
+   //   index: number;
+   //   logprobs?: Choice.Logprobs | null;
+   // }
+   //
+   // ->
+   //
+   // export interface ChatCompletion.Choice {
+   //   message: CompletionsCompletionsAPI.ChatCompletionMessage;
+   //   finish_reason:
+   //     'stop'|'length'|'tool_calls'|'content_filter'|'function_call';
+   //   index: number;
+   //   logprobs: Choice.Logprobs | null;
+   // }
+
+   // TODO: logprobs
+
+   assert(completionChoice.index === chunkChoice.index);
+   updateCompletionMessage(completionChoice.message, chunkChoice.delta);
+   if (chunkChoice.finish_reason) {
+     completionChoice.finish_reason = chunkChoice.finish_reason;
+     return true;
+   }
+   return false;
+ }
+
+ function initializeCompletionChoices(
+   chunkChoices: OpenAI.Chat.Completions.ChatCompletionChunk.Choice[]
+ ): { choices: OpenAI.Chat.Completions.ChatCompletion.Choice[]; done: boolean } {
+   // Technically, one choice could be done and another still have some
+   // content to stream. We keep it simple for now and assume only single
+   // choices, which lets us mark everything as done as soon as any choice
+   // is done.
+   assert(chunkChoices.length === 1);
+
+   let msgDone = false;
+   const choices: OpenAI.Chat.Completions.ChatCompletion.Choice[] = [];
+   for (const chunkChoice of chunkChoices) {
+     const { choice, done } = initializeCompletionChoice(chunkChoice);
+     if (done) {
+       msgDone = true;
+     }
+     choices[chunkChoice.index] = choice;
+   }
+
+   return { choices, done: msgDone };
+ }
+
+ function updateCompletionChoices(
+   completionChoices: OpenAI.Chat.Completions.ChatCompletion.Choice[],
+   chunkChoices: OpenAI.Chat.Completions.ChatCompletionChunk.Choice[]
+ ): boolean {
+   // Technically, one choice could be done and another still have some
+   // content to stream. We keep it simple for now and assume only single
+   // choices, which lets us mark everything as done as soon as any choice
+   // is done.
+   assert(chunkChoices.length === 1);
+   assert(completionChoices.length === 1);
+
+   let msgDone = false;
+   for (const chunkChoice of chunkChoices) {
+     const choiceIdx = chunkChoice.index;
+     const done = updateCompletionChoice(
+       completionChoices[choiceIdx],
+       chunkChoice
+     );
+     if (done) {
+       msgDone = true;
+     }
+   }
+
+   return msgDone;
+ }
+
+ function initializeCompletion(
+   chunk: OpenAI.Chat.Completions.ChatCompletionChunk
+ ): { initMessage: OpenAI.Chat.Completions.ChatCompletion; done: boolean } {
+   // export interface ChatCompletionChunk {
+   //   id: string;
+   //   choices: Array<ChatCompletionChunk.Choice>;
+   //   created: number;
+   //   model: string;
+   //   object: 'chat.completion.chunk';
+   //   service_tier?: 'auto' | 'default' | 'flex' | null;
+   //   system_fingerprint?: string;
+   //   usage?: CompletionsAPI.CompletionUsage | null;
+   // }
+   //
+   // ->
+   //
+   // export interface ChatCompletion {
+   //   id: string;
+   //   choices: Array<ChatCompletion.Choice>;
+   //   created: number;
+   //   model: string;
+   //   object: 'chat.completion';
+   //   service_tier?: 'auto'|'default'|'flex'|null;
+   //   system_fingerprint?: string;
+   //   usage?: CompletionsAPI.CompletionUsage;
+   // }
+
+   const { choices, done } = initializeCompletionChoices(chunk.choices);
+   return {
+     initMessage: {
+       id: chunk.id,
+       choices,
+       created: chunk.created,
+       model: chunk.model,
+       object: "chat.completion",
+       service_tier: chunk.service_tier,
+       system_fingerprint: chunk.system_fingerprint,
+       usage: chunk.usage ?? undefined,
+     },
+     done,
+   };
+ }
+
+ function updateCompletion(
+   completion: OpenAI.Chat.Completions.ChatCompletion,
+   chunk: OpenAI.Chat.Completions.ChatCompletionChunk
+ ): boolean {
+   // export interface ChatCompletionChunk {
+   //   id: string;
+   //   choices: Array<ChatCompletionChunk.Choice>;
+   //   created: number;
+   //   model: string;
+   //   object: 'chat.completion.chunk';
+   //   service_tier?: 'auto' | 'default' | 'flex' | null;
+   //   system_fingerprint?: string;
+   //   usage?: CompletionsAPI.CompletionUsage | null;
+   // }
+   //
+   // ->
+   //
+   // export interface ChatCompletion {
+   //   id: string;
+   //   choices: Array<ChatCompletion.Choice>;
+   //   created: number;
+   //   model: string;
+   //   object: 'chat.completion';
+   //   service_tier?: 'auto'|'default'|'flex'|null;
+   //   system_fingerprint?: string;
+   //   usage?: CompletionsAPI.CompletionUsage;
+   // }
+
+   assert(completion.id === chunk.id);
+   assert(completion.model === chunk.model);
+   completion.service_tier = completion.service_tier || chunk.service_tier;
+   completion.system_fingerprint =
+     completion.system_fingerprint || chunk.system_fingerprint;
+   completion.usage = completion.usage || chunk.usage || undefined;
+
+   return updateCompletionChoices(completion.choices, chunk.choices);
+ }
+
+ export class OpenAILLMStreaming implements ILLM {
+   private readonly openai: OpenAI;
+   private model: string;
+
+   constructor(
+     apiKey: string,
+     apiUrl: string | undefined,
+     model: string | undefined
+   ) {
+     this.openai = new OpenAI({
+       apiKey,
+       baseURL: apiUrl,
+       dangerouslyAllowBrowser: true,
+     });
+     this.model = model || "gpt-4o-mini";
+   }
+
+   public setModel(model: string) {
+     this.model = model;
+   }
+
+   getModel(): string {
+     return this.model;
+   }
+
+   getUrl(): string {
+     return this.openai.baseURL;
+   }
+
+   public async getConversationResponse(
+     messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
+     tools?: OpenAI.Chat.Completions.ChatCompletionTool[],
+     onMessage?: (msg: string, end: boolean) => Promise<void>
+   ): Promise<OpenAI.Chat.Completions.ChatCompletion> {
+     const chunks = await this.openai.chat.completions.create({
+       model: this.model,
+       messages,
+       tools,
+       stream: true,
+     });
+
+     // Sanity-check that `create` really returned a stream.
+     if (!(chunks as unknown as { iterator: unknown }).iterator) {
+       throw new Error("not a stream");
+     }
+
+     let aggregatedMessage: OpenAI.Chat.Completions.ChatCompletion | undefined;
+     let done = false;
+
+     for await (const chunk of chunks) {
+       logger.debug(`[stream] chunk: ${JSON.stringify(chunk)}`);
+       assert(!done);
+
+       if (chunk.object !== "chat.completion.chunk") {
+         // logger.warn("[stream]: unexpected message");
+         continue;
+       }
+
+       done = (() => {
+         if (!aggregatedMessage) {
+           logger.debug(`[stream] first`);
+           const { initMessage, done } = initializeCompletion(chunk);
+           aggregatedMessage = initMessage;
+           return done;
+         } else {
+           return updateCompletion(aggregatedMessage, chunk);
+         }
+       })();
+
+       if (onMessage) {
+         // Inform the caller of a message fragment. Note that even if there
+         // is no content, we must call `onMessage` once `done` is true.
+
+         const delta = chunk.choices[0].delta;
+         if (delta.content) {
+           await onMessage(delta.content, done);
+         } else if (done) {
+           await onMessage("", true);
+         }
+       }
+     }
+
+     logger.debug(
+       `[stream] final message: ${JSON.stringify(aggregatedMessage)}`
+     );
+
+     assert(done);
+     assert(aggregatedMessage);
+     return aggregatedMessage;
+   }
+ }
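
The file above reduces each streamed `ChatCompletionChunk` into a plain `ChatCompletion`: the first chunk seeds the aggregate, later chunks append `content`, `refusal`, and tool-call fragments (keyed by `index`), and a non-null `finish_reason` marks the message done. A minimal consumption sketch follows; the import path, environment variable, and output handling are illustrative assumptions, not prescribed by the package:

```ts
// Hypothetical usage sketch, not part of the package.
import { OpenAILLMStreaming } from "@xalia/agent/dist/openAILLMStreaming";

async function main() {
  // apiUrl/model are undefined, so the class falls back to the default
  // OpenAI base URL and "gpt-4o-mini".
  const llm = new OpenAILLMStreaming(
    process.env.LLM_API_KEY ?? "",
    undefined,
    undefined
  );

  const completion = await llm.getConversationResponse(
    [{ role: "user", content: "Say hello" }],
    undefined, // no tools
    async (fragment, end) => {
      // Called for each content fragment; called once more with "" when done.
      process.stdout.write(fragment + (end ? "\n" : ""));
    }
  );

  // The aggregated result has the non-streaming ChatCompletion shape.
  console.log(completion.choices[0].message.content);
}

main().catch(console.error);
```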
package/src/options.ts ADDED
@@ -0,0 +1,103 @@
+ import { boolean, option, optional, string, flag, number } from "cmd-ts";
+ import { ArgParser } from "cmd-ts/dist/cjs/argparser";
+ import { Descriptive, ProvidesHelp } from "cmd-ts/dist/cjs/helpdoc";
+ import { DEFAULT_LLM_URL } from "./agentUtils";
+
+ export type Option = ReturnType<typeof option>;
+ export type OptionalOption = ArgParser<string | undefined> &
+   ProvidesHelp &
+   Partial<Descriptive>;
+
+ /// Prevents env content from being displayed in the help text.
+ export function secretOption({
+   long,
+   short,
+   env,
+   description,
+ }: {
+   long: string;
+   short?: string;
+   env?: string;
+   description: string;
+ }): OptionalOption {
+   if (env) {
+     return option({
+       type: optional(string),
+       long,
+       short,
+       description: `${description} [env: ${env}]`,
+       defaultValue: () => process.env[env],
+       defaultValueIsSerializable: false, // hides the value from --help
+     });
+   }
+
+   return option({
+     type: optional(string),
+     long,
+     short,
+     description,
+   });
+ }
+
+ export const promptFile = option({
+   type: optional(string),
+   long: "prompt",
+   short: "p",
+   description: "File containing user's first prompt to LLM",
+ });
+
+ export const imageFile = option({
+   type: optional(string),
+   long: "image",
+   short: "i",
+   description: "File containing image input",
+ });
+
+ export const systemPromptFile = option({
+   type: optional(string),
+   long: "sysprompt",
+   short: "s",
+   description: "File containing system prompt",
+ });
+
+ export const llmModel = option({
+   type: optional(string),
+   long: "model",
+   short: "m",
+   description: "LLM model",
+   env: "LLM_MODEL",
+ });
+
+ export const oneShot = flag({
+   type: boolean,
+   long: "one-shot",
+   short: "1",
+   description: "Exit after first reply (implies --approve-tools)",
+ });
+
+ export const approveTools = flag({
+   type: boolean,
+   long: "approve-tools",
+   short: "y",
+   description: "Automatically approve all tool calls",
+ });
+
+ export const approveToolsUpTo = option({
+   type: optional(number),
+   long: "approve-tools-up-to",
+   description: "Automatically approve all tool calls up to some number",
+ });
+
+ export const llmApiKey = secretOption({
+   long: "llm-api-key",
+   description: "API Key for LLM provider",
+   env: "LLM_API_KEY",
+ });
+
+ export const llmUrl = option({
+   type: string,
+   long: "llm-url",
+   description: "LLM provider URL (OpenAI compatible)",
+   env: "LLM_URL",
+   defaultValue: () => DEFAULT_LLM_URL,
+ });