@agentuity/cli 2.0.14 → 2.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85):
  1. package/dist/agent-detection.d.ts.map +1 -1
  2. package/dist/agent-detection.js +3 -6
  3. package/dist/agent-detection.js.map +1 -1
  4. package/dist/ai-help.js +10 -10
  5. package/dist/ai-help.js.map +1 -1
  6. package/dist/cmd/ai/capabilities/show.d.ts.map +1 -1
  7. package/dist/cmd/ai/capabilities/show.js +6 -0
  8. package/dist/cmd/ai/capabilities/show.js.map +1 -1
  9. package/dist/cmd/ai/intro.d.ts.map +1 -1
  10. package/dist/cmd/ai/intro.js +1 -0
  11. package/dist/cmd/ai/intro.js.map +1 -1
  12. package/dist/cmd/cloud/aigateway/complete.d.ts +7 -0
  13. package/dist/cmd/cloud/aigateway/complete.d.ts.map +1 -0
  14. package/dist/cmd/cloud/aigateway/complete.js +386 -0
  15. package/dist/cmd/cloud/aigateway/complete.js.map +1 -0
  16. package/dist/cmd/cloud/aigateway/index.d.ts +3 -0
  17. package/dist/cmd/cloud/aigateway/index.d.ts.map +1 -0
  18. package/dist/cmd/cloud/aigateway/index.js +20 -0
  19. package/dist/cmd/cloud/aigateway/index.js.map +1 -0
  20. package/dist/cmd/cloud/aigateway/model-cache.d.ts +4 -0
  21. package/dist/cmd/cloud/aigateway/model-cache.d.ts.map +1 -0
  22. package/dist/cmd/cloud/aigateway/model-cache.js +72 -0
  23. package/dist/cmd/cloud/aigateway/model-cache.js.map +1 -0
  24. package/dist/cmd/cloud/aigateway/models.d.ts +2 -0
  25. package/dist/cmd/cloud/aigateway/models.d.ts.map +1 -0
  26. package/dist/cmd/cloud/aigateway/models.js +193 -0
  27. package/dist/cmd/cloud/aigateway/models.js.map +1 -0
  28. package/dist/cmd/cloud/aigateway/util.d.ts +20 -0
  29. package/dist/cmd/cloud/aigateway/util.d.ts.map +1 -0
  30. package/dist/cmd/cloud/aigateway/util.js +58 -0
  31. package/dist/cmd/cloud/aigateway/util.js.map +1 -0
  32. package/dist/cmd/cloud/index.d.ts.map +1 -1
  33. package/dist/cmd/cloud/index.js +2 -0
  34. package/dist/cmd/cloud/index.js.map +1 -1
  35. package/dist/cmd/coder/skill/create.d.ts +2 -0
  36. package/dist/cmd/coder/skill/create.d.ts.map +1 -0
  37. package/dist/cmd/coder/skill/create.js +104 -0
  38. package/dist/cmd/coder/skill/create.js.map +1 -0
  39. package/dist/cmd/coder/skill/index.d.ts.map +1 -1
  40. package/dist/cmd/coder/skill/index.js +12 -1
  41. package/dist/cmd/coder/skill/index.js.map +1 -1
  42. package/dist/cmd/coder/workspace/common.d.ts +22 -2
  43. package/dist/cmd/coder/workspace/common.d.ts.map +1 -1
  44. package/dist/cmd/coder/workspace/common.js +38 -2
  45. package/dist/cmd/coder/workspace/common.js.map +1 -1
  46. package/dist/cmd/coder/workspace/create.d.ts.map +1 -1
  47. package/dist/cmd/coder/workspace/create.js +34 -2
  48. package/dist/cmd/coder/workspace/create.js.map +1 -1
  49. package/dist/cmd/coder/workspace/update.d.ts.map +1 -1
  50. package/dist/cmd/coder/workspace/update.js +33 -1
  51. package/dist/cmd/coder/workspace/update.js.map +1 -1
  52. package/dist/cmd/dev/download.d.ts +8 -0
  53. package/dist/cmd/dev/download.d.ts.map +1 -1
  54. package/dist/cmd/dev/download.js +27 -1
  55. package/dist/cmd/dev/download.js.map +1 -1
  56. package/dist/cmd/dev/index.d.ts.map +1 -1
  57. package/dist/cmd/dev/index.js +18 -7
  58. package/dist/cmd/dev/index.js.map +1 -1
  59. package/dist/config.d.ts.map +1 -1
  60. package/dist/config.js +3 -0
  61. package/dist/config.js.map +1 -1
  62. package/dist/types.d.ts +3 -2
  63. package/dist/types.d.ts.map +1 -1
  64. package/dist/types.js +1 -0
  65. package/dist/types.js.map +1 -1
  66. package/package.json +7 -7
  67. package/src/agent-detection.ts +3 -6
  68. package/src/ai-help.ts +10 -10
  69. package/src/cmd/ai/capabilities/show.ts +6 -0
  70. package/src/cmd/ai/intro.ts +1 -0
  71. package/src/cmd/cloud/aigateway/complete.ts +461 -0
  72. package/src/cmd/cloud/aigateway/index.ts +21 -0
  73. package/src/cmd/cloud/aigateway/model-cache.ts +89 -0
  74. package/src/cmd/cloud/aigateway/models.ts +219 -0
  75. package/src/cmd/cloud/aigateway/util.ts +86 -0
  76. package/src/cmd/cloud/index.ts +2 -0
  77. package/src/cmd/coder/skill/create.ts +122 -0
  78. package/src/cmd/coder/skill/index.ts +14 -1
  79. package/src/cmd/coder/workspace/common.ts +46 -2
  80. package/src/cmd/coder/workspace/create.ts +34 -1
  81. package/src/cmd/coder/workspace/update.ts +33 -0
  82. package/src/cmd/dev/download.ts +32 -1
  83. package/src/cmd/dev/index.ts +24 -8
  84. package/src/config.ts +3 -0
  85. package/src/types.ts +1 -0
@@ -0,0 +1,461 @@
1
+ import { z } from 'zod';
2
+ import { StructuredError, type AIGatewayModels, type AIGatewayService } from '@agentuity/core';
3
+ import { createCommand } from '../../../types';
4
+ import { getCommand } from '../../../command-prefix';
5
+ import { getExecutingAgent } from '../../../agent-detection';
6
+ import { createAIGatewayService, getAIGatewayUrl, getCompletionText } from './util';
7
+ import { getCachedAIGatewayModels, setCachedAIGatewayModels } from './model-cache';
8
+
9
+ const CompletionResponseSchema = z.object({
10
+ text: z.string(),
11
+ response: z.unknown(),
12
+ cost: z.unknown().optional(),
13
+ });
14
+
15
+ const defaultModel = 'openai/gpt-4o-mini';
16
+ const PromptRequiredError = StructuredError(
17
+ 'AIGatewayPromptRequired',
18
+ 'Prompt is required. Pass it as an argument, use --prompt, use --file, or pipe it through stdin.'
19
+ );
20
+ const PromptFileNotFoundError = StructuredError('AIGatewayPromptFileNotFound')<{
21
+ filename: string;
22
+ }>();
23
+
24
+ function isAgentOutputMode(): boolean {
25
+ return Boolean(getExecutingAgent()) && process.env.AGENTUITY_AIGATEWAY_AGENT_OUTPUT !== 'false';
26
+ }
27
+
28
+ async function readPromptFromStdin(): Promise<string | undefined> {
29
+ if (process.stdin.isTTY) {
30
+ return undefined;
31
+ }
32
+ const text = await Bun.stdin.text();
33
+ const trimmed = text.trim();
34
+ return trimmed.length > 0 ? trimmed : undefined;
35
+ }
36
+
37
+ async function readPromptFromFile(filename?: string): Promise<string | undefined> {
38
+ if (!filename) {
39
+ return undefined;
40
+ }
41
+ const file = Bun.file(filename);
42
+ if (!(await file.exists())) {
43
+ throw new PromptFileNotFoundError({
44
+ message: `Prompt file not found: ${filename}`,
45
+ filename,
46
+ });
47
+ }
48
+ const text = await file.text();
49
+ const trimmed = text.trim();
50
+ return trimmed.length > 0 ? trimmed : undefined;
51
+ }
52
+
53
+ export function combinePromptInput(opts: {
54
+ explicitPrompt?: string;
55
+ stdinPrompt?: string;
56
+ stdinMode?: 'append' | 'replace';
57
+ }): string | undefined {
58
+ if (!opts.stdinPrompt) {
59
+ return opts.explicitPrompt;
60
+ }
61
+ if (!opts.explicitPrompt || opts.stdinMode === 'replace') {
62
+ return opts.stdinPrompt;
63
+ }
64
+ if (!opts.stdinMode || opts.stdinMode === 'append') {
65
+ return `${opts.explicitPrompt}\n\n${opts.stdinPrompt}`;
66
+ }
67
+ return opts.explicitPrompt;
68
+ }
69
+
70
+ function getUsageText(response: unknown): string | undefined {
71
+ if (!response || typeof response !== 'object') {
72
+ return undefined;
73
+ }
74
+ const usage = (response as { usage?: unknown }).usage;
75
+ if (!usage || typeof usage !== 'object') {
76
+ return undefined;
77
+ }
78
+ const input =
79
+ (usage as { prompt_tokens?: unknown; input_tokens?: unknown }).prompt_tokens ??
80
+ (usage as { input_tokens?: unknown }).input_tokens;
81
+ const output =
82
+ (usage as { completion_tokens?: unknown; output_tokens?: unknown }).completion_tokens ??
83
+ (usage as { output_tokens?: unknown }).output_tokens;
84
+ const total = (usage as { total_tokens?: unknown }).total_tokens;
85
+ const parts = [
86
+ typeof input === 'number' ? `input=${input}` : undefined,
87
+ typeof output === 'number' ? `output=${output}` : undefined,
88
+ typeof total === 'number' ? `total=${total}` : undefined,
89
+ ].filter(Boolean);
90
+ return parts.length > 0 ? `Usage: ${parts.join(' ')}` : undefined;
91
+ }
92
+
93
+ function getCostInfo(response: unknown): unknown | undefined {
94
+ if (!response || typeof response !== 'object') {
95
+ return undefined;
96
+ }
97
+ const agentuity = (response as { agentuity?: unknown }).agentuity;
98
+ if (!agentuity || typeof agentuity !== 'object') {
99
+ return undefined;
100
+ }
101
+ return (agentuity as { cost?: unknown }).cost;
102
+ }
103
+
104
+ function getCostText(response: unknown): string | undefined {
105
+ const cost = getCostInfo(response);
106
+ if (!cost || typeof cost !== 'object') {
107
+ return undefined;
108
+ }
109
+ const total = (cost as { total?: unknown }).total;
110
+ const promptTokens = (cost as { promptTokens?: unknown }).promptTokens;
111
+ const completionTokens = (cost as { completionTokens?: unknown }).completionTokens;
112
+ const parts = [
113
+ typeof total === 'number' ? `total=$${total.toFixed(6)}` : undefined,
114
+ typeof promptTokens === 'number' ? `prompt=${promptTokens}` : undefined,
115
+ typeof completionTokens === 'number' ? `completion=${completionTokens}` : undefined,
116
+ ].filter(Boolean);
117
+ return parts.length > 0 ? `Cost: ${parts.join(' ')}` : undefined;
118
+ }
119
+
120
+ type CompletionModelInfo = {
121
+ id: string;
122
+ api?: string;
123
+ provider?: string;
124
+ };
125
+
126
+ function matchesModel(provider: string, candidateId: string, model: string): boolean {
127
+ return candidateId === model || `${provider}/${candidateId}` === model;
128
+ }
129
+
130
+ async function getCompletionModelInfo(
131
+ model: string,
132
+ models: AIGatewayModels
133
+ ): Promise<CompletionModelInfo | undefined> {
134
+ for (const [provider, providerModels] of Object.entries(models)) {
135
+ const match = providerModels.find((candidate) => matchesModel(provider, candidate.id, model));
136
+ if (match) {
137
+ return { id: match.id, api: match.api, provider };
138
+ }
139
+ }
140
+ return undefined;
141
+ }
142
+
143
+ async function loadModelsForCompletion(opts: {
144
+ service: AIGatewayService;
145
+ profile: string;
146
+ cacheKey: string;
147
+ refresh?: boolean;
148
+ }): Promise<AIGatewayModels> {
149
+ if (!opts.refresh) {
150
+ const cached = await getCachedAIGatewayModels(opts.profile, opts.cacheKey);
151
+ if (cached) {
152
+ return cached;
153
+ }
154
+ }
155
+ const models = await opts.service.listModels();
156
+ await setCachedAIGatewayModels(opts.profile, opts.cacheKey, models);
157
+ return models;
158
+ }
159
+
160
+ function buildCompletionRequest(opts: {
161
+ model: string;
162
+ prompt: string;
163
+ system?: string;
164
+ temperature?: number;
165
+ maxTokens?: number;
166
+ stream?: boolean;
167
+ }) {
168
+ const common = {
169
+ model: opts.model,
170
+ temperature: opts.temperature,
171
+ max_tokens: opts.maxTokens,
172
+ ...(opts.stream ? { stream: true } : {}),
173
+ };
174
+ return {
175
+ ...common,
176
+ messages: [
177
+ ...(opts.system ? [{ role: 'system' as const, content: opts.system }] : []),
178
+ { role: 'user' as const, content: opts.prompt },
179
+ ],
180
+ };
181
+ }
182
+
183
+ async function resolvePrompt(opts: {
184
+ optionPrompt?: string;
185
+ argPrompt?: string;
186
+ file?: string;
187
+ stdinMode?: 'append' | 'replace';
188
+ }): Promise<string | undefined> {
189
+ const explicitPrompt =
190
+ opts.optionPrompt ?? opts.argPrompt ?? (await readPromptFromFile(opts.file));
191
+ const stdinPrompt = await readPromptFromStdin();
192
+ return combinePromptInput({ explicitPrompt, stdinPrompt, stdinMode: opts.stdinMode });
193
+ }
194
+
195
+ function getStreamDeltaText(payload: unknown): string {
196
+ if (!payload || typeof payload !== 'object') {
197
+ return '';
198
+ }
199
+ const choices = (payload as { choices?: unknown }).choices;
200
+ if (!Array.isArray(choices)) {
201
+ return '';
202
+ }
203
+ return choices
204
+ .map((choice) => {
205
+ if (!choice || typeof choice !== 'object') {
206
+ return '';
207
+ }
208
+ const delta = (choice as { delta?: { content?: unknown } }).delta;
209
+ if (typeof delta?.content === 'string') {
210
+ return delta.content;
211
+ }
212
+ const text = (choice as { text?: unknown }).text;
213
+ return typeof text === 'string' ? text : '';
214
+ })
215
+ .join('');
216
+ }
217
+
218
+ async function consumeCompletionStream(
219
+ stream: ReadableStream<Uint8Array>,
220
+ options: { json?: boolean; raw?: boolean }
221
+ ): Promise<string> {
222
+ const reader = stream.getReader();
223
+ const decoder = new TextDecoder();
224
+ let buffer = '';
225
+ let text = '';
226
+
227
+ const consumeFrame = (frame: string) => {
228
+ const dataLines = frame
229
+ .split(/\r?\n/)
230
+ .filter((line) => line.startsWith('data:'))
231
+ .map((line) => line.slice(5).trimStart());
232
+ for (const data of dataLines) {
233
+ if (!data || data === '[DONE]') {
234
+ continue;
235
+ }
236
+ try {
237
+ const delta = getStreamDeltaText(JSON.parse(data));
238
+ if (delta) {
239
+ text += delta;
240
+ }
241
+ } catch {
242
+ // Ignore malformed stream frames and continue consuming the stream.
243
+ }
244
+ if (options.raw) {
245
+ if (!options.json) {
246
+ console.log(data);
247
+ }
248
+ continue;
249
+ }
250
+ try {
251
+ const delta = getStreamDeltaText(JSON.parse(data));
252
+ if (delta) {
253
+ if (!options.json) {
254
+ process.stdout.write(delta);
255
+ }
256
+ }
257
+ } catch {
258
+ // Ignore malformed stream frames and continue consuming the stream.
259
+ }
260
+ }
261
+ };
262
+
263
+ try {
264
+ while (true) {
265
+ const { done, value } = await reader.read();
266
+ if (done) {
267
+ break;
268
+ }
269
+ buffer += decoder.decode(value, { stream: true });
270
+ const frames = buffer.split(/\r?\n\r?\n/);
271
+ buffer = frames.pop() ?? '';
272
+ for (const frame of frames) {
273
+ consumeFrame(frame);
274
+ }
275
+ }
276
+ buffer += decoder.decode();
277
+ if (buffer.trim()) {
278
+ consumeFrame(buffer);
279
+ }
280
+ } finally {
281
+ reader.releaseLock();
282
+ }
283
+ if (!options.json && !options.raw && text) {
284
+ process.stdout.write('\n');
285
+ }
286
+ return text;
287
+ }
288
+
289
+ export const completeSubcommand = createCommand({
290
+ name: 'complete',
291
+ aliases: ['completion', 'chat'],
292
+ description: 'Run an AI Gateway completion',
293
+ tags: ['write', 'slow', 'requires-auth', 'uses-stdin'],
294
+ requires: { auth: true },
295
+ optional: { project: true, region: true },
296
+ examples: [
297
+ {
298
+ command: getCommand('cloud aigateway complete --model openai/gpt-4.1-mini "Hello"'),
299
+ description: 'Run a completion',
300
+ },
301
+ {
302
+ command: `echo "Hello" | ${getCommand('cloud aigateway complete --model openai/gpt-4.1-mini')}`,
303
+ description: 'Read the prompt from stdin',
304
+ },
305
+ {
306
+ command: getCommand(
307
+ 'cloud aigateway complete --model openai/gpt-4.1-mini --file prompt.txt'
308
+ ),
309
+ description: 'Read the prompt from a file',
310
+ },
311
+ {
312
+ command: getCommand(
313
+ 'cloud aigateway complete --model openai/gpt-4.1-mini --stream "Hello"'
314
+ ),
315
+ description: 'Stream token output as it arrives',
316
+ },
317
+ ],
318
+ schema: {
319
+ args: z.object({
320
+ prompt: z.string().optional().describe('prompt text'),
321
+ }),
322
+ options: z.object({
323
+ model: z.string().min(1).optional().describe('model id'),
324
+ prompt: z.string().optional().describe('prompt text'),
325
+ file: z.string().optional().describe('read prompt text from a file'),
326
+ system: z.string().optional().describe('optional system message'),
327
+ systemFile: z.string().optional().describe('read the system message from a file'),
328
+ refreshModels: z
329
+ .boolean()
330
+ .optional()
331
+ .describe('refresh the cached AI Gateway model catalog before choosing request format'),
332
+ temperature: z.number().optional().describe('sampling temperature'),
333
+ maxTokens: z.number().optional().describe('maximum output tokens'),
334
+ stream: z.boolean().optional().describe('stream token output as it arrives'),
335
+ save: z.string().optional().describe('write assistant text to a file'),
336
+ format: z
337
+ .enum(['text', 'json', 'raw'])
338
+ .optional()
339
+ .describe('output format for non-json mode'),
340
+ stdinMode: z
341
+ .enum(['append', 'replace'])
342
+ .optional()
343
+ .describe('how to combine stdin with prompt text'),
344
+ usage: z.boolean().optional().describe('print usage details when available'),
345
+ cost: z.boolean().optional().describe('print AI Gateway cost details when available'),
346
+ raw: z.boolean().optional().describe('print the raw completion response'),
347
+ }),
348
+ response: CompletionResponseSchema,
349
+ },
350
+ async handler(ctx) {
351
+ const prompt = await resolvePrompt({
352
+ optionPrompt: ctx.opts.prompt,
353
+ argPrompt: ctx.args.prompt,
354
+ file: ctx.opts.file,
355
+ stdinMode: ctx.opts.stdinMode,
356
+ });
357
+ if (!prompt) {
358
+ throw new PromptRequiredError();
359
+ }
360
+
361
+ const service = createAIGatewayService(ctx);
362
+ const model = ctx.opts.model ?? process.env.AGENTUITY_AIGATEWAY_MODEL ?? defaultModel;
363
+ const system = ctx.opts.system ?? (await readPromptFromFile(ctx.opts.systemFile));
364
+ const profile = ctx.config?.name ?? 'default';
365
+ const cacheKey = getAIGatewayUrl(ctx.region, ctx.config?.overrides);
366
+ let models = await loadModelsForCompletion({
367
+ service,
368
+ profile,
369
+ cacheKey,
370
+ refresh: ctx.opts.refreshModels,
371
+ });
372
+ let modelInfo = await getCompletionModelInfo(model, models);
373
+ if (!modelInfo && !ctx.opts.refreshModels) {
374
+ models = await loadModelsForCompletion({ service, profile, cacheKey, refresh: true });
375
+ modelInfo = await getCompletionModelInfo(model, models);
376
+ }
377
+ const requestModel = modelInfo?.id ?? model;
378
+ const request = buildCompletionRequest({
379
+ model: requestModel,
380
+ prompt,
381
+ system,
382
+ temperature: ctx.opts.temperature,
383
+ maxTokens: ctx.opts.maxTokens,
384
+ });
385
+ const format = ctx.opts.raw
386
+ ? 'raw'
387
+ : (ctx.opts.format ?? (isAgentOutputMode() ? 'json' : 'text'));
388
+
389
+ if (ctx.opts.stream) {
390
+ const streamed = await service.streamCompleteWithMetadata({ ...request, stream: true });
391
+ const text = await consumeCompletionStream(streamed.stream, {
392
+ json: ctx.options.json || format === 'json',
393
+ raw: format === 'raw',
394
+ });
395
+ const metadata = await streamed.metadata;
396
+ const cost = metadata.cost;
397
+ if (ctx.opts.save) {
398
+ await Bun.write(ctx.opts.save, text);
399
+ }
400
+ if (!ctx.options.json && format === 'json') {
401
+ console.log(
402
+ JSON.stringify(
403
+ { text, cost, response: { stream: true, model: requestModel } },
404
+ null,
405
+ 2
406
+ )
407
+ );
408
+ }
409
+ if (!ctx.options.json && ctx.opts.cost) {
410
+ const costText = getCostText({ agentuity: metadata });
411
+ if (costText) {
412
+ console.error(costText);
413
+ }
414
+ }
415
+ return { text, response: { stream: true, model: requestModel }, cost };
416
+ }
417
+
418
+ const response = await service.complete(request);
419
+ const text = getCompletionText(response);
420
+ const cost = getCostInfo(response);
421
+ if (ctx.opts.save) {
422
+ await Bun.write(ctx.opts.save, text);
423
+ }
424
+
425
+ if (!ctx.options.json) {
426
+ if (format === 'raw') {
427
+ console.log(JSON.stringify(response, null, 2));
428
+ } else if (format === 'json') {
429
+ console.log(
430
+ JSON.stringify(
431
+ {
432
+ text,
433
+ model: requestModel,
434
+ usage: (response as { usage?: unknown }).usage,
435
+ cost,
436
+ response,
437
+ },
438
+ null,
439
+ 2
440
+ )
441
+ );
442
+ } else {
443
+ console.log(text);
444
+ }
445
+ if (ctx.opts.usage) {
446
+ const usage = getUsageText(response);
447
+ if (usage) {
448
+ console.error(usage);
449
+ }
450
+ }
451
+ if (ctx.opts.cost) {
452
+ const costText = getCostText(response);
453
+ if (costText) {
454
+ console.error(costText);
455
+ }
456
+ }
457
+ }
458
+
459
+ return { text, response, cost };
460
+ },
461
+ });
@@ -0,0 +1,21 @@
1
+ import { createCommand } from '../../../types';
2
+ import { getCommand } from '../../../command-prefix';
3
+ import { completeSubcommand } from './complete';
4
+ import { modelsSubcommand } from './models';
5
+
6
+ export const aigatewayCommand = createCommand({
7
+ name: 'aigateway',
8
+ aliases: ['ai-gateway', 'ai'],
9
+ description: 'Use the Agentuity AI Gateway',
10
+ tags: ['slow'],
11
+ examples: [
12
+ { command: getCommand('cloud aigateway models'), description: 'List supported models' },
13
+ {
14
+ command: getCommand('cloud aigateway complete --model openai/gpt-4.1-mini "Hello"'),
15
+ description: 'Run a completion',
16
+ },
17
+ ],
18
+ subcommands: [modelsSubcommand, completeSubcommand],
19
+ });
20
+
21
+ export default aigatewayCommand;
@@ -0,0 +1,89 @@
1
+ import { Database } from 'bun:sqlite';
2
+ import { mkdir } from 'node:fs/promises';
3
+ import { join } from 'node:path';
4
+ import { AIGatewayModelsSchema, type AIGatewayModels } from '@agentuity/core';
5
+ import { getDefaultConfigDir } from '../../../config';
6
+
7
+ const TTL_MS = 6 * 60 * 60 * 1000;
8
+
9
+ let db: Database | null = null;
10
+
11
+ async function getDatabase(): Promise<Database> {
12
+ if (db) {
13
+ return db;
14
+ }
15
+
16
+ const configDir = getDefaultConfigDir();
17
+ await mkdir(configDir, { recursive: true });
18
+
19
+ db = new Database(join(configDir, 'resource.db'));
20
+ db.run('PRAGMA journal_mode = WAL');
21
+ db.run('PRAGMA busy_timeout = 5000');
22
+ db.run('PRAGMA synchronous = NORMAL');
23
+ db.run(`
24
+ CREATE TABLE IF NOT EXISTS aigateway_model_cache (
25
+ profile TEXT NOT NULL,
26
+ cache_key TEXT NOT NULL,
27
+ models_json TEXT NOT NULL,
28
+ cached_at INTEGER NOT NULL,
29
+ PRIMARY KEY (profile, cache_key)
30
+ )
31
+ `);
32
+ db.run(`
33
+ CREATE INDEX IF NOT EXISTS idx_aigateway_model_cache_cached_at
34
+ ON aigateway_model_cache(cached_at)
35
+ `);
36
+
37
+ return db;
38
+ }
39
+
40
+ export async function getCachedAIGatewayModels(
41
+ profile: string,
42
+ cacheKey: string
43
+ ): Promise<AIGatewayModels | null> {
44
+ try {
45
+ const database = await getDatabase();
46
+ const cutoff = Date.now() - TTL_MS;
47
+ const row = database
48
+ .query<{ models_json: string; cached_at: number }, [string, string]>(
49
+ 'SELECT models_json, cached_at FROM aigateway_model_cache WHERE profile = ? AND cache_key = ?'
50
+ )
51
+ .get(profile, cacheKey);
52
+ if (!row) {
53
+ return null;
54
+ }
55
+ if (row.cached_at < cutoff) {
56
+ database.run('DELETE FROM aigateway_model_cache WHERE profile = ? AND cache_key = ?', [
57
+ profile,
58
+ cacheKey,
59
+ ]);
60
+ return null;
61
+ }
62
+ const parsed = AIGatewayModelsSchema.safeParse(JSON.parse(row.models_json));
63
+ return parsed.success ? parsed.data : null;
64
+ } catch {
65
+ return null;
66
+ }
67
+ }
68
+
69
+ export async function setCachedAIGatewayModels(
70
+ profile: string,
71
+ cacheKey: string,
72
+ models: AIGatewayModels
73
+ ): Promise<void> {
74
+ try {
75
+ const database = await getDatabase();
76
+ const cutoff = Date.now() - TTL_MS;
77
+ database.run('DELETE FROM aigateway_model_cache WHERE cached_at < ?', [cutoff]);
78
+ database.run(
79
+ `INSERT INTO aigateway_model_cache (profile, cache_key, models_json, cached_at)
80
+ VALUES (?, ?, ?, ?)
81
+ ON CONFLICT(profile, cache_key) DO UPDATE SET
82
+ models_json = excluded.models_json,
83
+ cached_at = excluded.cached_at`,
84
+ [profile, cacheKey, JSON.stringify(models), Date.now()]
85
+ );
86
+ } catch {
87
+ // Non-critical cache failure should never block the CLI.
88
+ }
89
+ }