keystone-cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +136 -0
  2. package/logo.png +0 -0
  3. package/package.json +45 -0
  4. package/src/cli.ts +775 -0
  5. package/src/db/workflow-db.test.ts +99 -0
  6. package/src/db/workflow-db.ts +265 -0
  7. package/src/expression/evaluator.test.ts +247 -0
  8. package/src/expression/evaluator.ts +517 -0
  9. package/src/parser/agent-parser.test.ts +123 -0
  10. package/src/parser/agent-parser.ts +59 -0
  11. package/src/parser/config-schema.ts +54 -0
  12. package/src/parser/schema.ts +157 -0
  13. package/src/parser/workflow-parser.test.ts +212 -0
  14. package/src/parser/workflow-parser.ts +228 -0
  15. package/src/runner/llm-adapter.test.ts +329 -0
  16. package/src/runner/llm-adapter.ts +306 -0
  17. package/src/runner/llm-executor.test.ts +537 -0
  18. package/src/runner/llm-executor.ts +256 -0
  19. package/src/runner/mcp-client.test.ts +122 -0
  20. package/src/runner/mcp-client.ts +123 -0
  21. package/src/runner/mcp-manager.test.ts +143 -0
  22. package/src/runner/mcp-manager.ts +85 -0
  23. package/src/runner/mcp-server.test.ts +242 -0
  24. package/src/runner/mcp-server.ts +436 -0
  25. package/src/runner/retry.test.ts +52 -0
  26. package/src/runner/retry.ts +58 -0
  27. package/src/runner/shell-executor.test.ts +123 -0
  28. package/src/runner/shell-executor.ts +166 -0
  29. package/src/runner/step-executor.test.ts +465 -0
  30. package/src/runner/step-executor.ts +354 -0
  31. package/src/runner/timeout.test.ts +20 -0
  32. package/src/runner/timeout.ts +30 -0
  33. package/src/runner/tool-integration.test.ts +198 -0
  34. package/src/runner/workflow-runner.test.ts +358 -0
  35. package/src/runner/workflow-runner.ts +955 -0
  36. package/src/ui/dashboard.tsx +165 -0
  37. package/src/utils/auth-manager.test.ts +152 -0
  38. package/src/utils/auth-manager.ts +88 -0
  39. package/src/utils/config-loader.test.ts +52 -0
  40. package/src/utils/config-loader.ts +85 -0
  41. package/src/utils/mermaid.test.ts +51 -0
  42. package/src/utils/mermaid.ts +87 -0
  43. package/src/utils/redactor.test.ts +66 -0
  44. package/src/utils/redactor.ts +60 -0
  45. package/src/utils/workflow-registry.test.ts +108 -0
  46. package/src/utils/workflow-registry.ts +121 -0
@@ -0,0 +1,329 @@
1
+ import { afterEach, beforeEach, describe, expect, it, mock, spyOn } from 'bun:test';
2
+ import { AuthManager } from '../utils/auth-manager';
3
+ import { ConfigLoader } from '../utils/config-loader';
4
+ import { AnthropicAdapter, CopilotAdapter, OpenAIAdapter, getAdapter } from './llm-adapter';
5
+
6
// Minimal structural view of Bun's mock-wrapped fetch, so tests can read
// the recorded call arguments without importing Bun's mock types.
interface MockFetch {
  mock: {
    // One entry per fetch() invocation: the argument list of that call.
    calls: unknown[][];
  };
}
11
+
12
// Tests for OpenAIAdapter: each test replaces global.fetch with a Bun mock
// and restores the real implementation afterwards.
describe('OpenAIAdapter', () => {
  // Keep a reference so afterEach can restore the real fetch.
  const originalFetch = global.fetch;

  beforeEach(() => {
    // @ts-ignore
    global.fetch = mock();
  });

  afterEach(() => {
    global.fetch = originalFetch;
  });

  it('should call the OpenAI API correctly', async () => {
    // Shape mirrors the OpenAI Chat Completions response body.
    const mockResponse = {
      choices: [{ message: { role: 'assistant', content: 'hello' } }],
      usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
    };

    // @ts-ignore
    global.fetch = mock(() =>
      Promise.resolve(
        new Response(JSON.stringify(mockResponse), {
          status: 200,
          headers: { 'Content-Type': 'application/json' },
        })
      )
    );

    const adapter = new OpenAIAdapter('fake-key');
    const response = await adapter.chat([{ role: 'user', content: 'hi' }]);

    // The adapter should surface the first choice's message and the usage block.
    expect(response.message.content).toBe('hello');
    expect(response.usage?.total_tokens).toBe(15);

    // Inspect the recorded fetch call: URL and Authorization header.
    // @ts-ignore
    const fetchMock = global.fetch;
    // @ts-ignore
    const fetchCall = fetchMock.mock.calls[0];
    expect(fetchCall[0]).toBe('https://api.openai.com/v1/chat/completions');
    expect(fetchCall[1].headers.Authorization).toBe('Bearer fake-key');
  });

  it('should handle API errors', async () => {
    // Non-2xx responses must be converted into a thrown Error.
    // @ts-ignore
    global.fetch = mock(() =>
      Promise.resolve(
        new Response('Error message', {
          status: 400,
          statusText: 'Bad Request',
        })
      )
    );

    const adapter = new OpenAIAdapter('fake-key');
    await expect(adapter.chat([])).rejects.toThrow(/OpenAI API error: 400 Bad Request/);
  });
});
69
+
70
// Tests for AnthropicAdapter: verifies the OpenAI-style message list is
// translated into Anthropic's Messages API format (system extraction,
// tool_use / tool_result blocks) and that responses are normalized back.
describe('AnthropicAdapter', () => {
  // Keep a reference so afterEach can restore the real fetch.
  const originalFetch = global.fetch;

  beforeEach(() => {
    // @ts-ignore
    global.fetch = mock();
  });

  afterEach(() => {
    global.fetch = originalFetch;
  });

  it('should map messages correctly and call Anthropic API', async () => {
    // Shape mirrors the Anthropic Messages API response body.
    const mockResponse = {
      content: [{ type: 'text', text: 'hello from claude' }],
      usage: { input_tokens: 10, output_tokens: 5 },
    };

    // @ts-ignore
    global.fetch.mockResolvedValue(
      new Response(JSON.stringify(mockResponse), {
        status: 200,
        headers: { 'Content-Type': 'application/json' },
      })
    );

    const adapter = new AnthropicAdapter('fake-anthropic-key');
    const response = await adapter.chat([
      { role: 'system', content: 'You are a bot' },
      { role: 'user', content: 'hi' },
    ]);

    expect(response.message.content).toBe('hello from claude');
    // total_tokens is derived: input_tokens + output_tokens.
    expect(response.usage?.total_tokens).toBe(15);

    // @ts-ignore
    const fetchMock = global.fetch as MockFetch;
    // @ts-ignore
    const [url, init] = fetchMock.mock.calls[0];

    expect(url).toBe('https://api.anthropic.com/v1/messages');
    expect(init.headers['x-api-key']).toBe('fake-anthropic-key');

    // The system message must be lifted into the top-level `system` field,
    // not sent as a regular message.
    const body = JSON.parse(init.body);
    expect(body.system).toBe('You are a bot');
    expect(body.messages[0].role).toBe('user');
    expect(body.messages[0].content).toBe('hi');
  });

  it('should handle tool calls correctly', async () => {
    // Anthropic returns tool invocations as `tool_use` content blocks.
    const mockResponse = {
      content: [
        {
          type: 'tool_use',
          id: 'tool_1',
          name: 'get_weather',
          input: { city: 'San Francisco' },
        },
      ],
      usage: { input_tokens: 10, output_tokens: 5 },
    };

    // @ts-ignore
    global.fetch.mockResolvedValue(
      new Response(JSON.stringify(mockResponse), {
        status: 200,
        headers: { 'Content-Type': 'application/json' },
      })
    );

    const adapter = new AnthropicAdapter('fake-key');
    const response = await adapter.chat([{ role: 'user', content: 'what is the weather?' }], {
      tools: [
        {
          type: 'function',
          function: {
            name: 'get_weather',
            parameters: { type: 'object', properties: { city: { type: 'string' } } },
          },
        },
      ],
    });

    // tool_use blocks must be normalized to OpenAI-style tool_calls with
    // JSON-encoded string arguments.
    expect(response.message.tool_calls?.[0].function.name).toBe('get_weather');
    // @ts-ignore
    expect(JSON.parse(response.message.tool_calls?.[0].function.arguments)).toEqual({
      city: 'San Francisco',
    });
  });

  it('should map assistant tool calls correctly', async () => {
    // @ts-ignore
    global.fetch.mockResolvedValue(
      new Response(JSON.stringify({ content: [], usage: { input_tokens: 0, output_tokens: 0 } }))
    );

    const adapter = new AnthropicAdapter('fake-key');
    await adapter.chat([
      {
        role: 'assistant',
        content: 'I will call a tool',
        tool_calls: [
          {
            id: 'call_1',
            type: 'function',
            function: { name: 'my_tool', arguments: '{"arg": 1}' },
          },
        ],
      },
    ]);

    // An assistant message with text AND tool calls becomes a two-block
    // content array: [text, tool_use].
    // @ts-ignore
    const init = global.fetch.mock.calls[0][1];
    const body = JSON.parse(init.body);
    expect(body.messages[0].role).toBe('assistant');
    expect(body.messages[0].content).toHaveLength(2);
    expect(body.messages[0].content[0]).toEqual({ type: 'text', text: 'I will call a tool' });
    expect(body.messages[0].content[1]).toEqual({
      type: 'tool_use',
      id: 'call_1',
      name: 'my_tool',
      input: { arg: 1 },
    });
  });

  it('should map tool results correctly', async () => {
    // @ts-ignore
    global.fetch.mockResolvedValue(
      new Response(JSON.stringify({ content: [], usage: { input_tokens: 0, output_tokens: 0 } }))
    );

    const adapter = new AnthropicAdapter('fake-key');
    await adapter.chat([
      {
        role: 'tool',
        content: 'result',
        tool_call_id: 'call_1',
      },
    ]);

    // Tool results travel back to Anthropic as a user message carrying a
    // tool_result block referencing the originating tool_use id.
    // @ts-ignore
    const init = global.fetch.mock.calls[0][1];
    const body = JSON.parse(init.body);
    expect(body.messages[0].role).toBe('user');
    expect(body.messages[0].content[0]).toEqual({
      type: 'tool_result',
      tool_use_id: 'call_1',
      content: 'result',
    });
  });
});
221
+
222
+ describe('CopilotAdapter', () => {
223
+ const originalFetch = global.fetch;
224
+
225
+ beforeEach(() => {
226
+ // @ts-ignore
227
+ global.fetch = mock();
228
+ });
229
+
230
+ afterEach(() => {
231
+ global.fetch = originalFetch;
232
+ });
233
+
234
+ it('should get token from AuthManager and call Copilot API', async () => {
235
+ const mockResponse = {
236
+ choices: [{ message: { role: 'assistant', content: 'hello from copilot' } }],
237
+ };
238
+
239
+ const spy = spyOn(AuthManager, 'getCopilotToken').mockResolvedValue('mock-token');
240
+
241
+ // @ts-ignore
242
+ global.fetch.mockResolvedValue(
243
+ new Response(JSON.stringify(mockResponse), {
244
+ status: 200,
245
+ headers: { 'Content-Type': 'application/json' },
246
+ })
247
+ );
248
+
249
+ const adapter = new CopilotAdapter();
250
+ const response = await adapter.chat([{ role: 'user', content: 'hi' }]);
251
+
252
+ expect(response.message.content).toBe('hello from copilot');
253
+ expect(AuthManager.getCopilotToken).toHaveBeenCalled();
254
+
255
+ // @ts-ignore
256
+ const fetchMock = global.fetch as MockFetch;
257
+ // @ts-ignore
258
+ const [url, init] = fetchMock.mock.calls[0];
259
+ expect(url).toBe('https://api.githubcopilot.com/chat/completions');
260
+ expect(init.headers.Authorization).toBe('Bearer mock-token');
261
+ spy.mockRestore();
262
+ });
263
+
264
+ it('should throw error if token not found', async () => {
265
+ const spy = spyOn(AuthManager, 'getCopilotToken').mockResolvedValue(undefined);
266
+
267
+ const adapter = new CopilotAdapter();
268
+ await expect(adapter.chat([])).rejects.toThrow(/GitHub Copilot token not found/);
269
+ spy.mockRestore();
270
+ });
271
+
272
+ it('should throw error if token not found (duplicated)', async () => {
273
+ const spy = spyOn(AuthManager, 'getCopilotToken').mockResolvedValue(undefined);
274
+
275
+ const adapter = new CopilotAdapter();
276
+ await expect(adapter.chat([])).rejects.toThrow(/GitHub Copilot token not found/);
277
+ spy.mockRestore();
278
+ });
279
+ });
280
+
281
// Tests for getAdapter: provider resolution is stubbed via ConfigLoader
// spies so no configuration file is read.
describe('getAdapter', () => {
  beforeEach(() => {
    // Route models to providers by name prefix, defaulting to openai.
    spyOn(ConfigLoader, 'getProviderForModel').mockImplementation((model: string) => {
      if (model.startsWith('claude')) return 'anthropic';
      if (model.startsWith('gpt')) return 'openai';
      if (model.startsWith('copilot')) return 'copilot';
      return 'openai';
    });
    // Minimal provider table covering all three adapter types.
    // @ts-ignore
    spyOn(ConfigLoader, 'load').mockReturnValue({
      providers: {
        openai: { type: 'openai', api_key_env: 'OPENAI_API_KEY' },
        anthropic: { type: 'anthropic', api_key_env: 'ANTHROPIC_API_KEY' },
        copilot: { type: 'copilot' },
      },
    });
  });

  afterEach(() => {
    mock.restore();
  });

  it('should return OpenAIAdapter for gpt models', () => {
    const { adapter, resolvedModel } = getAdapter('gpt-4');
    expect(adapter).toBeInstanceOf(OpenAIAdapter);
    expect(resolvedModel).toBe('gpt-4');
  });

  it('should return AnthropicAdapter for claude models', () => {
    const { adapter, resolvedModel } = getAdapter('claude-3');
    expect(adapter).toBeInstanceOf(AnthropicAdapter);
    expect(resolvedModel).toBe('claude-3');
  });

  it('should return CopilotAdapter for copilot models', () => {
    // The "copilot:" provider prefix must be stripped from the model name.
    const { adapter, resolvedModel } = getAdapter('copilot:gpt-4');
    expect(adapter).toBeInstanceOf(CopilotAdapter);
    expect(resolvedModel).toBe('gpt-4');
  });

  it('should throw error for unknown provider', () => {
    // Force an unconfigured provider so the lookup fails.
    // @ts-ignore
    ConfigLoader.getProviderForModel.mockReturnValue('unknown');
    // @ts-ignore
    ConfigLoader.load.mockReturnValue({ providers: {} });

    expect(() => getAdapter('unknown-model')).toThrow(/Provider configuration not found/);
  });
});
@@ -0,0 +1,306 @@
1
+ import { AuthManager, COPILOT_HEADERS } from '../utils/auth-manager';
2
+ import { ConfigLoader } from '../utils/config-loader';
3
+
4
/**
 * One message in a chat transcript, using OpenAI-style roles.
 * `content` may be null, e.g. for assistant messages that only carry
 * tool calls.
 */
export interface LLMMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string | null;
  // For role === 'tool': id of the tool call this message answers.
  tool_call_id?: string;
  name?: string;
  // For assistant messages that request tool invocations.
  tool_calls?: LLMToolCall[];
}

/** A tool invocation requested by the model (OpenAI function-call shape). */
export interface LLMToolCall {
  id: string;
  type: 'function';
  function: {
    name: string;
    // JSON-encoded argument object, exactly as produced by the model.
    arguments: string;
  };
}

/** Normalized chat-completion result returned by every adapter. */
export interface LLMResponse {
  message: LLMMessage;
  // Token accounting; optional because not all backends report it.
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

/** A tool definition advertised to the model (OpenAI function-tool shape). */
export interface LLMTool {
  type: 'function';
  function: {
    name: string;
    description?: string;
    // JSON Schema describing the tool's arguments.
    parameters?: Record<string, unknown>;
  };
}

/** Common contract implemented by all provider adapters in this module. */
export interface LLMAdapter {
  chat(
    messages: LLMMessage[],
    options?: { model?: string; tools?: LLMTool[] }
  ): Promise<LLMResponse>;
}
45
+
46
+ export class OpenAIAdapter implements LLMAdapter {
47
+ private apiKey: string;
48
+ private baseUrl: string;
49
+
50
+ constructor(apiKey?: string, baseUrl?: string) {
51
+ this.apiKey = apiKey || Bun.env.OPENAI_API_KEY || '';
52
+ this.baseUrl = baseUrl || Bun.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
53
+
54
+ if (!this.apiKey && this.baseUrl === 'https://api.openai.com/v1') {
55
+ console.warn('Warning: OPENAI_API_KEY is not set.');
56
+ }
57
+ }
58
+
59
+ async chat(
60
+ messages: LLMMessage[],
61
+ options?: { model?: string; tools?: LLMTool[] }
62
+ ): Promise<LLMResponse> {
63
+ const response = await fetch(`${this.baseUrl}/chat/completions`, {
64
+ method: 'POST',
65
+ headers: {
66
+ 'Content-Type': 'application/json',
67
+ Authorization: `Bearer ${this.apiKey}`,
68
+ },
69
+ body: JSON.stringify({
70
+ model: options?.model || 'gpt-4o',
71
+ messages,
72
+ tools: options?.tools,
73
+ }),
74
+ });
75
+
76
+ if (!response.ok) {
77
+ const error = await response.text();
78
+ throw new Error(`OpenAI API error: ${response.status} ${response.statusText} - ${error}`);
79
+ }
80
+
81
+ const data = (await response.json()) as {
82
+ choices: { message: LLMMessage }[];
83
+ usage?: { prompt_tokens: number; completion_tokens: number; total_tokens: number };
84
+ };
85
+ return {
86
+ message: data.choices[0].message,
87
+ usage: data.usage,
88
+ };
89
+ }
90
+ }
91
+
92
/**
 * Adapter for the Anthropic Messages API. Translates OpenAI-style chat
 * messages (including tool calls and tool results) into Anthropic's format
 * and normalizes the response back into an LLMResponse.
 */
export class AnthropicAdapter implements LLMAdapter {
  private apiKey: string;
  private baseUrl: string;

  // apiKey falls back to ANTHROPIC_API_KEY; baseUrl to ANTHROPIC_BASE_URL,
  // then the hosted Anthropic endpoint.
  constructor(apiKey?: string, baseUrl?: string) {
    this.apiKey = apiKey || Bun.env.ANTHROPIC_API_KEY || '';
    this.baseUrl = baseUrl || Bun.env.ANTHROPIC_BASE_URL || 'https://api.anthropic.com/v1';

    // Only warn for the hosted endpoint; a custom base URL may not need a key.
    if (!this.apiKey && this.baseUrl === 'https://api.anthropic.com/v1') {
      console.warn('Warning: ANTHROPIC_API_KEY is not set.');
    }
  }

  /**
   * Send a chat request to the Anthropic Messages API.
   *
   * Mapping rules applied to `messages`:
   * - The first system message is lifted into the top-level `system` field;
   *   all system messages are excluded from the message list.
   * - 'tool' messages become `tool_result` blocks inside a user message;
   *   consecutive tool results are merged into one user message.
   * - Assistant messages with tool_calls become content arrays of an
   *   optional text block plus one `tool_use` block per call.
   *
   * @throws Error when the HTTP response is not ok. NOTE(review):
   *   JSON.parse of tool-call arguments will also throw if the model
   *   produced invalid JSON — no guard here; confirm upstream validation.
   */
  async chat(
    messages: LLMMessage[],
    options?: { model?: string; tools?: LLMTool[] }
  ): Promise<LLMResponse> {
    // Only the first system message is used as the system prompt.
    const system = messages.find((m) => m.role === 'system')?.content || undefined;

    // Anthropic requires alternating user/assistant roles.
    // Sequential tool results must be grouped into a single user message.
    const anthropicMessages: Array<{
      role: 'user' | 'assistant';
      content: string | Array<Record<string, unknown>>;
    }> = [];

    for (const m of messages) {
      if (m.role === 'system') continue;

      if (m.role === 'tool') {
        const lastMsg = anthropicMessages[anthropicMessages.length - 1];
        const toolResult = {
          type: 'tool_result' as const,
          tool_use_id: m.tool_call_id,
          content: m.content,
        };

        if (lastMsg && lastMsg.role === 'user' && Array.isArray(lastMsg.content)) {
          // Append to existing tool result block if previous message was also a tool result
          lastMsg.content.push(toolResult);
        } else {
          // Start a new user message for tool results
          anthropicMessages.push({
            role: 'user',
            content: [toolResult],
          });
        }
      } else if (m.tool_calls) {
        // Assistant message requesting tool use: optional leading text block
        // followed by one tool_use block per requested call.
        anthropicMessages.push({
          role: 'assistant',
          content: [
            ...(m.content ? [{ type: 'text' as const, text: m.content }] : []),
            ...m.tool_calls.map((tc) => ({
              type: 'tool_use' as const,
              id: tc.id,
              name: tc.function.name,
              // Arguments arrive as a JSON string; Anthropic wants an object.
              input: JSON.parse(tc.function.arguments),
            })),
          ],
        });
      } else {
        // Plain user/assistant text message.
        anthropicMessages.push({
          role: m.role as 'user' | 'assistant',
          content: m.content || '',
        });
      }
    }

    // Convert OpenAI-style function tools to Anthropic's tool schema.
    const anthropicTools = options?.tools
      ? options.tools.map((t) => ({
          name: t.function.name,
          description: t.function.description,
          input_schema: t.function.parameters,
        }))
      : undefined;

    const response = await fetch(`${this.baseUrl}/messages`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': this.apiKey,
        'anthropic-version': '2023-06-01',
      },
      body: JSON.stringify({
        model: options?.model || 'claude-3-5-sonnet-20240620',
        system,
        messages: anthropicMessages,
        tools: anthropicTools,
        // max_tokens is mandatory for the Messages API; fixed ceiling here.
        max_tokens: 4096,
      }),
    });

    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Anthropic API error: ${response.status} ${response.statusText} - ${error}`);
    }

    const data = (await response.json()) as {
      content: {
        type: 'text' | 'tool_use';
        text?: string;
        id?: string;
        name?: string;
        input?: Record<string, unknown>;
      }[];
      usage: { input_tokens: number; output_tokens: number };
    };

    // Normalize back to the OpenAI-style LLMResponse: first text block as
    // content, tool_use blocks as tool_calls with stringified arguments.
    const content = data.content.find((c) => c.type === 'text')?.text || null;
    const toolCalls = data.content
      .filter((c) => c.type === 'tool_use')
      .map((c) => ({
        id: c.id as string,
        type: 'function' as const,
        function: {
          name: c.name as string,
          arguments: JSON.stringify(c.input),
        },
      }));

    return {
      message: {
        role: 'assistant',
        content,
        tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
      },
      usage: {
        prompt_tokens: data.usage.input_tokens,
        completion_tokens: data.usage.output_tokens,
        // Anthropic reports input/output separately; total is derived.
        total_tokens: data.usage.input_tokens + data.usage.output_tokens,
      },
    };
  }
}
226
+
227
+ export class CopilotAdapter implements LLMAdapter {
228
+ private baseUrl: string;
229
+
230
+ constructor(baseUrl?: string) {
231
+ this.baseUrl = baseUrl || 'https://api.githubcopilot.com';
232
+ }
233
+
234
+ async chat(
235
+ messages: LLMMessage[],
236
+ options?: { model?: string; tools?: LLMTool[] }
237
+ ): Promise<LLMResponse> {
238
+ const token = await AuthManager.getCopilotToken();
239
+ if (!token) {
240
+ throw new Error('GitHub Copilot token not found. Please run "keystone auth login" first.');
241
+ }
242
+
243
+ const response = await fetch(`${this.baseUrl}/chat/completions`, {
244
+ method: 'POST',
245
+ headers: {
246
+ 'Content-Type': 'application/json',
247
+ Authorization: `Bearer ${token}`,
248
+ 'vscode-editorid': 'vscode-chat',
249
+ 'vscode-machineid': 'default',
250
+ ...COPILOT_HEADERS,
251
+ },
252
+ body: JSON.stringify({
253
+ model: options?.model || 'gpt-4o',
254
+ messages,
255
+ tools: options?.tools,
256
+ }),
257
+ });
258
+
259
+ if (!response.ok) {
260
+ const error = await response.text();
261
+ throw new Error(`Copilot API error: ${response.status} ${response.statusText} - ${error}`);
262
+ }
263
+
264
+ const data = (await response.json()) as {
265
+ choices: { message: LLMMessage }[];
266
+ usage?: { prompt_tokens: number; completion_tokens: number; total_tokens: number };
267
+ };
268
+ return {
269
+ message: data.choices[0].message,
270
+ usage: data.usage,
271
+ };
272
+ }
273
+ }
274
+
275
+ export function getAdapter(model: string): { adapter: LLMAdapter; resolvedModel: string } {
276
+ const providerName = ConfigLoader.getProviderForModel(model);
277
+ const config = ConfigLoader.load();
278
+ const providerConfig = config.providers[providerName];
279
+
280
+ if (!providerConfig) {
281
+ throw new Error(`Provider configuration not found for: ${providerName}`);
282
+ }
283
+
284
+ let resolvedModel = model;
285
+ if (model.includes(':')) {
286
+ const [prefix, ...rest] = model.split(':');
287
+ if (config.providers[prefix]) {
288
+ resolvedModel = rest.join(':');
289
+ }
290
+ }
291
+
292
+ let adapter: LLMAdapter;
293
+ if (providerConfig.type === 'copilot') {
294
+ adapter = new CopilotAdapter(providerConfig.base_url);
295
+ } else {
296
+ const apiKey = providerConfig.api_key_env ? Bun.env[providerConfig.api_key_env] : undefined;
297
+
298
+ if (providerConfig.type === 'anthropic') {
299
+ adapter = new AnthropicAdapter(apiKey, providerConfig.base_url);
300
+ } else {
301
+ adapter = new OpenAIAdapter(apiKey, providerConfig.base_url);
302
+ }
303
+ }
304
+
305
+ return { adapter, resolvedModel };
306
+ }