@liquidmetal-ai/precip 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.prettierrc +9 -0
- package/CHANGELOG.md +8 -0
- package/eslint.config.mjs +28 -0
- package/package.json +53 -0
- package/src/engine/agent.ts +478 -0
- package/src/engine/llm-provider.test.ts +275 -0
- package/src/engine/llm-provider.ts +330 -0
- package/src/engine/stream-parser.ts +170 -0
- package/src/index.ts +142 -0
- package/src/mounts/mount-manager.test.ts +516 -0
- package/src/mounts/mount-manager.ts +327 -0
- package/src/mounts/mount-registry.ts +196 -0
- package/src/mounts/zod-to-string.test.ts +154 -0
- package/src/mounts/zod-to-string.ts +213 -0
- package/src/presets/agent-tools.ts +57 -0
- package/src/presets/index.ts +5 -0
- package/src/sandbox/README.md +1321 -0
- package/src/sandbox/bridges/README.md +571 -0
- package/src/sandbox/bridges/actor.test.ts +229 -0
- package/src/sandbox/bridges/actor.ts +195 -0
- package/src/sandbox/bridges/bridge-fixes.test.ts +614 -0
- package/src/sandbox/bridges/bucket.test.ts +300 -0
- package/src/sandbox/bridges/cleanup-reproduction.test.ts +225 -0
- package/src/sandbox/bridges/console-multiple.test.ts +187 -0
- package/src/sandbox/bridges/console.test.ts +157 -0
- package/src/sandbox/bridges/console.ts +122 -0
- package/src/sandbox/bridges/fetch.ts +93 -0
- package/src/sandbox/bridges/index.ts +78 -0
- package/src/sandbox/bridges/readable-stream.ts +323 -0
- package/src/sandbox/bridges/response.test.ts +154 -0
- package/src/sandbox/bridges/response.ts +123 -0
- package/src/sandbox/bridges/review-fixes.test.ts +331 -0
- package/src/sandbox/bridges/search.test.ts +475 -0
- package/src/sandbox/bridges/search.ts +264 -0
- package/src/sandbox/bridges/shared/body-methods.ts +93 -0
- package/src/sandbox/bridges/shared/cleanup.ts +112 -0
- package/src/sandbox/bridges/shared/convert.ts +76 -0
- package/src/sandbox/bridges/shared/headers.ts +181 -0
- package/src/sandbox/bridges/shared/index.ts +36 -0
- package/src/sandbox/bridges/shared/json-helpers.ts +77 -0
- package/src/sandbox/bridges/shared/path-parser.ts +109 -0
- package/src/sandbox/bridges/shared/promise-helper.ts +108 -0
- package/src/sandbox/bridges/shared/registry-setup.ts +84 -0
- package/src/sandbox/bridges/shared/response-object.ts +280 -0
- package/src/sandbox/bridges/shared/result-builder.ts +130 -0
- package/src/sandbox/bridges/shared/scope-helpers.ts +44 -0
- package/src/sandbox/bridges/shared/stream-reader.ts +90 -0
- package/src/sandbox/bridges/storage-bridge.test.ts +893 -0
- package/src/sandbox/bridges/storage.ts +421 -0
- package/src/sandbox/bridges/text-decoder.ts +190 -0
- package/src/sandbox/bridges/text-encoder.ts +102 -0
- package/src/sandbox/bridges/types.ts +39 -0
- package/src/sandbox/bridges/utils.ts +123 -0
- package/src/sandbox/index.ts +6 -0
- package/src/sandbox/quickjs-wasm.d.ts +9 -0
- package/src/sandbox/sandbox.test.ts +191 -0
- package/src/sandbox/sandbox.ts +831 -0
- package/src/sandbox/test-helper.ts +43 -0
- package/src/sandbox/test-mocks.ts +154 -0
- package/src/sandbox/user-stream.test.ts +77 -0
- package/src/skills/frontmatter.test.ts +305 -0
- package/src/skills/frontmatter.ts +200 -0
- package/src/skills/index.ts +9 -0
- package/src/skills/skills-loader.test.ts +237 -0
- package/src/skills/skills-loader.ts +200 -0
- package/src/tools/actor-storage-tools.ts +250 -0
- package/src/tools/code-tools.test.ts +199 -0
- package/src/tools/code-tools.ts +444 -0
- package/src/tools/file-tools.ts +206 -0
- package/src/tools/registry.ts +125 -0
- package/src/tools/script-tools.ts +145 -0
- package/src/tools/smartbucket-tools.ts +203 -0
- package/src/tools/sql-tools.ts +213 -0
- package/src/tools/tool-factory.ts +119 -0
- package/src/types.ts +512 -0
- package/tsconfig.eslint.json +5 -0
- package/tsconfig.json +15 -0
- package/vitest.config.ts +33 -0
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
// Unit tests for the retry layer in llm-provider.ts.
//
// Coverage:
//  - resolveRetryConfig clamping of out-of-range values (indirectly: the
//    tests only assert that createLLMProvider accepts such configs)
//  - withRetry behavior through the RetryProvider wrapper: backoff retries,
//    the maxRetries limit, the retryOn predicate, maxRetries=0, and the
//    no-error fast path
import { describe, it, expect, vi } from 'vitest';
import { createLLMProvider } from './llm-provider.js';

describe('LLM Provider Retry Logic', () => {
  describe('resolveRetryConfig validation', () => {
    // NOTE(review): these validation tests only check that construction
    // succeeds; they do not observe the clamped values directly.
    it('clamps negative maxRetries to 0', () => {
      const logger = vi.fn();
      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
        },
        logger,
        { maxRetries: -5 }
      );

      expect(provider).toBeDefined();
    });

    it('clamps negative initialDelayMs to 0', () => {
      const logger = vi.fn();
      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
        },
        logger,
        { initialDelayMs: -1000 }
      );

      expect(provider).toBeDefined();
    });

    it('clamps backoffMultiplier to minimum 1', () => {
      const logger = vi.fn();
      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
        },
        logger,
        { backoffMultiplier: 0.5 }
      );

      expect(provider).toBeDefined();
    });

    it('ensures maxDelayMs is at least initialDelayMs', () => {
      const logger = vi.fn();
      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
        },
        logger,
        { initialDelayMs: 5000, maxDelayMs: 1000 }
      );

      expect(provider).toBeDefined();
    });

    it('fills in all defaults when config is empty', () => {
      const logger = vi.fn();
      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
        },
        logger,
        {}
      );

      expect(provider).toBeDefined();
    });
  });

  describe('retry behavior', () => {
    // These tests replace global.fetch with mocks and drive the real
    // OpenAIProvider through the RetryProvider wrapper.
    it('retries on transient errors with exponential backoff', async () => {
      const logger = {
        warn: vi.fn(),
        error: vi.fn(),
      };

      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
        },
        logger,
        {
          maxRetries: 2,
          initialDelayMs: 100,
          backoffMultiplier: 2,
          maxDelayMs: 500,
        }
      );

      // Mock fetch to fail twice then succeed
      let attemptCount = 0;
      global.fetch = vi.fn().mockImplementation(() => {
        attemptCount++;
        if (attemptCount < 3) {
          return Promise.reject(new Error('Network error'));
        }
        return Promise.resolve({
          ok: true,
          body: new ReadableStream(),
          json: async () => ({ choices: [{ message: { content: 'success' } }] }),
        } as Response);
      });

      try {
        await provider.chat([{ role: 'user', content: 'test' }]);
      } catch (_error) {
        // Expected to fail since we're not properly mocking the stream
      }

      // Verify retry attempts
      expect(attemptCount).toBe(3);
      expect(logger.warn).toHaveBeenCalledTimes(2);
    });

    it('respects maxRetries limit', async () => {
      const logger = {
        warn: vi.fn(),
        error: vi.fn(),
      };

      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
        },
        logger,
        {
          maxRetries: 2,
          initialDelayMs: 10,
          backoffMultiplier: 2,
        }
      );

      let attemptCount = 0;
      global.fetch = vi.fn().mockImplementation(() => {
        attemptCount++;
        return Promise.reject(new Error('Always fails'));
      });

      try {
        await provider.chat([{ role: 'user', content: 'test' }]);
      } catch (_error) {
        // Expected
      }

      // Should attempt initial call + 2 retries = 3 total
      expect(attemptCount).toBe(3);
      expect(logger.warn).toHaveBeenCalledTimes(2);
    });

    it('filters errors with retryOn predicate', async () => {
      const logger = {
        warn: vi.fn(),
        error: vi.fn(),
      };

      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
        },
        logger,
        {
          maxRetries: 5,
          initialDelayMs: 10,
          retryOn: (error) => {
            // Only retry on network errors, not auth errors
            return error.message.includes('Network');
          },
        }
      );

      let attemptCount = 0;
      global.fetch = vi.fn().mockImplementation(() => {
        attemptCount++;
        return Promise.reject(new Error('Auth error'));
      });

      try {
        await provider.chat([{ role: 'user', content: 'test' }]);
      } catch (_error) {
        // Expected
      }

      // Should not retry because retryOn returns false
      expect(attemptCount).toBe(1);
      expect(logger.warn).not.toHaveBeenCalled();
    });

    it('respects maxRetries=0 (no retries)', async () => {
      const logger = {
        warn: vi.fn(),
        error: vi.fn(),
      };

      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
        },
        logger,
        {
          maxRetries: 0,
          initialDelayMs: 10,
        }
      );

      let attemptCount = 0;
      global.fetch = vi.fn().mockImplementation(() => {
        attemptCount++;
        return Promise.reject(new Error('Network error'));
      });

      try {
        await provider.chat([{ role: 'user', content: 'test' }]);
      } catch (_error) {
        // Expected
      }

      // Should only attempt once (no retries)
      expect(attemptCount).toBe(1);
      expect(logger.warn).not.toHaveBeenCalled();
    });

    it('succeeds on first attempt when no error', async () => {
      const logger = {
        warn: vi.fn(),
        error: vi.fn(),
      };

      const provider = createLLMProvider(
        {
          provider: 'openai',
          apiKey: 'test-key',
          model: 'gpt-4',
          streaming: false,
        },
        logger,
        {
          maxRetries: 2,
          initialDelayMs: 10,
        }
      );

      global.fetch = vi.fn().mockResolvedValue({
        ok: true,
        json: async () => ({
          choices: [{ message: { content: 'success' } }],
        }),
      } as Response);

      const result = await provider.chat([{ role: 'user', content: 'test' }]);

      expect(result.content).toBe('success');
      expect(logger.warn).not.toHaveBeenCalled();
    });
  });
});
|
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM Provider Abstraction
|
|
3
|
+
* Supports OpenAI, Anthropic, GLM, and Raindrop AI
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import type { LLMConfig, LLMMessage, LLMResponse, LLMToolCall, Logger, RetryConfig } from '../types.js';
|
|
7
|
+
import { parseOpenAIStream, parseOpenAIResponse } from './stream-parser.js';
|
|
8
|
+
|
|
9
|
+
/**
 * Common contract implemented by every concrete backend in this file
 * (OpenAI, Anthropic, Liquidmetal) and by the RetryProvider wrapper.
 */
export interface LLMProvider {
  // Sends the conversation (plus optional tool definitions) and resolves
  // with the normalized response; rejects on transport or API errors.
  chat(messages: LLMMessage[], tools?: any[]): Promise<LLMResponse>;
}
|
|
12
|
+
|
|
13
|
+
// ============================================================================
|
|
14
|
+
// Retry Helper
|
|
15
|
+
// ============================================================================
|
|
16
|
+
|
|
17
|
+
/** Resolved retry config with all defaults filled in */
|
|
18
|
+
interface ResolvedRetryConfig {
|
|
19
|
+
maxRetries: number;
|
|
20
|
+
initialDelayMs: number;
|
|
21
|
+
backoffMultiplier: number;
|
|
22
|
+
maxDelayMs: number;
|
|
23
|
+
retryOn: (error: Error) => boolean;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
/** Fill in defaults for retry config */
|
|
27
|
+
function resolveRetryConfig(config?: RetryConfig): ResolvedRetryConfig | null {
|
|
28
|
+
if (!config) return null;
|
|
29
|
+
|
|
30
|
+
const maxRetries = Math.max(0, config.maxRetries ?? 2);
|
|
31
|
+
const initialDelayMs = Math.max(0, config.initialDelayMs ?? 1000);
|
|
32
|
+
const backoffMultiplier = Math.max(1, config.backoffMultiplier ?? 2);
|
|
33
|
+
const maxDelayMs = Math.max(initialDelayMs, config.maxDelayMs ?? 30000);
|
|
34
|
+
|
|
35
|
+
return {
|
|
36
|
+
maxRetries,
|
|
37
|
+
initialDelayMs,
|
|
38
|
+
backoffMultiplier,
|
|
39
|
+
maxDelayMs,
|
|
40
|
+
retryOn: config.retryOn ?? (() => true),
|
|
41
|
+
};
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
/**
|
|
45
|
+
* Execute an async function with retry and exponential backoff.
|
|
46
|
+
* Only retries if `retryOn` returns true for the caught error.
|
|
47
|
+
*/
|
|
48
|
+
async function withRetry<T>(
|
|
49
|
+
fn: () => Promise<T>,
|
|
50
|
+
config: ResolvedRetryConfig,
|
|
51
|
+
logger?: Logger,
|
|
52
|
+
): Promise<T> {
|
|
53
|
+
let lastError: Error;
|
|
54
|
+
let delay = config.initialDelayMs;
|
|
55
|
+
|
|
56
|
+
for (let attempt = 0; attempt <= config.maxRetries; attempt++) {
|
|
57
|
+
try {
|
|
58
|
+
return await fn();
|
|
59
|
+
} catch (error) {
|
|
60
|
+
lastError = error instanceof Error ? error : new Error(String(error));
|
|
61
|
+
|
|
62
|
+
if (attempt >= config.maxRetries || !config.retryOn(lastError)) {
|
|
63
|
+
throw lastError;
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
logger?.warn?.('LLM call failed, retrying', {
|
|
67
|
+
attempt: attempt + 1,
|
|
68
|
+
maxRetries: config.maxRetries,
|
|
69
|
+
delayMs: delay,
|
|
70
|
+
error: lastError.message,
|
|
71
|
+
});
|
|
72
|
+
|
|
73
|
+
await new Promise((r) => setTimeout(r, delay));
|
|
74
|
+
delay = Math.min(delay * config.backoffMultiplier, config.maxDelayMs);
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
throw lastError!;
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
// ============================================================================
|
|
82
|
+
// Provider Factory
|
|
83
|
+
// ============================================================================
|
|
84
|
+
|
|
85
|
+
/**
|
|
86
|
+
* Create an LLM provider instance based on config
|
|
87
|
+
*/
|
|
88
|
+
export function createLLMProvider(config: LLMConfig, logger?: Logger, retry?: RetryConfig): LLMProvider {
|
|
89
|
+
const retryConfig = resolveRetryConfig(retry);
|
|
90
|
+
|
|
91
|
+
let provider: LLMProvider;
|
|
92
|
+
switch (config.provider) {
|
|
93
|
+
case 'openai':
|
|
94
|
+
provider = new OpenAIProvider(config, logger);
|
|
95
|
+
break;
|
|
96
|
+
|
|
97
|
+
case 'anthropic':
|
|
98
|
+
provider = new AnthropicProvider(config, logger);
|
|
99
|
+
break;
|
|
100
|
+
|
|
101
|
+
case 'liquidmetal':
|
|
102
|
+
provider = new LiquidmetalProvider(config, logger);
|
|
103
|
+
break;
|
|
104
|
+
|
|
105
|
+
default:
|
|
106
|
+
throw new Error(`Unsupported LLM provider: ${config.provider}`);
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
// Wrap with retry if configured
|
|
110
|
+
if (retryConfig) {
|
|
111
|
+
return new RetryProvider(provider, retryConfig, logger);
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
return provider;
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
// ============================================================================
|
|
118
|
+
// Retry Provider Wrapper
|
|
119
|
+
// ============================================================================
|
|
120
|
+
|
|
121
|
+
/**
|
|
122
|
+
* Wraps any LLM provider with retry logic.
|
|
123
|
+
* Transparent to the caller — just adds resilience.
|
|
124
|
+
*/
|
|
125
|
+
class RetryProvider implements LLMProvider {
|
|
126
|
+
constructor(
|
|
127
|
+
private inner: LLMProvider,
|
|
128
|
+
private retryConfig: ResolvedRetryConfig,
|
|
129
|
+
private logger?: Logger,
|
|
130
|
+
) {}
|
|
131
|
+
|
|
132
|
+
async chat(messages: LLMMessage[], tools?: any[]): Promise<LLMResponse> {
|
|
133
|
+
return withRetry(
|
|
134
|
+
() => this.inner.chat(messages, tools),
|
|
135
|
+
this.retryConfig,
|
|
136
|
+
this.logger,
|
|
137
|
+
);
|
|
138
|
+
}
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
/**
|
|
142
|
+
* OpenAI Provider (GPT-4, GPT-3.5, etc.)
|
|
143
|
+
*/
|
|
144
|
+
class OpenAIProvider implements LLMProvider {
|
|
145
|
+
constructor(
|
|
146
|
+
private config: LLMConfig,
|
|
147
|
+
private logger?: Logger
|
|
148
|
+
) {}
|
|
149
|
+
|
|
150
|
+
async chat(messages: LLMMessage[], tools?: any[]): Promise<LLMResponse> {
|
|
151
|
+
const baseUrl = this.config.baseUrl || 'https://api.openai.com/v1';
|
|
152
|
+
|
|
153
|
+
const payload: any = {
|
|
154
|
+
model: this.config.model,
|
|
155
|
+
messages,
|
|
156
|
+
temperature: this.config.temperature ?? 0.7,
|
|
157
|
+
max_tokens: this.config.maxTokens ?? 4096,
|
|
158
|
+
stream: this.config.streaming ?? true
|
|
159
|
+
};
|
|
160
|
+
|
|
161
|
+
if (tools && tools.length > 0) {
|
|
162
|
+
payload.tools = tools;
|
|
163
|
+
payload.tool_choice = 'auto';
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
const response = await fetch(`${baseUrl}/chat/completions`, {
|
|
167
|
+
method: 'POST',
|
|
168
|
+
headers: {
|
|
169
|
+
'Content-Type': 'application/json',
|
|
170
|
+
Authorization: `Bearer ${this.config.apiKey}`
|
|
171
|
+
},
|
|
172
|
+
body: JSON.stringify(payload)
|
|
173
|
+
});
|
|
174
|
+
|
|
175
|
+
if (!response.ok) {
|
|
176
|
+
const error = await response.text();
|
|
177
|
+
throw new Error(`OpenAI API error: ${response.status} ${error}`);
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
if (this.config.streaming) {
|
|
181
|
+
return await parseOpenAIStream(response.body!, this.logger);
|
|
182
|
+
} else {
|
|
183
|
+
const data = await response.json();
|
|
184
|
+
return parseOpenAIResponse(data);
|
|
185
|
+
}
|
|
186
|
+
}
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
/**
|
|
190
|
+
* Anthropic Provider (Claude)
|
|
191
|
+
*/
|
|
192
|
+
class AnthropicProvider implements LLMProvider {
|
|
193
|
+
constructor(
|
|
194
|
+
private config: LLMConfig,
|
|
195
|
+
private logger?: Logger
|
|
196
|
+
) {}
|
|
197
|
+
|
|
198
|
+
async chat(messages: LLMMessage[], tools?: any[]): Promise<LLMResponse> {
|
|
199
|
+
const baseUrl = this.config.baseUrl || 'https://api.anthropic.com/v1';
|
|
200
|
+
|
|
201
|
+
// Extract system message
|
|
202
|
+
const systemMessage = messages.find(m => m.role === 'system')?.content || '';
|
|
203
|
+
const conversationMessages = messages.filter(m => m.role !== 'system');
|
|
204
|
+
|
|
205
|
+
const payload: any = {
|
|
206
|
+
model: this.config.model,
|
|
207
|
+
messages: conversationMessages,
|
|
208
|
+
system: systemMessage,
|
|
209
|
+
max_tokens: this.config.maxTokens ?? 4096,
|
|
210
|
+
temperature: this.config.temperature ?? 0.7,
|
|
211
|
+
stream: this.config.streaming ?? true
|
|
212
|
+
};
|
|
213
|
+
|
|
214
|
+
if (tools && tools.length > 0) {
|
|
215
|
+
payload.tools = tools.map((t: any) => ({
|
|
216
|
+
name: t.function.name,
|
|
217
|
+
description: t.function.description,
|
|
218
|
+
input_schema: t.function.parameters
|
|
219
|
+
}));
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
const response = await fetch(`${baseUrl}/messages`, {
|
|
223
|
+
method: 'POST',
|
|
224
|
+
headers: {
|
|
225
|
+
'Content-Type': 'application/json',
|
|
226
|
+
'x-api-key': this.config.apiKey!,
|
|
227
|
+
'anthropic-version': '2023-06-01'
|
|
228
|
+
},
|
|
229
|
+
body: JSON.stringify(payload)
|
|
230
|
+
});
|
|
231
|
+
|
|
232
|
+
if (!response.ok) {
|
|
233
|
+
const error = await response.text();
|
|
234
|
+
throw new Error(`Anthropic API error: ${response.status} ${error}`);
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
const data: any = await response.json();
|
|
238
|
+
|
|
239
|
+
// Parse Anthropic response
|
|
240
|
+
const toolCalls: LLMToolCall[] = [];
|
|
241
|
+
let content = '';
|
|
242
|
+
|
|
243
|
+
for (const block of data.content || []) {
|
|
244
|
+
if (block.type === 'text') {
|
|
245
|
+
content += block.text;
|
|
246
|
+
} else if (block.type === 'tool_use') {
|
|
247
|
+
toolCalls.push({
|
|
248
|
+
id: block.id,
|
|
249
|
+
type: 'function',
|
|
250
|
+
function: {
|
|
251
|
+
name: block.name,
|
|
252
|
+
arguments: JSON.stringify(block.input)
|
|
253
|
+
}
|
|
254
|
+
});
|
|
255
|
+
}
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
return {
|
|
259
|
+
content: content || null,
|
|
260
|
+
toolCalls,
|
|
261
|
+
finishReason: data.stop_reason || null,
|
|
262
|
+
usage: data.usage
|
|
263
|
+
? {
|
|
264
|
+
promptTokens: data.usage.input_tokens,
|
|
265
|
+
completionTokens: data.usage.output_tokens,
|
|
266
|
+
totalTokens: data.usage.input_tokens + data.usage.output_tokens
|
|
267
|
+
}
|
|
268
|
+
: undefined
|
|
269
|
+
};
|
|
270
|
+
}
|
|
271
|
+
}
|
|
272
|
+
|
|
273
|
+
/**
|
|
274
|
+
* LiquidMetal AI Provider (for Raindrop AI binding)
|
|
275
|
+
*/
|
|
276
|
+
class LiquidmetalProvider implements LLMProvider {
|
|
277
|
+
private aiBinding: any;
|
|
278
|
+
|
|
279
|
+
constructor(
|
|
280
|
+
private config: LLMConfig,
|
|
281
|
+
private logger?: Logger
|
|
282
|
+
) {
|
|
283
|
+
// AI binding should be passed via baseUrl (hacky but works)
|
|
284
|
+
this.aiBinding = (config as any).aiBinding;
|
|
285
|
+
if (!this.aiBinding) {
|
|
286
|
+
throw new Error('Liquidmetal provider requires AI binding. Pass it via config.aiBinding');
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
async chat(messages: LLMMessage[], tools?: any[]): Promise<LLMResponse> {
|
|
291
|
+
const payload: any = {
|
|
292
|
+
messages,
|
|
293
|
+
temperature: this.config.temperature ?? 0.7,
|
|
294
|
+
max_tokens: this.config.maxTokens ?? 4096,
|
|
295
|
+
stream: this.config.streaming ?? true
|
|
296
|
+
};
|
|
297
|
+
|
|
298
|
+
if (tools && tools.length > 0) {
|
|
299
|
+
payload.tools = tools;
|
|
300
|
+
payload.tool_choice = 'auto';
|
|
301
|
+
}
|
|
302
|
+
|
|
303
|
+
let result: any;
|
|
304
|
+
try {
|
|
305
|
+
// Use Liquidmetal AI binding
|
|
306
|
+
result = await this.aiBinding.run(this.config.model, payload);
|
|
307
|
+
} catch (error) {
|
|
308
|
+
this.logger?.error?.('AI binding error', {
|
|
309
|
+
error: (error as Error).message,
|
|
310
|
+
stack: (error as Error).stack
|
|
311
|
+
});
|
|
312
|
+
throw new Error(`AI binding failed: ${(error as Error).message}`);
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
// Check if we got an error response
|
|
316
|
+
if (result?.error) {
|
|
317
|
+
const errorMsg =
|
|
318
|
+
typeof result.error === 'string' ? result.error : JSON.stringify(result.error);
|
|
319
|
+
this.logger?.error?.('AI returned error response', { error: errorMsg });
|
|
320
|
+
throw new Error(`AI service error: ${errorMsg}`);
|
|
321
|
+
}
|
|
322
|
+
|
|
323
|
+
// Check if streaming
|
|
324
|
+
if (result instanceof ReadableStream) {
|
|
325
|
+
return await parseOpenAIStream(result, this.logger, true);
|
|
326
|
+
} else {
|
|
327
|
+
return parseOpenAIResponse(result);
|
|
328
|
+
}
|
|
329
|
+
}
|
|
330
|
+
}
|