millas 0.2.12-beta-2 → 0.2.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/package.json +3 -2
  2. package/src/admin/Admin.js +122 -38
  3. package/src/admin/ViewContext.js +12 -3
  4. package/src/admin/resources/AdminResource.js +10 -0
  5. package/src/admin/static/admin.css +95 -14
  6. package/src/admin/views/layouts/base.njk +23 -34
  7. package/src/admin/views/pages/detail.njk +16 -5
  8. package/src/admin/views/pages/error.njk +65 -0
  9. package/src/admin/views/pages/list.njk +127 -2
  10. package/src/admin/views/partials/form-scripts.njk +7 -3
  11. package/src/admin/views/partials/form-widget.njk +2 -1
  12. package/src/admin/views/partials/icons.njk +64 -0
  13. package/src/ai/AIManager.js +954 -0
  14. package/src/ai/AITokenBudget.js +250 -0
  15. package/src/ai/PromptGuard.js +216 -0
  16. package/src/ai/agents.js +218 -0
  17. package/src/ai/conversation.js +213 -0
  18. package/src/ai/drivers.js +734 -0
  19. package/src/ai/files.js +249 -0
  20. package/src/ai/media.js +303 -0
  21. package/src/ai/pricing.js +152 -0
  22. package/src/ai/provider_tools.js +114 -0
  23. package/src/ai/types.js +356 -0
  24. package/src/commands/createsuperuser.js +17 -4
  25. package/src/commands/serve.js +2 -4
  26. package/src/container/AppInitializer.js +39 -15
  27. package/src/container/Application.js +31 -1
  28. package/src/core/foundation.js +1 -1
  29. package/src/errors/HttpError.js +32 -16
  30. package/src/facades/AI.js +411 -0
  31. package/src/facades/Hash.js +67 -0
  32. package/src/facades/Process.js +144 -0
  33. package/src/hashing/Hash.js +262 -0
  34. package/src/http/HtmlEscape.js +162 -0
  35. package/src/http/MillasRequest.js +63 -7
  36. package/src/http/MillasResponse.js +70 -4
  37. package/src/http/ResponseDispatcher.js +21 -27
  38. package/src/http/SafeFilePath.js +195 -0
  39. package/src/http/SafeRedirect.js +62 -0
  40. package/src/http/SecurityBootstrap.js +70 -0
  41. package/src/http/helpers.js +40 -125
  42. package/src/http/index.js +10 -1
  43. package/src/http/middleware/CsrfMiddleware.js +258 -0
  44. package/src/http/middleware/RateLimiter.js +314 -0
  45. package/src/http/middleware/SecurityHeaders.js +281 -0
  46. package/src/i18n/Translator.js +10 -2
  47. package/src/logger/LogRedactor.js +247 -0
  48. package/src/logger/Logger.js +1 -1
  49. package/src/logger/formatters/JsonFormatter.js +11 -4
  50. package/src/logger/formatters/PrettyFormatter.js +3 -1
  51. package/src/logger/formatters/SimpleFormatter.js +14 -3
  52. package/src/middleware/ThrottleMiddleware.js +27 -4
  53. package/src/process/Process.js +333 -0
  54. package/src/router/MiddlewareRegistry.js +27 -2
  55. package/src/scaffold/templates.js +3 -0
  56. package/src/validation/Validator.js +348 -607
  57. package/src/admin.zip +0 -0
@@ -0,0 +1,734 @@
1
+ 'use strict';
2
+
3
+ const {
4
+ AIResponse, AIMessage, AIStreamEvent,
5
+ AIError, AIRateLimitError, AIProviderError,
6
+ } = require('./types');
7
+
8
+ // ─────────────────────────────────────────────────────────────────────────────
9
+ // Base driver
10
+ // ─────────────────────────────────────────────────────────────────────────────
11
+
12
// Shared HTTP plumbing for all provider drivers. Subclasses implement the
// capability methods (complete/stream/embed/...) and reuse these helpers.
//
// NOTE: this class previously declared `_postForm` and `_postRaw` twice each;
// in a JS class body the later declaration silently shadows the earlier one,
// leaving dead code with divergent 429 handling. The duplicates are removed
// and all non-2xx handling is unified in `_raiseHttpError`.
class BaseDriver {
  /**
   * @param {object} config - Provider configuration (apiKey, url, model, ...).
   */
  constructor(config) { this.config = config; }

  // Default JSON headers, merged with any per-request extras.
  _headers(extra = {}) {
    return { 'Content-Type': 'application/json', ...extra };
  }

  /**
   * Convert a non-2xx fetch Response into a typed error and throw it.
   * 429 becomes AIRateLimitError (with Retry-After seconds when present);
   * everything else becomes AIProviderError carrying status and body text.
   * @param {Response} res
   */
  async _raiseHttpError(res) {
    if (res.status === 429) {
      const ra = res.headers.get('retry-after');
      throw new AIRateLimitError(this.name, ra ? Number(ra) : null);
    }
    const text = await res.text().catch(() => '');
    throw new AIProviderError(this.name, `HTTP ${res.status}: ${text}`, res.status);
  }

  // POST a JSON body and parse the JSON response.
  async _post(url, body, headers = {}) {
    const res = await fetch(url, {
      method: 'POST',
      headers: { ...this._headers(), ...headers },
      body: JSON.stringify(body),
    });
    if (!res.ok) await this._raiseHttpError(res);
    return res.json();
  }

  // POST a JSON body and return the raw Response (binary payloads, e.g. TTS audio).
  async _postRaw(url, body, headers = {}) {
    const res = await fetch(url, {
      method: 'POST',
      headers: { ...this._headers(), ...headers },
      body: JSON.stringify(body),
    });
    if (!res.ok) await this._raiseHttpError(res);
    return res;
  }

  // POST multipart form data (uploads, transcription). Content-Type is left to
  // fetch so the multipart boundary is set correctly.
  async _postForm(url, form, headers = {}) {
    const res = await fetch(url, { method: 'POST', headers, body: form });
    if (!res.ok) await this._raiseHttpError(res);
    return res.json();
  }

  // DELETE a resource; a 404 is treated as already-deleted and yields {}.
  async _delete(url, headers = {}) {
    const res = await fetch(url, { method: 'DELETE', headers: { ...this._headers(), ...headers } });
    if (!res.ok && res.status !== 404) await this._raiseHttpError(res);
    return res.status !== 404 ? res.json().catch(() => ({})) : {};
  }

  // GET a JSON resource.
  async _get(url, headers = {}) {
    const res = await fetch(url, { method: 'GET', headers: { ...this._headers(), ...headers } });
    if (!res.ok) await this._raiseHttpError(res);
    return res.json();
  }

  /**
   * POST a JSON body and yield the response line by line (SSE / NDJSON streams).
   * Partial lines are buffered across chunks; a trailing unterminated line is
   * flushed after the stream ends.
   */
  async *_stream(url, body, headers = {}) {
    const res = await fetch(url, {
      method: 'POST',
      headers: { ...this._headers(), ...headers },
      body: JSON.stringify(body),
    });
    if (!res.ok) await this._raiseHttpError(res);
    const reader = res.body.getReader();
    const dec = new TextDecoder();
    let buf = '';
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buf += dec.decode(value, { stream: true });
      const lines = buf.split('\n');
      buf = lines.pop(); // keep the partial last line for the next chunk
      for (const line of lines) yield line;
    }
    if (buf) yield buf;
  }

  // Capability stubs — subclasses override the ones they support.
  async complete(_req) { throw new Error(`${this.constructor.name} must implement complete()`); }
  async *stream(_req) { throw new Error(`${this.constructor.name} must implement stream()`); }
  async embed(_texts) { throw new Error(`${this.constructor.name} does not support embeddings`); }
  async image(_req) { throw new Error(`${this.constructor.name} does not support image generation`); }
  async tts(_req) { throw new Error(`${this.constructor.name} does not support text-to-speech`); }
  async transcribe(_req) { throw new Error(`${this.constructor.name} does not support transcription`); }
  async rerank(_req) { throw new Error(`${this.constructor.name} does not support reranking`); }
}
118
+
119
+ // ─────────────────────────────────────────────────────────────────────────────
120
+ // Anthropic
121
+ // ─────────────────────────────────────────────────────────────────────────────
122
+
123
// Driver for Anthropic's Messages API (Claude models), including extended
// thinking, tool use and SSE streaming.
class AnthropicDriver extends BaseDriver {
  get name() { return 'anthropic'; }

  // Resolve the API key from config or the environment; fail fast if absent.
  _apiKey() {
    const key = this.config.apiKey || process.env.ANTHROPIC_API_KEY;
    if (!key) throw new AIError('Anthropic API key not set. Use ANTHROPIC_API_KEY.', 'anthropic');
    return key;
  }

  _baseUrl() { return this.config.url || 'https://api.anthropic.com'; }

  // Anthropic uses x-api-key (not Bearer auth) plus a pinned API version; the
  // beta header opts in to interleaved thinking.
  _authHeaders() {
    return {
      'x-api-key': this._apiKey(),
      'anthropic-version': '2023-06-01',
      'anthropic-beta': 'interleaved-thinking-2025-05-14',
    };
  }

  /**
   * Translate the framework-level request into a Messages API body.
   * System messages are lifted out of `messages` into the top-level `system`
   * field, matching Anthropic's API shape. providerOptions.anthropic is merged
   * last so callers can override any field.
   */
  _buildBody(request) {
    const body = {
      model: request.model || this.config.model || 'claude-sonnet-4-20250514',
      max_tokens: request.maxTokens || this.config.maxTokens || 4096, // max_tokens is required by the API
      messages: request.messages.filter(m => m.role !== 'system'),
    };
    const system = request.messages.find(m => m.role === 'system');
    if (system) body.system = system.content;
    if (request.temperature !== undefined) body.temperature = request.temperature;
    if (request.topP !== undefined) body.top_p = request.topP;
    if (request.stopSequences?.length) body.stop_sequences = request.stopSequences;
    if (request.tools?.length) {
      body.tools = request.tools.map(t => t.toProviderSchema());
      // 'auto' maps to { type: 'auto' }; any other value forces that named tool.
      if (request.toolChoice) body.tool_choice = request.toolChoice === 'auto' ? { type: 'auto' } : { type: 'tool', name: request.toolChoice };
    }
    if (request.thinking) {
      body.thinking = { type: 'enabled', budget_tokens: request.thinkingBudget || 8000 };
    }
    if (request.providerOptions?.anthropic) Object.assign(body, request.providerOptions.anthropic);
    return body;
  }

  /**
   * Non-streaming completion. Picks the text / thinking / tool_use blocks out
   * of the response content array and assembles an AIResponse.
   */
  async complete(request) {
    const body = this._buildBody(request);
    const data = await this._post(`${this._baseUrl()}/v1/messages`, body, this._authHeaders());
    const textBlock = data.content?.find(b => b.type === 'text');
    const thinkingBlock = data.content?.find(b => b.type === 'thinking');
    const toolBlocks = data.content?.filter(b => b.type === 'tool_use') || [];
    return new AIResponse({
      text: textBlock?.text || '', thinking: thinkingBlock?.thinking || null,
      model: data.model, provider: 'anthropic',
      inputTokens: data.usage?.input_tokens || 0, outputTokens: data.usage?.output_tokens || 0,
      toolCalls: toolBlocks.map(b => ({ id: b.id, name: b.name, arguments: b.input })),
      // Normalize Anthropic's 'end_turn' to the framework's 'stop'; pass
      // other stop reasons through untouched.
      finishReason: data.stop_reason === 'end_turn' ? 'stop' : data.stop_reason, raw: data,
    });
  }

  /**
   * SSE streaming. Yields delta/thinking/toolCall events as they arrive and a
   * final `complete` event carrying the fully assembled AIResponse.
   * Tool-call arguments stream as partial JSON fragments (input_json_delta)
   * and are parsed once their content block closes.
   */
  async *stream(request) {
    const body = { ...this._buildBody(request), stream: true };
    let text = '', thinking = '', inputTokens = 0, outputTokens = 0;
    let model = body.model, toolCalls = [], currentTool = null, currentToolInput = '';
    for await (const line of this._stream(`${this._baseUrl()}/v1/messages`, body, this._authHeaders())) {
      if (!line.startsWith('data: ')) continue; // skip SSE event-name/comment lines
      const raw = line.slice(6).trim();
      if (raw === '[DONE]') break;
      let evt; try { evt = JSON.parse(raw); } catch { continue; } // tolerate malformed chunks
      if (evt.type === 'content_block_start' && evt.content_block?.type === 'tool_use') { currentTool = { id: evt.content_block.id, name: evt.content_block.name }; currentToolInput = ''; }
      if (evt.type === 'content_block_delta') {
        if (evt.delta?.type === 'text_delta') { text += evt.delta.text; yield AIStreamEvent.delta(evt.delta.text); }
        if (evt.delta?.type === 'thinking_delta') { thinking += evt.delta.thinking; yield AIStreamEvent.thinking(evt.delta.thinking); }
        if (evt.delta?.type === 'input_json_delta') { currentToolInput += evt.delta.partial_json; }
      }
      if (evt.type === 'content_block_stop' && currentTool) {
        let args = {}; try { args = JSON.parse(currentToolInput); } catch {} // best-effort: keep {} on unparseable JSON
        const tc = { ...currentTool, arguments: args }; toolCalls.push(tc); yield AIStreamEvent.toolCall(tc); currentTool = null; currentToolInput = '';
      }
      // message_delta carries running output usage; message_start carries input usage and model.
      if (evt.type === 'message_delta') outputTokens = evt.usage?.output_tokens || outputTokens;
      if (evt.type === 'message_start') { inputTokens = evt.message?.usage?.input_tokens || 0; model = evt.message?.model || model; }
    }
    yield AIStreamEvent.complete(new AIResponse({ text, thinking, model, provider: 'anthropic', inputTokens, outputTokens, toolCalls, finishReason: 'stop' }));
  }

  // Anthropic offers no embeddings endpoint.
  async embed() { throw new AIProviderError('anthropic', 'Anthropic does not provide an embeddings API.'); }
}
206
+
207
+ // ─────────────────────────────────────────────────────────────────────────────
208
+ // OpenAI
209
+ // ─────────────────────────────────────────────────────────────────────────────
210
+
211
// Driver for the OpenAI API. Also serves as the base class for the
// OpenAI-compatible providers below (Groq, Mistral, xAI, DeepSeek).
// Covers chat completions, streaming, embeddings, image generation, TTS,
// transcription, file storage and vector stores.
class OpenAIDriver extends BaseDriver {
  get name() { return 'openai'; }

  // Resolve the API key from config or the environment; fail fast if absent.
  _apiKey() {
    const key = this.config.apiKey || process.env.OPENAI_API_KEY;
    if (!key) throw new AIError('OpenAI API key not set. Use OPENAI_API_KEY.', 'openai');
    return key;
  }

  // Bearer auth plus the optional organization header.
  _authHeaders() {
    const h = { Authorization: `Bearer ${this._apiKey()}` };
    if (this.config.organization) h['OpenAI-Organization'] = this.config.organization;
    return h;
  }

  _baseUrl() { return this.config.url || this.config.baseUrl || 'https://api.openai.com/v1'; }

  /**
   * Translate the framework-level request into a chat.completions body.
   * Optional fields are only set when present on the request;
   * providerOptions.openai is merged last so callers can override anything.
   */
  _buildBody(request) {
    const body = {
      model: request.model || this.config.model || 'gpt-4o',
      messages: request.messages,
    };
    if (request.maxTokens) body.max_tokens = request.maxTokens;
    if (request.temperature !== undefined) body.temperature = request.temperature;
    if (request.topP !== undefined) body.top_p = request.topP;
    if (request.stopSequences?.length) body.stop = request.stopSequences;
    if (request.schema) {
      // Structured output via strict JSON-schema response format.
      body.response_format = { type: 'json_schema', json_schema: { name: 'response', strict: true, schema: request.schema.toJSONSchema() } };
    }
    if (request.tools?.length) {
      body.tools = request.tools.map(t => t.toOpenAISchema());
      if (request.toolChoice) body.tool_choice = request.toolChoice;
    }
    if (request.providerOptions?.openai) Object.assign(body, request.providerOptions.openai);
    return body;
  }

  /** Non-streaming chat completion: extracts text, usage and tool calls. */
  async complete(request) {
    const body = this._buildBody(request);
    const data = await this._post(`${this._baseUrl()}/chat/completions`, body, this._authHeaders());
    const choice = data.choices?.[0]; const message = choice?.message; const tcs = message?.tool_calls || [];
    return new AIResponse({
      text: message?.content || '', model: data.model, provider: 'openai',
      inputTokens: data.usage?.prompt_tokens || 0, outputTokens: data.usage?.completion_tokens || 0,
      // Tool-call arguments arrive as a JSON string; fall back to {} on parse failure.
      toolCalls: tcs.map(tc => ({ id: tc.id, name: tc.function.name, arguments: (() => { try { return JSON.parse(tc.function.arguments); } catch { return {}; } })() })),
      finishReason: choice?.finish_reason === 'stop' ? 'stop' : choice?.finish_reason === 'length' ? 'length' : choice?.finish_reason, raw: data,
    });
  }

  /**
   * SSE streaming. stream_options.include_usage asks OpenAI to append a final
   * usage chunk. Tool calls stream as indexed fragments accumulated in tcAccum
   * and emitted when finish_reason === 'tool_calls'.
   */
  async *stream(request) {
    const body = { ...this._buildBody(request), stream: true, stream_options: { include_usage: true } };
    let text = '', inputTokens = 0, outputTokens = 0, model = body.model, toolCalls = [], tcAccum = {};
    for await (const line of this._stream(`${this._baseUrl()}/chat/completions`, body, this._authHeaders())) {
      if (!line.startsWith('data: ')) continue; // skip non-data SSE lines
      const raw = line.slice(6).trim(); if (raw === '[DONE]') break;
      let evt; try { evt = JSON.parse(raw); } catch { continue; } // tolerate malformed chunks
      model = evt.model || model;
      if (evt.usage) { inputTokens = evt.usage.prompt_tokens || inputTokens; outputTokens = evt.usage.completion_tokens || outputTokens; }
      const delta = evt.choices?.[0]?.delta; if (!delta) continue;
      if (delta.content) { text += delta.content; yield AIStreamEvent.delta(delta.content); }
      if (delta.tool_calls) {
        // Fragments carry an index; id/name arrive once, arguments accumulate.
        for (const tc of delta.tool_calls) {
          const idx = tc.index; if (!tcAccum[idx]) tcAccum[idx] = { id: '', name: '', args: '' };
          if (tc.id) tcAccum[idx].id = tc.id; if (tc.function?.name) tcAccum[idx].name = tc.function.name; if (tc.function?.arguments) tcAccum[idx].args += tc.function.arguments;
        }
      }
      if (evt.choices?.[0]?.finish_reason === 'tool_calls') {
        toolCalls = Object.values(tcAccum).map(t => { let args = {}; try { args = JSON.parse(t.args); } catch {} const tc = { id: t.id, name: t.name, arguments: args }; return tc; });
        for (const tc of toolCalls) yield AIStreamEvent.toolCall(tc);
      }
    }
    yield AIStreamEvent.complete(new AIResponse({ text, model, provider: 'openai', inputTokens, outputTokens, toolCalls, finishReason: 'stop' }));
  }

  /**
   * Embeddings. Accepts a single string or an array; always returns an array
   * of embedding vectors in input order.
   */
  async embed(texts, model = null) {
    const body = { model: model || this.config.embeddingModel || 'text-embedding-3-small', input: Array.isArray(texts) ? texts : [texts] };
    const data = await this._post(`${this._baseUrl()}/embeddings`, body, this._authHeaders());
    return data.data.map(d => d.embedding);
  }

  /**
   * Image generation. Returns { images: Buffer[], urls: string[], provider, model };
   * exactly one of images/urls is populated depending on the model family.
   */
  async image(request) {
    const model = request.model || this.config.imageModel || 'dall-e-3';
    // gpt-image-1 uses 'url' format; dall-e-3 supports b64_json
    const useUrl = model.startsWith('gpt-image');
    const body = {
      model,
      prompt: request.prompt,
      n: request.n || 1,
      size: request.size || '1024x1024',
      quality: request.quality || 'standard',
      ...(useUrl ? {} : { response_format: 'b64_json' }),
    };
    const data = await this._post(`${this._baseUrl()}/images/generations`, body, this._authHeaders());
    if (useUrl || data.data[0]?.url) {
      return { images: [], urls: data.data.map(d => d.url), provider: 'openai', model };
    }
    return { images: data.data.map(d => Buffer.from(d.b64_json, 'base64')), urls: [], provider: 'openai', model };
  }

  /** Text-to-speech. Returns the synthesized audio as an MP3 Buffer. */
  async tts(request) {
    const body = {
      model: request.model || 'tts-1',
      input: request.text,
      voice: request.voice || 'alloy',
      ...(request.instructions ? { instructions: request.instructions } : {}),
      response_format: 'mp3',
    };
    const res = await this._postRaw(`${this._baseUrl()}/audio/speech`, body, this._authHeaders());
    const buf = Buffer.from(await res.arrayBuffer());
    return { audio: buf, format: 'mp3', provider: 'openai' };
  }

  /** Speech-to-text via multipart upload to /audio/transcriptions (Whisper). */
  async transcribe(request) {
    const form = new FormData();
    form.append('model', request.model || 'whisper-1');
    form.append('file', new Blob([request.audio], { type: request.mimeType || 'audio/mpeg' }), request.filename || 'audio.mp3');
    if (request.language) form.append('language', request.language);
    if (request.prompt) form.append('prompt', request.prompt);
    const data = await this._postForm(`${this._baseUrl()}/audio/transcriptions`, form, this._authHeaders());
    return { text: data.text, provider: 'openai' };
  }

  // ── File storage ────────────────────────────────────────────────────────────

  /** Upload a file buffer to OpenAI's file store. created_at is epoch seconds. */
  async uploadFile({ buf, filename, mimeType, purpose = 'assistants' }) {
    const form = new FormData();
    form.append('purpose', purpose);
    form.append('file', new Blob([buf], { type: mimeType }), filename);
    const data = await this._postForm(`${this._baseUrl()}/files`, form, this._authHeaders());
    return { id: data.id, filename: data.filename, size: data.bytes, createdAt: new Date(data.created_at * 1000) };
  }

  /** Fetch metadata for an uploaded file. */
  async getFile(fileId) {
    const data = await this._get(`${this._baseUrl()}/files/${fileId}`, this._authHeaders());
    return { id: data.id, filename: data.filename, size: data.bytes, createdAt: new Date(data.created_at * 1000) };
  }

  /** Delete an uploaded file (404 is tolerated by _delete). */
  async deleteFile(fileId) {
    return this._delete(`${this._baseUrl()}/files/${fileId}`, this._authHeaders());
  }

  // ── Vector stores ────────────────────────────────────────────────────────────

  /**
   * Create a vector store. expiresIn is in seconds and is converted to whole
   * days (rounded up) anchored at last activity.
   */
  async createStore({ name, description = null, expiresIn = null }) {
    const body = { name };
    if (description) body.metadata = { description };
    if (expiresIn) body.expires_after = { anchor: 'last_active_at', days: Math.ceil(expiresIn / 86400) };
    const data = await this._post(`${this._baseUrl()}/vector_stores`, body, this._authHeaders());
    return { id: data.id, name: data.name, fileCounts: data.file_counts, ready: data.status === 'completed' };
  }

  /** Fetch a vector store; `ready` is true once indexing has completed. */
  async getStore(id) {
    const data = await this._get(`${this._baseUrl()}/vector_stores/${id}`, this._authHeaders());
    return { id: data.id, name: data.name, fileCounts: data.file_counts, ready: data.status === 'completed' };
  }

  /** Delete a vector store. */
  async deleteStore(id) {
    return this._delete(`${this._baseUrl()}/vector_stores/${id}`, this._authHeaders());
  }

  /** Attach an uploaded file to a vector store, with optional attribute metadata. */
  async addFileToStore(storeId, fileId, metadata = {}) {
    const body = { file_id: fileId };
    if (Object.keys(metadata).length) body.attributes = metadata;
    const data = await this._post(`${this._baseUrl()}/vector_stores/${storeId}/files`, body, this._authHeaders());
    return { id: data.id, fileId: data.file_id, status: data.status };
  }

  /** Detach a file from a vector store. */
  async removeFileFromStore(storeId, fileId) {
    return this._delete(`${this._baseUrl()}/vector_stores/${storeId}/files/${fileId}`, this._authHeaders());
  }
}
382
+
383
+ // ─────────────────────────────────────────────────────────────────────────────
384
+ // Gemini
385
+ // ─────────────────────────────────────────────────────────────────────────────
386
+
387
// Driver for Google's Gemini Developer API (generativelanguage.googleapis.com).
// Auth is via a `key` query parameter rather than a header.
class GeminiDriver extends BaseDriver {
  get name() { return 'gemini'; }

  // Resolve the API key from config or the environment; fail fast if absent.
  _apiKey() {
    const key = this.config.apiKey || process.env.GEMINI_API_KEY;
    if (!key) throw new AIError('Gemini API key not set. Use GEMINI_API_KEY.', 'gemini');
    return key;
  }

  /**
   * Build a model-method URL, e.g. .../models/<model>:generateContent?key=...
   * Streaming requests add alt=sse so responses come back as SSE.
   */
  _url(model, method, stream = false) {
    const base = this.config.url || 'https://generativelanguage.googleapis.com/v1beta';
    const action = stream ? `${method}?alt=sse&key=${this._apiKey()}` : `${method}?key=${this._apiKey()}`;
    return `${base}/models/${model}:${action}`;
  }

  /**
   * Translate the framework request into a Gemini body.
   * Gemini uses role 'model' for assistant turns and puts the system message
   * in `systemInstruction` instead of the contents array. Non-string message
   * content is JSON-stringified into the text part.
   */
  _buildBody(request) {
    const contents = request.messages.filter(m => m.role !== 'system').map(m => ({
      role: m.role === 'assistant' ? 'model' : 'user',
      parts: [{ text: typeof m.content === 'string' ? m.content : JSON.stringify(m.content) }],
    }));
    const body = { contents };
    const system = request.messages.find(m => m.role === 'system');
    if (system) body.systemInstruction = { parts: [{ text: system.content }] };
    const gc = {};
    if (request.maxTokens) gc.maxOutputTokens = request.maxTokens;
    if (request.temperature !== undefined) gc.temperature = request.temperature;
    if (request.topP !== undefined) gc.topP = request.topP;
    if (Object.keys(gc).length) body.generationConfig = gc;
    if (request.tools?.length) body.tools = [{ functionDeclarations: request.tools.map(t => ({ name: t.name, description: t.description, parameters: t.schema })) }];
    return body;
  }

  /**
   * Non-streaming completion: joins text parts and collects functionCall parts
   * into tool calls. Gemini does not return tool-call ids, so synthetic
   * timestamp-based ids are generated.
   */
  async complete(request) {
    const model = request.model || this.config.model || 'gemini-2.0-flash';
    const data = await this._post(this._url(model, 'generateContent'), this._buildBody(request));
    const candidate = data.candidates?.[0]; const parts = candidate?.content?.parts || [];
    const text = parts.filter(p => p.text).map(p => p.text).join('');
    const fnCalls = parts.filter(p => p.functionCall);
    return new AIResponse({
      text, model, provider: 'gemini',
      inputTokens: data.usageMetadata?.promptTokenCount || 0, outputTokens: data.usageMetadata?.candidatesTokenCount || 0,
      toolCalls: fnCalls.map(p => ({ id: `gemini_${Date.now()}`, name: p.functionCall.name, arguments: p.functionCall.args || {} })),
      // Normalize Gemini's 'STOP' to the framework's 'stop'.
      finishReason: candidate?.finishReason === 'STOP' ? 'stop' : candidate?.finishReason, raw: data,
    });
  }

  /**
   * SSE streaming via streamGenerateContent. Yields delta/toolCall events and
   * a final `complete` event with the assembled AIResponse.
   */
  async *stream(request) {
    const model = request.model || this.config.model || 'gemini-2.0-flash';
    let text = '', inputTokens = 0, outputTokens = 0, toolCalls = [];
    for await (const line of this._stream(this._url(model, 'streamGenerateContent', true), this._buildBody(request))) {
      if (!line.startsWith('data: ')) continue; // skip non-data SSE lines
      let evt; try { evt = JSON.parse(line.slice(6)); } catch { continue; } // tolerate malformed chunks
      for (const part of evt.candidates?.[0]?.content?.parts || []) {
        if (part.text) { text += part.text; yield AIStreamEvent.delta(part.text); }
        if (part.functionCall) { const tc = { id: `gemini_${Date.now()}`, name: part.functionCall.name, arguments: part.functionCall.args || {} }; toolCalls.push(tc); yield AIStreamEvent.toolCall(tc); }
      }
      // usageMetadata arrives on (at least) the final chunk; keep the latest counts.
      if (evt.usageMetadata) { inputTokens = evt.usageMetadata.promptTokenCount || inputTokens; outputTokens = evt.usageMetadata.candidatesTokenCount || outputTokens; }
    }
    yield AIStreamEvent.complete(new AIResponse({ text, model, provider: 'gemini', inputTokens, outputTokens, toolCalls, finishReason: 'stop' }));
  }

  /**
   * Embeddings via embedContent. Gemini's endpoint takes one text per call, so
   * array inputs fan out into parallel requests.
   */
  async embed(texts, model = null) {
    const m = model || this.config.embeddingModel || 'text-embedding-004'; const items = Array.isArray(texts) ? texts : [texts];
    return Promise.all(items.map(async text => {
      const data = await this._post(this._url(m, 'embedContent'), { model: m, content: { parts: [{ text }] } });
      return data.embedding.values;
    }));
  }

  /**
   * Image generation through the standard generateContent endpoint with
   * responseModalities including IMAGE (works with a regular Gemini API key
   * from Google AI Studio). Inline base64 data becomes Buffers; fileData URIs
   * become URLs. Throws when the response contains neither.
   * NOTE: bypasses _url() and builds the URL against the public host directly,
   * so config.url is not honored here.
   */
  async image(request) {
    const model = request.model || this.config.imageModel || 'gemini-2.5-flash-image';

    const body = {
      contents: [{ parts: [{ text: request.prompt }] }],
      generationConfig: {
        responseModalities: ['TEXT', 'IMAGE'],
        ...(request.n ? { candidateCount: request.n } : {}),
      },
    };

    const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${this._apiKey()}`;
    const data = await this._post(url, body);

    const images = [];
    const urls = [];

    for (const candidate of data.candidates || []) {
      for (const part of candidate.content?.parts || []) {
        if (part.inlineData?.data) {
          images.push(Buffer.from(part.inlineData.data, 'base64'));
        }
        if (part.fileData?.fileUri) {
          urls.push(part.fileData.fileUri);
        }
      }
    }

    if (!images.length && !urls.length) {
      // Surface the finishReason (e.g. a safety block) or a response snippet.
      const errMsg = data.candidates?.[0]?.finishReason || JSON.stringify(data).slice(0, 200);
      throw new AIProviderError('gemini', `Image generation returned no images. Reason: ${errMsg}`);
    }

    return { images, urls, provider: 'gemini', model };
  }
}
494
+
495
+ // ─────────────────────────────────────────────────────────────────────────────
496
+ // Ollama (local models)
497
+ // ─────────────────────────────────────────────────────────────────────────────
498
+
499
// Driver for a local Ollama server. No authentication; chat responses stream
// as NDJSON (one JSON object per line) rather than SSE.
class OllamaDriver extends BaseDriver {
  get name() { return 'ollama'; }

  // The REST API lives under /api on the configured host.
  _baseUrl() { return (this.config.baseUrl || this.config.url || 'http://localhost:11434') + '/api'; }

  /** Single-shot chat completion via POST /api/chat with stream: false. */
  async complete(request) {
    const options = {};
    if (request.temperature !== undefined) options.temperature = request.temperature;
    if (request.topP !== undefined) options.top_p = request.topP;
    if (request.maxTokens) options.num_predict = request.maxTokens;
    const payload = {
      model: request.model || this.config.model || 'llama3.2',
      messages: request.messages,
      stream: false,
      options,
    };
    const data = await this._post(`${this._baseUrl()}/chat`, payload);
    return new AIResponse({
      text: data.message?.content || '',
      model: data.model,
      provider: 'ollama',
      inputTokens: data.prompt_eval_count || 0,
      outputTokens: data.eval_count || 0,
      finishReason: data.done ? 'stop' : 'unknown',
      raw: data,
    });
  }

  /** Streaming chat: parse each NDJSON line, emit deltas, capture final usage. */
  async *stream(request) {
    const payload = {
      model: request.model || this.config.model || 'llama3.2',
      messages: request.messages,
      stream: true,
    };
    let fullText = '';
    let promptTokens = 0;
    let completionTokens = 0;
    for await (const line of this._stream(`${this._baseUrl()}/chat`, payload)) {
      if (!line.trim()) continue;
      let chunk;
      try { chunk = JSON.parse(line); } catch { continue; }
      const piece = chunk.message?.content;
      if (piece) {
        fullText += piece;
        yield AIStreamEvent.delta(piece);
      }
      // The terminal chunk (done: true) carries the token counts.
      if (chunk.done) {
        promptTokens = chunk.prompt_eval_count || 0;
        completionTokens = chunk.eval_count || 0;
      }
    }
    yield AIStreamEvent.complete(new AIResponse({
      text: fullText, model: payload.model, provider: 'ollama',
      inputTokens: promptTokens, outputTokens: completionTokens, finishReason: 'stop',
    }));
  }

  /** Embeddings via POST /api/embed, one request per input text. */
  async embed(texts, model = null) {
    const embedModel = model || this.config.embeddingModel || 'nomic-embed-text';
    const inputs = Array.isArray(texts) ? texts : [texts];
    return Promise.all(inputs.map(async (text) => {
      const data = await this._post(`${this._baseUrl()}/embed`, { model: embedModel, input: text });
      // Newer Ollama returns { embeddings: [[...]] }; older returns { embedding: [...] }.
      return data.embeddings?.[0] || data.embedding || [];
    }));
  }
}
526
+
527
+ // ─────────────────────────────────────────────────────────────────────────────
528
+ // Groq (OpenAI-compatible, ultra-fast inference)
529
+ // ─────────────────────────────────────────────────────────────────────────────
530
+
531
// Groq: fully OpenAI-compatible endpoints with its own key, base URL and
// default model; image generation and TTS are not offered.
class GroqDriver extends OpenAIDriver {
  get name() { return 'groq'; }

  // Resolve the API key from config or the environment; fail fast if absent.
  _apiKey() {
    const key = this.config.apiKey || process.env.GROQ_API_KEY;
    if (!key) throw new AIError('Groq API key not set. Use GROQ_API_KEY.', 'groq');
    return key;
  }

  _baseUrl() { return this.config.url || 'https://api.groq.com/openai/v1'; }

  // Same body as OpenAI, with Groq's default model substituted.
  _buildBody(request) {
    const body = super._buildBody(request);
    body.model = request.model || this.config.model || 'llama-3.3-70b-versatile';
    return body;
  }

  async image() { throw new AIProviderError('groq', 'Groq does not support image generation.'); }

  async tts() { throw new AIProviderError('groq', 'Groq does not support TTS.'); }
}
539
+
540
+ // ─────────────────────────────────────────────────────────────────────────────
541
+ // Mistral
542
+ // ─────────────────────────────────────────────────────────────────────────────
543
+
544
// Mistral: OpenAI-compatible chat API plus its own embeddings and
// speech-to-text endpoints; no image generation or TTS.
class MistralDriver extends OpenAIDriver {
  get name() { return 'mistral'; }

  // Resolve the API key from config or the environment; fail fast if absent.
  _apiKey() {
    const key = this.config.apiKey || process.env.MISTRAL_API_KEY;
    if (!key) throw new AIError('Mistral API key not set. Use MISTRAL_API_KEY.', 'mistral');
    return key;
  }

  _baseUrl() { return this.config.url || 'https://api.mistral.ai/v1'; }

  // Same body as OpenAI, with Mistral's default model substituted.
  _buildBody(request) {
    const body = super._buildBody(request);
    body.model = request.model || this.config.model || 'mistral-large-latest';
    return body;
  }

  /** Embeddings via Mistral's OpenAI-style /embeddings endpoint. */
  async embed(texts, model = null) {
    const payload = {
      model: model || this.config.embeddingModel || 'mistral-embed',
      input: Array.isArray(texts) ? texts : [texts],
      encoding_format: 'float',
    };
    const data = await this._post(`${this._baseUrl()}/embeddings`, payload, this._authHeaders());
    return data.data.map((item) => item.embedding);
  }

  /** Speech-to-text via multipart upload to /audio/transcriptions. */
  async transcribe(request) {
    const form = new FormData();
    form.append('model', request.model || 'mistral-stt');
    form.append('file', new Blob([request.audio], { type: request.mimeType || 'audio/mpeg' }), request.filename || 'audio.mp3');
    const data = await this._postForm(`${this._baseUrl()}/audio/transcriptions`, form, this._authHeaders());
    return { text: data.text, provider: 'mistral' };
  }

  async image() { throw new AIProviderError('mistral', 'Mistral does not support image generation.'); }

  async tts() { throw new AIProviderError('mistral', 'Mistral does not support TTS.'); }
}
567
+
568
+ // ─────────────────────────────────────────────────────────────────────────────
569
+ // xAI (Grok)
570
+ // ─────────────────────────────────────────────────────────────────────────────
571
+
572
+ class XAIDriver extends OpenAIDriver {
573
+ get name() { return 'xai'; }
574
+ _apiKey() { const key = this.config.apiKey || process.env.XAI_API_KEY; if (!key) throw new AIError('xAI API key not set. Use XAI_API_KEY.', 'xai'); return key; }
575
+ _baseUrl() { return this.config.url || 'https://api.x.ai/v1'; }
576
+ _buildBody(request) { const body = super._buildBody(request); body.model = request.model || this.config.model || 'grok-3'; return body; }
577
+
578
+ async image(request) {
579
+ const body = { model: request.model || this.config.imageModel || 'aurora', prompt: request.prompt, n: request.n || 1, response_format: 'b64_json' };
580
+ const data = await this._post(`${this._baseUrl()}/images/generations`, body, this._authHeaders());
581
+ return { images: data.data.map(d => Buffer.from(d.b64_json, 'base64')), provider: 'xai', model: body.model };
582
+ }
583
+ }
584
+
585
+ // ─────────────────────────────────────────────────────────────────────────────
586
+ // DeepSeek
587
+ // ─────────────────────────────────────────────────────────────────────────────
588
+
589
+ class DeepSeekDriver extends OpenAIDriver {
590
+ get name() { return 'deepseek'; }
591
+ _apiKey() { const key = this.config.apiKey || process.env.DEEPSEEK_API_KEY; if (!key) throw new AIError('DeepSeek API key not set. Use DEEPSEEK_API_KEY.', 'deepseek'); return key; }
592
+ _baseUrl() { return this.config.url || 'https://api.deepseek.com/v1'; }
593
+ _buildBody(request) { const body = super._buildBody(request); body.model = request.model || this.config.model || 'deepseek-chat'; return body; }
594
+ async image() { throw new AIProviderError('deepseek', 'DeepSeek does not support image generation.'); }
595
+ async tts() { throw new AIProviderError('deepseek', 'DeepSeek does not support TTS.'); }
596
+ async embed() { throw new AIProviderError('deepseek', 'DeepSeek does not support embeddings.'); }
597
+ }
598
+
599
+ // ─────────────────────────────────────────────────────────────────────────────
600
+ // Azure OpenAI
601
+ // ─────────────────────────────────────────────────────────────────────────────
602
+
603
+ class AzureDriver extends BaseDriver {
604
+ get name() { return 'azure'; }
605
+
606
+ _apiKey() { const key = this.config.apiKey || process.env.AZURE_OPENAI_API_KEY; if (!key) throw new AIError('Azure OpenAI API key not set. Use AZURE_OPENAI_API_KEY.', 'azure'); return key; }
607
+ _baseUrl() {
608
+ const endpoint = this.config.endpoint || process.env.AZURE_OPENAI_ENDPOINT;
609
+ const deployment = this.config.deployment || process.env.AZURE_OPENAI_DEPLOYMENT;
610
+ const version = this.config.apiVersion || '2024-02-01';
611
+ if (!endpoint || !deployment) throw new AIError('Azure requires config.endpoint and config.deployment.', 'azure');
612
+ return `${endpoint}/openai/deployments/${deployment}`;
613
+ }
614
+ _authHeaders() { return { 'api-key': this._apiKey() }; }
615
+
616
+ async complete(request) {
617
+ const body = {
618
+ messages: request.messages,
619
+ max_tokens: request.maxTokens,
620
+ temperature: request.temperature,
621
+ top_p: request.topP,
622
+ };
623
+ const version = this.config.apiVersion || '2024-02-01';
624
+ const data = await this._post(`${this._baseUrl()}/chat/completions?api-version=${version}`, body, this._authHeaders());
625
+ const choice = data.choices?.[0];
626
+ return new AIResponse({ text: choice?.message?.content || '', model: this.config.deployment, provider: 'azure', inputTokens: data.usage?.prompt_tokens || 0, outputTokens: data.usage?.completion_tokens || 0, finishReason: choice?.finish_reason, raw: data });
627
+ }
628
+
629
+ async *stream(request) {
630
+ const version = this.config.apiVersion || '2024-02-01';
631
+ const body = { messages: request.messages, stream: true, max_tokens: request.maxTokens, temperature: request.temperature };
632
+ let text = '';
633
+ for await (const line of this._stream(`${this._baseUrl()}/chat/completions?api-version=${version}`, body, this._authHeaders())) {
634
+ if (!line.startsWith('data: ')) continue; const raw = line.slice(6).trim(); if (raw === '[DONE]') break;
635
+ let evt; try { evt = JSON.parse(raw); } catch { continue; }
636
+ const content = evt.choices?.[0]?.delta?.content; if (content) { text += content; yield AIStreamEvent.delta(content); }
637
+ }
638
+ yield AIStreamEvent.complete(new AIResponse({ text, model: this.config.deployment, provider: 'azure', finishReason: 'stop' }));
639
+ }
640
+
641
+ async embed(texts, model = null) {
642
+ const version = this.config.apiVersion || '2024-02-01';
643
+ const body = { input: Array.isArray(texts) ? texts : [texts] };
644
+ const deploymentEmbed = model || this.config.embeddingDeployment || this.config.deployment;
645
+ const endpoint = this.config.endpoint || process.env.AZURE_OPENAI_ENDPOINT;
646
+ const data = await this._post(`${endpoint}/openai/deployments/${deploymentEmbed}/embeddings?api-version=${version}`, body, this._authHeaders());
647
+ return data.data.map(d => d.embedding);
648
+ }
649
+ }
650
+
651
+ // ─────────────────────────────────────────────────────────────────────────────
652
+ // Cohere (embeddings + reranking)
653
+ // ─────────────────────────────────────────────────────────────────────────────
654
+
655
+ class CohereDriver extends BaseDriver {
656
+ get name() { return 'cohere'; }
657
+ _apiKey() { const key = this.config.apiKey || process.env.COHERE_API_KEY; if (!key) throw new AIError('Cohere API key not set. Use COHERE_API_KEY.', 'cohere'); return key; }
658
+ _baseUrl() { return this.config.url || 'https://api.cohere.com/v2'; }
659
+ _authHeaders() { return { Authorization: `Bearer ${this._apiKey()}` }; }
660
+
661
+ async complete(request) {
662
+ const body = {
663
+ model: request.model || this.config.model || 'command-r-plus-08-2024',
664
+ messages: request.messages.map(m => ({ role: m.role === 'assistant' ? 'assistant' : m.role === 'system' ? 'system' : 'user', content: m.content })),
665
+ max_tokens: request.maxTokens, temperature: request.temperature,
666
+ };
667
+ const data = await this._post(`${this._baseUrl()}/chat`, body, this._authHeaders());
668
+ return new AIResponse({ text: data.message?.content?.[0]?.text || '', model: data.model, provider: 'cohere', inputTokens: data.usage?.tokens?.input_tokens || 0, outputTokens: data.usage?.tokens?.output_tokens || 0, finishReason: 'stop', raw: data });
669
+ }
670
+
671
+ async *stream(request) {
672
+ const body = { model: request.model || this.config.model || 'command-r-plus-08-2024', messages: request.messages.map(m => ({ role: m.role, content: m.content })), stream: true };
673
+ let text = '';
674
+ for await (const line of this._stream(`${this._baseUrl()}/chat`, body, this._authHeaders())) {
675
+ if (!line.startsWith('data: ')) continue; let evt; try { evt = JSON.parse(line.slice(6)); } catch { continue; }
676
+ if (evt.type === 'content-delta') { const t = evt.delta?.message?.content?.text || ''; text += t; if (t) yield AIStreamEvent.delta(t); }
677
+ }
678
+ yield AIStreamEvent.complete(new AIResponse({ text, provider: 'cohere', finishReason: 'stop' }));
679
+ }
680
+
681
+ async embed(texts, model = null) {
682
+ const body = { model: model || this.config.embeddingModel || 'embed-v4.0', texts: Array.isArray(texts) ? texts : [texts], input_type: 'search_document', embedding_types: ['float'] };
683
+ const data = await this._post(`${this._baseUrl()}/embed`, body, this._authHeaders());
684
+ return data.embeddings?.float || [];
685
+ }
686
+
687
+ async rerank(request) {
688
+ const body = { model: request.model || this.config.rerankModel || 'rerank-v3.5', query: request.query, documents: request.documents, top_n: request.limit || request.documents.length };
689
+ const data = await this._post(`${this._baseUrl()}/rerank`, body, this._authHeaders());
690
+ return data.results.map(r => ({ index: r.index, score: r.relevance_score, document: request.documents[r.index] }));
691
+ }
692
+ }
693
+
694
+ // ─────────────────────────────────────────────────────────────────────────────
695
+ // ElevenLabs (TTS + STT)
696
+ // ─────────────────────────────────────────────────────────────────────────────
697
+
698
+ class ElevenLabsDriver extends BaseDriver {
699
+ get name() { return 'elevenlabs'; }
700
+ _apiKey() { const key = this.config.apiKey || process.env.ELEVENLABS_API_KEY; if (!key) throw new AIError('ElevenLabs API key not set. Use ELEVENLABS_API_KEY.', 'elevenlabs'); return key; }
701
+ _baseUrl() { return this.config.url || 'https://api.elevenlabs.io/v1'; }
702
+ _authHeaders() { return { 'xi-api-key': this._apiKey() }; }
703
+
704
+ async tts(request) {
705
+ const voiceId = request.voice || this.config.defaultVoice || '21m00Tcm4TlvDq8ikWAM'; // Rachel
706
+ const body = {
707
+ text: request.text,
708
+ model_id: request.model || this.config.model || 'eleven_multilingual_v2',
709
+ voice_settings: { stability: 0.5, similarity_boost: 0.75 },
710
+ };
711
+ const res = await this._postRaw(`${this._baseUrl()}/text-to-speech/${voiceId}`, body, this._authHeaders());
712
+ const buf = Buffer.from(await res.arrayBuffer());
713
+ return { audio: buf, format: 'mp3', provider: 'elevenlabs' };
714
+ }
715
+
716
+ async transcribe(request) {
717
+ const form = new FormData();
718
+ form.append('model_id', request.model || 'scribe_v1');
719
+ form.append('file', new Blob([request.audio], { type: request.mimeType || 'audio/mpeg' }), request.filename || 'audio.mp3');
720
+ if (request.diarize) form.append('diarize', 'true');
721
+ const data = await this._postForm(`${this._baseUrl()}/speech-to-text`, form, this._authHeaders());
722
+ return { text: data.text, words: data.words || [], speakers: data.speakers || [], provider: 'elevenlabs' };
723
+ }
724
+
725
+ async complete() { throw new AIProviderError('elevenlabs', 'ElevenLabs is an audio provider — use tts() and transcribe() instead.'); }
726
+ async embed() { throw new AIProviderError('elevenlabs', 'ElevenLabs does not support embeddings.'); }
727
+ }
728
+
729
+ module.exports = {
730
+ BaseDriver,
731
+ AnthropicDriver, OpenAIDriver, GeminiDriver, OllamaDriver,
732
+ GroqDriver, MistralDriver, XAIDriver, DeepSeekDriver, AzureDriver,
733
+ CohereDriver, ElevenLabsDriver,
734
+ };