outlet-orm 7.0.0 → 9.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +130 -2
  2. package/package.json +1 -1
  3. package/src/AI/AIPromptEnhancer.js +170 -0
  4. package/src/AI/AIQueryBuilder.js +234 -0
  5. package/src/AI/AIQueryOptimizer.js +185 -0
  6. package/src/AI/AISeeder.js +181 -0
  7. package/src/AI/AiBridgeManager.js +287 -0
  8. package/src/AI/Builders/TextBuilder.js +170 -0
  9. package/src/AI/Contracts/AudioProviderContract.js +29 -0
  10. package/src/AI/Contracts/ChatProviderContract.js +38 -0
  11. package/src/AI/Contracts/EmbeddingsProviderContract.js +19 -0
  12. package/src/AI/Contracts/ImageProviderContract.js +19 -0
  13. package/src/AI/Contracts/ModelsProviderContract.js +26 -0
  14. package/src/AI/Contracts/ToolContract.js +25 -0
  15. package/src/AI/Facades/AiBridge.js +79 -0
  16. package/src/AI/MCPServer.js +113 -0
  17. package/src/AI/Providers/ClaudeProvider.js +64 -0
  18. package/src/AI/Providers/CustomOpenAIProvider.js +238 -0
  19. package/src/AI/Providers/GeminiProvider.js +68 -0
  20. package/src/AI/Providers/GrokProvider.js +46 -0
  21. package/src/AI/Providers/MistralProvider.js +21 -0
  22. package/src/AI/Providers/OllamaProvider.js +249 -0
  23. package/src/AI/Providers/OllamaTurboProvider.js +32 -0
  24. package/src/AI/Providers/OnnProvider.js +46 -0
  25. package/src/AI/Providers/OpenAIProvider.js +471 -0
  26. package/src/AI/Support/AudioNormalizer.js +37 -0
  27. package/src/AI/Support/ChatNormalizer.js +42 -0
  28. package/src/AI/Support/Document.js +77 -0
  29. package/src/AI/Support/DocumentAttachmentMapper.js +101 -0
  30. package/src/AI/Support/EmbeddingsNormalizer.js +30 -0
  31. package/src/AI/Support/Exceptions/ProviderError.js +22 -0
  32. package/src/AI/Support/FileSecurity.js +56 -0
  33. package/src/AI/Support/ImageNormalizer.js +62 -0
  34. package/src/AI/Support/JsonSchemaValidator.js +73 -0
  35. package/src/AI/Support/Message.js +40 -0
  36. package/src/AI/Support/StreamChunk.js +45 -0
  37. package/src/AI/Support/ToolChatRunner.js +160 -0
  38. package/src/AI/Support/ToolRegistry.js +62 -0
  39. package/src/AI/Tools/SystemInfoTool.js +25 -0
  40. package/src/index.js +67 -1
  41. package/types/index.d.ts +326 -0
@@ -0,0 +1,238 @@
1
+ 'use strict';
2
+
3
+ const JsonSchemaValidator = require('../Support/JsonSchemaValidator');
4
+
5
/**
 * CustomOpenAIProvider
 * Fully configurable OpenAI-compatible provider.
 * Works with Azure OpenAI, proxies, OpenRouter, self-hosted endpoints, etc.
 * Supports chat, streaming (SSE), embeddings, images, audio TTS/STT, and models.
 */
class CustomOpenAIProvider {
  /**
   * @param {string} apiKey - Secret sent in the auth header.
   * @param {string} baseUrl - Endpoint base URL; trailing slashes are stripped.
   * @param {Object} [paths={}] - Logical endpoint key ('chat', 'embeddings', 'image', 'tts', 'stt', 'models') mapped to a URL path.
   * @param {string} [authHeader='Authorization'] - Header name carrying the key (e.g. 'api-key' for Azure).
   * @param {string} [authPrefix='Bearer '] - Prefix prepended to the key.
   * @param {Object} [extraHeaders={}] - Additional headers merged into every request (may override defaults).
   */
  constructor(apiKey, baseUrl, paths = {}, authHeader = 'Authorization', authPrefix = 'Bearer ', extraHeaders = {}) {
    this.apiKey = apiKey;
    this.baseUrl = baseUrl.replace(/\/+$/, '');
    this.paths = paths;
    this.authHeader = authHeader;
    this.authPrefix = authPrefix;
    this.extraHeaders = extraHeaders;
  }

  /** @private Resolve a configured path key to a full URL (missing key → baseUrl itself). */
  _endpoint(key) {
    const p = this.paths[key] || '';
    return this.baseUrl + p;
  }

  /** @private Default JSON + auth headers; extraHeaders are merged last and win. */
  _headers() {
    return Object.assign({
      [this.authHeader]: this.authPrefix + this.apiKey,
      'Content-Type': 'application/json',
      'Accept': 'application/json',
    }, this.extraHeaders);
  }

  /** @private POST JSON. When stream=true, returns the raw Response for SSE reading; otherwise parsed JSON. */
  async _post(url, body, stream = false) {
    const opts = { method: 'POST', headers: this._headers(), body: JSON.stringify(body) };
    if (stream) return fetch(url, opts);
    const res = await fetch(url, opts);
    return res.json();
  }

  /** @private GET and parse JSON. */
  async _get(url) {
    const res = await fetch(url, { method: 'GET', headers: this._headers() });
    return res.json();
  }

  // ─── Chat ───
  /**
   * Non-streaming chat completion.
   * @param {Array<Object>} messages - OpenAI-style message objects.
   * @param {Object} [options] - model/deployment, sampling knobs, tools, response_format.
   * @returns {Promise<Object>} provider response, with a normalized top-level tool_calls array when the model made tool calls.
   */
  async chat(messages, options = {}) {
    const payload = this._buildChatPayload(messages, options);
    const res = await this._post(this._endpoint('chat'), payload);
    this._normalizeToolCallsOnResponse(res);
    return res || {};
  }

  /** @private Assemble the chat payload from messages plus all supported option groups. */
  _buildChatPayload(messages, options) {
    const payload = {
      model: options.model || options.deployment || 'gpt-like',
      messages,
    };
    this._applySamplingOptions(payload, options);
    this._applyResponseFormatOptions(payload, options);
    this._applyToolsOptions(payload, options);
    return payload;
  }

  /** @private Copy recognized sampling options verbatim when present (undefined check keeps 0/false valid). */
  _applySamplingOptions(payload, options) {
    for (const k of ['temperature', 'top_p', 'max_tokens', 'frequency_penalty', 'presence_penalty', 'stop', 'seed', 'user']) {
      if (options[k] !== undefined) payload[k] = options[k];
    }
  }

  /** @private Map response_format='json' onto OpenAI's json_schema response format. */
  _applyResponseFormatOptions(payload, options) {
    if (options.response_format === 'json') {
      const schema = (options.json_schema || {}).schema || { type: 'object' };
      payload.response_format = {
        type: 'json_schema',
        json_schema: options.json_schema || { name: 'auto_schema', schema },
      };
    }
  }

  /** @private Translate the simplified tool descriptors into OpenAI function-tool format. */
  _applyToolsOptions(payload, options) {
    if (!options.tools || !Array.isArray(options.tools)) return;
    payload.tools = options.tools.map(tool => ({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description || '',
        parameters: tool.parameters || tool.schema || { type: 'object', properties: {} },
      },
    }));
    if (options.tool_choice) payload.tool_choice = options.tool_choice;
  }

  /** @private Lift choices[0].message.tool_calls to res.tool_calls with JSON-decoded arguments (invalid JSON → {}). */
  _normalizeToolCallsOnResponse(res) {
    if (!res || !res.choices?.[0]?.message?.tool_calls) return;
    res.tool_calls = res.choices[0].message.tool_calls.map(tc => ({
      id: tc.id || null,
      name: (tc.function || {}).name || null,
      arguments: (() => { try { return JSON.parse((tc.function || {}).arguments || '{}'); } catch { return {}; } })(),
    }));
  }

  // ─── Streaming ───
  /**
   * Stream chat deltas as plain strings.
   * FIX: previously built a minimal payload that silently ignored sampling,
   * tools and response_format options; now reuses _buildChatPayload so
   * streaming honors exactly the same options as chat().
   */
  async *stream(messages, options = {}) {
    const payload = this._buildChatPayload(messages, options);
    payload.stream = true;
    const res = await this._post(this._endpoint('chat'), payload, true);
    yield* this._readSse(res.body);
  }

  /**
   * Stream as {type, data} events: one 'delta' per content chunk, then a final 'end'.
   * Same option-handling fix as stream().
   */
  async *streamEvents(messages, options = {}) {
    const payload = this._buildChatPayload(messages, options);
    payload.stream = true;
    const res = await this._post(this._endpoint('chat'), payload, true);
    for await (const delta of this._readSse(res.body)) {
      yield { type: 'delta', data: delta };
    }
    yield { type: 'end', data: null };
  }

  /**
   * @private Parse an OpenAI SSE body, yielding choices[0].delta.content strings.
   * Ignores comments/blank lines, stops at [DONE], skips unparseable frames,
   * and always releases the reader lock.
   */
  async *_readSse(body) {
    const reader = body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split(/\r?\n/);
        buffer = lines.pop() || ''; // keep the trailing partial line for the next chunk
        for (const line of lines) {
          const trimmed = line.trim();
          if (trimmed === '' || trimmed.startsWith(':') || !trimmed.startsWith('data:')) continue;
          const json = trimmed.slice(5).trim();
          if (json === '[DONE]') return;
          try {
            const decoded = JSON.parse(json);
            const delta = decoded?.choices?.[0]?.delta?.content || null;
            if (delta !== null) yield delta;
          } catch { /* skip malformed frame */ }
        }
      }
    } finally {
      reader.releaseLock();
    }
  }

  supportsStreaming() { return true; }

  // ─── Models ───
  /** List available models from the resolved models endpoint. */
  async listModels() {
    const url = this.baseUrl + this._modelsPath();
    return (await this._get(url)) || {};
  }

  /** Fetch a single model by id (id is URL-encoded). */
  async getModel(id) {
    const url = this.baseUrl + this._modelsPath() + '/' + encodeURIComponent(id);
    return (await this._get(url)) || {};
  }

  /** @private Configured models path, else '/models' when baseUrl already ends in a /vN segment, else '/v1/models'. */
  _modelsPath() {
    if (this.paths.models) return this.paths.models;
    if (/\/v\d+(?:$|\/)/.test(this.baseUrl)) return '/models';
    return '/v1/models';
  }

  // ─── Embeddings ───
  /**
   * @param {string[]|string} inputs
   * @returns {Promise<{embeddings: number[][], usage: Object, raw: Object}>}
   */
  async embeddings(inputs, options = {}) {
    const payload = { model: options.model || 'embedding-model', input: inputs };
    const res = await this._post(this._endpoint('embeddings'), payload);
    return {
      embeddings: (res.data || []).map(d => d.embedding || []),
      usage: res.usage || {},
      raw: res,
    };
  }

  // ─── Images ───
  /** Generate one image; returns the provider's data array untouched. */
  async generateImage(prompt, options = {}) {
    const payload = { prompt, model: options.model || 'image-model', n: 1 };
    const res = await this._post(this._endpoint('image'), payload);
    return { images: res.data || [], raw: res };
  }

  // ─── Audio ───
  /**
   * Text-to-speech. Returns base64 audio.
   * NOTE(review): mime is hard-coded to 'audio/mpeg' even when options.format is not 'mp3' — confirm intent.
   */
  async textToSpeech(text, options = {}) {
    const payload = {
      model: options.model || 'tts-model',
      input: text,
      voice: options.voice || 'alloy',
      format: options.format || 'mp3',
    };
    const res = await fetch(this._endpoint('tts'), {
      method: 'POST',
      headers: this._headers(),
      body: JSON.stringify(payload),
    });
    const arrayBuf = await res.arrayBuffer();
    return { audio: Buffer.from(arrayBuf).toString('base64'), mime: 'audio/mpeg' };
  }

  /**
   * Speech-to-text via multipart upload. Content-Type is left to FormData
   * (the JSON headers must not be sent here), so only auth + extraHeaders are set.
   */
  async speechToText(filePath, options = {}) {
    const fs = require('fs');
    const path = require('path');
    const formData = new FormData();
    const fileBuffer = fs.readFileSync(filePath);
    const blob = new Blob([fileBuffer], { type: 'application/octet-stream' });
    formData.append('file', blob, path.basename(filePath));
    formData.append('model', options.model || 'stt-model');
    formData.append('response_format', 'json');

    const headers = { [this.authHeader]: this.authPrefix + this.apiKey };
    Object.assign(headers, this.extraHeaders);
    const res = await fetch(this._endpoint('stt'), { method: 'POST', headers, body: formData });
    const data = await res.json();
    return { text: data.text || '', raw: data };
  }
}
237
+
238
+ module.exports = CustomOpenAIProvider;
@@ -0,0 +1,68 @@
1
+ 'use strict';
2
+
3
/**
 * GeminiProvider
 * Google Generative Language API. Supports chat, simulated streaming, and embeddings.
 */
class GeminiProvider {
  /**
   * @param {string} apiKey - Google API key (appended as a ?key= query parameter).
   * @param {string} [chatEndpoint='https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent']
   */
  constructor(apiKey, chatEndpoint = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent') {
    this.apiKey = apiKey;
    this.chatEndpoint = chatEndpoint;
    this.embedEndpoint = 'https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:embedContent';
  }

  /** @private NOTE: the key travels in the URL and may be captured by server logs. */
  _keyQuery() { return `?key=${this.apiKey}`; }

  /**
   * Send a chat request.
   * FIX: system-role messages were previously filtered out and silently lost;
   * they are now forwarded through the API's systemInstruction field.
   * Sampling options (temperature, top_p, max_tokens) — previously ignored —
   * are mapped onto generationConfig.
   * @param {Array<{role:string,content:string}>} messages
   * @param {Object} [options]
   * @returns {Promise<Object>} raw Gemini response ({} when null)
   */
  async chat(messages, options = {}) {
    const userTexts = messages
      .filter(m => (m.role || '') !== 'system')
      .map(m => m.content);
    const systemTexts = messages
      .filter(m => (m.role || '') === 'system')
      .map(m => m.content);

    const payload = {
      contents: [{ parts: [{ text: userTexts.join('\n') }] }],
    };
    if (systemTexts.length > 0) {
      payload.systemInstruction = { parts: [{ text: systemTexts.join('\n') }] };
    }

    const generationConfig = {};
    if (options.temperature !== undefined) generationConfig.temperature = options.temperature;
    if (options.top_p !== undefined) generationConfig.topP = options.top_p;
    if (options.max_tokens !== undefined) generationConfig.maxOutputTokens = options.max_tokens;
    if (Object.keys(generationConfig).length > 0) payload.generationConfig = generationConfig;

    const res = await fetch(this.chatEndpoint + this._keyQuery(), {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
    });
    const data = await res.json();
    return data || {};
  }

  /**
   * Simulated streaming: performs one full chat call, then re-emits the
   * first candidate's text in fixed 80-character slices.
   */
  async *stream(messages, options = {}) {
    const full = await this.chat(messages, options);
    const text = full?.candidates?.[0]?.content?.parts?.[0]?.text || '';
    for (let i = 0; i < text.length; i += 80) {
      yield text.slice(i, i + 80);
    }
  }

  supportsStreaming() { return true; } // simulated

  /**
   * Embed each input string with text-embedding-004 (one request per input).
   * @param {string[]} inputs
   * @returns {Promise<{embeddings: number[][]}>}
   */
  async embeddings(inputs, options = {}) {
    const vectors = [];
    for (const input of inputs) {
      const payload = {
        model: 'text-embedding-004',
        content: { parts: [{ text: input }] },
      };
      const res = await fetch(this.embedEndpoint + this._keyQuery(), {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload),
      });
      const data = await res.json();
      vectors.push((data?.embedding || {}).values || []);
    }
    return { embeddings: vectors };
  }
}
67
+
68
+ module.exports = GeminiProvider;
@@ -0,0 +1,46 @@
1
+ 'use strict';
2
+
3
/**
 * GrokProvider
 * Prompt-based chat with simulated streaming (80-char chunks).
 */
class GrokProvider {
  /**
   * @param {string} apiKey - Bearer token for the Grok API.
   * @param {string} [endpoint='https://api.grok.com/v1/chat']
   */
  constructor(apiKey, endpoint = 'https://api.grok.com/v1/chat') {
    this.apiKey = apiKey;
    this.endpoint = endpoint;
  }

  /**
   * Flatten every message's content into one newline-joined prompt and POST it.
   * @param {Array<{role:string,content:string}>} messages
   * @param {Object} [options] - options.model overrides the default model.
   * @returns {Promise<Object>} provider JSON ({} when null)
   */
  async chat(messages, options = {}) {
    const prompt = messages.map((message) => message.content).join('\n');
    const response = await fetch(this.endpoint, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        prompt,
        model: options.model || 'grok-default',
      }),
    });
    return (await response.json()) || {};
  }

  /**
   * Simulated streaming: one full chat() round-trip, then the `response`
   * field re-emitted in fixed 80-character slices.
   */
  async *stream(messages, options = {}) {
    const sliceSize = 80;
    const reply = await this.chat(messages, options);
    const text = reply.response || '';
    let offset = 0;
    while (offset < text.length) {
      yield text.slice(offset, offset + sliceSize);
      offset += sliceSize;
    }
  }

  supportsStreaming() { return true; } // simulated
}
45
+
46
+ module.exports = GrokProvider;
@@ -0,0 +1,21 @@
1
+ 'use strict';
2
+
3
+ const OpenAIProvider = require('./OpenAIProvider');
4
+
5
/**
 * MistralProvider
 * Targets Mistral AI API (https://api.mistral.ai) — OpenAI-compatible endpoints.
 */
class MistralProvider extends OpenAIProvider {
  /**
   * @param {string} apiKey - Mistral API key, forwarded to the OpenAI-compatible base class.
   * @param {string} [chatEndpoint='https://api.mistral.ai/v1/chat/completions']
   */
  constructor(apiKey, chatEndpoint = 'https://api.mistral.ai/v1/chat/completions') {
    super(apiKey, chatEndpoint);
    // Pin the models/embeddings endpoints to Mistral's v1 API surface.
    const apiBase = 'https://api.mistral.ai/v1';
    this.modelsEndpoint = `${apiBase}/models`;
    this.embeddingsEndpoint = `${apiBase}/embeddings`;
  }
}
20
+
21
+ module.exports = MistralProvider;
@@ -0,0 +1,249 @@
1
+ 'use strict';
2
+
3
+ const fs = require('fs');
4
+ const path = require('path');
5
+ const DocumentAttachmentMapper = require('../Support/DocumentAttachmentMapper');
6
+ const FileSecurity = require('../Support/FileSecurity');
7
+ const ProviderError = require('../Support/Exceptions/ProviderError');
8
+
9
/**
 * OllamaProvider
 * Implements chat, NDJSON streaming, embeddings, basic image generation,
 * structured JSON output (format=json), and multimodal vision input.
 */
class OllamaProvider {
  /**
   * @param {string} [endpoint='http://localhost:11434'] - Base URL; trailing slashes are stripped.
   */
  constructor(endpoint = 'http://localhost:11434') {
    this.base = endpoint.replace(/\/+$/, '');
    this.chatEndpoint = `${this.base}/api/chat`;
    this.embeddingsEndpoint = `${this.base}/api/embeddings`;
    this.generateEndpoint = `${this.base}/api/generate`;
  }

  /** @protected — override in subclass to add auth */
  _decorateHeaders(headers) {
    return headers;
  }

  /** @private POST JSON; when stream=true, return the raw Response for NDJSON reading. */
  async _post(url, body, stream = false) {
    const headers = this._decorateHeaders({ 'Content-Type': 'application/json' });
    const fetchOpts = { method: 'POST', headers, body: JSON.stringify(body) };
    if (stream) return fetch(url, fetchOpts);
    const res = await fetch(url, fetchOpts);
    return res.json();
  }

  /**
   * @private
   * Strip attachments off the messages and map them to Ollama inputs.
   * FIX: works on shallow copies, so the caller's message objects are no
   * longer mutated (the original deleted `attachments` and rewrote `content`
   * in place).
   * @returns {{messages: Array<Object>, files: Array, images: Array}}
   */
  _absorbAttachments(rawMessages) {
    const messages = [];
    let files = [];
    let images = [];
    for (const original of rawMessages) {
      const message = { ...original };
      const atts = message.attachments || [];
      if (atts.length > 0) {
        const mapped = DocumentAttachmentMapper.toOllamaOptions(atts);
        if (mapped.inlineTexts.length > 0) {
          message.content = ((message.content || '') + '\n' + mapped.inlineTexts.join('\n\n')).trim();
        }
        files = files.concat(mapped.files);
        images = images.concat(mapped.image_files);
        delete message.attachments;
      }
      messages.push(message);
    }
    return { messages, files, images };
  }

  /**
   * @private
   * Shared payload builder for chat() and stream() — previously this logic was
   * duplicated in both methods.
   * FIX: sampling options are now checked with `!== undefined`; the old
   * truthiness tests silently dropped valid zero values (e.g. temperature 0).
   * Only the non-streaming path injects the JSON system prompt (matching the
   * original behavior of chat() vs stream()).
   */
  _buildChatPayload(rawMessages, options, stream) {
    const absorbed = this._absorbAttachments(rawMessages);
    const payload = {
      model: options.model || 'gemma3:4b',
      messages: absorbed.messages,
      stream,
    };

    for (const key of ['temperature', 'top_p', 'top_k', 'repeat_penalty']) {
      if (options[key] !== undefined) {
        payload.options = payload.options || {};
        payload.options[key] = options[key];
      }
    }
    if (options.stop !== undefined) {
      payload.options = payload.options || {};
      payload.options.stop = Array.isArray(options.stop) ? options.stop : [options.stop];
    }

    if (options.response_format === 'json') {
      payload.format = 'json';
      if (!stream && !payload.messages.some(m => m.role === 'system')) {
        payload.messages.unshift({
          role: 'system',
          content: 'You must respond only with valid JSON without additional text.',
        });
      }
    }

    const optFiles = Array.isArray(options.files) ? this._prepareFiles(options.files) : [];
    const optImages = Array.isArray(options.image_files) ? this._prepareImageFiles(options.image_files) : [];
    const files = optFiles.concat(absorbed.files);
    const images = optImages.concat(absorbed.images);
    if (files.length > 0) payload.files = files;
    if (images.length > 0) {
      // Attach images to the last user message, or append a fresh one.
      const lastIndex = payload.messages.length - 1;
      if (lastIndex >= 0 && payload.messages[lastIndex].role === 'user') {
        payload.messages[lastIndex] = { ...payload.messages[lastIndex], images };
      } else {
        payload.messages.push({ role: 'user', content: '', images });
      }
    }
    return payload;
  }

  // ─── Chat ───
  /**
   * Non-streaming chat completion against /api/chat.
   * @param {Array<Object>} messages - may carry an `attachments` array per message.
   * @param {Object} [options] - model, sampling knobs, stop, response_format, files, image_files.
   * @returns {Promise<Object>} raw Ollama response ({} when null)
   */
  async chat(messages, options = {}) {
    const payload = this._buildChatPayload(messages, options, false);
    const data = await this._post(this.chatEndpoint, payload);
    return data || {};
  }

  // ─── Streaming (NDJSON) ───
  /**
   * Stream assistant content chunks. Ollama streams newline-delimited JSON;
   * each parsed line contributes message.content when present. Invalid JSON
   * lines are skipped; the reader lock is always released.
   */
  async *stream(messages, options = {}) {
    const payload = this._buildChatPayload(messages, options, true);
    const res = await this._post(this.chatEndpoint, payload, true);
    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split(/\r?\n/);
        buffer = lines.pop() || ''; // keep the trailing partial line for the next chunk
        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed) continue;
          try {
            const decoded = JSON.parse(trimmed);
            if (decoded?.message?.content) yield decoded.message.content;
          } catch { /* skip invalid JSON lines */ }
        }
      }
    } finally {
      reader.releaseLock();
    }
  }

  supportsStreaming() { return true; }

  // ─── Embeddings ───
  /**
   * Embed each input string (one /api/embeddings request per input).
   * @param {string[]} inputs
   * @returns {Promise<{embeddings: number[][], raw: null}>}
   */
  async embeddings(inputs, options = {}) {
    const model = options.model || 'nomic-embed-text';
    const vectors = [];
    for (const text of inputs) {
      const res = await this._post(this.embeddingsEndpoint, { model, prompt: text });
      vectors.push(res.embedding || []);
    }
    return { embeddings: vectors, raw: null };
  }

  // ─── File helpers ───
  /** @private Read existing, security-validated files as base64 payload entries; invalid entries are skipped. */
  _prepareFiles(files) {
    const out = [];
    const security = FileSecurity.fromConfig();
    for (const file of files) {
      if (typeof file !== 'string' || !fs.existsSync(file)) continue;
      if (!security.validateFile(file, false)) continue;
      out.push({
        name: path.basename(file),
        type: 'application/octet-stream',
        content: fs.readFileSync(file).toString('base64'),
      });
    }
    return out;
  }

  /** @private Read known image extensions (after security validation) as bare base64 strings. */
  _prepareImageFiles(files) {
    const images = [];
    const security = FileSecurity.fromConfig();
    for (const file of files) {
      if (typeof file !== 'string' || !fs.existsSync(file)) continue;
      const ext = path.extname(file).toLowerCase();
      const imageExts = ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp', '.svg'];
      if (imageExts.includes(ext) && security.validateFile(file, true)) {
        images.push(fs.readFileSync(file).toString('base64'));
      }
    }
    return images;
  }

  // ─── Images ───
  /**
   * Generate an image via /api/generate. Accepts the three response shapes
   * observed from image-capable models: `images` array, single `image`, or a
   * data-URL in `response`.
   * @returns {Promise<{images: Array<{b64: string}>, meta: Object, raw: Object}>}
   */
  async generateImage(prompt, options = {}) {
    const payload = {
      model: options.model || 'stable-diffusion',
      prompt,
      stream: false,
    };
    if (options.negative_prompt) payload.negative = options.negative_prompt;
    const res = await this._post(this.generateEndpoint, payload);

    const images = [];
    if (Array.isArray(res.images)) {
      for (const img of res.images) images.push({ b64: img });
    } else if (res.image) {
      images.push({ b64: res.image });
    } else if (typeof res.response === 'string' && res.response.startsWith('data:image')) {
      const match = res.response.match(/base64,(.*)$/);
      if (match) images.push({ b64: match[1] });
    }
    return { images, meta: { model: payload.model }, raw: res };
  }

  // ─── Audio (not supported) ───
  /** @throws {ProviderError} always — Ollama has no TTS endpoint. */
  async textToSpeech(_text, _options = {}) {
    throw ProviderError.unsupported('ollama', 'tts');
  }

  /** @throws {ProviderError} always — Ollama has no STT endpoint. */
  async speechToText(_filePath, _options = {}) {
    throw ProviderError.unsupported('ollama', 'stt');
  }
}
248
+
249
+ module.exports = OllamaProvider;
@@ -0,0 +1,32 @@
1
+ 'use strict';
2
+
3
+ const OllamaProvider = require('./OllamaProvider');
4
+
5
/**
 * OllamaTurboProvider
 * Targets Ollama Turbo (SaaS) at https://ollama.com with API key auth.
 */
class OllamaTurboProvider extends OllamaProvider {
  /**
   * @param {string|null} apiKey - Raw key, or a value already carrying a "Bearer " prefix.
   * @param {string} [endpoint='https://ollama.com']
   */
  constructor(apiKey, endpoint = 'https://ollama.com') {
    super(endpoint);
    this.apiKey = apiKey || null;
  }

  /**
   * @override
   * Adds an Authorization header when a key is configured. Keys that already
   * start with "Bearer " (case-insensitive) pass through unchanged; otherwise
   * the prefix is prepended.
   */
  _decorateHeaders(headers) {
    const decorated = super._decorateHeaders(headers);
    if (!this.apiKey) return decorated;
    const alreadyPrefixed = this.apiKey.toLowerCase().startsWith('bearer ');
    decorated['Authorization'] = alreadyPrefixed ? this.apiKey : `Bearer ${this.apiKey}`;
    return decorated;
  }
}
31
+
32
+ module.exports = OllamaTurboProvider;