multis 0.1.0

@@ -0,0 +1,259 @@
+ const Database = require('better-sqlite3');
+ const path = require('path');
+ const fs = require('fs');
+ const { MULTIS_DIR } = require('../config');
+
+ const DB_PATH = path.join(MULTIS_DIR, 'documents.db');
+
+ /**
+  * DocumentStore - SQLite storage for document chunks with FTS5 search.
+  * Ported from aurora_core.store.sqlite (Python).
+  * Includes activation columns for future ACT-R (POC5).
+  */
+ class DocumentStore {
+   constructor(dbPath = DB_PATH) {
+     // Ensure directory exists
+     const dir = path.dirname(dbPath);
+     if (!fs.existsSync(dir)) {
+       fs.mkdirSync(dir, { recursive: true });
+     }
+
+     this.db = new Database(dbPath);
+     this.db.pragma('journal_mode = WAL');
+     this.db.pragma('foreign_keys = ON');
+     // INSERT OR REPLACE resolves conflicts via DELETE + INSERT; recursive
+     // triggers make those implicit deletes fire the FTS sync triggers below.
+     this.db.pragma('recursive_triggers = ON');
+     this._initSchema();
+   }
+
+   _initSchema() {
+     this.db.exec(`
+       CREATE TABLE IF NOT EXISTS chunks (
+         chunk_id TEXT PRIMARY KEY,
+         file_path TEXT NOT NULL,
+         page_start INTEGER DEFAULT 0,
+         page_end INTEGER DEFAULT 0,
+         element_type TEXT DEFAULT 'paragraph',
+         name TEXT DEFAULT '',
+         content TEXT DEFAULT '',
+         parent_chunk_id TEXT,
+         section_path TEXT DEFAULT '[]',
+         section_level INTEGER DEFAULT 0,
+         document_type TEXT DEFAULT 'unknown',
+         metadata TEXT DEFAULT '{}',
+         -- Activation columns for ACT-R (POC5)
+         activation REAL DEFAULT 0.0,
+         access_count INTEGER DEFAULT 0,
+         last_accessed TEXT,
+         -- Timestamps
+         created_at TEXT NOT NULL,
+         updated_at TEXT NOT NULL
+       );
+
+       CREATE INDEX IF NOT EXISTS idx_chunks_file ON chunks(file_path);
+       CREATE INDEX IF NOT EXISTS idx_chunks_type ON chunks(element_type);
+       CREATE INDEX IF NOT EXISTS idx_chunks_doc_type ON chunks(document_type);
+       CREATE INDEX IF NOT EXISTS idx_chunks_activation ON chunks(activation DESC);
+
+       -- FTS5 virtual table for full-text search (BM25)
+       CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(
+         chunk_id UNINDEXED,
+         name,
+         content,
+         section_path,
+         content=chunks,
+         content_rowid=rowid,
+         tokenize='porter unicode61'
+       );
+
+       -- Triggers to keep FTS in sync
+       CREATE TRIGGER IF NOT EXISTS chunks_ai AFTER INSERT ON chunks BEGIN
+         INSERT INTO chunks_fts(rowid, chunk_id, name, content, section_path)
+         VALUES (new.rowid, new.chunk_id, new.name, new.content, new.section_path);
+       END;
+
+       CREATE TRIGGER IF NOT EXISTS chunks_ad AFTER DELETE ON chunks BEGIN
+         INSERT INTO chunks_fts(chunks_fts, rowid, chunk_id, name, content, section_path)
+         VALUES ('delete', old.rowid, old.chunk_id, old.name, old.content, old.section_path);
+       END;
+
+       CREATE TRIGGER IF NOT EXISTS chunks_au AFTER UPDATE ON chunks BEGIN
+         INSERT INTO chunks_fts(chunks_fts, rowid, chunk_id, name, content, section_path)
+         VALUES ('delete', old.rowid, old.chunk_id, old.name, old.content, old.section_path);
+         INSERT INTO chunks_fts(rowid, chunk_id, name, content, section_path)
+         VALUES (new.rowid, new.chunk_id, new.name, new.content, new.section_path);
+       END;
+
+       -- Access history for ACT-R activation tracking (POC5)
+       CREATE TABLE IF NOT EXISTS access_history (
+         id INTEGER PRIMARY KEY AUTOINCREMENT,
+         chunk_id TEXT NOT NULL,
+         accessed_at TEXT NOT NULL,
+         query TEXT,
+         FOREIGN KEY (chunk_id) REFERENCES chunks(chunk_id) ON DELETE CASCADE
+       );
+
+       CREATE INDEX IF NOT EXISTS idx_access_chunk ON access_history(chunk_id);
+     `);
+   }
+
+   /**
+    * Save a DocChunk to the store (insert or replace)
+    */
+   saveChunk(chunk) {
+     const stmt = this.db.prepare(`
+       INSERT OR REPLACE INTO chunks
+         (chunk_id, file_path, page_start, page_end, element_type, name, content,
+          parent_chunk_id, section_path, section_level, document_type, metadata,
+          created_at, updated_at)
+       VALUES
+         (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+     `);
+
+     stmt.run(
+       chunk.chunkId,
+       chunk.filePath,
+       chunk.pageStart,
+       chunk.pageEnd,
+       chunk.elementType,
+       chunk.name,
+       chunk.content,
+       chunk.parentChunkId,
+       JSON.stringify(chunk.sectionPath),
+       chunk.sectionLevel,
+       chunk.documentType,
+       JSON.stringify(chunk.metadata),
+       chunk.createdAt,
+       chunk.updatedAt
+     );
+   }
+
+   /**
+    * Save multiple chunks in a transaction
+    */
+   saveChunks(chunks) {
+     const tx = this.db.transaction((items) => {
+       for (const chunk of items) {
+         this.saveChunk(chunk);
+       }
+     });
+     tx(chunks);
+   }
+
+   /**
+    * BM25 full-text search using FTS5
+    * @param {string} query - Search query
+    * @param {number} limit - Max results
+    * @returns {Array} - Chunks sorted by BM25 relevance
+    */
+   search(query, limit = 10) {
+     // Convert natural language to an FTS5 OR query:
+     // filter out stopwords, join remaining terms with OR for broader matching
+     const stopwords = new Set(['a','an','the','is','are','was','were','be','been',
+       'being','have','has','had','do','does','did','will','would','could','should',
+       'may','might','can','i','me','my','we','our','you','your','he','she','it',
+       'they','them','this','that','what','which','who','how','when','where','why',
+       'not','no','so','if','or','and','but','in','on','at','to','for','of','with',
+       'by','from','as','into','about','than','after','before']);
+
+     const terms = query.toLowerCase()
+       .replace(/[^\w\s]/g, '')
+       .split(/\s+/)
+       .filter(t => t.length > 1 && !stopwords.has(t));
+
+     if (terms.length === 0) return [];
+
+     const ftsQuery = terms.join(' OR ');
+
+     const stmt = this.db.prepare(`
+       SELECT c.*, rank
+       FROM chunks_fts fts
+       JOIN chunks c ON fts.chunk_id = c.chunk_id
+       WHERE chunks_fts MATCH ?
+       ORDER BY rank
+       LIMIT ?
+     `);
+
+     const rows = stmt.all(ftsQuery, limit);
+     return rows.map(row => ({
+       chunkId: row.chunk_id,
+       filePath: row.file_path,
+       pageStart: row.page_start,
+       pageEnd: row.page_end,
+       elementType: row.element_type,
+       name: row.name,
+       content: row.content,
+       parentChunkId: row.parent_chunk_id,
+       sectionPath: JSON.parse(row.section_path || '[]'),
+       sectionLevel: row.section_level,
+       documentType: row.document_type,
+       metadata: JSON.parse(row.metadata || '{}'),
+       activation: row.activation,
+       rank: row.rank,
+       createdAt: row.created_at,
+       updatedAt: row.updated_at
+     }));
+   }
+
+   /**
+    * Get chunk by ID
+    */
+   getChunk(chunkId) {
+     const row = this.db.prepare('SELECT * FROM chunks WHERE chunk_id = ?').get(chunkId);
+     if (!row) return null;
+     return {
+       chunkId: row.chunk_id,
+       filePath: row.file_path,
+       name: row.name,
+       content: row.content,
+       sectionPath: JSON.parse(row.section_path || '[]'),
+       documentType: row.document_type,
+       activation: row.activation
+     };
+   }
+
+   /**
+    * Delete all chunks for a file (for re-indexing)
+    */
+   deleteByFile(filePath) {
+     this.db.prepare('DELETE FROM chunks WHERE file_path = ?').run(filePath);
+   }
+
+   /**
+    * Get stats about indexed documents
+    */
+   getStats() {
+     const total = this.db.prepare('SELECT COUNT(*) as count FROM chunks').get();
+     const byType = this.db.prepare(
+       'SELECT document_type, COUNT(*) as count FROM chunks GROUP BY document_type'
+     ).all();
+     const files = this.db.prepare(
+       'SELECT DISTINCT file_path FROM chunks'
+     ).all();
+     return {
+       totalChunks: total.count,
+       byType: Object.fromEntries(byType.map(r => [r.document_type, r.count])),
+       indexedFiles: files.length
+     };
+   }
+
+   /**
+    * Record a search access for ACT-R activation tracking (POC5)
+    */
+   recordAccess(chunkId, query) {
+     this.db.prepare(
+       'INSERT INTO access_history (chunk_id, accessed_at, query) VALUES (?, ?, ?)'
+     ).run(chunkId, new Date().toISOString(), query);
+
+     this.db.prepare(
+       'UPDATE chunks SET access_count = access_count + 1, last_accessed = ? WHERE chunk_id = ?'
+     ).run(new Date().toISOString(), chunkId);
+   }
+
+   close() {
+     this.db.close();
+   }
+ }
+
+ module.exports = { DocumentStore, DB_PATH };
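
For orientation, a minimal usage sketch (editor's illustration, not part of the package; the require path and file paths are assumed, and the chunk fields mirror what saveChunk reads):

    const { DocumentStore } = require('./store/documents'); // assumed path
    const store = new DocumentStore('/tmp/multis-demo.db');

    const now = new Date().toISOString();
    store.saveChunk({
      chunkId: 'demo-1', filePath: '/docs/demo.md', pageStart: 0, pageEnd: 0,
      elementType: 'paragraph', name: 'Intro', content: 'FTS5 gives SQLite BM25 ranking.',
      parentChunkId: null, sectionPath: ['Intro'], sectionLevel: 1,
      documentType: 'markdown', metadata: {}, createdAt: now, updatedAt: now
    });

    // Stopwords are dropped, so this becomes the FTS5 query 'bm25 OR ranking'
    const hits = store.search('what is bm25 ranking');
    console.log(hits[0].chunkId, hits[0].rank); // more-negative rank = better BM25 match
    store.close();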
@@ -0,0 +1,106 @@
+ const https = require('https');
+ const { LLMProvider } = require('./base');
+
+ /**
+  * Anthropic Claude provider (vanilla Node.js https)
+  */
+ class AnthropicProvider extends LLMProvider {
+   constructor(apiKey, model = 'claude-sonnet-4-5-20250929') {
+     super();
+     this.apiKey = apiKey;
+     this.model = model;
+     this.apiVersion = '2023-06-01';
+   }
+
+   async generate(prompt, options = {}) {
+     const body = {
+       model: this.model,
+       max_tokens: options.maxTokens || 2048,
+       temperature: options.temperature ?? 0.7, // ?? so an explicit 0 is kept
+       messages: [{ role: 'user', content: prompt }]
+     };
+
+     if (options.system) {
+       body.system = options.system;
+     }
+
+     const response = await this._makeRequest(body);
+
+     return response.content[0].text;
+   }
+
+   async generateWithTools(prompt, tools, options = {}) {
+     const response = await this._makeRequest({
+       model: this.model,
+       max_tokens: options.maxTokens || 2048,
+       tools: tools.map(t => ({
+         name: t.name,
+         description: t.description,
+         input_schema: t.inputSchema
+       })),
+       messages: [{ role: 'user', content: prompt }]
+     });
+
+     return response;
+   }
+
+   async generateWithMessages(messages, options = {}) {
+     const body = {
+       model: this.model,
+       max_tokens: options.maxTokens || 2048,
+       temperature: options.temperature ?? 0.7, // ?? so an explicit 0 is kept
+       messages
+     };
+
+     if (options.system) {
+       body.system = options.system;
+     }
+
+     const response = await this._makeRequest(body);
+     return response.content[0].text;
+   }
+
+   _makeRequest(body) {
+     return new Promise((resolve, reject) => {
+       const data = JSON.stringify(body);
+
+       const options = {
+         hostname: 'api.anthropic.com',
+         path: '/v1/messages',
+         method: 'POST',
+         headers: {
+           'Content-Type': 'application/json',
+           'Content-Length': Buffer.byteLength(data),
+           'x-api-key': this.apiKey,
+           'anthropic-version': this.apiVersion
+         }
+       };
+
+       const req = https.request(options, (res) => {
+         let responseBody = '';
+
+         res.on('data', (chunk) => {
+           responseBody += chunk;
+         });
+
+         res.on('end', () => {
+           if (res.statusCode >= 200 && res.statusCode < 300) {
+             try {
+               resolve(JSON.parse(responseBody));
+             } catch (err) {
+               reject(new Error('Failed to parse response: ' + err.message));
+             }
+           } else {
+             reject(new Error(`API error (${res.statusCode}): ${responseBody}`));
+           }
+         });
+       });
+
+       req.on('error', reject);
+       req.write(data);
+       req.end();
+     });
+   }
+ }
+
+ module.exports = { AnthropicProvider };
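
A minimal call sketch (illustrative; assumes the key lives in the ANTHROPIC_API_KEY environment variable and a relative require path):

    const { AnthropicProvider } = require('./llm/anthropic'); // assumed path
    const llm = new AnthropicProvider(process.env.ANTHROPIC_API_KEY);

    llm.generate('Summarize FTS5 in one sentence.', { system: 'Be terse.', temperature: 0 })
      .then(text => console.log(text))
      .catch(err => console.error(err.message));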
@@ -0,0 +1,38 @@
+ /**
+  * Base LLM Provider interface
+  * All LLM providers must extend this class
+  */
+ class LLMProvider {
+   /**
+    * Generate a response from the LLM
+    * @param {string} prompt - The user prompt
+    * @param {Object} options - Additional options (context, temperature, etc.)
+    * @returns {Promise<string>} - The generated response
+    */
+   async generate(prompt, options = {}) {
+     throw new Error('Must implement generate() method');
+   }
+
+   /**
+    * Generate a response with tool calling support
+    * @param {string} prompt - The user prompt
+    * @param {Array} tools - Available tools
+    * @param {Object} options - Additional options
+    * @returns {Promise<Object>} - Response with possible tool calls
+    */
+   async generateWithTools(prompt, tools, options = {}) {
+     throw new Error('Must implement generateWithTools() method');
+   }
+
+   /**
+    * Generate a response from a messages array (for conversation memory)
+    * @param {Array<{role: string, content: string}>} messages - Conversation messages
+    * @param {Object} options - Additional options (system, temperature, etc.)
+    * @returns {Promise<string>} - The generated response
+    */
+   async generateWithMessages(messages, options = {}) {
+     throw new Error('Must implement generateWithMessages() method');
+   }
+ }
+
+ module.exports = { LLMProvider };
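
To illustrate the contract, a hypothetical stub provider for unit tests (editor's sketch, not shipped in the package):

    const { LLMProvider } = require('./base');

    // Echoes input instead of calling an API; useful for exercising callers offline.
    class EchoProvider extends LLMProvider {
      async generate(prompt, options = {}) {
        return `echo: ${prompt}`;
      }
      async generateWithTools(prompt, tools, options = {}) {
        return { content: `echo: ${prompt}`, tools: tools.map(t => t.name) };
      }
      async generateWithMessages(messages, options = {}) {
        return `echo: ${messages[messages.length - 1].content}`;
      }
    }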
@@ -0,0 +1,34 @@
+ const { AnthropicProvider } = require('./anthropic');
+ const { OpenAIProvider } = require('./openai');
+ const { OllamaProvider } = require('./ollama');
+
+ /**
+  * Factory function to create LLM client based on config
+  * @param {Object} config - LLM configuration
+  * @returns {LLMProvider} - Configured LLM provider
+  */
+ function createLLMClient(config) {
+   const provider = config.provider || 'anthropic';
+
+   switch (provider.toLowerCase()) {
+     case 'anthropic':
+       if (!config.apiKey) {
+         throw new Error('Anthropic API key is required');
+       }
+       return new AnthropicProvider(config.apiKey, config.model);
+
+     case 'openai':
+       if (!config.apiKey) {
+         throw new Error('OpenAI API key is required');
+       }
+       return new OpenAIProvider(config.apiKey, config.model);
+
+     case 'ollama':
+       return new OllamaProvider(config.model, config.baseUrl);
+
+     default:
+       throw new Error(`Unknown LLM provider: ${provider}`);
+   }
+ }
+
+ module.exports = { createLLMClient };
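
Typical use (illustrative values; the require path and model name are assumptions about your deployment):

    const { createLLMClient } = require('./llm'); // assumed path
    const llm = createLLMClient({ provider: 'ollama', model: 'llama3.1:8b' });
    llm.generate('Hello?').then(console.log);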
@@ -0,0 +1,148 @@
+ const http = require('http');
+ const { LLMProvider } = require('./base');
+
+ /**
+  * Ollama local LLM provider (vanilla Node.js http)
+  */
+ class OllamaProvider extends LLMProvider {
+   constructor(model = 'llama3.1:8b', baseUrl = 'http://localhost:11434') {
+     super();
+     this.model = model;
+     this.baseUrl = baseUrl;
+   }
+
+   async generate(prompt, options = {}) {
+     const body = {
+       model: this.model,
+       prompt,
+       stream: false,
+       options: {
+         temperature: options.temperature ?? 0.7, // ?? so an explicit 0 is kept
+         num_predict: options.maxTokens || 2048
+       }
+     };
+
+     if (options.system) {
+       body.system = options.system;
+     }
+
+     const response = await this._makeRequest(body);
+
+     return response.response;
+   }
+
+   async generateWithTools(prompt, tools, options = {}) {
+     // This provider does not use native tool calling; instead it describes
+     // the tools in a system prompt and asks the model to reply with JSON.
+     const toolsDescription = tools.map(t =>
+       `Tool: ${t.name}\nDescription: ${t.description}\nParameters: ${JSON.stringify(t.inputSchema)}`
+     ).join('\n\n');
+
+     const systemPrompt = `You have access to these tools:\n\n${toolsDescription}\n\nTo use a tool, respond with JSON in this format: {"tool": "tool_name", "parameters": {...}}`;
+
+     const response = await this.generate(prompt, {
+       ...options,
+       system: systemPrompt
+     });
+
+     return { content: response };
+   }
+
+   async generateWithMessages(messages, options = {}) {
+     const body = {
+       model: this.model,
+       messages,
+       stream: false,
+       options: {
+         temperature: options.temperature ?? 0.7, // ?? so an explicit 0 is kept
+         num_predict: options.maxTokens || 2048
+       }
+     };
+
+     if (options.system) {
+       body.messages = [{ role: 'system', content: options.system }, ...messages];
+     }
+
+     const response = await this._makeChatRequest(body);
+     return response.message.content;
+   }
+
+   _makeChatRequest(body) {
+     return new Promise((resolve, reject) => {
+       const data = JSON.stringify(body);
+       const url = new URL('/api/chat', this.baseUrl);
+
+       const options = {
+         hostname: url.hostname,
+         port: url.port || 80,
+         path: url.pathname,
+         method: 'POST',
+         headers: {
+           'Content-Type': 'application/json',
+           'Content-Length': Buffer.byteLength(data)
+         }
+       };
+
+       const req = http.request(options, (res) => {
+         let responseBody = '';
+         res.on('data', (chunk) => { responseBody += chunk; });
+         res.on('end', () => {
+           if (res.statusCode >= 200 && res.statusCode < 300) {
+             try { resolve(JSON.parse(responseBody)); }
+             catch (err) { reject(new Error('Failed to parse response: ' + err.message)); }
+           } else {
+             reject(new Error(`Ollama error (${res.statusCode}): ${responseBody}`));
+           }
+         });
+       });
+
+       req.on('error', reject);
+       req.write(data);
+       req.end();
+     });
+   }
+
+   _makeRequest(body) {
+     return new Promise((resolve, reject) => {
+       const data = JSON.stringify(body);
+       const url = new URL('/api/generate', this.baseUrl);
+
+       const options = {
+         hostname: url.hostname,
+         port: url.port || 80,
+         path: url.pathname,
+         method: 'POST',
+         headers: {
+           'Content-Type': 'application/json',
+           'Content-Length': Buffer.byteLength(data)
+         }
+       };
+
+       const req = http.request(options, (res) => {
+         let responseBody = '';
+
+         res.on('data', (chunk) => {
+           responseBody += chunk;
+         });
+
+         res.on('end', () => {
+           if (res.statusCode >= 200 && res.statusCode < 300) {
+             try {
+               resolve(JSON.parse(responseBody));
+             } catch (err) {
+               reject(new Error('Failed to parse response: ' + err.message));
+             }
+           } else {
+             reject(new Error(`Ollama error (${res.statusCode}): ${responseBody}`));
+           }
+         });
+       });
+
+       req.on('error', reject);
+       req.write(data);
+       req.end();
+     });
+   }
+ }
+
+ module.exports = { OllamaProvider };
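
A sketch of driving the prompt-based tool protocol above (illustrative; assumes Ollama on the default local port and a model that actually answers with the requested JSON):

    const { OllamaProvider } = require('./llm/ollama'); // assumed path
    const llm = new OllamaProvider();

    const tools = [{
      name: 'search_docs',
      description: 'Search indexed documents',
      inputSchema: { type: 'object', properties: { query: { type: 'string' } } }
    }];

    llm.generateWithTools('Find notes about FTS5', tools).then(({ content }) => {
      try {
        const call = JSON.parse(content); // {"tool": "...", "parameters": {...}}
        console.log(call.tool, call.parameters);
      } catch {
        console.log('Model replied in plain text:', content);
      }
    });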
@@ -0,0 +1,107 @@
+ const https = require('https');
+ const { LLMProvider } = require('./base');
+
+ /**
+  * OpenAI GPT provider (vanilla Node.js https)
+  */
+ class OpenAIProvider extends LLMProvider {
+   constructor(apiKey, model = 'gpt-4o') {
+     super();
+     this.apiKey = apiKey;
+     this.model = model;
+   }
+
+ async generate(prompt, options = {}) {
15
+ const messages = options.system
16
+ ? [
17
+ { role: 'system', content: options.system },
18
+ { role: 'user', content: prompt }
19
+ ]
20
+ : [{ role: 'user', content: prompt }];
21
+
22
+ const response = await this._makeRequest({
23
+ model: this.model,
24
+ max_tokens: options.maxTokens || 2048,
25
+ temperature: options.temperature || 0.7,
26
+ messages
27
+ });
28
+
29
+ return response.choices[0].message.content;
30
+ }
31
+
32
+ async generateWithTools(prompt, tools, options = {}) {
33
+ const response = await this._makeRequest({
34
+ model: this.model,
35
+ max_tokens: options.maxTokens || 2048,
36
+ tools: tools.map(t => ({
37
+ type: 'function',
38
+ function: {
39
+ name: t.name,
40
+ description: t.description,
41
+ parameters: t.inputSchema
42
+ }
43
+ })),
44
+ messages: [{ role: 'user', content: prompt }]
45
+ });
46
+
47
+ return response;
48
+ }
49
+
50
+ async generateWithMessages(messages, options = {}) {
51
+ const msgs = options.system
52
+ ? [{ role: 'system', content: options.system }, ...messages]
53
+ : [...messages];
54
+
55
+ const response = await this._makeRequest({
56
+ model: this.model,
57
+ max_tokens: options.maxTokens || 2048,
58
+ temperature: options.temperature || 0.7,
59
+ messages: msgs
60
+ });
61
+
62
+ return response.choices[0].message.content;
63
+ }
64
+
+   _makeRequest(body) {
+     return new Promise((resolve, reject) => {
+       const data = JSON.stringify(body);
+
+       const options = {
+         hostname: 'api.openai.com',
+         path: '/v1/chat/completions',
+         method: 'POST',
+         headers: {
+           'Content-Type': 'application/json',
+           'Content-Length': Buffer.byteLength(data),
+           'Authorization': `Bearer ${this.apiKey}`
+         }
+       };
+
+       const req = https.request(options, (res) => {
+         let responseBody = '';
+
+         res.on('data', (chunk) => {
+           responseBody += chunk;
+         });
+
+         res.on('end', () => {
+           if (res.statusCode >= 200 && res.statusCode < 300) {
+             try {
+               resolve(JSON.parse(responseBody));
+             } catch (err) {
+               reject(new Error('Failed to parse response: ' + err.message));
+             }
+           } else {
+             reject(new Error(`API error (${res.statusCode}): ${responseBody}`));
+           }
+         });
+       });
+
+       req.on('error', reject);
+       req.write(data);
+       req.end();
+     });
+   }
+ }
+
+ module.exports = { OpenAIProvider };
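
Because generateWithTools returns the raw Chat Completions response, the caller inspects tool_calls itself; a sketch (illustrative tool; key assumed in OPENAI_API_KEY):

    const { OpenAIProvider } = require('./llm/openai'); // assumed path
    const llm = new OpenAIProvider(process.env.OPENAI_API_KEY);

    const tools = [{
      name: 'get_stats',
      description: 'Return document store statistics',
      inputSchema: { type: 'object', properties: {} }
    }];

    llm.generateWithTools('How many chunks are indexed?', tools).then(res => {
      const msg = res.choices[0].message;
      if (msg.tool_calls) {
        const call = msg.tool_calls[0];
        // Chat Completions encodes the arguments as a JSON string
        console.log(call.function.name, JSON.parse(call.function.arguments));
      } else {
        console.log(msg.content);
      }
    });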