@swarmai/local-agent 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +174 -0
- package/index.js +11 -0
- package/package.json +42 -0
- package/src/agenticChatClaudeMd.js +92 -0
- package/src/agenticChatHandler.js +566 -0
- package/src/aiProviderScanner.js +115 -0
- package/src/auth.js +180 -0
- package/src/cli.js +334 -0
- package/src/commands.js +1853 -0
- package/src/config.js +98 -0
- package/src/connection.js +470 -0
- package/src/mcpManager.js +276 -0
- package/src/startup.js +297 -0
- package/src/toolScanner.js +221 -0
- package/src/workspace.js +201 -0
|
@@ -0,0 +1,566 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Agentic Chat Handler
|
|
3
|
+
*
|
|
4
|
+
* Spawns Claude Code CLI with Ollama as the backend provider,
|
|
5
|
+
* giving full agentic capabilities (tool use, file editing, multi-turn)
|
|
6
|
+
* powered by local or cloud models.
|
|
7
|
+
*
|
|
8
|
+
* Falls back to raw Ollama HTTP chat if Claude CLI is unavailable.
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
const os = require('os');
|
|
12
|
+
const fs = require('fs');
|
|
13
|
+
const path = require('path');
|
|
14
|
+
const { spawn } = require('child_process');
|
|
15
|
+
const { buildAgenticChatClaudeMd } = require('./agenticChatClaudeMd');
|
|
16
|
+
|
|
17
|
+
const MAX_OUTPUT = 500 * 1024; // 500KB
|
|
18
|
+
const MAX_TURNS_CAP = 50;
|
|
19
|
+
const DEFAULT_TIMEOUT_MS = 30 * 60 * 1000; // 30 min sync default
|
|
20
|
+
const ASYNC_TIMEOUT_MS = 60 * 60 * 1000; // 60 min async max
|
|
21
|
+
const STALE_DEFAULT_MS = 5 * 60 * 1000; // 5 min stale threshold
|
|
22
|
+
|
|
23
|
+
// Regex for [FILE_GENERATED: /path/to/file] markers
|
|
24
|
+
const FILE_MARKER_RE = /\[FILE_GENERATED:\s*([^\]]+)\]/g;
|
|
25
|
+
|
|
26
|
+
// Windows/Unix path regex for scanning output
|
|
27
|
+
const WIN_PATH_RE = /[A-Za-z]:\\[\w\\./ -]+/g;
|
|
28
|
+
const UNIX_PATH_RE = /\/[\w./ -]{4,}/g;
|
|
29
|
+
|
|
30
|
+
/**
|
|
31
|
+
* Create the agentic chat handler with injected dependencies.
|
|
32
|
+
*
|
|
33
|
+
* @param {object} deps
|
|
34
|
+
* @param {Map} deps.activeProcesses - Shared map for kill support
|
|
35
|
+
* @param {Function} deps.emitChunk - Streaming output emitter
|
|
36
|
+
* @param {Function} deps.uploadToServer - File upload function
|
|
37
|
+
* @param {Function} deps.getSecurityConfig - Security config getter
|
|
38
|
+
* @param {Function} deps.validateFilePath - Path validation function
|
|
39
|
+
* @returns {Function} handleAgenticChat(params, commandId)
|
|
40
|
+
*/
|
|
41
|
+
function createAgenticChatHandler(deps) {
  // NOTE(review): getSecurityConfig and validateFilePath are destructured but not
  // used anywhere in this handler — presumably kept for interface parity with the
  // other command handlers; confirm before removing.
  const { activeProcesses, emitChunk, uploadToServer, getSecurityConfig, validateFilePath } = deps;

  /**
   * Handle an agenticChat command.
   *
   * Spawns the Claude CLI against the local Ollama server (via the
   * ANTHROPIC_BASE_URL redirect), streams its output, detects any files it
   * produced, uploads them, and resolves a structured result. Falls back to a
   * raw Ollama HTTP chat when the CLI is missing, fails to spawn, or fails
   * fast (exit != 0, no stdout, < 5s).
   *
   * @param {object} params
   * @param {string} params.model - Ollama model name (e.g., 'qwen3.5:397b-cloud')
   * @param {Array} params.messages - [{role, content}] conversation
   * @param {string} [params.systemPrompt] - Agent personality
   * @param {string} [params.workspaceProfile] - Workspace profile name
   * @param {string} [params.ollamaBaseUrl] - Override Ollama URL
   * @param {number} [params.timeout] - Timeout in ms
   * @param {boolean} [params.asyncMode] - Enable async (fire-and-deliver)
   * @param {number} [params.staleThresholdMs] - Kill if no output for this long
   * @param {number} [params.maxTurns] - Max Claude turns (capped at 50)
   * @param {string} [params.cwd] - Working directory override
   * @param {string} [params.taskContext] - Task description for CLAUDE.md
   * @param {string} commandId - Unique command ID for streaming/kill
   * @returns {Promise<object>} Structured result
   */
  async function handleAgenticChat(params, commandId) {
    const {
      model,
      messages = [],
      systemPrompt = '',
      workspaceProfile = 'agentic-chat',
      ollamaBaseUrl = 'http://localhost:11434',
      timeout,
      asyncMode = false,
      staleThresholdMs = STALE_DEFAULT_MS,
      maxTurns = 10,
      cwd,
      taskContext = '',
    } = params || {};

    if (!model) throw new Error('agenticChat: model is required');
    if (!messages || messages.length === 0) throw new Error('agenticChat: messages array is required');

    // Validate Ollama base URL (prevent SSRF)
    _validateOllamaUrl(ollamaBaseUrl);

    // Find Claude CLI executable
    const claudePath = _findClaudeExecutable();
    if (!claudePath) {
      console.log('[agenticChat] Claude CLI not found, falling back to raw Ollama');
      return _fallbackToOllamaChat(params);
    }

    // Resolve workspace and ensure the output/ directory exists before spawning
    const workspacePath = _resolveWorkspace(workspaceProfile, cwd);
    const outputDir = path.join(workspacePath, 'output');
    fs.mkdirSync(outputDir, { recursive: true });

    // Write CLAUDE.md so the spawned CLI picks up the session instructions
    const claudeMdContent = buildAgenticChatClaudeMd({
      profileName: workspaceProfile,
      systemPrompt,
      model,
      taskContext,
      outputDir,
    });
    fs.writeFileSync(path.join(workspacePath, 'CLAUDE.md'), claudeMdContent, 'utf-8');

    // Take pre-execution snapshot of output dir (Layer 3 of file detection
    // diffs against this after the run)
    const preSnapshot = _snapshotDir(outputDir);

    // Build prompt from messages (last user message + context)
    const prompt = _buildPromptFromMessages(messages, systemPrompt);

    // Build spawn args — use the full path found by _findClaudeExecutable
    const isWindows = os.platform() === 'win32';
    const executable = claudePath;
    const effectiveMaxTurns = Math.min(maxTurns || 10, MAX_TURNS_CAP);

    const args = [
      '--dangerously-skip-permissions',
      '-p', prompt,
      '--model', model,
      '--max-turns', String(effectiveMaxTurns),
    ];

    // Environment: redirect Claude to Ollama
    // Per Ollama docs: ANTHROPIC_API_KEY="" but Windows EINVAL on empty strings.
    // Using 'ollama' as dummy value — forces Claude CLI to use ANTHROPIC_BASE_URL
    // instead of its stored Anthropic credentials.
    // The hang fix was: stdin='ignore' + removing --output-format text (not the key).
    const env = { ...process.env };
    env.ANTHROPIC_BASE_URL = ollamaBaseUrl;
    env.ANTHROPIC_AUTH_TOKEN = 'ollama';
    env.ANTHROPIC_API_KEY = 'ollama';
    // Remove CLAUDECODE env var to prevent "nested session" detection
    // when this agent is launched from within a Claude Code session
    delete env.CLAUDECODE;

    // Timeout: async runs get a longer ceiling; both are hard-capped
    const effectiveTimeout = asyncMode
      ? Math.min(timeout || ASYNC_TIMEOUT_MS, ASYNC_TIMEOUT_MS)
      : Math.min(timeout || DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS);

    // args[2] is the raw prompt — redact it from the log line
    console.log(`[agenticChat] Spawning: ${executable} ${args.map((a, i) => i === 2 ? '"<prompt>"' : a).join(' ')}`);
    console.log(`[agenticChat] CWD: ${workspacePath}`);
    console.log(`[agenticChat] ANTHROPIC_BASE_URL=${ollamaBaseUrl}, ANTHROPIC_API_KEY=${env.ANTHROPIC_API_KEY}, MODEL=${model}`);

    return new Promise((resolve, reject) => {
      const startTime = Date.now();
      let stdout = '';
      let stderr = '';
      let killed = false;
      let killTimer = null;
      let lastOutputTime = Date.now();
      let staleCheckTimer = null;

      const child = spawn(executable, args, {
        cwd: workspacePath,
        env,
        stdio: ['ignore', 'pipe', 'pipe'], // stdin='ignore' — prevents hang waiting for input
        windowsHide: true,
        shell: isWindows, // Windows requires shell:true for .cmd files
      });

      // Track for kill support
      if (commandId) activeProcesses.set(commandId, child);

      child.stdout.on('data', (data) => {
        const chunk = data.toString();
        stdout += chunk;
        lastOutputTime = Date.now();
        if (commandId) emitChunk(commandId, chunk, 'stdout');
        // Cap accumulated stdout; once over the cap, terminate the child
        if (stdout.length > MAX_OUTPUT) {
          stdout = stdout.substring(0, MAX_OUTPUT);
          if (!killed) { killed = true; child.kill('SIGTERM'); }
        }
      });

      child.stderr.on('data', (data) => {
        const chunk = data.toString();
        stderr += chunk;
        lastOutputTime = Date.now();
        if (commandId) emitChunk(commandId, chunk, 'stderr');
        // stderr overflow is truncated but does NOT kill the child
        if (stderr.length > MAX_OUTPUT) {
          stderr = stderr.substring(0, MAX_OUTPUT);
        }
      });

      // Stale detection for async mode: kill the child when it has produced
      // no output on either stream for staleThresholdMs (checked every 30s)
      if (asyncMode) {
        staleCheckTimer = setInterval(() => {
          const silentMs = Date.now() - lastOutputTime;
          if (silentMs > staleThresholdMs && !killed) {
            killed = true;
            child.kill('SIGTERM');
            // Escalate to SIGKILL if the child ignores SIGTERM for 5s
            setTimeout(() => { try { child.kill('SIGKILL'); } catch {} }, 5000);
            if (staleCheckTimer) clearInterval(staleCheckTimer);
          }
        }, 30000);
      }

      // Timeout handler (hard wall-clock limit, SIGTERM then SIGKILL)
      const timeoutTimer = setTimeout(() => {
        if (!killed) {
          killed = true;
          child.kill('SIGTERM');
          killTimer = setTimeout(() => { try { child.kill('SIGKILL'); } catch {} }, 5000);
        }
      }, effectiveTimeout);

      child.on('close', async (code) => {
        clearTimeout(timeoutTimer);
        if (killTimer) clearTimeout(killTimer);
        if (staleCheckTimer) clearInterval(staleCheckTimer);
        if (commandId) activeProcesses.delete(commandId);

        const duration = Date.now() - startTime;
        const truncated = stdout.length >= MAX_OUTPUT;

        // Debug logging
        console.log(`[agenticChat] Process exited with code ${code} after ${duration}ms`);
        if (stderr) console.log(`[agenticChat] stderr: ${stderr.substring(0, 500)}`);
        if (!stdout && !stderr) console.log(`[agenticChat] No output at all. Executable: ${executable}, args: ${JSON.stringify(args).substring(0, 200)}`);

        // 3-layer file detection (markers, path scan, output/ dir diff)
        const detectedFiles = _detectFiles(stdout, workspacePath, outputDir, preSnapshot);

        // Upload detected files to server
        const uploadedFiles = [];
        for (const filePath of detectedFiles) {
          try {
            const uploaded = await _uploadFile(filePath, uploadToServer);
            if (uploaded) uploadedFiles.push(uploaded);
          } catch (e) {
            // Upload failure is non-fatal
            uploadedFiles.push({ path: filePath, error: e.message });
          }
        }

        // If Claude CLI failed quickly (< 5s with exit code != 0 and no stdout),
        // it means the CLI itself failed — fallback to raw Ollama
        if (code !== 0 && !stdout && duration < 5000) {
          console.log(`[agenticChat] Claude CLI failed fast (${duration}ms, code=${code}). Falling back to Ollama.`);
          console.log(`[agenticChat] stderr: ${stderr.substring(0, 300)}`);
          try {
            const fallbackResult = await _fallbackToOllamaChat(params);
            return resolve(fallbackResult);
          } catch (fbErr) {
            // Fallback also failed — return the original error
            console.log(`[agenticChat] Ollama fallback also failed: ${fbErr.message}`);
          }
        }

        // If stdout is empty but stderr has content, include stderr as the response
        const effectiveContent = stdout || (stderr ? `[stderr] ${stderr}` : '');

        const result = {
          content: effectiveContent,
          model,
          provider: 'claude+ollama',
          files: uploadedFiles,
          exitCode: code,
          duration,
          truncated,
          workspacePath,
          async: asyncMode,
        };

        // Async mode: also emit via async-result event so the server gets the
        // result even if the original request has already been answered
        if (asyncMode && deps._socket && commandId) {
          deps._socket.emit('command:async-result', {
            commandId,
            result,
            error: code !== 0 ? `Claude CLI exited with code ${code}` : null,
          });
        }

        resolve(result);
      });

      child.on('error', (err) => {
        clearTimeout(timeoutTimer);
        if (killTimer) clearTimeout(killTimer);
        if (staleCheckTimer) clearInterval(staleCheckTimer);
        if (commandId) activeProcesses.delete(commandId);

        // Fallback to raw Ollama on spawn failure
        console.log(`[agenticChat] Claude CLI spawn failed: ${err.message}. Falling back to raw Ollama.`);
        _fallbackToOllamaChat(params).then(resolve).catch(reject);
      });
    });
  }

  return handleAgenticChat;
}
|
|
293
|
+
|
|
294
|
+
// =====================================================
|
|
295
|
+
// Internal helpers
|
|
296
|
+
// =====================================================
|
|
297
|
+
|
|
298
|
+
/**
|
|
299
|
+
* Validate Ollama base URL to prevent SSRF
|
|
300
|
+
*/
|
|
301
|
+
/**
 * Validate the Ollama base URL to prevent SSRF.
 *
 * Only loopback-style hosts are accepted. Fix: WHATWG URL serializes the IPv6
 * loopback hostname WITH brackets ('[::1]'), so the original bare '::1' entry
 * could never match — both spellings are now in the allow list.
 *
 * @param {string} urlStr - Candidate base URL (e.g. 'http://localhost:11434')
 * @throws {Error} if the URL is malformed or the host is not local
 */
function _validateOllamaUrl(urlStr) {
  try {
    const url = new URL(urlStr);
    const hostname = url.hostname;
    // '[::1]' is how Node's URL reports the IPv6 loopback; keep '::1' too for safety.
    const allowed = ['localhost', '127.0.0.1', '0.0.0.0', '::1', '[::1]'];
    if (!allowed.includes(hostname)) {
      throw new Error(`agenticChat: ollamaBaseUrl must be localhost. Got: ${hostname}`);
    }
  } catch (e) {
    // Re-throw our own validation error; wrap URL-parse failures.
    if (e.message.includes('agenticChat')) throw e;
    throw new Error(`agenticChat: invalid ollamaBaseUrl: ${urlStr}`);
  }
}
|
|
314
|
+
|
|
315
|
+
/**
|
|
316
|
+
* Find Claude CLI executable path.
|
|
317
|
+
* Returns the full path or executable name, or null if not found.
|
|
318
|
+
* On Windows, claude is typically installed as claude.exe (not .cmd).
|
|
319
|
+
*/
|
|
320
|
+
/**
 * Locate the Claude CLI executable on PATH.
 *
 * On Windows, probes claude.exe, claude.cmd, then bare claude via `where`;
 * elsewhere uses `which claude`.
 *
 * @returns {string|null} Path (or name) of the CLI, or null when not found.
 */
function _findClaudeExecutable() {
  const { execSync } = require('child_process');
  const opts = { encoding: 'utf-8', timeout: 5000, stdio: ['pipe', 'pipe', 'pipe'], shell: true };
  try {
    if (os.platform() === 'win32') {
      // Probe the common install names in order of likelihood.
      const out = execSync('where claude.exe 2>nul || where claude.cmd 2>nul || where claude 2>nul', opts).trim();
      // `where` prints one hit per line when installed in several places — keep the first.
      const first = out.split('\n')[0].trim();
      if (first) return first;
    } else {
      const out = execSync('which claude', opts).trim();
      if (out) return out;
    }
  } catch { /* lookup command failed — treat as not installed */ }
  return null;
}
|
|
339
|
+
|
|
340
|
+
/**
|
|
341
|
+
* Resolve workspace directory
|
|
342
|
+
*/
|
|
343
|
+
/**
 * Resolve the working directory for an agentic chat session.
 *
 * Preference order:
 *   1. caller-supplied cwd (if it exists)
 *   2. the project's workspace manager profile
 *   3. a per-profile folder under ~/.swarmai (created on demand)
 */
function _resolveWorkspace(profileName, explicitCwd) {
  // 1. An explicit, existing directory always wins.
  if (explicitCwd && fs.existsSync(explicitCwd)) {
    return explicitCwd;
  }

  // 2. Delegate to the workspace manager when it is available.
  try {
    const { getWorkspaceManager } = require('./workspace');
    const manager = getWorkspaceManager();
    if (manager) {
      return manager.ensureProfileWorkspace(profileName, {});
    }
  } catch { /* manager unavailable — use the home-dir fallback */ }

  // 3. Last resort: create a workspace in the user's home directory.
  const fallbackDir = path.join(os.homedir(), '.swarmai', 'agentic-chat', profileName || 'default');
  fs.mkdirSync(fallbackDir, { recursive: true });
  return fallbackDir;
}
|
|
359
|
+
|
|
360
|
+
/**
|
|
361
|
+
* Build a single prompt string from messages array
|
|
362
|
+
*/
|
|
363
|
+
/**
 * Flatten a chat transcript into a single prompt string for the CLI.
 *
 * The system prompt (when present) leads, followed by the 10 most recent
 * messages, each tagged with a readable role label.
 */
function _buildPromptFromMessages(messages, systemPrompt) {
  const sections = systemPrompt ? [`[System Instructions]\n${systemPrompt}\n`] : [];

  // Map wire roles to display labels; unknown roles pass through unchanged.
  const labelFor = (role) => {
    if (role === 'user') return 'User';
    if (role === 'assistant') return 'Assistant';
    return role;
  };

  // Only the last 10 messages are kept, for context-window efficiency.
  for (const msg of messages.slice(-10)) {
    sections.push(`[${labelFor(msg.role)}]\n${msg.content}`);
  }

  return sections.join('\n\n');
}
|
|
380
|
+
|
|
381
|
+
/**
|
|
382
|
+
* Snapshot a directory — returns Set of {filename: mtime} entries
|
|
383
|
+
*/
|
|
384
|
+
/**
 * Record the files directly inside a directory as { fullPath → mtimeMs }.
 *
 * Used as the "before" state that _detectFiles diffs against after a run.
 * Missing or unreadable directories yield an empty Map; per-file stat
 * failures (e.g. raced deletions) are skipped silently.
 *
 * @param {string} dirPath
 * @returns {Map<string, number>}
 */
function _snapshotDir(dirPath) {
  const state = new Map();
  if (!fs.existsSync(dirPath)) return state;

  let entries;
  try {
    entries = fs.readdirSync(dirPath, { withFileTypes: true });
  } catch {
    return state; // unreadable directory — treat as empty
  }

  for (const entry of entries) {
    if (!entry.isFile()) continue;
    const fullPath = path.join(dirPath, entry.name);
    try {
      state.set(fullPath, fs.statSync(fullPath).mtimeMs);
    } catch { /* file vanished between readdir and stat — skip */ }
  }
  return state;
}
|
|
401
|
+
|
|
402
|
+
/**
|
|
403
|
+
* 3-layer file detection
|
|
404
|
+
* Layer 1: [FILE_GENERATED:] markers
|
|
405
|
+
* Layer 2: Path regex scan
|
|
406
|
+
* Layer 3: Directory diff
|
|
407
|
+
*/
|
|
408
|
+
/**
 * 3-layer file detection over a finished CLI run:
 *   Layer 1: explicit [FILE_GENERATED: path] sentinel markers in stdout
 *   Layer 2: path-looking strings in stdout that exist inside the workspace
 *   Layer 3: files in output/ that are new or modified vs. the pre-run snapshot
 *
 * Fixes vs. original:
 *  - regexes are local literals used via matchAll, instead of module-level /g
 *    regexes driven by exec (whose stateful lastIndex couples calls);
 *  - workspace containment uses a path-separator boundary, so a sibling
 *    directory like "<workspace>2" no longer passes the startsWith check.
 *
 * @param {string} stdout - Captured CLI output
 * @param {string} workspacePath - Session workspace root
 * @param {string} outputDir - workspacePath/output
 * @param {Map<string, number>} preSnapshot - Pre-run { path → mtimeMs }
 * @returns {string[]} Absolute paths of detected files (deduplicated)
 */
function _detectFiles(stdout, workspacePath, outputDir, preSnapshot) {
  const found = new Set();

  // Layer 1: sentinel markers emitted by the agent.
  const markerRe = /\[FILE_GENERATED:\s*([^\]]+)\]/g;
  for (const m of stdout.matchAll(markerRe)) {
    const candidate = m[1].trim();
    if (fs.existsSync(candidate)) {
      found.add(path.resolve(candidate));
    }
  }

  // Layer 2: scan stdout for absolute paths, restricted to the workspace.
  const pathRe = os.platform() === 'win32'
    ? /[A-Za-z]:\\[\w\\./ -]+/g
    : /\/[\w./ -]{4,}/g;
  const inWorkspace = (p) => p === workspacePath || p.startsWith(workspacePath + path.sep);
  for (const raw of stdout.match(pathRe) || []) {
    const resolved = path.resolve(raw.trim());
    if (!inWorkspace(resolved) || !fs.existsSync(resolved)) continue;
    try {
      if (fs.statSync(resolved).isFile()) {
        found.add(resolved);
      }
    } catch { /* raced deletion — skip */ }
  }

  // Layer 3: diff output/ against the pre-execution snapshot.
  if (fs.existsSync(outputDir)) {
    try {
      for (const entry of fs.readdirSync(outputDir, { withFileTypes: true })) {
        if (!entry.isFile()) continue;
        const fullPath = path.join(outputDir, entry.name);
        try {
          const { mtimeMs } = fs.statSync(fullPath);
          const prevMtime = preSnapshot.get(fullPath);
          if (!prevMtime || mtimeMs > prevMtime) {
            found.add(fullPath);
          }
        } catch { /* skip */ }
      }
    } catch { /* non-critical */ }
  }

  return [...found];
}
|
|
456
|
+
|
|
457
|
+
/**
|
|
458
|
+
* Upload a file to the server
|
|
459
|
+
*/
|
|
460
|
+
/**
 * Upload one generated file via the injected uploader.
 *
 * With no uploader configured, or when the uploader yields a falsy result,
 * local file metadata is returned instead so the caller still sees the file.
 *
 * @param {string} filePath - Absolute path of the file
 * @param {Function|null} uploadToServer - (buffer, name, mime) => result
 * @returns {Promise<object>} Upload info or local-path metadata
 */
async function _uploadFile(filePath, uploadToServer) {
  const originalName = path.basename(filePath);

  // No uploader configured — report local metadata only.
  if (!uploadToServer) {
    const { size } = fs.statSync(filePath);
    return { path: filePath, originalName, size };
  }

  const buffer = fs.readFileSync(filePath);
  const mimeType = _getMimeType(path.extname(filePath).toLowerCase());

  const uploaded = await uploadToServer(buffer, originalName, mimeType);
  if (uploaded) {
    return {
      downloadUrl: uploaded.downloadUrl,
      originalName,
      mimeType,
      size: buffer.length,
    };
  }

  // Upload yielded nothing — fall back to local path info.
  return { path: filePath, originalName, size: buffer.length };
}
|
|
483
|
+
|
|
484
|
+
/**
|
|
485
|
+
* Basic MIME type lookup
|
|
486
|
+
*/
|
|
487
|
+
/**
 * Map a lowercase file extension (with dot) to a MIME type.
 * Unknown extensions fall back to application/octet-stream.
 */
function _getMimeType(ext) {
  switch (ext) {
    case '.txt': return 'text/plain';
    case '.md': return 'text/markdown';
    case '.json': return 'application/json';
    case '.html': return 'text/html';
    case '.css': return 'text/css';
    case '.js': return 'application/javascript';
    case '.ts': return 'application/typescript';
    case '.py': return 'text/x-python';
    case '.pdf': return 'application/pdf';
    case '.png': return 'image/png';
    case '.jpg':
    case '.jpeg': return 'image/jpeg';
    case '.gif': return 'image/gif';
    case '.svg': return 'image/svg+xml';
    case '.csv': return 'text/csv';
    case '.xml': return 'application/xml';
    case '.zip': return 'application/zip';
    case '.docx': return 'application/vnd.openxmlformats-officedocument.wordprocessingml.document';
    case '.xlsx': return 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet';
    default: return 'application/octet-stream';
  }
}
|
|
500
|
+
|
|
501
|
+
/**
|
|
502
|
+
* Fallback: raw Ollama HTTP chat (when Claude CLI is unavailable)
|
|
503
|
+
*/
|
|
504
|
+
/**
 * Fallback path: talk to Ollama's /api/chat endpoint directly over HTTP
 * when the Claude CLI is unavailable. Non-streaming, two-minute cap.
 *
 * @param {object} params - Same shape as handleAgenticChat's params
 * @returns {Promise<object>} Structured result with fallback:true
 */
async function _fallbackToOllamaChat(params) {
  const { model, messages = [], systemPrompt, ollamaBaseUrl = 'http://localhost:11434' } = params;
  const http = require('http');

  // Prepend the system prompt unless the caller already supplied one.
  const chatMessages = [...messages];
  if (systemPrompt && !chatMessages.find(m => m.role === 'system')) {
    chatMessages.unshift({ role: 'system', content: systemPrompt });
  }

  const body = JSON.stringify({
    model,
    messages: chatMessages,
    stream: false,
    options: { temperature: 0.7, num_predict: 4096 },
  });

  const endpoint = new URL(`${ollamaBaseUrl}/api/chat`);

  return new Promise((resolve, reject) => {
    const request = http.request({
      hostname: endpoint.hostname,
      port: endpoint.port,
      path: endpoint.pathname,
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Content-Length': Buffer.byteLength(body),
      },
      timeout: 120000,
    }, (res) => {
      let raw = '';
      res.on('data', (chunk) => { raw += chunk; });
      res.on('end', () => {
        let parsed;
        try {
          parsed = JSON.parse(raw);
        } catch (e) {
          return reject(new Error(`Ollama fallback: invalid response — ${e.message}`));
        }
        resolve({
          content: parsed.message?.content || '',
          model: parsed.model || model,
          provider: 'ollama-fallback',
          files: [],
          exitCode: 0,
          duration: parsed.total_duration || 0,
          truncated: false,
          fallback: true,
          fallbackReason: 'Claude CLI not available',
          usage: {
            promptTokens: parsed.prompt_eval_count || 0,
            completionTokens: parsed.eval_count || 0,
          },
        });
      });
    });

    request.on('error', (e) => reject(new Error(`Ollama fallback failed: ${e.message}`)));
    request.on('timeout', () => { request.destroy(); reject(new Error('Ollama fallback timed out')); });
    request.write(body);
    request.end();
  });
}
|
|
565
|
+
|
|
566
|
+
module.exports = { createAgenticChatHandler };
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AI Provider Scanner for Local Agent
|
|
3
|
+
*
|
|
4
|
+
* Auto-discovers local AI services (Ollama, LM Studio) on the user's machine.
|
|
5
|
+
* Reports available providers and models to the SwarmAI server so they can be
|
|
6
|
+
* used as AI providers via Task Routing.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
const http = require('http');
|
|
10
|
+
|
|
11
|
+
const SCAN_TIMEOUT_MS = 3000; // 3s per service
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Known local AI service endpoints to scan
|
|
15
|
+
*/
|
|
16
|
+
/**
 * Known local AI service endpoints to probe, each with a parser that
 * normalizes its model-list payload into { id, name, ...metadata } records.
 * Malformed or empty payloads yield [].
 */
const AI_SERVICES = [
  {
    type: 'ollama',
    baseUrl: 'http://localhost:11434',
    modelsPath: '/api/tags',
    parseModels(data) {
      // Ollama /api/tags returns { models: [{ name, size, details: { parameter_size, ... } }] }
      if (!data?.models) return [];
      const GIB = 1024 * 1024 * 1024;
      return data.models.map((model) => ({
        id: model.name,
        name: model.name,
        size: model.size ? `${(model.size / GIB).toFixed(1)}GB` : null,
        parameterSize: model.details?.parameter_size || null,
        family: model.details?.family || null,
        quantization: model.details?.quantization_level || null,
      }));
    },
  },
  {
    type: 'lmstudio',
    baseUrl: 'http://localhost:1234',
    modelsPath: '/v1/models',
    parseModels(data) {
      // LM Studio OpenAI-compatible: { data: [{ id, object, owned_by }] }
      if (!data?.data) return [];
      return data.data.map((model) => ({
        id: model.id,
        name: model.id,
        ownedBy: model.owned_by || null,
      }));
    },
  },
];
|
|
49
|
+
|
|
50
|
+
/**
|
|
51
|
+
* Make a GET request with timeout (uses Node.js built-in http module)
|
|
52
|
+
*/
|
|
53
|
+
/**
 * GET a URL and parse the response body as JSON.
 *
 * Resolves null on any failure (malformed URL, network error, timeout,
 * non-JSON body) — it never rejects, so callers can treat null uniformly
 * as "service not running".
 *
 * @param {string} urlStr
 * @param {number} timeoutMs
 * @returns {Promise<object|null>}
 */
function httpGet(urlStr, timeoutMs) {
  return new Promise((resolve) => {
    let req;
    try {
      const target = new URL(urlStr);
      req = http.request({
        hostname: target.hostname,
        port: target.port,
        path: target.pathname,
        method: 'GET',
        timeout: timeoutMs,
      }, (res) => {
        const chunks = [];
        res.on('data', (chunk) => chunks.push(chunk));
        res.on('end', () => {
          try {
            resolve(JSON.parse(chunks.join('')));
          } catch {
            resolve(null); // body was not valid JSON
          }
        });
      });
    } catch {
      return resolve(null); // bad URL or request construction failed
    }

    req.on('error', () => resolve(null));
    req.on('timeout', () => { req.destroy(); resolve(null); });
    req.end();
  });
}
|
|
79
|
+
|
|
80
|
+
/**
|
|
81
|
+
* Scan for all known local AI services.
|
|
82
|
+
* Returns array of discovered providers with their models.
|
|
83
|
+
* Non-blocking — services that are down are silently skipped.
|
|
84
|
+
*
|
|
85
|
+
* @returns {Promise<Array<{ type: string, baseUrl: string, models: Array }>>}
|
|
86
|
+
*/
|
|
87
|
+
/**
 * Scan for all known local AI services in parallel.
 *
 * Services that are unreachable, time out, or report zero models are
 * silently skipped — the scan never throws.
 *
 * @returns {Promise<Array<{ type: string, baseUrl: string, models: Array }>>}
 */
async function scanAiProviders() {
  // Probe every service concurrently; each probe resolves to a provider
  // record or null.
  const probes = AI_SERVICES.map(async (service) => {
    const payload = await httpGet(`${service.baseUrl}${service.modelsPath}`, SCAN_TIMEOUT_MS);
    if (!payload) return null;

    const models = service.parseModels(payload);
    if (models.length === 0) return null;

    return { type: service.type, baseUrl: service.baseUrl, models };
  });

  const outcomes = await Promise.all(probes);
  return outcomes.filter((provider) => provider !== null);
}
|
|
114
|
+
|
|
115
|
+
module.exports = { scanAiProviders, AI_SERVICES };
|