wispy-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +17 -0
- package/README.md +162 -0
- package/bin/wispy.mjs +11 -0
- package/lib/wispy-repl.mjs +2105 -0
- package/package.json +40 -0
|
@@ -0,0 +1,2105 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* wispy — interactive AI assistant REPL
|
|
5
|
+
*
|
|
6
|
+
* Usage:
|
|
7
|
+
* wispy Start interactive session
|
|
8
|
+
* wispy "message" One-shot message
|
|
9
|
+
* wispy home <subcommand> Operator commands (legacy CLI)
|
|
10
|
+
*
|
|
11
|
+
* Requires: OPENAI_API_KEY in env
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import os from "node:os";
|
|
15
|
+
import path from "node:path";
|
|
16
|
+
import { createInterface } from "node:readline";
|
|
17
|
+
import { appendFile, mkdir, readFile, writeFile } from "node:fs/promises";
|
|
18
|
+
|
|
19
|
+
// ---------------------------------------------------------------------------
|
|
20
|
+
// Config
|
|
21
|
+
// ---------------------------------------------------------------------------
|
|
22
|
+
|
|
23
|
+
// Root data directory for wispy state (~/.wispy) and its memory store.
const WISPY_DIR = path.join(os.homedir(), ".wispy");
const MEMORY_DIR = path.join(WISPY_DIR, "memory");

// Workstream-aware conversation storage
// wispy -w "project-name" → separate conversation per workstream.
// Resolution order: WISPY_WORKSTREAM env var, then the argv token that
// immediately follows a -w/--workstream flag (the .find predicate matches
// the element whose *previous* argv entry is the flag), else "default".
// NOTE(review): if -w is the last argv token, no element follows and the
// name falls back to "default".
const ACTIVE_WORKSTREAM = process.env.WISPY_WORKSTREAM ??
  process.argv.find((a, i) => (process.argv[i-1] === "-w" || process.argv[i-1] === "--workstream")) ?? "default";
// One JSON history file per workstream under ~/.wispy/conversations/.
const CONVERSATIONS_DIR = path.join(WISPY_DIR, "conversations");
const HISTORY_FILE = path.join(CONVERSATIONS_DIR, `${ACTIVE_WORKSTREAM}.json`);
|
|
32
|
+
|
|
33
|
+
// ---------------------------------------------------------------------------
|
|
34
|
+
// Multi-provider config with auto-detection & setup guidance
|
|
35
|
+
// ---------------------------------------------------------------------------
|
|
36
|
+
|
|
37
|
+
// Registry of supported LLM providers. Per provider:
//   envKeys      - env var names that may hold its API key (checked in order)
//   defaultModel - model used when none is configured
//   label        - human-readable name for the setup guide
//   signupUrl    - where to obtain a key (null for local providers)
//   local        - true when no API key is required (Ollama)
const PROVIDERS = {
  google: { envKeys: ["GOOGLE_AI_KEY", "GEMINI_API_KEY"], defaultModel: "gemini-2.5-flash", label: "Google AI (Gemini)", signupUrl: "https://aistudio.google.com/apikey" },
  anthropic: { envKeys: ["ANTHROPIC_API_KEY"], defaultModel: "claude-sonnet-4-20250514", label: "Anthropic (Claude)", signupUrl: "https://console.anthropic.com/settings/keys" },
  openai: { envKeys: ["OPENAI_API_KEY"], defaultModel: "gpt-4o", label: "OpenAI", signupUrl: "https://platform.openai.com/api-keys" },
  openrouter:{ envKeys: ["OPENROUTER_API_KEY"], defaultModel: "anthropic/claude-sonnet-4-20250514", label: "OpenRouter (multi-model)", signupUrl: "https://openrouter.ai/keys" },
  groq: { envKeys: ["GROQ_API_KEY"], defaultModel: "llama-3.3-70b-versatile", label: "Groq (fast inference)", signupUrl: "https://console.groq.com/keys" },
  deepseek: { envKeys: ["DEEPSEEK_API_KEY"], defaultModel: "deepseek-chat", label: "DeepSeek", signupUrl: "https://platform.deepseek.com/api_keys" },
  ollama: { envKeys: ["OLLAMA_HOST"], defaultModel: "llama3.2", label: "Ollama (local)", signupUrl: null, local: true },
};
|
|
46
|
+
|
|
47
|
+
// Also try macOS Keychain for keys
|
|
48
|
+
/**
 * Best-effort lookup of an API key in the macOS Keychain via the
 * `security` CLI. Returns the stored secret, or null on any failure
 * (non-macOS host, missing entry, timeout, or an empty stored value).
 * @param {string} service - Keychain service name to query.
 * @returns {Promise<string|null>}
 */
async function tryKeychainKey(service) {
  try {
    const [{ execFile: ef }, { promisify }] = await Promise.all([
      import("node:child_process"),
      import("node:util"),
    ]);
    const run = promisify(ef);
    const cliArgs = ["find-generic-password", "-s", service, "-a", "poropo", "-w"];
    const { stdout } = await run("security", cliArgs, { timeout: 3000 });
    const secret = stdout.trim();
    return secret === "" ? null : secret;
  } catch {
    // Any failure (command missing, entry absent, timeout) means "no key".
    return null;
  }
}
|
|
57
|
+
|
|
58
|
+
/**
 * Return the value of the first environment variable in `envKeys` that is
 * set to a non-empty value, or null when none of them is.
 * @param {string[]} envKeys - Candidate variable names, in priority order.
 * @returns {string|null}
 */
function getEnvKey(envKeys) {
  const hit = envKeys.find((name) => process.env[name]);
  return hit === undefined ? null : process.env[hit];
}
|
|
64
|
+
|
|
65
|
+
// Detect provider — env var, then config file, then keychain
|
|
66
|
+
/**
 * Resolve which LLM provider/key/model this session should use.
 * Resolution order (first hit wins):
 *   1. WISPY_PROVIDER env override (must still have a key, unless local)
 *   2. ~/.wispy/config.json ({ provider, apiKey?, model? })
 *   3. Auto-detect from provider env vars in a fixed priority order
 *   4. macOS Keychain entries (side effect: copies the key into process.env)
 * @returns {Promise<{provider: string, key: string|null, model: string}|null>}
 *   null when no provider could be detected (caller shows the setup guide).
 */
async function detectProvider() {
  // 1. Check WISPY_PROVIDER env override
  const forced = process.env.WISPY_PROVIDER;
  if (forced && PROVIDERS[forced]) {
    const key = getEnvKey(PROVIDERS[forced].envKeys);
    // Local providers (Ollama) need no key.
    if (key || PROVIDERS[forced].local) return { provider: forced, key, model: process.env.WISPY_MODEL ?? PROVIDERS[forced].defaultModel };
  }

  // 2. Check config file (env var key, if present, beats cfg.apiKey)
  const configPath = path.join(WISPY_DIR, "config.json");
  try {
    const cfg = JSON.parse(await readFile(configPath, "utf8"));
    if (cfg.provider && PROVIDERS[cfg.provider]) {
      const key = getEnvKey(PROVIDERS[cfg.provider].envKeys) ?? cfg.apiKey;
      if (key || PROVIDERS[cfg.provider].local) return { provider: cfg.provider, key, model: cfg.model ?? PROVIDERS[cfg.provider].defaultModel };
    }
  } catch { /* no config — fall through to auto-detection */ }

  // 3. Auto-detect from env vars (priority order)
  const order = ["google", "anthropic", "openai", "openrouter", "groq", "deepseek", "ollama"];
  for (const p of order) {
    const key = getEnvKey(PROVIDERS[p].envKeys);
    // The ollama clause is redundant (OLLAMA_HOST is its envKey) but harmless.
    if (key || (p === "ollama" && process.env.OLLAMA_HOST)) {
      return { provider: p, key, model: process.env.WISPY_MODEL ?? PROVIDERS[p].defaultModel };
    }
  }

  // 4. Try macOS Keychain (service name → provider id)
  const keychainMap = { "google-ai-key": "google", "anthropic-api-key": "anthropic", "openai-api-key": "openai" };
  for (const [service, provider] of Object.entries(keychainMap)) {
    const key = await tryKeychainKey(service);
    if (key) {
      // Set env for later use (so downstream code can read the usual var).
      process.env[PROVIDERS[provider].envKeys[0]] = key;
      return { provider, key, model: process.env.WISPY_MODEL ?? PROVIDERS[provider].defaultModel };
    }
  }

  return null;
}
|
|
106
|
+
|
|
107
|
+
/**
 * Print the interactive setup guide shown when no provider/key could be
 * detected: supported providers, quick-start env exports, config-file
 * commands, and the macOS Keychain convention. Output-only; no return value.
 * NOTE(review): leading whitespace inside this template literal is part of
 * the printed output — confirm indentation against the published package.
 */
function printSetupGuide() {
  console.log(`
${bold("🌿 Wispy — API key setup")}

${bold("Supported providers:")}
${Object.entries(PROVIDERS).map(([id, p]) => {
  const envStr = p.envKeys.join(" or ");
  const url = p.signupUrl ? dim(p.signupUrl) : dim("(local)");
  return `  ${green(id.padEnd(12))} ${p.label}\n      env: ${envStr}\n      ${url}`;
}).join("\n\n")}

${bold("Quick start (pick one):")}
  ${cyan("export GOOGLE_AI_KEY=your-key")}      ${dim("# free tier available")}
  ${cyan("export ANTHROPIC_API_KEY=your-key")}  ${dim("# Claude")}
  ${cyan("export OPENAI_API_KEY=your-key")}     ${dim("# GPT-4o")}
  ${cyan("export OPENROUTER_API_KEY=your-key")} ${dim("# any model")}

${bold("Or save to config:")}
  ${cyan('wispy config set provider google --global')}
  ${cyan('wispy config set apiKey your-key --global')}

${bold("macOS Keychain (auto-detected):")}
  ${dim('security add-generic-password -s "google-ai-key" -a "poropo" -w "your-key"')}
`);
}
|
|
132
|
+
|
|
133
|
+
// Module-level provider detection (top-level await runs once at startup).
const detected = await detectProvider();
// Session-wide identity of the active provider/key/model; "none"/null/
// "unknown" when detection failed (callers then show the setup guide).
const PROVIDER = detected?.provider ?? "none";
const API_KEY = detected?.key ?? null;
const MODEL = detected?.model ?? "unknown";
// Hard cap on characters pulled in from context files like WISPY.md.
const MAX_CONTEXT_CHARS = 40_000;
|
|
138
|
+
|
|
139
|
+
// ---------------------------------------------------------------------------
|
|
140
|
+
// Colors (minimal, no deps)
|
|
141
|
+
// ---------------------------------------------------------------------------
|
|
142
|
+
|
|
143
|
+
// ANSI styling helpers (SGR escape + reset). Dependency-free on purpose.
function dim(s) { return "\x1b[2m" + s + "\x1b[0m"; }
function bold(s) { return "\x1b[1m" + s + "\x1b[0m"; }
function green(s) { return "\x1b[32m" + s + "\x1b[0m"; }
function cyan(s) { return "\x1b[36m" + s + "\x1b[0m"; }
function yellow(s) { return "\x1b[33m" + s + "\x1b[0m"; }
function red(s) { return "\x1b[31m" + s + "\x1b[0m"; }
|
|
149
|
+
|
|
150
|
+
// ---------------------------------------------------------------------------
|
|
151
|
+
// File helpers
|
|
152
|
+
// ---------------------------------------------------------------------------
|
|
153
|
+
|
|
154
|
+
/**
 * Read a UTF-8 text file, returning `fallback` instead of throwing when
 * the file is missing or unreadable.
 * @param {string} filePath - Path to read.
 * @param {*} [fallback=null] - Value returned on any read error.
 * @returns {Promise<string|*>}
 */
async function readFileOr(filePath, fallback = null) {
  try {
    return await readFile(filePath, "utf8");
  } catch {
    return fallback;
  }
}
|
|
157
|
+
|
|
158
|
+
/**
 * Find the first WISPY.md context file — project cwd, ./.wispy, then
 * ~/.wispy — and return its contents capped at MAX_CONTEXT_CHARS.
 * @returns {Promise<string|null>} null when no candidate has content.
 */
async function loadWispyMd() {
  const candidates = [
    path.resolve("WISPY.md"),
    path.resolve(".wispy", "WISPY.md"),
    path.join(WISPY_DIR, "WISPY.md"),
  ];
  for (const candidate of candidates) {
    const text = await readFileOr(candidate);
    if (text) {
      return text.slice(0, MAX_CONTEXT_CHARS);
    }
  }
  return null;
}
|
|
170
|
+
|
|
171
|
+
/**
 * Assemble persisted memory files (~/.wispy/memory/<type>.md) into one
 * markdown string with a "## <type> memory" heading per non-empty file,
 * in a fixed type order.
 * @returns {Promise<string|null>} null when no memory file has content.
 */
async function loadMemories() {
  const sections = [];
  for (const type of ["user", "feedback", "project", "references"]) {
    const body = await readFileOr(path.join(MEMORY_DIR, `${type}.md`));
    if (!body?.trim()) continue;
    sections.push(`## ${type} memory\n${body.trim()}`);
  }
  return sections.length > 0 ? sections.join("\n\n") : null;
}
|
|
182
|
+
|
|
183
|
+
/**
 * Load the active workstream's conversation history. A missing, empty, or
 * corrupt history file yields an empty conversation rather than an error.
 * @returns {Promise<Array<object>>}
 */
async function loadConversation() {
  const raw = await readFileOr(HISTORY_FILE);
  if (!raw) {
    return [];
  }
  try {
    return JSON.parse(raw);
  } catch {
    return [];
  }
}
|
|
188
|
+
|
|
189
|
+
/**
 * Persist the conversation for the active workstream, keeping only the
 * most recent 50 messages so the history file stays bounded.
 * @param {Array<object>} messages - Full in-memory conversation.
 */
async function saveConversation(messages) {
  await mkdir(CONVERSATIONS_DIR, { recursive: true });
  const recent = messages.slice(-50);
  const payload = JSON.stringify(recent, null, 2) + "\n";
  await writeFile(HISTORY_FILE, payload, "utf8");
}
|
|
195
|
+
|
|
196
|
+
/**
 * List known workstream names (basenames of *.json conversation files).
 * @returns {Promise<string[]>} empty when the directory doesn't exist yet.
 */
async function listWorkstreams() {
  try {
    const { readdir } = await import("node:fs/promises");
    const entries = await readdir(CONVERSATIONS_DIR);
    const names = [];
    for (const entry of entries) {
      if (entry.endsWith(".json")) {
        names.push(entry.replace(".json", ""));
      }
    }
    return names;
  } catch {
    return [];
  }
}
|
|
205
|
+
|
|
206
|
+
/**
 * Load a named workstream's conversation history by basename.
 * @param {string} wsName - Workstream name (file is <name>.json).
 * @returns {Promise<Array<object>>} empty on missing or corrupt file.
 */
async function loadWorkstreamConversation(wsName) {
  try {
    const file = path.join(CONVERSATIONS_DIR, `${wsName}.json`);
    const raw = await readFile(file, "utf8");
    return JSON.parse(raw);
  } catch {
    return [];
  }
}
|
|
213
|
+
|
|
214
|
+
// ---------------------------------------------------------------------------
|
|
215
|
+
// Director mode — overview across all workstreams
|
|
216
|
+
// ---------------------------------------------------------------------------
|
|
217
|
+
|
|
218
|
+
/**
 * Director mode: print a per-workstream summary (message/tool counts and
 * last request/response previews) followed by session totals.
 *
 * Fix: assistant (and defensively user) messages may lack a string
 * `content` field — e.g. tool-call-only turns; the sibling
 * searchAcrossWorkstreams already guards with `m.content?.`. Previously
 * `lastAssistant.content.slice(...)` threw on such messages; previews now
 * fall back to the empty string.
 */
async function showOverview() {
  const wsList = await listWorkstreams();
  if (wsList.length === 0) {
    console.log(dim("No workstreams yet. Start one: wispy -w <name> \"message\""));
    return;
  }

  console.log(`\n${bold("🌿 Wispy Director — All Workstreams")}\n`);

  let totalMsgs = 0;
  let totalToolCalls = 0;
  const summaries = [];

  for (const ws of wsList) {
    const conv = await loadWorkstreamConversation(ws);
    const userMsgs = conv.filter(m => m.role === "user");
    const assistantMsgs = conv.filter(m => m.role === "assistant");
    const toolResults = conv.filter(m => m.role === "tool_result");
    const lastUser = userMsgs[userMsgs.length - 1];
    const lastAssistant = assistantMsgs[assistantMsgs.length - 1];

    totalMsgs += userMsgs.length;
    totalToolCalls += toolResults.length;

    // Highlight the workstream this process is attached to.
    const isActive = ws === ACTIVE_WORKSTREAM;
    const marker = isActive ? green("● ") : "  ";
    const label = isActive ? green(ws) : ws;

    console.log(`${marker}${bold(label)}`);
    console.log(`  Messages: ${userMsgs.length} user / ${assistantMsgs.length} assistant / ${toolResults.length} tool calls`);
    if (lastUser) {
      const req = lastUser.content ?? "";
      console.log(`  Last request: ${dim(req.slice(0, 60))}${req.length > 60 ? "..." : ""}`);
    }
    if (lastAssistant) {
      const resp = lastAssistant.content ?? "";
      console.log(`  Last response: ${dim(resp.slice(0, 60))}${resp.length > 60 ? "..." : ""}`);
    }
    console.log("");

    summaries.push({ ws, userCount: userMsgs.length, toolCount: toolResults.length, lastMsg: lastUser?.content ?? "" });
  }

  console.log(dim(`─────────────────────────────────`));
  console.log(`  ${bold("Total")}: ${wsList.length} workstreams, ${totalMsgs} messages, ${totalToolCalls} tool calls`);
  console.log(dim(`  Active: ${ACTIVE_WORKSTREAM}`));
  console.log(dim(`  Switch: wispy -w <name>`));
  console.log("");
}
|
|
265
|
+
|
|
266
|
+
/**
 * Case-insensitive text search across every workstream's conversation.
 * Prints up to the 3 most recent matches per workstream, then a summary
 * line with the total match count. Only user/assistant messages with a
 * string `content` are searched.
 * @param {string} query - Text to look for.
 */
async function searchAcrossWorkstreams(query) {
  const workstreams = await listWorkstreams();
  const needle = query.toLowerCase();
  let totalMatches = 0;

  console.log(`\n${bold("🔍 Searching all workstreams for:")} ${cyan(query)}\n`);

  for (const ws of workstreams) {
    const conv = await loadWorkstreamConversation(ws);
    const hits = [];
    for (const m of conv) {
      const searchable = m.role === "user" || m.role === "assistant";
      if (searchable && m.content?.toLowerCase().includes(needle)) {
        hits.push(m);
      }
    }
    if (hits.length === 0) continue;

    console.log(`  ${bold(ws)} (${hits.length} matches):`);
    // Show only the 3 most recent matches for this workstream.
    for (const m of hits.slice(-3)) {
      const icon = m.role === "user" ? "👤" : "🌿";
      const preview = m.content.slice(0, 80).replace(/\n/g, " ");
      console.log(`    ${icon} ${dim(preview)}${m.content.length > 80 ? "..." : ""}`);
    }
    console.log("");
    totalMatches += hits.length;
  }

  if (totalMatches === 0) {
    console.log(dim(`  No matches found for "${query}"`));
  } else {
    console.log(dim(`  ${totalMatches} total matches across ${workstreams.length} workstreams`));
  }
  console.log("");
}
|
|
299
|
+
|
|
300
|
+
/**
 * Append a timestamped bullet to ~/.wispy/memory/<type>.md, creating the
 * memory directory on first use. Timestamp is ISO-8601 to minute precision.
 * @param {string} type - Memory category (e.g. "user", "project").
 * @param {string} entry - Text to record.
 */
async function appendToMemory(type, entry) {
  await mkdir(MEMORY_DIR, { recursive: true });
  const stamp = new Date().toISOString().slice(0, 16);
  const line = `\n- [${stamp}] ${entry}\n`;
  await appendFile(path.join(MEMORY_DIR, `${type}.md`), line, "utf8");
}
|
|
305
|
+
|
|
306
|
+
// ---------------------------------------------------------------------------
|
|
307
|
+
// System prompt builder
|
|
308
|
+
// ---------------------------------------------------------------------------
|
|
309
|
+
|
|
310
|
+
// ---------------------------------------------------------------------------
|
|
311
|
+
// Token / cost tracking
|
|
312
|
+
// ---------------------------------------------------------------------------
|
|
313
|
+
|
|
314
|
+
// Running token tally for this process, mutated as calls complete and
// surfaced by formatCost().
let sessionTokens = { input: 0, output: 0 };
|
|
315
|
+
|
|
316
|
+
/**
 * Rough token estimate for a string: ~4 characters per token, rounded up.
 * Null/undefined input counts as zero tokens.
 * @param {string|null|undefined} text
 * @returns {number}
 */
function estimateTokens(text) {
  const chars = text?.length ?? 0;
  return Math.ceil(chars / 4);
}
|
|
320
|
+
|
|
321
|
+
// Model pricing database (per 1M tokens, USD). `tier` is a coarse cost
// bucket ("free"/"cheap"/"mid"/"expensive") used for routing and display.
// NOTE(review): prices are a point-in-time snapshot — verify against each
// provider's current price list before trusting cost estimates.
const MODEL_PRICING = {
  // Google
  "gemini-2.5-flash": { input: 0.15, output: 0.60, tier: "cheap" },
  "gemini-2.5-pro": { input: 1.25, output: 10.0, tier: "mid" },
  "gemini-2.0-flash": { input: 0.10, output: 0.40, tier: "cheap" },
  // Anthropic
  "claude-sonnet-4-20250514": { input: 3.0, output: 15.0, tier: "mid" },
  "claude-opus-4-6": { input: 15.0, output: 75.0, tier: "expensive" },
  "claude-haiku-3.5": { input: 0.80, output: 4.0, tier: "cheap" },
  // OpenAI
  "gpt-4o": { input: 2.50, output: 10.0, tier: "mid" },
  "gpt-4o-mini": { input: 0.15, output: 0.60, tier: "cheap" },
  "gpt-4.1": { input: 2.0, output: 8.0, tier: "mid" },
  "gpt-4.1-mini": { input: 0.40, output: 1.60, tier: "cheap" },
  "gpt-4.1-nano": { input: 0.10, output: 0.40, tier: "cheap" },
  "o4-mini": { input: 1.10, output: 4.40, tier: "mid" },
  // OpenRouter (pass-through, estimate)
  "anthropic/claude-sonnet-4-20250514": { input: 3.0, output: 15.0, tier: "mid" },
  // Groq (fast, cheap)
  "llama-3.3-70b-versatile": { input: 0.59, output: 0.79, tier: "cheap" },
  // DeepSeek
  "deepseek-chat": { input: 0.27, output: 1.10, tier: "cheap" },
  // Ollama (free, runs locally)
  "llama3.2": { input: 0, output: 0, tier: "free" },
};
|
|
347
|
+
|
|
348
|
+
/**
 * Look up per-1M-token pricing for a model. Unknown models get a
 * mid-range placeholder so cost reporting still produces a number.
 * @param {string} modelName
 * @returns {{input: number, output: number, tier: string}}
 */
function getModelPricing(modelName) {
  const known = MODEL_PRICING[modelName];
  return known ?? { input: 1.0, output: 3.0, tier: "unknown" };
}
|
|
351
|
+
|
|
352
|
+
/**
 * Human-readable session usage: total token count plus the estimated
 * dollar cost under the active model's pricing.
 * @returns {string} e.g. "1234 tokens (~$0.0042)"
 */
function formatCost() {
  const { input, output } = getModelPricing(MODEL);
  const dollars = (sessionTokens.input * input + sessionTokens.output * output) / 1_000_000;
  const totalTokens = sessionTokens.input + sessionTokens.output;
  return `${totalTokens} tokens (~$${dollars.toFixed(4)})`;
}
|
|
357
|
+
|
|
358
|
+
// ---------------------------------------------------------------------------
|
|
359
|
+
// Task-aware model routing — pick cheapest model for the job
|
|
360
|
+
// ---------------------------------------------------------------------------
|
|
361
|
+
|
|
362
|
+
// Complexity tier → per-provider model choice, consumed by getOptimalModel.
// Providers without an entry for a tier fall back to the session MODEL.
const TASK_MODEL_MAP = {
  // Simple tasks → cheapest model
  simple: { google: "gemini-2.5-flash", anthropic: "claude-haiku-3.5", openai: "gpt-4.1-nano", groq: "llama-3.3-70b-versatile" },
  // Complex tasks → mid-tier
  complex: { google: "gemini-2.5-pro", anthropic: "claude-sonnet-4-20250514", openai: "gpt-4o", groq: "llama-3.3-70b-versatile" },
  // Critical tasks → best available
  critical: { google: "gemini-2.5-pro", anthropic: "claude-opus-4-6", openai: "gpt-4o", groq: "llama-3.3-70b-versatile" },
};
|
|
370
|
+
|
|
371
|
+
/**
 * Heuristic keyword classifier for prompt complexity, used to route to a
 * cheaper or stronger model. Tiers are checked in order; first match wins.
 * @param {string} prompt
 * @returns {"critical"|"complex"|"simple"}
 */
function classifyTaskComplexity(prompt) {
  const lower = prompt.toLowerCase();
  const rules = [
    // Critical: code review, architecture, security, complex debugging
    ["critical", /architect|security|review.*code|refactor|debug.*complex|design.*system/i],
    // Complex: code writing, analysis, multi-step reasoning
    ["complex", /write.*code|implement|analyze|compare|explain.*detail|create.*plan|build/i],
  ];
  for (const [tier, pattern] of rules) {
    if (pattern.test(lower)) {
      return tier;
    }
  }
  // Everything else: questions, formatting, translation, simple file ops
  return "simple";
}
|
|
383
|
+
|
|
384
|
+
/**
 * Pick the model for a prompt. An explicit WISPY_MODEL override always
 * wins; otherwise route by classified complexity via TASK_MODEL_MAP,
 * falling back to the session default when the active provider has no
 * mapping for that tier.
 * @param {string} prompt
 * @returns {string} model identifier
 */
function getOptimalModel(prompt) {
  const override = process.env.WISPY_MODEL;
  if (override) {
    return override;
  }
  const tier = classifyTaskComplexity(prompt);
  const tierModels = TASK_MODEL_MAP[tier];
  return tierModels[PROVIDER] ?? MODEL;
}
|
|
392
|
+
|
|
393
|
+
// ---------------------------------------------------------------------------
|
|
394
|
+
// Budget management (per-workstream)
|
|
395
|
+
// ---------------------------------------------------------------------------
|
|
396
|
+
|
|
397
|
+
// Ledger of per-workstream spending and limits (see loadBudgets/trackSpending).
const BUDGET_FILE = path.join(WISPY_DIR, "budgets.json");
|
|
398
|
+
|
|
399
|
+
/**
 * Read the per-workstream budget ledger from disk.
 * @returns {Promise<object>} {} when the file is absent or corrupt.
 */
async function loadBudgets() {
  try {
    const raw = await readFile(BUDGET_FILE, "utf8");
    return JSON.parse(raw);
  } catch {
    return {};
  }
}
|
|
404
|
+
|
|
405
|
+
/**
 * Persist the budget ledger as pretty-printed JSON, creating ~/.wispy on
 * first use.
 * @param {object} budgets - Map of workstream → { limitUsd, spentUsd, totalTokens }.
 */
async function saveBudgets(budgets) {
  await mkdir(WISPY_DIR, { recursive: true });
  const json = JSON.stringify(budgets, null, 2) + "\n";
  await writeFile(BUDGET_FILE, json, "utf8");
}
|
|
409
|
+
|
|
410
|
+
/**
 * Record token usage and estimated cost against a workstream's budget,
 * persist the updated ledger, and report whether the workstream's
 * spending limit (if any) has been exceeded.
 * @param {string} workstream
 * @param {number} inputTokens
 * @param {number} outputTokens
 * @param {string} modelName - Used to look up per-token pricing.
 * @returns {Promise<{overBudget: boolean, spent: number, limit?: number}>}
 */
async function trackSpending(workstream, inputTokens, outputTokens, modelName) {
  const budgets = await loadBudgets();
  if (!budgets[workstream]) {
    budgets[workstream] = { limitUsd: null, spentUsd: 0, totalTokens: 0 };
  }
  const entry = budgets[workstream];

  const pricing = getModelPricing(modelName);
  const cost = (inputTokens * pricing.input + outputTokens * pricing.output) / 1_000_000;
  entry.spentUsd += cost;
  entry.totalTokens += inputTokens + outputTokens;
  // Save before the limit check so an overage is still recorded on disk.
  await saveBudgets(budgets);

  if (entry.limitUsd !== null && entry.spentUsd > entry.limitUsd) {
    return { overBudget: true, spent: entry.spentUsd, limit: entry.limitUsd };
  }
  return { overBudget: false, spent: entry.spentUsd };
}
|
|
426
|
+
|
|
427
|
+
// ---------------------------------------------------------------------------
|
|
428
|
+
// Context window optimization — compact messages to fit token budget
|
|
429
|
+
// ---------------------------------------------------------------------------
|
|
430
|
+
|
|
431
|
+
/**
 * Estimate total tokens across a message list. Messages without a string
 * `content` field are measured by their JSON-serialized size instead.
 * @param {Array<object>} messages
 * @returns {number}
 */
function estimateMessagesTokens(messages) {
  let total = 0;
  for (const message of messages) {
    total += estimateTokens(message.content ?? JSON.stringify(message));
  }
  return total;
}
|
|
434
|
+
|
|
435
|
+
/**
 * Shrink a conversation to fit an estimated token budget. System messages
 * are always preserved; the oldest non-system messages are dropped first
 * (never below 4 remaining), and as a last resort each surviving message
 * body is truncated to 2000 characters.
 * @param {Array<object>} messages
 * @param {number} [maxTokens=30000]
 * @returns {Array<object>} the original array when it already fits,
 *   otherwise a new array of (possibly copied) messages.
 */
function optimizeContext(messages, maxTokens = 30_000) {
  if (estimateMessagesTokens(messages) <= maxTokens) {
    return messages; // fits as-is
  }

  const system = [];
  const rest = [];
  for (const m of messages) {
    (m.role === "system" ? system : rest).push(m);
  }

  // Evict oldest non-system messages until within budget (floor of 4).
  let kept = [...rest];
  while (kept.length > 4 && estimateMessagesTokens([...system, ...kept]) > maxTokens) {
    kept.shift();
  }

  // Last resort: hard-truncate each remaining message body.
  if (estimateMessagesTokens([...system, ...kept]) > maxTokens) {
    kept = kept.map((m) => ({
      ...m,
      content: m.content ? m.content.slice(0, 2000) : m.content,
    }));
  }

  return [...system, ...kept];
}
|
|
459
|
+
|
|
460
|
+
// ---------------------------------------------------------------------------
|
|
461
|
+
// Tool definitions (Gemini function calling format)
|
|
462
|
+
// ---------------------------------------------------------------------------
|
|
463
|
+
|
|
464
|
+
// Tool schemas advertised to the model (Gemini/OpenAI-style function
// declarations: name, description, JSON-schema parameters). Execution is
// dispatched by name in executeTool.
const TOOL_DEFINITIONS = [
  // --- Filesystem / shell / search primitives -----------------------------
  {
    name: "read_file",
    description: "Read the contents of a file at the given path",
    parameters: {
      type: "object",
      properties: {
        path: { type: "string", description: "File path to read" },
      },
      required: ["path"],
    },
  },
  {
    name: "write_file",
    description: "Write content to a file, creating it if it doesn't exist",
    parameters: {
      type: "object",
      properties: {
        path: { type: "string", description: "File path to write" },
        content: { type: "string", description: "Content to write" },
      },
      required: ["path", "content"],
    },
  },
  {
    name: "run_command",
    description: "Execute a shell command and return stdout/stderr",
    parameters: {
      type: "object",
      properties: {
        command: { type: "string", description: "Shell command to execute" },
      },
      required: ["command"],
    },
  },
  {
    name: "list_directory",
    description: "List files and directories at the given path",
    parameters: {
      type: "object",
      properties: {
        path: { type: "string", description: "Directory path (default: current dir)" },
      },
      required: [],
    },
  },
  {
    name: "web_search",
    description: "Search the web and return results",
    parameters: {
      type: "object",
      properties: {
        query: { type: "string", description: "Search query" },
      },
      required: ["query"],
    },
  },
  // --- Sub-agent orchestration -------------------------------------------
  {
    name: "spawn_agent",
    description: "Spawn a sub-agent for a well-scoped task. Use for sidecar tasks that can run in parallel. Do NOT spawn for the immediate blocking step — do that yourself. Each agent gets its own context. Prefer concrete, bounded tasks with clear deliverables.",
    parameters: {
      type: "object",
      properties: {
        task: { type: "string", description: "Concrete task description for the sub-agent" },
        role: {
          type: "string",
          enum: ["explorer", "planner", "worker", "reviewer"],
          description: "explorer=codebase search, planner=strategy design, worker=implementation, reviewer=code review/QA",
        },
        model_tier: {
          type: "string",
          enum: ["cheap", "mid", "expensive"],
          description: "cheap for simple tasks, mid for coding, expensive for critical analysis. Default: auto based on role",
        },
        fork_context: { type: "boolean", description: "If true, copy current conversation context to the sub-agent" },
      },
      required: ["task", "role"],
    },
  },
  {
    name: "list_agents",
    description: "List all running/completed sub-agents and their status",
    parameters: { type: "object", properties: {}, required: [] },
  },
  {
    name: "get_agent_result",
    description: "Get the result from a completed sub-agent",
    parameters: {
      type: "object",
      properties: {
        agent_id: { type: "string", description: "ID of the sub-agent" },
      },
      required: ["agent_id"],
    },
  },
  // --- Planning / multi-stage workflows ----------------------------------
  {
    name: "update_plan",
    description: "Create or update a step-by-step plan for the current task. Use to track progress.",
    parameters: {
      type: "object",
      properties: {
        explanation: { type: "string", description: "Brief explanation of the plan" },
        steps: {
          type: "array",
          items: {
            type: "object",
            properties: {
              step: { type: "string" },
              status: { type: "string", enum: ["pending", "in_progress", "completed", "skipped"] },
            },
          },
          description: "List of plan steps with status",
        },
      },
      required: ["steps"],
    },
  },
  {
    name: "pipeline",
    description: "Run a sequential pipeline of agent roles. Each stage's output feeds into the next. Example: explore→planner→worker→reviewer. Use for complex multi-step tasks that need different specialists in sequence.",
    parameters: {
      type: "object",
      properties: {
        task: { type: "string", description: "The overall task to accomplish" },
        stages: {
          type: "array",
          items: { type: "string", enum: ["explorer", "planner", "worker", "reviewer"] },
          description: "Ordered list of agent roles to chain",
        },
      },
      required: ["task", "stages"],
    },
  },
  {
    name: "spawn_async_agent",
    description: "Spawn a sub-agent that runs in the background. Returns immediately with an agent_id. Check results later with get_agent_result. Use for sidecar tasks while you continue working on the main task.",
    parameters: {
      type: "object",
      properties: {
        task: { type: "string", description: "Task for the background agent" },
        role: { type: "string", enum: ["explorer", "planner", "worker", "reviewer"], description: "Agent role" },
      },
      required: ["task", "role"],
    },
  },
  {
    name: "ralph_loop",
    description: "Persistence mode — keep retrying a task until it's verified complete. The worker agent executes, then a reviewer verifies. If not done, worker tries again. Max 5 iterations. Use for tasks that MUST be completed correctly.",
    parameters: {
      type: "object",
      properties: {
        task: { type: "string", description: "Task that must be completed" },
        success_criteria: { type: "string", description: "How to verify the task is truly done" },
      },
      required: ["task"],
    },
  },
];
|
|
622
|
+
|
|
623
|
+
// ---------------------------------------------------------------------------
|
|
624
|
+
// Tool execution
|
|
625
|
+
// ---------------------------------------------------------------------------
|
|
626
|
+
|
|
627
|
+
// Try server API first, fallback to local execution
|
|
628
|
+
/**
 * Attempt to execute a filesystem tool through the local HTTP server
 * (read_file / write_file / list_directory only). Returns a result object
 * on success, or null to signal "fall back to local execution" — which
 * covers server-down, timeout (10s per request), server rejection, and
 * tool names the server does not handle.
 * NOTE(review): DEFAULT_SERVER_PORT is referenced here but defined
 * elsewhere in this file — confirm it exists in this scope.
 * @param {string} name - Tool name from TOOL_DEFINITIONS.
 * @param {object} args - Tool arguments.
 * @returns {Promise<object|null>}
 */
async function executeToolViaServer(name, args) {
  try {
    const serverUrl = `http://127.0.0.1:${DEFAULT_SERVER_PORT}`;

    if (name === "read_file") {
      const resp = await fetch(`${serverUrl}/api/node-filesystem-actions`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ subAction: "read_file", path: args.path }),
        signal: AbortSignal.timeout(10_000),
      });
      const data = await resp.json();
      // Server responses are capped at 10k chars, matching local reads.
      if (data.success) return { success: true, content: data.data?.slice(0, 10_000) ?? "" };
      // Fallback to local if server rejects path
      return null;
    }

    if (name === "write_file") {
      const resp = await fetch(`${serverUrl}/api/node-filesystem-actions`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ subAction: "write_file", path: args.path, content: args.content }),
        signal: AbortSignal.timeout(10_000),
      });
      const data = await resp.json();
      if (data.success) return { success: true, message: `Written to ${args.path} (via server)` };
      return null;
    }

    if (name === "list_directory") {
      const resp = await fetch(`${serverUrl}/api/node-filesystem-actions`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        // Default to the current directory when no path was given.
        body: JSON.stringify({ subAction: "list_dir", path: args.path || "." }),
        signal: AbortSignal.timeout(10_000),
      });
      const data = await resp.json();
      if (data.success && data.entries) {
        const listing = data.entries.map(e => `${e.isDir ? "📁" : "📄"} ${e.name}`).join("\n");
        return { success: true, listing };
      }
      return null;
    }
  } catch {
    // Server not available, fallback to local
    return null;
  }
  return null; // Not handled by server
}
|
|
677
|
+
|
|
678
|
+
/**
 * Execute a named tool with the given arguments.
 *
 * Dispatch order:
 *   1. Try the local wispy server via executeToolViaServer (sandboxed path).
 *   2. Fall back to direct local execution in the switch below.
 *
 * Errors never escape: every failure is returned as
 * `{ success: false, error }` so the chat loop can feed it back to the model.
 *
 * @param {string} name - Tool name (read_file, write_file, run_command, ...).
 * @param {object} args - Tool-specific arguments; shape depends on `name`.
 * @returns {Promise<object>} Result object; always carries `success` plus
 *   tool-specific fields (content, output, listing, agent_id, ...).
 */
async function executeTool(name, args) {
  // Try server first (sandboxed execution)
  const serverResult = await executeToolViaServer(name, args);
  if (serverResult) return serverResult;

  // Lazy-load child_process helpers; only needed on the local path.
  const { execFile } = await import("node:child_process");
  const { promisify } = await import("node:util");
  const execAsync = promisify(execFile);

  try {
    switch (name) {
      // Read a file, expanding a leading "~" to the home directory.
      case "read_file": {
        const filePath = args.path.replace(/^~/, os.homedir());
        const content = await readFile(filePath, "utf8");
        // Truncate large files
        const truncated = content.length > 10_000
          ? content.slice(0, 10_000) + `\n\n... (truncated, ${content.length} chars total)`
          : content;
        return { success: true, content: truncated };
      }

      // Write a file, creating parent directories as needed.
      // NOTE(review): args.path is mutated in place here — the caller's args
      // object sees the expanded path afterwards.
      case "write_file": {
        args.path = args.path.replace(/^~/, os.homedir());
        const dir = path.dirname(args.path);
        await mkdir(dir, { recursive: true });
        await writeFile(args.path, args.content, "utf8");
        return { success: true, message: `Written ${args.content.length} chars to ${args.path}` };
      }

      // Run an arbitrary shell command via bash -c with a 30s timeout.
      // Output (stdout + stderr) is capped at 5k chars for the model.
      case "run_command": {
        console.log(dim(` $ ${args.command}`));
        const { stdout, stderr } = await execAsync("/bin/bash", ["-c", args.command], {
          timeout: 30_000,
          maxBuffer: 1024 * 1024,
          cwd: process.cwd(),
        });
        const result = (stdout + (stderr ? `\nSTDERR: ${stderr}` : "")).trim();
        const truncated = result.length > 5_000
          ? result.slice(0, 5_000) + "\n... (truncated)"
          : result;
        return { success: true, output: truncated };
      }

      // List a directory (defaults to cwd), marking entries as dir/file.
      case "list_directory": {
        const { readdir } = await import("node:fs/promises");
        const targetPath = (args.path || ".").replace(/^~/, os.homedir());
        const entries = await readdir(targetPath, { withFileTypes: true });
        const list = entries.map(e => `${e.isDirectory() ? "📁" : "📄"} ${e.name}`).join("\n");
        return { success: true, listing: list };
      }

      // Scrape DuckDuckGo Lite via curl and regex-parse up to 5 results.
      // NOTE(review): depends on /usr/bin/curl existing and on DDG Lite's
      // current HTML structure — both are best-effort, hence the fallbacks.
      case "web_search": {
        const { promisify } = await import("node:util");
        const { execFile: ef } = await import("node:child_process");
        const execP = promisify(ef);

        // Try DuckDuckGo Lite first (lighter HTML, easier to parse)
        const encoded = encodeURIComponent(args.query);
        try {
          const { stdout: html } = await execP("/usr/bin/curl", [
            "-sL", "--max-time", "10",
            "-H", "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)",
            `https://lite.duckduckgo.com/lite/?q=${encoded}`,
          ], { timeout: 15_000 });

          // Parse DuckDuckGo Lite results
          const snippets = [];
          // Match result links and snippets
          const linkRegex = /<a[^>]*class="result-link"[^>]*>(.*?)<\/a>/gs;
          const snippetRegex = /<td class="result-snippet">(.*?)<\/td>/gs;

          const links = [];
          let m;
          while ((m = linkRegex.exec(html)) !== null) links.push(m[1].replace(/<[^>]+>/g, "").trim());

          const snips = [];
          while ((m = snippetRegex.exec(html)) !== null) snips.push(m[1].replace(/<[^>]+>/g, "").trim());

          // Pair each link with its snippet (when present), top 5 only.
          for (let i = 0; i < Math.min(links.length, 5); i++) {
            const snippet = snips[i] ? `${links[i]}\n${snips[i]}` : links[i];
            if (snippet) snippets.push(snippet);
          }

          if (snippets.length > 0) {
            return { success: true, results: snippets.join("\n\n") };
          }

          // Fallback: extract any text content from result cells
          const cellRegex = /<td[^>]*>(.*?)<\/td>/gs;
          const cells = [];
          while ((m = cellRegex.exec(html)) !== null && cells.length < 10) {
            const text = m[1].replace(/<[^>]+>/g, "").trim();
            if (text.length > 20) cells.push(text);
          }
          if (cells.length > 0) {
            return { success: true, results: cells.slice(0, 5).join("\n\n") };
          }
        } catch { /* fallback below */ }

        // Fallback: use run_command with curl to a simple search API
        return {
          success: true,
          results: `Search for "${args.query}" — try using run_command with: curl -s "https://api.duckduckgo.com/?q=${encoded}&format=json&no_html=1"`,
        };
      }

      // Run a synchronous (blocking) sub-agent: single-turn LLM call,
      // result persisted to ~/.wispy/agents.json.
      case "spawn_agent": {
        const role = args.role ?? "worker";
        // Role → cost tier → model lookup; explorer gets the cheap tier.
        const tierMap = { explorer: "cheap", planner: "mid", worker: "mid", reviewer: "mid" };
        const tier = args.model_tier ?? tierMap[role] ?? "mid";
        const modelForTier = TASK_MODEL_MAP[tier === "cheap" ? "simple" : tier === "expensive" ? "critical" : "complex"];
        const agentModel = modelForTier?.[PROVIDER] ?? MODEL;

        const agentId = `agent-${Date.now().toString(36)}-${role}`;
        const agentsFile = path.join(WISPY_DIR, "agents.json");
        let agents = [];
        // Missing/corrupt agents.json is treated as an empty registry.
        try { agents = JSON.parse(await readFile(agentsFile, "utf8")); } catch {}

        const agent = {
          id: agentId, role, task: args.task, model: agentModel,
          status: "running", createdAt: new Date().toISOString(),
          workstream: ACTIVE_WORKSTREAM, result: null,
        };

        console.log(dim(` 🤖 Spawning ${role} agent (${agentModel})...`));

        // Run sub-agent — single-turn LLM call with the task
        try {
          const agentSystemPrompt = `You are a ${role} sub-agent for Wispy. Your role:
${role === "explorer" ? "Search and analyze codebases, find relevant files and patterns." : ""}
${role === "planner" ? "Design implementation strategies and create step-by-step plans." : ""}
${role === "worker" ? "Implement code changes, write files, execute commands." : ""}
${role === "reviewer" ? "Review code for bugs, security issues, and best practices." : ""}
Be concise and deliver actionable results. Respond in the same language as the task.`;

          const agentMessages = [
            { role: "system", content: agentSystemPrompt },
          ];

          // Fork context if requested
          if (args.fork_context) {
            // Carry over the last 6 user/assistant turns from the parent.
            const parentContext = await loadConversation();
            const recentContext = parentContext.filter(m => m.role === "user" || m.role === "assistant").slice(-6);
            for (const m of recentContext) {
              agentMessages.push({ role: m.role, content: m.content });
            }
          }

          agentMessages.push({ role: "user", content: args.task });

          const agentResult = await chatWithTools(agentMessages, null);
          agent.result = agentResult.type === "text" ? agentResult.text : JSON.stringify(agentResult);
          agent.status = "completed";
          agent.completedAt = new Date().toISOString();
        } catch (err) {
          agent.result = `Error: ${err.message}`;
          agent.status = "failed";
        }

        agents.push(agent);
        // Keep last 50 agents
        if (agents.length > 50) agents = agents.slice(-50);
        await mkdir(WISPY_DIR, { recursive: true });
        await writeFile(agentsFile, JSON.stringify(agents, null, 2) + "\n", "utf8");

        return {
          success: true,
          agent_id: agentId,
          role,
          model: agentModel,
          status: agent.status,
          result_preview: agent.result?.slice(0, 200),
        };
      }

      // List agents belonging to the current workstream (tasks truncated).
      case "list_agents": {
        const agentsFile = path.join(WISPY_DIR, "agents.json");
        let agents = [];
        try { agents = JSON.parse(await readFile(agentsFile, "utf8")); } catch {}
        const wsAgents = agents.filter(a => a.workstream === ACTIVE_WORKSTREAM);
        return {
          success: true,
          agents: wsAgents.map(a => ({
            id: a.id, role: a.role, status: a.status,
            task: a.task.slice(0, 60),
            model: a.model,
            createdAt: a.createdAt,
          })),
        };
      }

      // Fetch the full stored result for one agent by id.
      case "get_agent_result": {
        const agentsFile = path.join(WISPY_DIR, "agents.json");
        let agents = [];
        try { agents = JSON.parse(await readFile(agentsFile, "utf8")); } catch {}
        const found = agents.find(a => a.id === args.agent_id);
        if (!found) return { success: false, error: `Agent not found: ${args.agent_id}` };
        return { success: true, id: found.id, role: found.role, status: found.status, result: found.result };
      }

      // Persist the workstream plan and echo it with status icons.
      case "update_plan": {
        const planFile = path.join(CONVERSATIONS_DIR, `${ACTIVE_WORKSTREAM}.plan.json`);
        const plan = { explanation: args.explanation, steps: args.steps, updatedAt: new Date().toISOString() };
        await mkdir(CONVERSATIONS_DIR, { recursive: true });
        await writeFile(planFile, JSON.stringify(plan, null, 2) + "\n", "utf8");
        // Pretty print plan
        if (args.steps) {
          for (const s of args.steps) {
            const icon = s.status === "completed" ? "✅" : s.status === "in_progress" ? "🔄" : s.status === "skipped" ? "⏭️" : "⬜";
            console.log(dim(` ${icon} ${s.step}`));
          }
        }
        return { success: true, message: "Plan updated" };
      }

      // Run a multi-stage agent pipeline where each stage's output is fed
      // into the next stage's prompt. Stops early on the first stage error.
      case "pipeline": {
        const stages = args.stages ?? ["explorer", "planner", "worker"];
        let stageInput = args.task;
        const results = [];

        console.log(dim(` 📋 Pipeline: ${stages.join(" → ")}`));

        for (let i = 0; i < stages.length; i++) {
          const role = stages[i];
          const icon = { explorer: "🔍", planner: "📋", worker: "🔨", reviewer: "🔎" }[role] ?? "🤖";
          console.log(dim(`\n ${icon} Stage ${i + 1}/${stages.length}: ${role}`));

          // Build stage prompt with previous stage output
          const stagePrompt = i === 0
            ? stageInput
            : `Previous stage (${stages[i-1]}) output:\n${results[i-1].slice(0, 3000)}\n\nYour task as ${role}: ${args.task}`;

          const stageSystem = `You are a ${role} agent in a pipeline. Stage ${i + 1} of ${stages.length}.
${role === "explorer" ? "Find relevant files, patterns, and information." : ""}
${role === "planner" ? "Design a concrete implementation plan based on the exploration results." : ""}
${role === "worker" ? "Implement the plan. Write code, create files, run commands." : ""}
${role === "reviewer" ? "Review the implementation. Check for bugs, security issues, completeness." : ""}
Be concise. Your output feeds into the next stage.`;

          const stageMessages = [
            { role: "system", content: stageSystem },
            { role: "user", content: stagePrompt },
          ];

          try {
            const result = await chatWithTools(stageMessages, null);
            const output = result.type === "text" ? result.text : JSON.stringify(result);
            results.push(output);
            console.log(dim(` ✅ ${output.slice(0, 100)}...`));
            stageInput = output;
          } catch (err) {
            results.push(`Error: ${err.message}`);
            console.log(red(` ❌ ${err.message.slice(0, 100)}`));
            break;
          }
        }

        return {
          success: true,
          stages: stages.map((role, i) => ({ role, output: results[i]?.slice(0, 500) ?? "skipped" })),
          final_output: results[results.length - 1]?.slice(0, 1000),
        };
      }

      // Launch a background sub-agent and return immediately; the caller
      // polls with get_agent_result.
      case "spawn_async_agent": {
        const role = args.role ?? "worker";
        const agentId = `async-${Date.now().toString(36)}-${role}`;
        const agentsFile = path.join(WISPY_DIR, "agents.json");
        let agents = [];
        try { agents = JSON.parse(await readFile(agentsFile, "utf8")); } catch {}

        const agent = {
          id: agentId, role, task: args.task,
          status: "running", async: true,
          createdAt: new Date().toISOString(),
          workstream: ACTIVE_WORKSTREAM, result: null,
        };

        // Save as "running" immediately
        agents.push(agent);
        if (agents.length > 50) agents = agents.slice(-50);
        await mkdir(WISPY_DIR, { recursive: true });
        await writeFile(agentsFile, JSON.stringify(agents, null, 2) + "\n", "utf8");

        console.log(dim(` 🤖 Async agent ${agentId} launched in background`));

        // Fire and forget — run in background
        // NOTE(review): this promise is intentionally unawaited; the inner
        // try/catch covers the LLM call but not the re-read/re-write of
        // agents.json afterwards — a failure there would be an unhandled
        // rejection. TODO confirm intended.
        (async () => {
          const tierMap = { explorer: "cheap", planner: "mid", worker: "mid", reviewer: "mid" };
          const tier = tierMap[role] ?? "mid";
          const modelForTier = TASK_MODEL_MAP[tier === "cheap" ? "simple" : "complex"];
          const agentModel = modelForTier?.[PROVIDER] ?? MODEL;

          const agentSystem = `You are a ${role} sub-agent. Be concise and actionable.`;
          const agentMessages = [
            { role: "system", content: agentSystem },
            { role: "user", content: args.task },
          ];

          try {
            const result = await chatWithTools(agentMessages, null);
            agent.result = result.type === "text" ? result.text : JSON.stringify(result);
            agent.status = "completed";
          } catch (err) {
            agent.result = `Error: ${err.message}`;
            agent.status = "failed";
          }
          agent.completedAt = new Date().toISOString();

          // Update agents file
          let currentAgents = [];
          try { currentAgents = JSON.parse(await readFile(agentsFile, "utf8")); } catch {}
          const idx = currentAgents.findIndex(a => a.id === agentId);
          if (idx !== -1) currentAgents[idx] = agent;
          await writeFile(agentsFile, JSON.stringify(currentAgents, null, 2) + "\n", "utf8");
        })();

        return {
          success: true,
          agent_id: agentId,
          role,
          status: "running",
          message: "Agent launched in background. Use get_agent_result to check when done.",
        };
      }

      // Worker/reviewer retry loop: keep re-running the worker until a
      // reviewer agent verdicts it complete, or MAX_ITERATIONS is hit.
      case "ralph_loop": {
        const MAX_ITERATIONS = 5;
        const criteria = args.success_criteria ?? "Task is fully completed and verified";
        let lastResult = "";

        console.log(dim(` 🪨 Ralph mode: will retry up to ${MAX_ITERATIONS} times until verified complete`));

        for (let attempt = 1; attempt <= MAX_ITERATIONS; attempt++) {
          // Worker attempt
          console.log(dim(`\n 🔨 Attempt ${attempt}/${MAX_ITERATIONS}: worker executing...`));

          // After the first attempt, feed the previous output back in with
          // the reviewer's rejection so the worker can correct course.
          const workerPrompt = attempt === 1
            ? args.task
            : `Previous attempt output:\n${lastResult.slice(0, 2000)}\n\nThe reviewer said this is NOT complete yet. Try again.\nTask: ${args.task}\nSuccess criteria: ${criteria}`;

          const workerMessages = [
            { role: "system", content: "You are a worker agent. Execute the task thoroughly. Do not stop until the task is fully done." },
            { role: "user", content: workerPrompt },
          ];

          try {
            const workerResult = await chatWithTools(workerMessages, null);
            lastResult = workerResult.type === "text" ? workerResult.text : JSON.stringify(workerResult);
            console.log(dim(` ✅ Worker output: ${lastResult.slice(0, 100)}...`));
          } catch (err) {
            console.log(red(` ❌ Worker error: ${err.message.slice(0, 100)}`));
            continue;
          }

          // Reviewer verification
          console.log(dim(` 🔎 Reviewer verifying...`));

          const reviewerMessages = [
            { role: "system", content: "You are a reviewer agent. Your ONLY job is to determine if the task is TRULY complete. Reply with JSON: {\"complete\": true/false, \"reason\": \"why\"}" },
            { role: "user", content: `Task: ${args.task}\nSuccess criteria: ${criteria}\n\nWorker output:\n${lastResult.slice(0, 3000)}\n\nIs this task TRULY complete? Reply with JSON only.` },
          ];

          try {
            const reviewResult = await chatWithTools(reviewerMessages, null);
            const reviewText = reviewResult.type === "text" ? reviewResult.text : "";

            // Try to parse JSON from review
            const jsonMatch = reviewText.match(/\{[\s\S]*"complete"[\s\S]*\}/);
            if (jsonMatch) {
              try {
                const verdict = JSON.parse(jsonMatch[0]);
                if (verdict.complete) {
                  console.log(green(` ✅ Reviewer: COMPLETE — ${verdict.reason?.slice(0, 80) ?? "verified"}`));
                  return { success: true, iterations: attempt, result: lastResult, verified: true };
                }
                console.log(yellow(` ⏳ Reviewer: NOT COMPLETE — ${verdict.reason?.slice(0, 80) ?? "needs more work"}`));
              } catch { /* parse failed, continue */ }
            }
          } catch (err) {
            console.log(dim(` ⚠️ Review error: ${err.message.slice(0, 80)}`));
          }
        }

        // Max iterations reached
        console.log(yellow(` 🪨 Ralph: max iterations (${MAX_ITERATIONS}) reached`));
        return { success: true, iterations: MAX_ITERATIONS, result: lastResult, verified: false, message: "Max iterations reached" };
      }

      default:
        return { success: false, error: `Unknown tool: ${name}` };
    }
  } catch (err) {
    // Surface any failure as a structured result rather than throwing.
    return { success: false, error: err.message };
  }
}
|
|
1074
|
+
|
|
1075
|
+
// ---------------------------------------------------------------------------
|
|
1076
|
+
// System prompt builder
|
|
1077
|
+
// ---------------------------------------------------------------------------
|
|
1078
|
+
|
|
1079
|
+
// ---------------------------------------------------------------------------
|
|
1080
|
+
// work.md — per-workstream context file
|
|
1081
|
+
// ---------------------------------------------------------------------------
|
|
1082
|
+
|
|
1083
|
+
/**
 * Locate the per-workstream work.md context file.
 *
 * Searched in priority order: the conversations dir, a project-local
 * `.wispy/` dir, then a bare `work.md` in the project root.
 *
 * @returns {Promise<{path: string, content: string}|null>} The first file
 *   found (content capped at 20k chars), or null when none exists.
 */
async function loadWorkMd() {
  const candidates = [
    path.join(CONVERSATIONS_DIR, `${ACTIVE_WORKSTREAM}.work.md`),
    path.resolve(`.wispy/${ACTIVE_WORKSTREAM}.work.md`),
    path.resolve(`work.md`), // project root fallback
  ];

  for (const candidate of candidates) {
    const text = await readFileOr(candidate);
    if (text) {
      // Cap the injected context so it can't blow up the prompt.
      return { path: candidate, content: text.slice(0, 20_000) };
    }
  }

  return null;
}
|
|
1095
|
+
|
|
1096
|
+
/**
 * Assemble the system prompt: persona + tool list, then optional sections
 * for project context (WISPY.md), workstream context (work.md), and
 * persistent memory — each appended only when its source file exists.
 *
 * @returns {Promise<string>} The full system prompt, newline-joined.
 */
async function buildSystemPrompt() {
  const lines = [
    "You are Wispy 🌿, a personal AI workspace assistant.",
    "You are friendly, concise, and action-oriented.",
    "Respond in the same language the user uses.",
    "If the user speaks Korean, respond in Korean with casual tone (반말).",
    "",
    "You have access to tools: read_file, write_file, run_command, list_directory, web_search, spawn_agent, spawn_async_agent, pipeline, ralph_loop, update_plan, list_agents, get_agent_result.",
    "Use them proactively when the user asks you to do something that requires file access, commands, or web info.",
    "When using tools, briefly tell the user what you're doing.",
    "",
  ];

  // Append a markdown section followed by a blank separator line.
  const appendSection = (heading, body) => {
    lines.push(heading, body, "");
  };

  const wispyMd = await loadWispyMd();
  if (wispyMd) {
    appendSection("## Project Context (WISPY.md)", wispyMd);
  }

  // Per-workstream context
  const workMd = await loadWorkMd();
  if (workMd) {
    appendSection(`## Workstream Context (${ACTIVE_WORKSTREAM})`, workMd.content);
  }

  const memories = await loadMemories();
  if (memories) {
    appendSection("## Persistent Memory", memories);
  }

  return lines.join("\n");
}
|
|
1133
|
+
|
|
1134
|
+
// ---------------------------------------------------------------------------
|
|
1135
|
+
// OpenAI API (streaming)
|
|
1136
|
+
// ---------------------------------------------------------------------------
|
|
1137
|
+
|
|
1138
|
+
// ---------------------------------------------------------------------------
|
|
1139
|
+
// Gemini API with function calling (non-streaming for tool calls, streaming for text)
|
|
1140
|
+
// ---------------------------------------------------------------------------
|
|
1141
|
+
|
|
1142
|
+
// OpenAI-compatible API endpoints for various providers
// Keys match the PROVIDER setting; chatOpenAIWithTools falls back to
// `openai` for any provider not listed here.
const OPENAI_COMPAT_ENDPOINTS = {
  openai: "https://api.openai.com/v1/chat/completions",
  openrouter: "https://openrouter.ai/api/v1/chat/completions",
  groq: "https://api.groq.com/openai/v1/chat/completions",
  deepseek: "https://api.deepseek.com/v1/chat/completions",
  // Ollama serves an OpenAI-compatible API locally; host overridable via
  // the OLLAMA_HOST env var.
  ollama: `${process.env.OLLAMA_HOST ?? "http://localhost:11434"}/v1/chat/completions`,
};
|
|
1150
|
+
|
|
1151
|
+
/**
 * Provider-agnostic chat entry point with tool support.
 *
 * Routes to the Gemini or Anthropic implementation based on PROVIDER;
 * every other provider goes through the OpenAI-compatible path.
 *
 * @param {Array<object>} messages - Internal-format conversation messages.
 * @param {?function(string): void} onChunk - Streaming text callback, or null.
 * @returns {Promise<object>} `{ type: "text", text }` or
 *   `{ type: "tool_calls", calls }` from the selected backend.
 */
async function chatWithTools(messages, onChunk) {
  switch (PROVIDER) {
    case "google":
      return chatGeminiWithTools(messages, onChunk);
    case "anthropic":
      return chatAnthropicWithTools(messages, onChunk);
    default:
      // All others use OpenAI-compatible API
      return chatOpenAIWithTools(messages, onChunk);
  }
}
|
|
1157
|
+
|
|
1158
|
+
/**
 * Chat with the Gemini API, supporting function calling.
 *
 * Streams (SSE) when the conversation contains no tool results; switches to
 * a non-streaming generateContent call once tool results are present.
 *
 * @param {Array<object>} messages - Internal-format messages (system /
 *   user / assistant / tool_result roles).
 * @param {?function(string): void} onChunk - Called with text chunks as
 *   they arrive (streaming) or once with the full text (non-streaming).
 * @returns {Promise<object>} `{ type: "text", text }` or
 *   `{ type: "tool_calls", calls: [{ name, args }] }`.
 * @throws {Error} On non-2xx API responses or an empty candidate list.
 */
async function chatGeminiWithTools(messages, onChunk) {
  const systemInstruction = messages.find(m => m.role === "system")?.content ?? "";

  // Build Gemini contents — handle tool results too
  const contents = [];
  for (const m of messages) {
    if (m.role === "system") continue;
    if (m.role === "tool_result") {
      // Gemini expects tool output as a user-role functionResponse part.
      contents.push({
        role: "user",
        parts: [{ functionResponse: { name: m.toolName, response: m.result } }],
      });
    } else if (m.role === "assistant" && m.toolCalls) {
      // Prior assistant tool invocations become model-role functionCall parts.
      contents.push({
        role: "model",
        parts: m.toolCalls.map(tc => ({
          functionCall: { name: tc.name, args: tc.args },
        })),
      });
    } else {
      contents.push({
        role: m.role === "assistant" ? "model" : "user",
        parts: [{ text: m.content }],
      });
    }
  }

  // Track input tokens
  const inputText = contents.map(c => c.parts?.map(p => p.text ?? JSON.stringify(p)).join("")).join("");
  sessionTokens.input += estimateTokens(systemInstruction + inputText);

  const geminiTools = [{
    functionDeclarations: TOOL_DEFINITIONS.map(t => ({
      name: t.name,
      description: t.description,
      parameters: t.parameters,
    })),
  }];

  // Use streaming when no tool results in the conversation (pure text),
  // non-streaming when tool results are present (function calling needs it)
  const hasToolResults = messages.some(m => m.role === "tool_result");
  const useStreaming = !hasToolResults;
  const endpoint = useStreaming ? "streamGenerateContent" : "generateContent";
  const url = `https://generativelanguage.googleapis.com/v1beta/models/${MODEL}:${endpoint}?${useStreaming ? "alt=sse&" : ""}key=${API_KEY}`;

  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      system_instruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : undefined,
      contents,
      tools: geminiTools,
      generationConfig: { temperature: 0.7, maxOutputTokens: 4096 },
    }),
  });

  if (!response.ok) {
    const err = await response.text();
    throw new Error(`Gemini API error ${response.status}: ${err.slice(0, 300)}`);
  }

  if (useStreaming) {
    // SSE streaming response
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let fullText = "";
    let sseBuffer = "";

    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      sseBuffer += decoder.decode(value, { stream: true });
      // Keep any partial trailing line in the buffer for the next chunk.
      const sseLines = sseBuffer.split("\n");
      sseBuffer = sseLines.pop() ?? "";

      for (const line of sseLines) {
        if (!line.startsWith("data: ")) continue;
        const ld = line.slice(6).trim();
        if (!ld || ld === "[DONE]") continue;
        try {
          const parsed = JSON.parse(ld);
          // Check for function calls in stream
          const streamParts = parsed.candidates?.[0]?.content?.parts ?? [];
          const streamFC = streamParts.filter(p => p.functionCall);
          if (streamFC.length > 0) {
            // A function call ends the turn: stop streaming immediately.
            sessionTokens.output += estimateTokens(JSON.stringify(streamFC));
            return { type: "tool_calls", calls: streamFC.map(p => ({ name: p.functionCall.name, args: p.functionCall.args })) };
          }
          const t = streamParts.map(p => p.text ?? "").join("");
          if (t) { fullText += t; onChunk?.(t); }
        } catch { /* skip */ }
      }
    }
    sessionTokens.output += estimateTokens(fullText);
    return { type: "text", text: fullText };
  }

  // Non-streaming response (when tool results present)
  const data = await response.json();
  const candidate = data.candidates?.[0];
  if (!candidate) throw new Error("No response from Gemini");

  const parts = candidate.content?.parts ?? [];
  const functionCalls = parts.filter(p => p.functionCall);
  if (functionCalls.length > 0) {
    sessionTokens.output += estimateTokens(JSON.stringify(functionCalls));
    return { type: "tool_calls", calls: functionCalls.map(p => ({ name: p.functionCall.name, args: p.functionCall.args })) };
  }

  const text = parts.map(p => p.text ?? "").join("");
  sessionTokens.output += estimateTokens(text);
  // Non-streaming path delivers the whole text in one callback.
  if (onChunk) onChunk(text);
  return { type: "text", text };
}
|
|
1273
|
+
|
|
1274
|
+
/**
 * Chat with an OpenAI-compatible API (OpenAI, OpenRouter, Groq, DeepSeek,
 * Ollama), supporting tool calls.
 *
 * Fix: the original filtered out `tool_result` messages before mapping,
 * which made its tool_result → role:"tool" branch dead code — tool results
 * were silently dropped and assistant tool_calls were never serialized, so
 * multi-turn tool use could not work against these providers. Messages are
 * now mapped to the Chat Completions format: assistant messages carrying
 * toolCalls become `tool_calls` entries, and each tool_result becomes a
 * `role: "tool"` message referencing the matching call id.
 *
 * @param {Array<object>} messages - Internal-format messages (system /
 *   user / assistant / tool_result roles).
 * @param {?function(string): void} onChunk - Called once with the full
 *   response text (this path is non-streaming).
 * @returns {Promise<object>} `{ type: "text", text }` or
 *   `{ type: "tool_calls", calls: [{ id, name, args }] }`.
 * @throws {Error} On non-2xx API responses or an empty choice list.
 */
async function chatOpenAIWithTools(messages, onChunk) {
  const openaiMessages = messages.map(m => {
    if (m.role === "tool_result") {
      // Per the Chat Completions spec a tool message must reference the id
      // of the assistant tool_call it answers; fall back to the tool name
      // when no id was recorded (mirrors the Anthropic path's fallback).
      return { role: "tool", tool_call_id: m.toolCallId ?? m.toolName, content: JSON.stringify(m.result) };
    }
    if (m.role === "assistant" && m.toolCalls) {
      // Serialize prior assistant tool invocations so the provider can
      // pair them with the tool messages that follow.
      return {
        role: "assistant",
        content: m.content ?? null,
        tool_calls: m.toolCalls.map(tc => ({
          id: tc.id ?? tc.name,
          type: "function",
          function: { name: tc.name, arguments: JSON.stringify(tc.args ?? {}) },
        })),
      };
    }
    return { role: m.role, content: m.content };
  });

  const openaiTools = TOOL_DEFINITIONS.map(t => ({
    type: "function",
    function: { name: t.name, description: t.description, parameters: t.parameters },
  }));

  // Rough input-token accounting for the session display.
  const inputText = openaiMessages.map(m => m.content ?? "").join("");
  sessionTokens.input += estimateTokens(inputText);

  const endpoint = OPENAI_COMPAT_ENDPOINTS[PROVIDER] ?? OPENAI_COMPAT_ENDPOINTS.openai;
  const headers = { "Content-Type": "application/json" };
  if (API_KEY) headers["Authorization"] = `Bearer ${API_KEY}`;
  if (PROVIDER === "openrouter") headers["HTTP-Referer"] = "https://wispy.dev";

  // Some providers don't support tools (Ollama, some Groq models)
  const supportsTools = !["ollama"].includes(PROVIDER);
  const body = { model: MODEL, messages: openaiMessages, temperature: 0.7, max_tokens: 4096 };
  if (supportsTools) body.tools = openaiTools;

  const response = await fetch(endpoint, { method: "POST", headers, body: JSON.stringify(body) });

  if (!response.ok) {
    const err = await response.text();
    throw new Error(`OpenAI API error ${response.status}: ${err.slice(0, 300)}`);
  }

  const data = await response.json();
  const choice = data.choices?.[0];
  if (!choice) throw new Error("No response from OpenAI");

  if (choice.message?.tool_calls?.length > 0) {
    const calls = choice.message.tool_calls.map(tc => {
      // Preserve the provider-assigned id so tool results can reference it
      // on the next turn; tolerate malformed JSON arguments.
      let parsedArgs = {};
      try { parsedArgs = JSON.parse(tc.function.arguments); } catch { /* keep {} */ }
      return { id: tc.id, name: tc.function.name, args: parsedArgs };
    });
    sessionTokens.output += estimateTokens(JSON.stringify(calls));
    return { type: "tool_calls", calls };
  }

  const text = choice.message?.content ?? "";
  sessionTokens.output += estimateTokens(text);
  // Non-streaming: deliver the full text in a single callback.
  if (onChunk) onChunk(text);
  return { type: "text", text };
}
|
|
1323
|
+
|
|
1324
|
+
// ---------------------------------------------------------------------------
|
|
1325
|
+
// Anthropic API with tool use (streaming text + tool calls)
|
|
1326
|
+
// ---------------------------------------------------------------------------
|
|
1327
|
+
|
|
1328
|
+
// Call the Anthropic Messages API with tool definitions, streaming text
// chunks to `onChunk` as they arrive.
// Returns { type: "text", text } or { type: "tool_calls", calls }.
//
// FIX 1: consecutive tool_result messages are merged into one user turn.
// The Messages API requires user/assistant roles to strictly alternate, so
// emitting one user message per tool result (as the agent loop records them)
// is rejected whenever a round issued more than one tool call.
// FIX 2: the trailing SSE line left in `buffer` after the stream ends is now
// flushed; streams are not guaranteed to terminate with a newline.
async function chatAnthropicWithTools(messages, onChunk) {
  const systemPrompt = messages.find(m => m.role === "system")?.content ?? "";

  // Translate the internal message log into Anthropic's wire format.
  const anthropicMessages = [];
  for (const m of messages) {
    if (m.role === "system") continue; // sent separately via the `system` field
    if (m.role === "tool_result") {
      const block = {
        type: "tool_result",
        tool_use_id: m.toolUseId ?? m.toolName,
        content: JSON.stringify(m.result),
      };
      const last = anthropicMessages[anthropicMessages.length - 1];
      if (last?.role === "user" && Array.isArray(last.content)) {
        // Merge into the preceding tool_result turn (FIX 1).
        last.content.push(block);
      } else {
        anthropicMessages.push({ role: "user", content: [block] });
      }
    } else if (m.role === "assistant" && m.toolCalls) {
      anthropicMessages.push({
        role: "assistant",
        content: m.toolCalls.map(tc => ({
          type: "tool_use", id: tc.id ?? tc.name, name: tc.name, input: tc.args,
        })),
      });
    } else {
      anthropicMessages.push({
        role: m.role === "assistant" ? "assistant" : "user",
        content: m.content,
      });
    }
  }

  // Rough input-token accounting for the session cost display.
  const inputText = anthropicMessages.map(m => typeof m.content === "string" ? m.content : JSON.stringify(m.content)).join("");
  sessionTokens.input += estimateTokens(systemPrompt + inputText);

  const anthropicTools = TOOL_DEFINITIONS.map(t => ({
    name: t.name,
    description: t.description,
    input_schema: t.parameters,
  }));

  const response = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-api-key": API_KEY,
      "anthropic-version": "2023-06-01",
    },
    body: JSON.stringify({
      model: MODEL,
      max_tokens: 4096,
      system: systemPrompt,
      messages: anthropicMessages,
      tools: anthropicTools,
      stream: true,
    }),
  });

  if (!response.ok) {
    const err = await response.text();
    throw new Error(`Anthropic API error ${response.status}: ${err.slice(0, 300)}`);
  }

  // Parse the SSE stream: text deltas go straight to onChunk; tool_use blocks
  // accumulate their partial JSON input until the content block closes.
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  let fullText = "";
  const toolCalls = [];
  let currentToolCall = null;
  let currentToolInput = "";

  const processLine = (line) => {
    if (!line.startsWith("data: ")) return;
    const data = line.slice(6).trim();
    if (!data) return;
    try {
      const event = JSON.parse(data);
      if (event.type === "content_block_start") {
        if (event.content_block?.type === "tool_use") {
          currentToolCall = { id: event.content_block.id, name: event.content_block.name, args: {} };
          currentToolInput = "";
        }
      } else if (event.type === "content_block_delta") {
        if (event.delta?.type === "text_delta") {
          fullText += event.delta.text;
          onChunk?.(event.delta.text);
        } else if (event.delta?.type === "input_json_delta") {
          currentToolInput += event.delta.partial_json ?? "";
        }
      } else if (event.type === "content_block_stop") {
        if (currentToolCall) {
          // Malformed accumulated JSON degrades to empty args rather than throwing.
          try { currentToolCall.args = JSON.parse(currentToolInput); } catch { currentToolCall.args = {}; }
          toolCalls.push(currentToolCall);
          currentToolCall = null;
          currentToolInput = "";
        }
      }
    } catch { /* skip malformed SSE payloads */ }
  };

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    buffer = lines.pop() ?? "";
    for (const line of lines) processLine(line);
  }
  // Flush any final line still sitting in the buffer (FIX 2).
  buffer += decoder.decode();
  if (buffer) processLine(buffer);

  sessionTokens.output += estimateTokens(fullText + JSON.stringify(toolCalls));

  if (toolCalls.length > 0) {
    return { type: "tool_calls", calls: toolCalls };
  }
  return { type: "text", text: fullText };
}
|
|
1442
|
+
|
|
1443
|
+
// ---------------------------------------------------------------------------
|
|
1444
|
+
// Agentic loop — handles tool calls iteratively
|
|
1445
|
+
// ---------------------------------------------------------------------------
|
|
1446
|
+
|
|
1447
|
+
// Agentic loop: repeatedly call the model, executing any tool calls it
// requests and feeding the results back, until it answers with plain text
// or MAX_TOOL_ROUNDS is exhausted. Mutates `messages` in place (the caller's
// array reference stays valid) and returns the final text reply.
//
// FIX: removed the unused `lastUserMsg` local; clarified the budget guard
// (`limitUsd != null` covers both null and undefined with identical outcome).
async function agentLoop(messages, onChunk) {
  const MAX_TOOL_ROUNDS = 10;

  // Optimize the context window before sending; replace contents in place so
  // the caller's reference to `messages` remains valid.
  const optimizedMessages = optimizeContext(messages);
  if (optimizedMessages.length < messages.length) {
    messages.length = 0;
    messages.push(...optimizedMessages);
  }

  for (let round = 0; round < MAX_TOOL_ROUNDS; round++) {
    // Re-check the budget before every API call — tool rounds spend too.
    const budgets = await loadBudgets();
    const wsBudget = budgets[ACTIVE_WORKSTREAM];
    if (wsBudget && wsBudget.limitUsd != null && wsBudget.spentUsd > wsBudget.limitUsd) {
      return `⚠️ Budget exceeded for workstream "${ACTIVE_WORKSTREAM}" ($${wsBudget.spentUsd.toFixed(4)} / $${wsBudget.limitUsd.toFixed(4)}). Use /budget to adjust.`;
    }

    const result = await chatWithTools(messages, onChunk);

    if (result.type === "text") {
      // Final answer: record spending for this workstream and return.
      await trackSpending(ACTIVE_WORKSTREAM, sessionTokens.input, sessionTokens.output, MODEL);
      return result.text;
    }

    // Tool-call round: record the assistant's request, run each tool, and
    // append the results for the next iteration.
    console.log(""); // newline before tool output
    messages.push({ role: "assistant", toolCalls: result.calls, content: "" });

    for (const call of result.calls) {
      console.log(dim(` 🔧 ${call.name}(${JSON.stringify(call.args).slice(0, 80)})`));
      const toolResult = await executeTool(call.name, call.args);

      if (toolResult.success) {
        const preview = JSON.stringify(toolResult).slice(0, 100);
        console.log(dim(` ✅ ${preview}${preview.length >= 100 ? "..." : ""}`));
      } else {
        console.log(red(` ❌ ${toolResult.error}`));
      }

      messages.push({
        role: "tool_result",
        toolName: call.name,
        toolUseId: call.id ?? call.name,
        result: toolResult,
      });
    }
    console.log(""); // newline before next response
  }

  return "(tool call limit reached)";
}
|
|
1502
|
+
|
|
1503
|
+
// ---------------------------------------------------------------------------
|
|
1504
|
+
// Slash commands
|
|
1505
|
+
// ---------------------------------------------------------------------------
|
|
1506
|
+
|
|
1507
|
+
// Handle a "/command" typed at the prompt. Mutates `conversation` in place
// for commands that reset or compact history. Returns true when the input
// was consumed as a command, false when it should go to the model instead.
//
// FIX: /clear and /compact previously emptied the conversation entirely,
// which also dropped the system prompt for the rest of the session (it is
// only re-inserted on the next startup). Both now preserve the system
// message at index 0.
async function handleSlashCommand(input, conversation) {
  const parts = input.trim().split(/\s+/);
  const cmd = parts[0].toLowerCase();

  if (cmd === "/help") {
    console.log(`
${bold("Wispy Commands:")}
  ${cyan("/help")}                  Show this help
  ${cyan("/compact")}               Summarize & compress conversation
  ${cyan("/memory")} <type> <text>  Save to persistent memory (user|feedback|project|references)
  ${cyan("/clear")}                 Reset conversation
  ${cyan("/history")}               Show conversation length
  ${cyan("/model")} [name]          Show or change model
  ${cyan("/quit")} or ${cyan("/exit")}      Exit
`);
    return true;
  }

  if (cmd === "/clear") {
    // Keep the system prompt so the very next turn still has instructions.
    const systemMsg = conversation[0]?.role === "system" ? conversation[0] : null;
    conversation.length = 0;
    if (systemMsg) conversation.push(systemMsg);
    await saveConversation(conversation);
    console.log(green("🌿 Conversation cleared."));
    return true;
  }

  if (cmd === "/history") {
    console.log(dim(`${conversation.length} messages in current conversation.`));
    return true;
  }

  if (cmd === "/model") {
    if (parts[1]) {
      // NOTE(review): MODEL appears to be resolved once at startup elsewhere
      // in this file; setting the env var here may only affect child
      // processes, not the current session — confirm.
      process.env.WISPY_MODEL = parts[1];
      console.log(green(`Model changed to: ${parts[1]}`));
    } else {
      console.log(dim(`Current model: ${MODEL}`));
    }
    return true;
  }

  if (cmd === "/memory") {
    const type = parts[1];
    const content = parts.slice(2).join(" ");
    if (!type || !content) {
      console.log(yellow("Usage: /memory <user|feedback|project|references> <content>"));
      return true;
    }
    const validTypes = ["user", "feedback", "project", "references"];
    if (!validTypes.includes(type)) {
      console.log(yellow(`Invalid type. Use: ${validTypes.join(", ")}`));
      return true;
    }
    await appendToMemory(type, content);
    console.log(green(`✅ Saved to ${type} memory.`));
    return true;
  }

  if (cmd === "/compact") {
    // Ask the AI for a short summary of recent turns, then replace the
    // history with that summary (preserving the system prompt).
    const summaryMessages = [
      { role: "system", content: "Summarize the following conversation in 3-5 bullet points. Be concise." },
      ...conversation.filter(m => m.role !== "system").slice(-20),
      { role: "user", content: "Summarize our conversation so far." },
    ];

    process.stdout.write(cyan("🌿 Compacting... "));
    const summary = await chatStream(summaryMessages, (chunk) => process.stdout.write(chunk));
    console.log("\n");

    // Persist a trace of the compaction, then rebuild the conversation.
    await appendToMemory("project", `Session compact: ${summary.slice(0, 200)}`);
    const systemMsg = conversation[0]?.role === "system" ? conversation[0] : null;
    conversation.length = 0;
    if (systemMsg) conversation.push(systemMsg);
    conversation.push({ role: "assistant", content: `[Previous session summary]\n${summary}` });
    await saveConversation(conversation);
    console.log(green("📦 Conversation compacted."));
    return true;
  }

  if (cmd === "/cost" || cmd === "/tokens" || cmd === "/usage") {
    console.log(dim(`📊 Session usage: ${formatCost()}`));
    return true;
  }

  if (cmd === "/workstreams" || cmd === "/ws") {
    const wsList = await listWorkstreams();
    if (wsList.length === 0) {
      console.log(dim("No workstreams yet."));
    } else {
      console.log(bold("\n📋 Workstreams:\n"));
      for (const ws of wsList) {
        const marker = ws === ACTIVE_WORKSTREAM ? green("● ") : " ";
        // Show a preview of the last user message in each workstream.
        const wsConv = await loadWorkstreamConversation(ws);
        const lastMsg = wsConv.filter(m => m.role === "user").pop();
        const preview = lastMsg ? dim(` — "${lastMsg.content.slice(0, 40)}${lastMsg.content.length > 40 ? "..." : ""}"`) : "";
        const msgCount = wsConv.filter(m => m.role === "user").length;
        console.log(`${marker}${ws.padEnd(20)} ${dim(`${msgCount} msgs`)}${preview}`);
      }
      console.log(dim(`\nSwitch: wispy -w <name>`));
    }
    return true;
  }

  if (cmd === "/overview" || cmd === "/dashboard" || cmd === "/all") {
    await showOverview();
    return true;
  }

  if (cmd === "/search") {
    const query = parts.slice(1).join(" ");
    if (!query) {
      console.log(yellow("Usage: /search <keyword> — search across all workstreams"));
      return true;
    }
    await searchAcrossWorkstreams(query);
    return true;
  }

  if (cmd === "/work") {
    const workMd = await loadWorkMd();
    if (parts[1] === "edit" || parts[1] === "set") {
      const content = parts.slice(2).join(" ");
      if (!content) {
        console.log(yellow("Usage: /work set <content> or create file manually:"));
        console.log(dim(` ${path.join(CONVERSATIONS_DIR, `${ACTIVE_WORKSTREAM}.work.md`)}`));
        return true;
      }
      const workPath = path.join(CONVERSATIONS_DIR, `${ACTIVE_WORKSTREAM}.work.md`);
      await mkdir(CONVERSATIONS_DIR, { recursive: true });
      await appendFile(workPath, `\n${content}\n`, "utf8");
      console.log(green(`✅ Added to ${ACTIVE_WORKSTREAM} work.md`));
      return true;
    }
    if (parts[1] === "init") {
      const workPath = path.join(CONVERSATIONS_DIR, `${ACTIVE_WORKSTREAM}.work.md`);
      if (workMd) {
        console.log(dim(`work.md already exists at ${workMd.path}`));
        return true;
      }
      await mkdir(CONVERSATIONS_DIR, { recursive: true });
      await writeFile(workPath, `# ${ACTIVE_WORKSTREAM}\n\n## Goals\n\n## Context\n\n## Notes\n\n`, "utf8");
      console.log(green(`✅ Created ${workPath}`));
      return true;
    }
    // No subcommand: show the current work.md, or explain how to create one.
    if (workMd) {
      console.log(`\n${bold(`📋 work.md (${ACTIVE_WORKSTREAM})`)}`);
      console.log(dim(` ${workMd.path}\n`));
      console.log(workMd.content);
    } else {
      console.log(dim(`No work.md for "${ACTIVE_WORKSTREAM}". Create one:`));
      console.log(dim(` /work init`));
      console.log(dim(` /work set "project goals and context here"`));
    }
    return true;
  }

  if (cmd === "/budget") {
    const budgets = await loadBudgets();
    if (parts[1] === "set") {
      const limit = parseFloat(parts[2]);
      if (Number.isNaN(limit)) {
        console.log(yellow("Usage: /budget set <amount_usd> — e.g., /budget set 1.00"));
        return true;
      }
      if (!budgets[ACTIVE_WORKSTREAM]) budgets[ACTIVE_WORKSTREAM] = { limitUsd: null, spentUsd: 0, totalTokens: 0 };
      budgets[ACTIVE_WORKSTREAM].limitUsd = limit;
      await saveBudgets(budgets);
      console.log(green(`💰 Budget set: $${limit.toFixed(2)} for "${ACTIVE_WORKSTREAM}"`));
      return true;
    }
    if (parts[1] === "clear") {
      if (budgets[ACTIVE_WORKSTREAM]) budgets[ACTIVE_WORKSTREAM].limitUsd = null;
      await saveBudgets(budgets);
      console.log(dim("Budget limit removed."));
      return true;
    }
    // No subcommand: show spending for every workstream.
    const wsList = Object.keys(budgets);
    if (wsList.length === 0) {
      console.log(dim("No spending tracked yet."));
      return true;
    }
    console.log(bold("\n💰 Budget Overview:\n"));
    for (const ws of wsList) {
      const b = budgets[ws];
      const marker = ws === ACTIVE_WORKSTREAM ? green("● ") : " ";
      const limit = b.limitUsd !== null ? `/ $${b.limitUsd.toFixed(2)}` : dim("(no limit)");
      const pct = b.limitUsd ? ` (${((b.spentUsd / b.limitUsd) * 100).toFixed(1)}%)` : "";
      const warning = b.limitUsd && b.spentUsd > b.limitUsd ? red(" ⚠ OVER") : "";
      console.log(`${marker}${ws.padEnd(20)} $${b.spentUsd.toFixed(4)} ${limit}${pct}${warning} ${dim(`${b.totalTokens} tokens`)}`);
    }
    console.log(dim("\nSet limit: /budget set <usd> | Remove: /budget clear"));
    console.log("");
    return true;
  }

  if (cmd === "/provider") {
    console.log(dim(`Provider: ${PROVIDERS[PROVIDER]?.label ?? PROVIDER}`));
    console.log(dim(`Model: ${MODEL}`));
    console.log(dim(`Workstream: ${ACTIVE_WORKSTREAM}`));
    return true;
  }

  if (cmd === "/quit" || cmd === "/exit") {
    console.log(dim(`🌿 Bye! (${formatCost()})`));
    process.exit(0);
  }

  return false;
}
|
|
1718
|
+
|
|
1719
|
+
// ---------------------------------------------------------------------------
|
|
1720
|
+
// Interactive REPL
|
|
1721
|
+
// ---------------------------------------------------------------------------
|
|
1722
|
+
|
|
1723
|
+
// Interactive chat loop: loads the workstream's conversation, keeps the
// system prompt fresh at index 0, and feeds each typed line through the
// agentic loop, persisting the transcript after every reply.
async function runRepl() {
  const wsLabel = ACTIVE_WORKSTREAM === "default" ? "" : ` [${cyan(ACTIVE_WORKSTREAM)}]`;
  const providerLabel = PROVIDERS[PROVIDER]?.label ?? PROVIDER;
  console.log(`
${bold("🌿 Wispy")}${wsLabel} ${dim(`— ${providerLabel} (${MODEL})`)}
${dim("Type a message to chat. /help for commands. Ctrl+C to exit.")}
`);

  const systemPrompt = await buildSystemPrompt();
  const conversation = await loadConversation();

  // The system prompt always lives at index 0 and is refreshed each start.
  if (conversation[0]?.role === "system") {
    conversation[0].content = systemPrompt;
  } else {
    conversation.unshift({ role: "system", content: systemPrompt });
  }

  // Print a friendly message for well-known failure classes.
  const reportError = (err) => {
    const msg = err.message;
    if (msg.includes("429") || msg.includes("rate")) {
      console.log(yellow("\n\n⏳ Rate limited — wait a moment and try again."));
    } else if (msg.includes("401") || msg.includes("403")) {
      console.log(red("\n\n🔑 Authentication error — check your API key."));
    } else if (msg.includes("network") || msg.includes("fetch")) {
      console.log(red("\n\n🌐 Network error — check your connection."));
    } else {
      console.log(red(`\n\n❌ Error: ${msg.slice(0, 200)}`));
    }
  };

  const rl = createInterface({
    input: process.stdin,
    output: process.stdout,
    prompt: green("› "),
    historySize: 100,
  });

  rl.prompt();

  rl.on("line", async (line) => {
    const input = line.trim();
    if (!input) {
      rl.prompt();
      return;
    }

    // Slash commands; an unrecognized one falls through to the model.
    if (input.startsWith("/")) {
      const handled = await handleSlashCommand(input, conversation);
      if (handled) {
        rl.prompt();
        return;
      }
    }

    conversation.push({ role: "user", content: input });

    process.stdout.write(cyan("🌿 "));
    try {
      const response = await agentLoop(conversation, (chunk) => {
        process.stdout.write(chunk);
      });
      console.log("\n");

      conversation.push({ role: "assistant", content: response });
      await saveConversation(conversation);
      console.log(dim(` ${formatCost()}`));
    } catch (err) {
      reportError(err);
    }

    rl.prompt();
  });

  rl.on("close", () => {
    console.log(dim(`\n🌿 Bye! (${formatCost()})`));
    process.exit(0);
  });
}
|
|
1795
|
+
|
|
1796
|
+
// ---------------------------------------------------------------------------
|
|
1797
|
+
// One-shot mode
|
|
1798
|
+
// ---------------------------------------------------------------------------
|
|
1799
|
+
|
|
1800
|
+
// One-shot mode: append a single user message to the persisted conversation,
// run the agent loop once, print the streamed reply, and save the transcript.
// Exits with status 1 on any error.
async function runOneShot(message) {
  const systemPrompt = await buildSystemPrompt();
  const conversation = await loadConversation();

  // Keep the (refreshed) system prompt at index 0, same as the REPL does.
  if (conversation[0]?.role === "system") {
    conversation[0].content = systemPrompt;
  } else {
    conversation.unshift({ role: "system", content: systemPrompt });
  }

  conversation.push({ role: "user", content: message });

  try {
    const response = await agentLoop(conversation, (chunk) => {
      process.stdout.write(chunk);
    });
    console.log("");

    conversation.push({ role: "assistant", content: response });
    await saveConversation(conversation);
    console.log(dim(`${formatCost()}`));
  } catch (err) {
    if (err.message.includes("429")) {
      console.error(yellow("\n⏳ Rate limited — try again shortly."));
    } else {
      console.error(red(`\n❌ ${err.message.slice(0, 200)}`));
    }
    process.exit(1);
  }
}
|
|
1830
|
+
|
|
1831
|
+
// ---------------------------------------------------------------------------
|
|
1832
|
+
// Auto server management — start AWOS server if not running
|
|
1833
|
+
// ---------------------------------------------------------------------------
|
|
1834
|
+
|
|
1835
|
+
import { spawn as spawnProcess } from "node:child_process";
|
|
1836
|
+
import { fileURLToPath } from "node:url";
|
|
1837
|
+
|
|
1838
|
+
const SCRIPT_DIR = path.dirname(fileURLToPath(import.meta.url));
|
|
1839
|
+
const REPO_ROOT = process.env.WISPY_REPO_ROOT ?? path.resolve(SCRIPT_DIR, "..");
|
|
1840
|
+
// Server binary: check env → ~/.wispy/bin/ → repo build path
|
|
1841
|
+
import { statSync } from "node:fs";
|
|
1842
|
+
const SERVER_BINARY = process.env.WISPY_SERVER_BINARY
|
|
1843
|
+
?? (() => {
|
|
1844
|
+
const candidates = [
|
|
1845
|
+
path.join(os.homedir(), ".wispy", "bin", "awos-server"),
|
|
1846
|
+
path.join(REPO_ROOT, "src-tauri", "target", "release", "awos-server"),
|
|
1847
|
+
path.join(REPO_ROOT, "src-tauri", "target", "debug", "awos-server"),
|
|
1848
|
+
];
|
|
1849
|
+
for (const c of candidates) {
|
|
1850
|
+
try { if (statSync(c).isFile()) return c; } catch {}
|
|
1851
|
+
}
|
|
1852
|
+
return candidates[0];
|
|
1853
|
+
})();
|
|
1854
|
+
const SERVER_PID_FILE = path.join(WISPY_DIR, "server.pid");
|
|
1855
|
+
const DEFAULT_SERVER_PORT = process.env.AWOS_PORT ?? "8090";
|
|
1856
|
+
|
|
1857
|
+
// Probe the local server's health endpoint (2 s timeout).
// Resolves true only on an HTTP 2xx; any error or timeout means "not running".
async function isServerRunning() {
  const healthUrl = `http://127.0.0.1:${DEFAULT_SERVER_PORT}/api/health`;
  try {
    const resp = await fetch(healthUrl, { signal: AbortSignal.timeout(2000) });
    return resp.ok;
  } catch {
    return false;
  }
}
|
|
1867
|
+
|
|
1868
|
+
// Start the AWOS server as a detached background daemon if it is not already
// answering on the health endpoint.
// Returns { started, port } plus, where relevant, pid / noBinary / slow flags.
async function startServerIfNeeded() {
  if (await isServerRunning()) {
    return { started: false, port: DEFAULT_SERVER_PORT };
  }

  // Without a built server binary we silently stay in chat-only mode.
  try {
    const { stat } = await import("node:fs/promises");
    await stat(SERVER_BINARY);
  } catch {
    return { started: false, port: DEFAULT_SERVER_PORT, noBinary: true };
  }

  // Redirect the daemon's output to a log file instead of our stdio.
  const logFile = path.join(WISPY_DIR, "server.log");
  await mkdir(WISPY_DIR, { recursive: true });
  const { openSync } = await import("node:fs");
  const logFd = openSync(logFile, "a");

  const child = spawnProcess(SERVER_BINARY, [], {
    cwd: REPO_ROOT,
    env: { ...process.env, AWOS_PORT: DEFAULT_SERVER_PORT },
    detached: true,
    stdio: ["ignore", logFd, logFd],
  });
  child.unref(); // let the CLI exit while the server keeps running

  // Record the PID so `wispy server stop` can clean up later.
  await writeFile(SERVER_PID_FILE, String(child.pid), "utf8");

  // Poll the health endpoint for up to ~5 seconds (25 × 200 ms).
  for (let attempt = 0; attempt < 25; attempt++) {
    await new Promise((resolve) => setTimeout(resolve, 200));
    if (await isServerRunning()) {
      return { started: true, port: DEFAULT_SERVER_PORT, pid: child.pid };
    }
  }

  // Still booting — report it as slow rather than failing.
  return { started: true, port: DEFAULT_SERVER_PORT, pid: child.pid, slow: true };
}
|
|
1910
|
+
|
|
1911
|
+
// Stop the background server using the PID recorded at start time, then
// remove the PID file. Best-effort: a missing PID file, an unparsable PID,
// or an already-dead process is silently ignored.
async function stopServer() {
  try {
    const pidStr = await readFile(SERVER_PID_FILE, "utf8");
    const pid = Number.parseInt(pidStr.trim(), 10);
    if (pid && !Number.isNaN(pid)) {
      process.kill(pid, "SIGTERM");
      const { unlink } = await import("node:fs/promises");
      await unlink(SERVER_PID_FILE).catch(() => {});
    }
  } catch {
    // No PID file or the process is already gone — nothing to do.
  }
}
|
|
1925
|
+
|
|
1926
|
+
// ---------------------------------------------------------------------------
|
|
1927
|
+
// Main
|
|
1928
|
+
// ---------------------------------------------------------------------------
|
|
1929
|
+
|
|
1930
|
+
// Filter out -w/--workstream flag from args
|
|
1931
|
+
// Strip the `-w <name>` / `--workstream <name>` pair from argv; its value was
// already consumed when ACTIVE_WORKSTREAM was resolved at startup.
const rawArgs = process.argv.slice(2);
const args = [];
for (let i = 0; i < rawArgs.length; i += 1) {
  const arg = rawArgs[i];
  if (arg === "-w" || arg === "--workstream") {
    i += 1; // also skip the flag's value
    continue;
  }
  args.push(arg);
}
|
|
1937
|
+
|
|
1938
|
+
// Route to legacy CLI for operator commands
|
|
1939
|
+
// Commands that belong to the legacy operator CLI rather than the chat REPL.
const operatorCommands = new Set([
  "home", "node", "runtime", "agents", "agent",
  "workstreams", "workstream", "doctor", "setup",
  "package", "config", "server",
]);

// `wispy server <start|stop|status>` is handled inline, not delegated.
if (args[0] === "server") {
  const sub = args[1] ?? "status";
  if (sub === "status") {
    const running = await isServerRunning();
    if (running) {
      console.log(green(`✅ Server running on port ${DEFAULT_SERVER_PORT}`));
      const pidStr = await readFileOr(SERVER_PID_FILE);
      if (pidStr) console.log(dim(` PID: ${pidStr.trim()}`));
    } else {
      console.log(dim("Server not running."));
    }
    process.exit(0);
  }
  if (sub === "stop") {
    await stopServer();
    console.log(dim("🌿 Server stopped."));
    process.exit(0);
  }
  if (sub === "start") {
    const status = await startServerIfNeeded();
    if (status.started) {
      console.log(green(`🌿 Server started on port ${status.port} (PID: ${status.pid})`));
    } else if (status.noBinary) {
      console.log(red("Server binary not found. Run: cd agent-workstream-os && cargo build --manifest-path src-tauri/Cargo.toml --no-default-features --features server"));
    } else {
      console.log(dim(`Server already running on port ${status.port}`));
    }
    process.exit(0);
  }
  // Unknown subcommand.
  console.log("Usage: wispy server <start|stop|status>");
  process.exit(1);
}
|
|
1978
|
+
|
|
1979
|
+
// Delegate operator commands to the full legacy CLI in a child process,
// inheriting our stdio and propagating the child's exit status.
if (args[0] && operatorCommands.has(args[0])) {
  const cliPath = process.env.WISPY_OPERATOR_CLI ?? path.join(SCRIPT_DIR, "awos-node-cli.mjs");
  const { execFileSync } = await import("node:child_process");
  try {
    execFileSync(process.execPath, ["--experimental-strip-types", cliPath, ...args], {
      stdio: "inherit",
      env: process.env,
    });
  } catch (e) {
    // execFileSync throws on non-zero exit; mirror the child's status.
    process.exit(e.status ?? 1);
  }
  process.exit(0);
}
|
|
1993
|
+
|
|
1994
|
+
// Check API key
|
|
1995
|
+
// Chat modes need an API key (Ollama runs locally and does not).
if (!API_KEY && PROVIDER !== "ollama") {
  printSetupGuide();
  process.exit(1);
}

// Auto-start the background server before entering REPL or one-shot mode.
const serverStatus = await startServerIfNeeded();
if (serverStatus.started) {
  const note = serverStatus.slow
    ? yellow(`⚠ Server starting on port ${serverStatus.port} (may take a moment)...`)
    : dim(`🌿 Server started on port ${serverStatus.port}`);
  console.log(note);
} else if (serverStatus.noBinary) {
  // Silent — no binary built yet; chat-only mode works fine without it.
}
|
|
2011
|
+
|
|
2012
|
+
// Server runs as a background daemon — survives CLI exit.
|
|
2013
|
+
// Use `wispy server stop` to stop it explicitly.
|
|
2014
|
+
|
|
2015
|
+
// Convenience commands that bypass the chat modes entirely.
if (args[0] === "overview" || args[0] === "dashboard") {
  await showOverview();
  process.exit(0);
}

if (args[0] === "search" && args[1]) {
  await searchAcrossWorkstreams(args.slice(1).join(" "));
  process.exit(0);
}
|
|
2024
|
+
|
|
2025
|
+
// Mode dispatch — exactly one branch runs, chosen from the first CLI arg:
//   --continue / -c   resume the saved conversation (one-shot if a message
//                     follows, otherwise interactive REPL)
//   --new / -n        wipe the saved conversation, then one-shot or REPL
//   <anything else>   treat the whole arg list as a one-shot message
//   --help / -h       print usage text
//   <no args>         interactive REPL
// NOTE(review): --help is matched by a LATER branch than the generic
// one-shot branch; that only works because the one-shot condition explicitly
// excludes "--help"/"-h". Keep those exclusions in sync if flags change.
if (args[0] === "--continue" || args[0] === "-c") {
  // Continue previous session with optional message
  const message = args.slice(1).join(" ").trim();
  if (message) {
    await runOneShot(message);
  } else {
    await runRepl();
  }
} else if (args[0] === "--new" || args[0] === "-n") {
  // Force new session
  // Resetting = persisting an empty history for the active workstream.
  await saveConversation([]);
  const message = args.slice(1).join(" ").trim();
  if (message) {
    await runOneShot(message);
  } else {
    console.log(dim("🌿 Starting fresh session."));
    await runRepl();
  }
} else if (args.length > 0 && args[0] !== "--help" && args[0] !== "-h") {
  // One-shot mode: wispy "message"
  // All remaining args are joined into a single prompt string.
  const message = args.join(" ");
  await runOneShot(message);
} else if (args[0] === "--help" || args[0] === "-h") {
  console.log(`
${bold("🌿 Wispy")} — AI workspace assistant

${bold("Usage:")}
  wispy                    Start interactive session
  wispy "message"          One-shot message
  wispy -w <name>          Use specific workstream
  wispy -w <name> "msg"    Workstream + message
  wispy --continue "msg"   Continue previous session
  wispy --new "msg"        Start fresh session
  wispy home <subcommand>  Operator commands
  wispy config             Show/set config
  wispy server status      Server management
  wispy doctor             Diagnose environment

${bold("Tools (AI can use):")}
  read_file                Read file contents
  write_file               Write/create files
  run_command              Execute shell commands
  list_directory           List files in directory
  web_search               Search the web

${bold("In-session commands:")}
  /help                    Show commands
  /compact                 Compress conversation
  /memory <type> <text>    Save to persistent memory
  /clear                   Reset conversation
  /model [name]            Show/change model
  /cost                    Show session token usage
  /work                    Show workstream context (work.md)
  /work init               Create work.md for current workstream
  /work set <text>         Append to work.md
  /budget                  Show spending per workstream
  /budget set <usd>        Set budget limit for current workstream
  /workstreams             List all workstreams
  /overview                Director view — all workstreams at a glance
  /search <keyword>        Search across all workstreams
  /provider                Show current provider info
  /quit                    Exit

${bold("Providers (auto-detected):")}
  GOOGLE_AI_KEY            Google AI / Gemini ${dim("(free tier)")}
  ANTHROPIC_API_KEY        Anthropic / Claude
  OPENAI_API_KEY           OpenAI / GPT-4o
  OPENROUTER_API_KEY       OpenRouter ${dim("(any model)")}
  GROQ_API_KEY             Groq ${dim("(fast inference)")}
  DEEPSEEK_API_KEY         DeepSeek
  OLLAMA_HOST              Ollama ${dim("(local, no key needed)")}

${bold("Options:")}
  WISPY_PROVIDER           Force provider (google/anthropic/openai/openrouter/groq/deepseek/ollama)
  WISPY_MODEL              Override model name
  WISPY_WORKSTREAM         Set active workstream
`);
} else {
  // Interactive REPL
  await runRepl();
}