wispy-cli 0.3.2 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/wispy-tui.mjs +14 -0
- package/bin/wispy.mjs +22 -4
- package/lib/mcp-client.mjs +381 -0
- package/lib/wispy-repl.mjs +167 -5
- package/lib/wispy-tui.mjs +812 -0
- package/package.json +12 -3
|
@@ -0,0 +1,812 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* wispy-tui.mjs — Ink-based TUI for Wispy
|
|
4
|
+
*
|
|
5
|
+
* Features:
|
|
6
|
+
* - Status bar: provider / model / workstream / session cost
|
|
7
|
+
* - Message list with basic markdown rendering
|
|
8
|
+
* - Tool execution display (collapsible)
|
|
9
|
+
* - Input box (single-line with submit)
|
|
10
|
+
* - Loading spinner while AI is thinking
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import React, { useState, useEffect, useRef, useCallback } from "react";
|
|
14
|
+
import { render, Box, Text, useInput, useApp, Newline } from "ink";
|
|
15
|
+
import Spinner from "ink-spinner";
|
|
16
|
+
import TextInput from "ink-text-input";
|
|
17
|
+
|
|
18
|
+
// -----------------------------------------------------------------------
|
|
19
|
+
// Inline helpers (avoid importing from wispy-repl to keep TUI standalone)
|
|
20
|
+
// -----------------------------------------------------------------------
|
|
21
|
+
|
|
22
|
+
import os from "node:os";
|
|
23
|
+
import path from "node:path";
|
|
24
|
+
import { readFile, writeFile, mkdir, appendFile } from "node:fs/promises";
|
|
25
|
+
|
|
26
|
+
// Root directory for all Wispy state (~/.wispy) and per-workstream history.
const WISPY_DIR = path.join(os.homedir(), ".wispy");
const CONVERSATIONS_DIR = path.join(WISPY_DIR, "conversations");

// Resolve the active workstream: the WISPY_WORKSTREAM env var wins, then a
// -w/--workstream CLI flag, then the literal "default".
const rawArgs = process.argv.slice(2);
const wsIdx = rawArgs.findIndex((a) => a === "-w" || a === "--workstream");
const cliWorkstream = wsIdx === -1 ? null : rawArgs[wsIdx + 1];
const ACTIVE_WORKSTREAM =
  process.env.WISPY_WORKSTREAM ?? cliWorkstream ?? "default";

// Conversation history is persisted per workstream as a JSON file.
const HISTORY_FILE = path.join(CONVERSATIONS_DIR, `${ACTIVE_WORKSTREAM}.json`);
|
|
41
|
+
|
|
42
|
+
// Provider detection (quick version)
|
|
43
|
+
// Known AI providers. Order matters: detectProvider() scans these entries
// top-to-bottom when probing environment variables, so earlier providers win.
// envKeys: env vars checked for an API key; defaultModel: used when no model
// is configured; label: human-readable name for the status bar.
const PROVIDERS = {
  google: { envKeys: ["GOOGLE_AI_KEY", "GEMINI_API_KEY"], defaultModel: "gemini-2.5-flash", label: "Gemini" },
  anthropic: { envKeys: ["ANTHROPIC_API_KEY"], defaultModel: "claude-sonnet-4-20250514", label: "Claude" },
  openai: { envKeys: ["OPENAI_API_KEY"], defaultModel: "gpt-4o", label: "OpenAI" },
  openrouter: { envKeys: ["OPENROUTER_API_KEY"], defaultModel: "anthropic/claude-sonnet-4-20250514", label: "OpenRouter" },
  groq: { envKeys: ["GROQ_API_KEY"], defaultModel: "llama-3.3-70b-versatile", label: "Groq" },
  deepseek: { envKeys: ["DEEPSEEK_API_KEY"], defaultModel: "deepseek-chat", label: "DeepSeek" },
  ollama: { envKeys: ["OLLAMA_HOST"], defaultModel: "llama3.2", label: "Ollama" },
};
|
|
52
|
+
|
|
53
|
+
/**
 * Look up a secret in the macOS keychain via the `security` CLI.
 *
 * @param {string} service - keychain service name to query.
 * @returns {Promise<string|null>} the stored password, or null on any
 *   failure (non-macOS, missing entry, timeout, or empty value).
 */
async function tryKeychainKey(service) {
  try {
    const { execFile } = await import("node:child_process");
    const { promisify } = await import("node:util");
    const run = promisify(execFile);
    // -w prints only the password; -a is the hard-coded account name.
    const cliArgs = ["find-generic-password", "-s", service, "-a", "poropo", "-w"];
    const { stdout } = await run("security", cliArgs, { timeout: 3000 });
    const secret = stdout.trim();
    return secret === "" ? null : secret;
  } catch {
    // Command missing, entry absent, or timed out — treat all as "no key".
    return null;
  }
}
|
|
64
|
+
|
|
65
|
+
/**
 * Determine which AI provider to use, in priority order:
 *   1. ~/.wispy/config.json (provider + optional apiKey/model),
 *   2. provider-specific environment variables (PROVIDERS order),
 *   3. macOS keychain entries (the found key is also exported to env).
 *
 * @returns {Promise<{provider: string, key: string|undefined, model: string}|null>}
 *   null when nothing is configured.
 */
async function detectProvider() {
  // 1) Explicit config file.
  try {
    const cfgPath = path.join(WISPY_DIR, "config.json");
    const cfg = JSON.parse(await readFile(cfgPath, "utf8"));
    const info = PROVIDERS[cfg.provider];
    if (cfg.provider && info) {
      const envKey = info.envKeys.map((k) => process.env[k]).find(Boolean) ?? cfg.apiKey;
      // Ollama is a local server and needs no API key.
      if (envKey || cfg.provider === "ollama") {
        return { provider: cfg.provider, key: envKey, model: cfg.model ?? info.defaultModel };
      }
    }
  } catch {
    /* no config file or unparsable JSON — fall through to env probing */
  }

  // 2) Environment variables; first provider with a key wins.
  for (const [name, info] of Object.entries(PROVIDERS)) {
    const key = info.envKeys.map((k) => process.env[k]).find(Boolean);
    if (key || (name === "ollama" && process.env.OLLAMA_HOST)) {
      return { provider: name, key, model: process.env.WISPY_MODEL ?? info.defaultModel };
    }
  }

  // 3) macOS keychain; export the key so later code can read it from env.
  const keychainEntries = [
    ["google-ai-key", "google"],
    ["anthropic-api-key", "anthropic"],
    ["openai-api-key", "openai"],
  ];
  for (const [service, provider] of keychainEntries) {
    const key = await tryKeychainKey(service);
    if (key) {
      process.env[PROVIDERS[provider].envKeys[0]] = key;
      return { provider, key, model: process.env.WISPY_MODEL ?? PROVIDERS[provider].defaultModel };
    }
  }

  return null;
}
|
|
100
|
+
|
|
101
|
+
/**
 * Read a UTF-8 text file, returning `fallback` (default null) instead of
 * throwing when the file is missing or unreadable.
 */
async function readFileOr(p, fallback = null) {
  try {
    return await readFile(p, "utf8");
  } catch {
    return fallback;
  }
}
|
|
104
|
+
|
|
105
|
+
/**
 * Load the persisted conversation for the active workstream.
 * Returns [] when the history file is missing, empty, or not valid JSON.
 */
async function loadConversation() {
  const raw = await readFileOr(HISTORY_FILE);
  if (!raw) return [];
  try {
    return JSON.parse(raw);
  } catch {
    // Corrupt history is silently discarded rather than crashing the TUI.
    return [];
  }
}
|
|
110
|
+
|
|
111
|
+
/**
 * Persist the conversation for the active workstream, keeping only the
 * most recent 50 messages. Creates the conversations directory if needed.
 */
async function saveConversation(messages) {
  await mkdir(CONVERSATIONS_DIR, { recursive: true });
  const recent = messages.slice(-50);
  await writeFile(HISTORY_FILE, `${JSON.stringify(recent, null, 2)}\n`, "utf8");
}
|
|
115
|
+
|
|
116
|
+
// -----------------------------------------------------------------------
|
|
117
|
+
// Simple markdown → Ink renderer
|
|
118
|
+
// -----------------------------------------------------------------------
|
|
119
|
+
|
|
120
|
+
/**
 * Minimal markdown-to-Ink renderer. Maps each line of `text` to one React
 * element: code-fence markers (dimmed verbatim), #/##/### headings, bullet
 * and numbered list items, whole-line **bold** spans, horizontal rules,
 * blank lines, or plain text. Returns an array keyed by line index.
 */
function renderMarkdown(text) {
  const h = React.createElement;
  return text.split("\n").map((line, i) => {
    // Code fence markers are shown as-is, dimmed.
    if (line.startsWith("```")) return h(Text, { key: i, dimColor: true }, line);

    // Headings — check the longest prefix first.
    if (line.startsWith("### ")) return h(Text, { key: i, bold: true, color: "cyan" }, line.slice(4));
    if (line.startsWith("## ")) return h(Text, { key: i, bold: true, color: "blue" }, line.slice(3));
    if (line.startsWith("# ")) return h(Text, { key: i, bold: true, color: "magenta" }, line.slice(2));

    // Bulleted list item.
    if (line.startsWith("- ") || line.startsWith("* ")) {
      return h(
        Box, { key: i },
        h(Text, { color: "green" }, " • "),
        h(Text, null, line.slice(2))
      );
    }

    // Numbered list item ("1. foo").
    const numbered = line.match(/^(\d+\.\s)(.*)/);
    if (numbered) {
      return h(
        Box, { key: i },
        h(Text, { color: "yellow" }, " " + numbered[1]),
        h(Text, null, numbered[2])
      );
    }

    // Inline **bold** spans — whole-line split, no nesting.
    if (line.includes("**")) {
      const children = line.split(/(\*\*[^*]+\*\*)/g).map((part, j) =>
        part.startsWith("**") && part.endsWith("**")
          ? h(Text, { key: j, bold: true }, part.slice(2, -2))
          : h(Text, { key: j }, part)
      );
      return h(Box, { key: i }, ...children);
    }

    // Horizontal rule.
    if (line.startsWith("---") || line.startsWith("===")) {
      return h(Text, { key: i, dimColor: true }, "─".repeat(50));
    }

    // Blank line → explicit newline element.
    if (line === "") return h(Newline, { key: i });

    // Anything else: plain text.
    return h(Text, { key: i }, line);
  });
}
|
|
176
|
+
|
|
177
|
+
// -----------------------------------------------------------------------
|
|
178
|
+
// Cost tracking
|
|
179
|
+
// -----------------------------------------------------------------------
|
|
180
|
+
|
|
181
|
+
// Per-model pricing in USD per 1M tokens { input, output }. Models missing
// from this table fall back to a generic $1 / $3 rate in estimateCost().
const MODEL_PRICING = {
  "gemini-2.5-flash": { input: 0.15, output: 0.60 },
  "gemini-2.5-pro": { input: 1.25, output: 10.0 },
  "claude-sonnet-4-20250514": { input: 3.0, output: 15.0 },
  "claude-haiku-3.5": { input: 0.80, output: 4.0 },
  "gpt-4o": { input: 2.50, output: 10.0 },
  "gpt-4o-mini": { input: 0.15, output: 0.60 },
  "gpt-4.1": { input: 2.0, output: 8.0 },
  "o4-mini": { input: 1.10, output: 4.40 },
  "llama-3.3-70b-versatile": { input: 0.59, output: 0.79 },
  "deepseek-chat": { input: 0.27, output: 1.10 },
  "llama3.2": { input: 0, output: 0 }, // local Ollama model — free
};

/**
 * Estimate the USD cost of a request/response pair.
 *
 * @param {number} inputTokens
 * @param {number} outputTokens
 * @param {string} model - key into MODEL_PRICING; unknown models use a
 *   generic $1 input / $3 output per-million fallback.
 * @returns {{tokens: number, usd: number}} total token count and cost.
 */
function estimateCost(inputTokens, outputTokens, model) {
  const { input, output } = MODEL_PRICING[model] ?? { input: 1.0, output: 3.0 };
  const usd = (inputTokens * input + outputTokens * output) / 1_000_000;
  return { tokens: inputTokens + outputTokens, usd };
}

/**
 * Rough token count for a string: ~4 characters per token.
 * Null/undefined input counts as zero.
 */
function estimateTokens(text) {
  const length = text?.length ?? 0;
  return Math.ceil(length / 4);
}
|
|
204
|
+
|
|
205
|
+
// -----------------------------------------------------------------------
|
|
206
|
+
// Chat function (calls provider API)
|
|
207
|
+
// -----------------------------------------------------------------------
|
|
208
|
+
|
|
209
|
+
// Chat-completions endpoints for every provider that speaks the OpenAI
// dialect. Ollama's base URL comes from OLLAMA_HOST (default localhost).
const OPENAI_ENDPOINTS = {
  openai: "https://api.openai.com/v1/chat/completions",
  openrouter: "https://openrouter.ai/api/v1/chat/completions",
  groq: "https://api.groq.com/openai/v1/chat/completions",
  deepseek: "https://api.deepseek.com/v1/chat/completions",
  ollama: `${process.env.OLLAMA_HOST ?? "http://localhost:11434"}/v1/chat/completions`,
};
|
|
216
|
+
|
|
217
|
+
// Simple tool set for TUI (subset of full REPL)
|
|
218
|
+
// Tool schemas exposed to the model (JSON-Schema parameters). This is a
// reduced subset of the full REPL tool set; executeTool() dispatches on
// the `name` field, so names here must match its switch cases.
const TUI_TOOL_DEFINITIONS = [
  {
    name: "read_file",
    description: "Read a file",
    parameters: { type: "object", properties: { path: { type: "string" } }, required: ["path"] },
  },
  {
    name: "write_file",
    description: "Write a file",
    parameters: { type: "object", properties: { path: { type: "string" }, content: { type: "string" } }, required: ["path", "content"] },
  },
  {
    name: "run_command",
    description: "Run a shell command",
    parameters: { type: "object", properties: { command: { type: "string" } }, required: ["command"] },
  },
  {
    name: "list_directory",
    description: "List directory contents",
    parameters: { type: "object", properties: { path: { type: "string" } }, required: [] },
  },
  {
    name: "git",
    description: "Run git operations",
    parameters: { type: "object", properties: { command: { type: "string" } }, required: ["command"] },
  },
  {
    name: "web_search",
    description: "Search the web",
    parameters: { type: "object", properties: { query: { type: "string" } }, required: ["query"] },
  },
];
|
|
250
|
+
|
|
251
|
+
/**
 * Execute one of the TUI tools by name and return a result object.
 * All tools return { success: true, ... } with a tool-specific payload, or
 * { success: false, error } on any failure — this function never throws.
 * Large outputs are truncated so they stay small enough to send back to
 * the model.
 *
 * @param {string} name - a tool name from TUI_TOOL_DEFINITIONS.
 * @param {object} args - tool arguments matching that tool's schema.
 * @returns {Promise<object>} result object (shape varies per tool).
 */
async function executeTool(name, args) {
  const { execFile } = await import("node:child_process");
  const { promisify } = await import("node:util");
  const execAsync = promisify(execFile);

  // Expand a leading "~" only when it denotes the home directory itself
  // ("~" or "~/..."). The previous bare /^~/ replacement also mangled
  // "~user"-style paths into "<home>user".
  const expandHome = (p) => p.replace(/^~(?=$|\/)/, os.homedir());

  try {
    switch (name) {
      case "read_file": {
        const filePath = expandHome(args.path);
        const content = await readFile(filePath, "utf8");
        // Cap the payload returned to the model at ~8KB.
        return { success: true, content: content.length > 8000 ? content.slice(0, 8000) + "\n...(truncated)" : content };
      }
      case "write_file": {
        const filePath = expandHome(args.path);
        await mkdir(path.dirname(filePath), { recursive: true });
        await writeFile(filePath, args.content, "utf8");
        return { success: true, message: `Written to ${filePath}` };
      }
      case "run_command": {
        // Non-zero exit status rejects and is reported via the catch below.
        const { stdout, stderr } = await execAsync("/bin/bash", ["-c", args.command], {
          timeout: 30000, maxBuffer: 1024 * 1024, cwd: process.cwd(),
        });
        const out = (stdout + (stderr ? `\nSTDERR: ${stderr}` : "")).trim();
        return { success: true, output: out.length > 4000 ? out.slice(0, 4000) + "\n...(truncated)" : out };
      }
      case "list_directory": {
        const { readdir } = await import("node:fs/promises");
        const targetPath = expandHome(args.path || ".");
        const entries = await readdir(targetPath, { withFileTypes: true });
        return { success: true, listing: entries.map((e) => `${e.isDirectory() ? "📁" : "📄"} ${e.name}`).join("\n") };
      }
      case "git": {
        const { stdout, stderr } = await execAsync("/bin/bash", ["-c", `git ${args.command}`], {
          timeout: 15000, cwd: process.cwd(),
        });
        return { success: true, output: (stdout + (stderr ? `\n${stderr}` : "")).trim().slice(0, 4000) };
      }
      case "web_search": {
        const encoded = encodeURIComponent(args.query);
        const resp = await fetch(`https://api.duckduckgo.com/?q=${encoded}&format=json&no_html=1`, {
          signal: AbortSignal.timeout(10000),
        });
        const data = await resp.json();
        const results = data.RelatedTopics?.slice(0, 5).map((t) => t.Text).filter(Boolean).join("\n\n");
        return { success: true, results: results || "No results found" };
      }
      default:
        return { success: false, error: `Unknown tool: ${name}` };
    }
  } catch (err) {
    // Normalize every failure (FS errors, timeouts, fetch errors) into the
    // same { success: false, error } shape.
    return { success: false, error: err.message };
  }
}
|
|
308
|
+
|
|
309
|
+
/**
 * Dispatch a chat request to the active provider.
 * Gemini and Anthropic have bespoke wire formats; every other provider
 * speaks the OpenAI chat-completions dialect.
 *
 * @returns {Promise<string>} the model's final text response.
 */
async function callAPI(provider, apiKey, model, messages, onToolUse) {
  switch (provider) {
    case "google":
      return callGemini(apiKey, model, messages, onToolUse);
    case "anthropic":
      return callAnthropic(apiKey, model, messages, onToolUse);
    default:
      return callOpenAI(provider, apiKey, model, messages, onToolUse);
  }
}
|
|
318
|
+
|
|
319
|
+
/**
 * Call the Gemini generateContent API, executing any functionCall parts
 * with local tools and recursing until the model replies with plain text.
 * NOTE: `messages` is mutated in place to record tool calls and results.
 *
 * @returns {Promise<string>} concatenated text of the final response.
 * @throws {Error} on a non-2xx HTTP status.
 */
async function callGemini(apiKey, model, messages, onToolUse) {
  const systemInstruction = messages.find((m) => m.role === "system")?.content ?? "";

  // Translate internal message records into Gemini "contents".
  const contents = [];
  for (const m of messages) {
    if (m.role === "system") continue;
    if (m.role === "tool_result") {
      contents.push({ role: "user", parts: [{ functionResponse: { name: m.toolName, response: m.result } }] });
    } else if (m.role === "assistant" && m.toolCalls) {
      contents.push({ role: "model", parts: m.toolCalls.map((tc) => ({ functionCall: { name: tc.name, args: tc.args } })) });
    } else {
      contents.push({ role: m.role === "assistant" ? "model" : "user", parts: [{ text: m.content }] });
    }
  }

  const tools = [{ functionDeclarations: TUI_TOOL_DEFINITIONS.map((t) => ({ name: t.name, description: t.description, parameters: t.parameters })) }];

  // Authenticate via the x-goog-api-key header instead of a ?key= query
  // parameter so the secret cannot leak into URL logs or proxies.
  const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent`;
  const resp = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json", "x-goog-api-key": apiKey },
    body: JSON.stringify({
      system_instruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : undefined,
      contents,
      tools,
      generationConfig: { temperature: 0.7, maxOutputTokens: 4096 },
    }),
  });

  if (!resp.ok) {
    const err = await resp.text();
    throw new Error(`Gemini error ${resp.status}: ${err.slice(0, 200)}`);
  }

  const data = await resp.json();
  const parts = data.candidates?.[0]?.content?.parts ?? [];
  const fcs = parts.filter((p) => p.functionCall);
  if (fcs.length > 0) {
    // Run each requested tool, record the call + result, then re-query.
    for (const { functionCall } of fcs) {
      const call = { name: functionCall.name, args: functionCall.args };
      onToolUse?.(call.name, call.args);
      const result = await executeTool(call.name, call.args);
      messages.push({ role: "assistant", toolCalls: [call], content: "" });
      messages.push({ role: "tool_result", toolName: call.name, result });
    }
    return callGemini(apiKey, model, messages, onToolUse);
  }

  return parts.map((p) => p.text ?? "").join("");
}
|
|
370
|
+
|
|
371
|
+
/**
 * Call Anthropic's Messages API, transparently executing tool_use blocks
 * with local tools and recursing until the model returns plain text.
 * NOTE: `messages` is mutated in place to record tool calls and results.
 *
 * @returns {Promise<string>} concatenated text blocks of the final reply.
 * @throws {Error} on a non-2xx HTTP status.
 */
async function callAnthropic(apiKey, model, messages, onToolUse) {
  const systemPrompt = messages.find((m) => m.role === "system")?.content ?? "";

  // Translate internal message records into Anthropic's wire format.
  const anthropicMessages = [];
  for (const m of messages) {
    if (m.role === "system") continue;
    if (m.role === "tool_result") {
      anthropicMessages.push({
        role: "user",
        content: [{ type: "tool_result", tool_use_id: m.toolUseId ?? m.toolName, content: JSON.stringify(m.result) }],
      });
    } else if (m.role === "assistant" && m.toolCalls) {
      anthropicMessages.push({
        role: "assistant",
        content: m.toolCalls.map((tc) => ({ type: "tool_use", id: tc.id ?? tc.name, name: tc.name, input: tc.args })),
      });
    } else {
      anthropicMessages.push({ role: m.role === "assistant" ? "assistant" : "user", content: m.content });
    }
  }

  const tools = TUI_TOOL_DEFINITIONS.map((t) => ({ name: t.name, description: t.description, input_schema: t.parameters }));

  const resp = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: { "Content-Type": "application/json", "x-api-key": apiKey, "anthropic-version": "2023-06-01" },
    body: JSON.stringify({ model, max_tokens: 4096, system: systemPrompt, messages: anthropicMessages, tools }),
  });

  if (!resp.ok) {
    const err = await resp.text();
    throw new Error(`Anthropic error ${resp.status}: ${err.slice(0, 200)}`);
  }

  const data = await resp.json();
  const toolUseBlocks = data.content?.filter((b) => b.type === "tool_use") ?? [];

  if (toolUseBlocks.length > 0) {
    // Run each requested tool, record the call + result, then re-query.
    for (const block of toolUseBlocks) {
      onToolUse?.(block.name, block.input);
      const result = await executeTool(block.name, block.input);
      messages.push({ role: "assistant", toolCalls: [{ id: block.id, name: block.name, args: block.input }], content: "" });
      messages.push({ role: "tool_result", toolName: block.name, toolUseId: block.id, result });
    }
    return callAnthropic(apiKey, model, messages, onToolUse);
  }

  const textBlocks = data.content?.filter((b) => b.type === "text") ?? [];
  return textBlocks.map((b) => b.text).join("");
}
|
|
415
|
+
|
|
416
|
+
/**
 * Call an OpenAI-compatible chat-completions endpoint (OpenAI, OpenRouter,
 * Groq, DeepSeek, Ollama), executing requested tool calls locally and
 * recursing until the model returns plain content.
 * NOTE: `messages` is mutated in place to record tool calls and results.
 *
 * @returns {Promise<string>} the final assistant message content.
 * @throws {Error} on a non-2xx HTTP status or an empty choices array.
 */
async function callOpenAI(provider, apiKey, model, messages, onToolUse) {
  // Translate internal message records into the OpenAI wire format.
  // (A no-op filter — `m.role !== "tool_result" || true`, which kept every
  // message — has been removed; the mapping below already handles all roles.)
  const openaiMessages = messages.map((m) => {
    if (m.role === "tool_result") {
      return { role: "tool", tool_call_id: m.toolCallId ?? m.toolName, content: JSON.stringify(m.result) };
    }
    if (m.role === "assistant" && m.toolCalls) {
      return {
        role: "assistant",
        content: null,
        tool_calls: m.toolCalls.map((tc, i) => ({
          id: tc.id ?? `call_${i}`,
          type: "function",
          function: { name: tc.name, arguments: JSON.stringify(tc.args) },
        })),
      };
    }
    return { role: m.role, content: m.content };
  });

  const endpoint = OPENAI_ENDPOINTS[provider] ?? OPENAI_ENDPOINTS.openai;
  const headers = { "Content-Type": "application/json" };
  if (apiKey) headers["Authorization"] = `Bearer ${apiKey}`; // Ollama needs no key

  const tools = TUI_TOOL_DEFINITIONS.map((t) => ({ type: "function", function: { name: t.name, description: t.description, parameters: t.parameters } }));

  const resp = await fetch(endpoint, {
    method: "POST",
    headers,
    body: JSON.stringify({ model, messages: openaiMessages, temperature: 0.7, max_tokens: 4096, tools }),
  });

  if (!resp.ok) {
    const err = await resp.text();
    throw new Error(`OpenAI error ${resp.status}: ${err.slice(0, 200)}`);
  }

  const data = await resp.json();
  const choice = data.choices?.[0];
  if (!choice) throw new Error("No response");

  if (choice.message?.tool_calls?.length > 0) {
    // Run each requested tool, record the call + result, then re-query.
    for (const tc of choice.message.tool_calls) {
      const args = JSON.parse(tc.function.arguments);
      onToolUse?.(tc.function.name, args);
      const result = await executeTool(tc.function.name, args);
      messages.push({ role: "assistant", toolCalls: [{ id: tc.id, name: tc.function.name, args }], content: "" });
      messages.push({ role: "tool_result", toolName: tc.function.name, toolCallId: tc.id, result });
    }
    return callOpenAI(provider, apiKey, model, messages, onToolUse);
  }

  return choice.message?.content ?? "";
}
|
|
465
|
+
|
|
466
|
+
// -----------------------------------------------------------------------
|
|
467
|
+
// TUI Components
|
|
468
|
+
// -----------------------------------------------------------------------
|
|
469
|
+
|
|
470
|
+
// Persona/system prompt sent as the first message of every conversation.
// Runtime text — keep byte-for-byte stable; the trailing 🌿 rule is part of
// Wispy's visible behavior.
const SYSTEM_PROMPT = `You are Wispy 🌿 — a small ghost that lives in terminals.
You float between code, files, and servers. Playful, honest, and curious.
- Always use casual speech. Never formal.
- End EVERY response with exactly one 🌿 emoji at the very end.
- Reply in the same language the user writes in.
- Be concise.
Tools available: read_file, write_file, run_command, list_directory, git, web_search.`;
|
|
477
|
+
|
|
478
|
+
// Status Bar Component
|
|
479
|
+
/**
 * Top status bar: app name, provider label, model, active workstream, and
 * running session token/cost totals.
 */
function StatusBar({ provider, model, workstream, tokens, cost }) {
  const h = React.createElement;
  const providerLabel = PROVIDERS[provider]?.label ?? provider ?? "?";
  const costStr = cost > 0 ? `$${cost.toFixed(4)}` : "$0.0000";
  const tokStr = tokens > 0 ? `${tokens}t` : "0t";
  const ws = workstream ?? "default";

  return h(
    Box,
    { paddingX: 1, backgroundColor: "blue", width: "100%" },
    h(Text, { color: "white", bold: true }, "🌿 Wispy"),
    h(Text, { color: "white" }, " "),
    h(Text, { color: "cyan" }, providerLabel),
    h(Text, { color: "white" }, " / "),
    h(Text, { color: "yellow" }, model ?? "?"),
    h(Text, { color: "white" }, " · ws: "),
    h(Text, { color: "green" }, ws),
    h(Text, { color: "white" }, " · "),
    h(Text, { color: "white", dimColor: true }, `${tokStr} ${costStr}`)
  );
}
|
|
502
|
+
|
|
503
|
+
// Tool Execution Line
|
|
504
|
+
/**
 * One-line tool execution status: pending (⏳), succeeded (✅) or failed
 * (❌), with the tool name, truncated args, and a short error if present.
 */
function ToolLine({ name, args, result }) {
  const h = React.createElement;
  const argsStr = Object.values(args ?? {}).join(", ").slice(0, 40);
  const status = result ? (result.success ? "✅" : "❌") : "⏳";
  const errorNote =
    result && !result.success
      ? h(Text, { color: "red", dimColor: true }, ` — ${result.error?.slice(0, 60)}`)
      : null;

  return h(
    Box,
    { paddingLeft: 2 },
    h(Text, { color: "cyan", dimColor: true }, `${status} `),
    h(Text, { color: "cyan", dimColor: true }, `🔧 ${name}`),
    h(Text, { dimColor: true }, `(${argsStr})`),
    errorNote
  );
}
|
|
520
|
+
|
|
521
|
+
// Message Component
|
|
522
|
+
/**
 * Render one display message: a user prompt line, a tool-call status line,
 * or an assistant reply (markdown-rendered). Unknown roles render nothing.
 */
function Message({ msg }) {
  const h = React.createElement;

  if (msg.role === "user") {
    return h(
      Box,
      { flexDirection: "column", marginY: 0, paddingLeft: 1 },
      h(
        Box,
        {},
        h(Text, { color: "green", bold: true }, "› "),
        h(Text, { color: "white" }, msg.content)
      )
    );
  }

  if (msg.role === "tool_call") {
    return h(ToolLine, { name: msg.name, args: msg.args, result: msg.result });
  }

  if (msg.role === "assistant") {
    return h(
      Box,
      { flexDirection: "column", paddingLeft: 1, marginTop: 0 },
      h(
        Box,
        { flexDirection: "column" },
        h(Text, { color: "cyan", bold: true }, "🌿 "),
        ...renderMarkdown(msg.content)
      ),
      // Blank spacer row after each assistant reply.
      h(Box, { height: 1 })
    );
  }

  return null;
}
|
|
552
|
+
|
|
553
|
+
// Input Area Component
|
|
554
|
+
/**
 * Bottom input box. Shows a spinner while a request is in flight, otherwise
 * a single-line text input. (`disabled` is accepted but currently unused —
 * kept for interface compatibility with callers.)
 */
function InputArea({ value, onChange, onSubmit, loading, disabled }) {
  const h = React.createElement;

  const spinnerRow = h(
    Box,
    {},
    h(Spinner, { type: "dots" }),
    h(Text, { color: "yellow" }, " thinking...")
  );

  const inputRow = h(
    Box,
    {},
    h(Text, { color: "green" }, "› "),
    h(TextInput, {
      value,
      onChange,
      onSubmit,
      placeholder: "Type a message... (Ctrl+C to exit, /help for commands)",
    })
  );

  return h(
    Box,
    {
      borderStyle: "single",
      // Border color doubles as a busy indicator.
      borderColor: loading ? "yellow" : "green",
      paddingX: 1,
      marginTop: 0,
    },
    loading ? spinnerRow : inputRow
  );
}
|
|
580
|
+
|
|
581
|
+
// Help overlay
|
|
582
|
+
// Text shown by the /help command overlay. Runtime string — edits here are
// user-visible.
const HELP_TEXT = `
Commands:
  /clear     Reset conversation
  /model X   Change model
  /cost      Show token usage
  /quit      Exit
  /help      Show this help

Keyboard:
  Ctrl+C     Exit
  Enter      Send message
`;
|
|
594
|
+
|
|
595
|
+
// Main App Component
|
|
596
|
+
function WispyTUI({ providerInfo }) {
|
|
597
|
+
const { exit } = useApp();
|
|
598
|
+
|
|
599
|
+
const [messages, setMessages] = useState([]); // display messages
|
|
600
|
+
const [inputValue, setInputValue] = useState("");
|
|
601
|
+
const [loading, setLoading] = useState(false);
|
|
602
|
+
const [showHelp, setShowHelp] = useState(false);
|
|
603
|
+
const [totalTokens, setTotalTokens] = useState(0);
|
|
604
|
+
const [totalCost, setTotalCost] = useState(0);
|
|
605
|
+
const [model, setModel] = useState(providerInfo?.model ?? "?");
|
|
606
|
+
|
|
607
|
+
// Conversation history for API
|
|
608
|
+
const conversationRef = useRef([
|
|
609
|
+
{ role: "system", content: SYSTEM_PROMPT },
|
|
610
|
+
]);
|
|
611
|
+
|
|
612
|
+
// Load existing conversation on mount
|
|
613
|
+
useEffect(() => {
|
|
614
|
+
(async () => {
|
|
615
|
+
const existing = await loadConversation();
|
|
616
|
+
if (existing.length > 0) {
|
|
617
|
+
// Build display messages from saved conversation
|
|
618
|
+
const displayMsgs = existing
|
|
619
|
+
.filter(m => m.role === "user" || m.role === "assistant")
|
|
620
|
+
.slice(-20)
|
|
621
|
+
.map(m => ({ role: m.role, content: m.content }));
|
|
622
|
+
setMessages(displayMsgs);
|
|
623
|
+
|
|
624
|
+
// Set conversation ref (with system prompt first)
|
|
625
|
+
conversationRef.current = [
|
|
626
|
+
{ role: "system", content: SYSTEM_PROMPT },
|
|
627
|
+
...existing.slice(-20),
|
|
628
|
+
];
|
|
629
|
+
}
|
|
630
|
+
})();
|
|
631
|
+
}, []);
|
|
632
|
+
|
|
633
|
+
const handleSubmit = useCallback(async (value) => {
|
|
634
|
+
const input = value.trim();
|
|
635
|
+
if (!input || loading) return;
|
|
636
|
+
|
|
637
|
+
setInputValue("");
|
|
638
|
+
|
|
639
|
+
// Handle slash commands
|
|
640
|
+
if (input.startsWith("/")) {
|
|
641
|
+
const parts = input.split(/\s+/);
|
|
642
|
+
const cmd = parts[0].toLowerCase();
|
|
643
|
+
|
|
644
|
+
if (cmd === "/quit" || cmd === "/exit") {
|
|
645
|
+
exit();
|
|
646
|
+
process.exit(0);
|
|
647
|
+
return;
|
|
648
|
+
}
|
|
649
|
+
if (cmd === "/clear") {
|
|
650
|
+
conversationRef.current = [{ role: "system", content: SYSTEM_PROMPT }];
|
|
651
|
+
setMessages([]);
|
|
652
|
+
await saveConversation([]);
|
|
653
|
+
setMessages([{ role: "assistant", content: "Conversation cleared. 🌿" }]);
|
|
654
|
+
return;
|
|
655
|
+
}
|
|
656
|
+
if (cmd === "/help") {
|
|
657
|
+
setShowHelp(h => !h);
|
|
658
|
+
return;
|
|
659
|
+
}
|
|
660
|
+
if (cmd === "/cost") {
|
|
661
|
+
setMessages(prev => [...prev, { role: "assistant", content: `Session: ${totalTokens} tokens (~$${totalCost.toFixed(4)}) 🌿` }]);
|
|
662
|
+
return;
|
|
663
|
+
}
|
|
664
|
+
if (cmd === "/model" && parts[1]) {
|
|
665
|
+
setModel(parts[1]);
|
|
666
|
+
setMessages(prev => [...prev, { role: "assistant", content: `Model changed to ${parts[1]} 🌿` }]);
|
|
667
|
+
return;
|
|
668
|
+
}
|
|
669
|
+
}
|
|
670
|
+
|
|
671
|
+
// Add user message to display
|
|
672
|
+
setMessages(prev => [...prev, { role: "user", content: input }]);
|
|
673
|
+
|
|
674
|
+
// Add to conversation
|
|
675
|
+
conversationRef.current.push({ role: "user", content: input });
|
|
676
|
+
|
|
677
|
+
setLoading(true);
|
|
678
|
+
|
|
679
|
+
try {
|
|
680
|
+
const toolCallDisplay = [];
|
|
681
|
+
|
|
682
|
+
const onToolUse = (name, args) => {
|
|
683
|
+
const toolMsg = { role: "tool_call", name, args, result: null };
|
|
684
|
+
toolCallDisplay.push(toolMsg);
|
|
685
|
+
setMessages(prev => [...prev, toolMsg]);
|
|
686
|
+
};
|
|
687
|
+
|
|
688
|
+
// Make a working copy of conversation to pass (API mutates for tool loops)
|
|
689
|
+
const convCopy = conversationRef.current.map(m => ({ ...m }));
|
|
690
|
+
|
|
691
|
+
const response = await callAPI(
|
|
692
|
+
providerInfo.provider,
|
|
693
|
+
providerInfo.key,
|
|
694
|
+
model,
|
|
695
|
+
convCopy,
|
|
696
|
+
(name, args) => {
|
|
697
|
+
onToolUse(name, args);
|
|
698
|
+
}
|
|
699
|
+
);
|
|
700
|
+
|
|
701
|
+
// Update token/cost estimates
|
|
702
|
+
const inputToks = estimateTokens(convCopy.map(m => m.content ?? "").join(""));
|
|
703
|
+
const outputToks = estimateTokens(response);
|
|
704
|
+
setTotalTokens(t => t + inputToks + outputToks);
|
|
705
|
+
const { usd } = estimateCost(inputToks, outputToks, model);
|
|
706
|
+
setTotalCost(c => c + usd);
|
|
707
|
+
|
|
708
|
+
// Update conversation ref with assistant response
|
|
709
|
+
conversationRef.current.push({ role: "assistant", content: response });
|
|
710
|
+
|
|
711
|
+
// Update tool call display results
|
|
712
|
+
setMessages(prev => {
|
|
713
|
+
const updated = [...prev];
|
|
714
|
+
// Mark tool calls as complete
|
|
715
|
+
for (let i = updated.length - 1; i >= 0; i--) {
|
|
716
|
+
if (updated[i].role === "tool_call" && updated[i].result === null) {
|
|
717
|
+
updated[i] = { ...updated[i], result: { success: true } };
|
|
718
|
+
}
|
|
719
|
+
}
|
|
720
|
+
return [...updated, { role: "assistant", content: response }];
|
|
721
|
+
});
|
|
722
|
+
|
|
723
|
+
// Save conversation
|
|
724
|
+
await saveConversation(conversationRef.current.filter(m => m.role !== "system"));
|
|
725
|
+
|
|
726
|
+
} catch (err) {
|
|
727
|
+
setMessages(prev => [
|
|
728
|
+
...prev,
|
|
729
|
+
{ role: "assistant", content: `❌ Error: ${err.message.slice(0, 200)} 🌿` },
|
|
730
|
+
]);
|
|
731
|
+
} finally {
|
|
732
|
+
setLoading(false);
|
|
733
|
+
}
|
|
734
|
+
}, [loading, model, totalTokens, totalCost, providerInfo, exit]);
|
|
735
|
+
|
|
736
|
+
// Keep only last N messages for display
|
|
737
|
+
const displayMessages = messages.slice(-30);
|
|
738
|
+
|
|
739
|
+
return React.createElement(
|
|
740
|
+
Box, { flexDirection: "column", height: "100%" },
|
|
741
|
+
|
|
742
|
+
// Status bar
|
|
743
|
+
React.createElement(StatusBar, {
|
|
744
|
+
provider: providerInfo?.provider,
|
|
745
|
+
model,
|
|
746
|
+
workstream: ACTIVE_WORKSTREAM,
|
|
747
|
+
tokens: totalTokens,
|
|
748
|
+
cost: totalCost,
|
|
749
|
+
}),
|
|
750
|
+
|
|
751
|
+
// Help overlay
|
|
752
|
+
showHelp && React.createElement(
|
|
753
|
+
Box, { borderStyle: "round", borderColor: "yellow", marginX: 2, padding: 1 },
|
|
754
|
+
React.createElement(Text, { color: "yellow" }, HELP_TEXT.trim()),
|
|
755
|
+
),
|
|
756
|
+
|
|
757
|
+
// Messages area (scrolls)
|
|
758
|
+
React.createElement(
|
|
759
|
+
Box, { flexDirection: "column", flexGrow: 1, paddingX: 1, paddingY: 0, overflowY: "hidden" },
|
|
760
|
+
displayMessages.length === 0
|
|
761
|
+
? React.createElement(
|
|
762
|
+
Box, { marginY: 1 },
|
|
763
|
+
React.createElement(Text, { dimColor: true }, " 🌿 Type a message to start chatting. /help for commands.")
|
|
764
|
+
)
|
|
765
|
+
: displayMessages.map((msg, i) =>
|
|
766
|
+
React.createElement(Message, { key: i, msg })
|
|
767
|
+
),
|
|
768
|
+
),
|
|
769
|
+
|
|
770
|
+
// Input area
|
|
771
|
+
React.createElement(InputArea, {
|
|
772
|
+
value: inputValue,
|
|
773
|
+
onChange: setInputValue,
|
|
774
|
+
onSubmit: handleSubmit,
|
|
775
|
+
loading,
|
|
776
|
+
}),
|
|
777
|
+
);
|
|
778
|
+
}
|
|
779
|
+
|
|
780
|
+
// -----------------------------------------------------------------------
|
|
781
|
+
// Entry point
|
|
782
|
+
// -----------------------------------------------------------------------
|
|
783
|
+
|
|
784
|
+
/**
 * Boot the Wispy TUI.
 *
 * Verifies an interactive terminal is available, resolves the configured
 * provider/API key, clears the screen, then hands control to Ink until
 * the app unmounts.
 */
async function main() {
  // Ink requires an interactive terminal; refuse to start otherwise.
  if (!process.stdin.isTTY) {
    console.error("Error: wispy --tui requires a TTY terminal");
    process.exit(1);
  }

  // Resolve provider + API key (configured via the plain `wispy` CLI).
  const providerInfo = await detectProvider();
  if (!providerInfo) {
    console.error("No API key found. Run `wispy` first to set up your provider.");
    process.exit(1);
  }

  // Wipe the terminal and home the cursor before the first render.
  process.stdout.write("\x1b[2J\x1b[H");

  const app = render(React.createElement(WispyTUI, { providerInfo }), {
    exitOnCtrlC: true,
  });

  // Block until the Ink tree unmounts (user quit or fatal error).
  await app.waitUntilExit();
}
|
|
808
|
+
|
|
809
|
+
// Surface any unhandled startup/render failure and exit non-zero.
// Fix: the previous handler printed only `err.message`, which (a) threw a
// TypeError when the rejection was not an Error instance and (b) discarded
// the stack trace, hiding where the TUI actually failed.
main().catch((err) => {
  const detail =
    err instanceof Error ? err.stack ?? err.message : String(err);
  console.error("TUI error:", detail);
  process.exit(1);
});
|