heyhank 0.1.0
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/README.md +40 -0
- package/bin/cli.ts +168 -0
- package/bin/ctl.ts +528 -0
- package/bin/generate-token.ts +28 -0
- package/dist/apple-touch-icon.png +0 -0
- package/dist/assets/AgentsPage-BPhirnCe.js +7 -0
- package/dist/assets/AssistantPage-DJ-cMQfb.js +1 -0
- package/dist/assets/CronManager-DDbz-yiT.js +1 -0
- package/dist/assets/HelpPage-DMfkzERp.js +1 -0
- package/dist/assets/IntegrationsPage-CrOitCmJ.js +1 -0
- package/dist/assets/MediaPage-CE5rdvkC.js +1 -0
- package/dist/assets/PlatformDashboard-Do6F0O2p.js +1 -0
- package/dist/assets/Playground-Fc5cdc5p.js +109 -0
- package/dist/assets/ProcessPanel-CslEiZkI.js +2 -0
- package/dist/assets/PromptsPage-D2EhsdNO.js +4 -0
- package/dist/assets/RunsPage-C5BZF5Rx.js +1 -0
- package/dist/assets/SandboxManager-a1AVI5q2.js +8 -0
- package/dist/assets/SettingsPage-DirhjQrJ.js +51 -0
- package/dist/assets/SocialMediaPage-DBuM28vD.js +1 -0
- package/dist/assets/TailscalePage-CHiFhZXF.js +1 -0
- package/dist/assets/TelephonyPage-x0VV0fOo.js +1 -0
- package/dist/assets/TerminalPage-Drwyrnfd.js +1 -0
- package/dist/assets/gemini-audio-t-TSU-To.js +17 -0
- package/dist/assets/gemini-live-client-C7rqAW7G.js +166 -0
- package/dist/assets/index-C8M_PUmX.css +32 -0
- package/dist/assets/index-CEqZnThB.js +204 -0
- package/dist/assets/sw-register-LSSpj6RU.js +1 -0
- package/dist/assets/time-ago-B6r_l9u1.js +1 -0
- package/dist/assets/workbox-window.prod.es5-BIl4cyR9.js +2 -0
- package/dist/favicon-32-original.png +0 -0
- package/dist/favicon-32.png +0 -0
- package/dist/favicon.ico +0 -0
- package/dist/favicon.svg +8 -0
- package/dist/fonts/MesloLGSNerdFontMono-Bold.woff2 +0 -0
- package/dist/fonts/MesloLGSNerdFontMono-Regular.woff2 +0 -0
- package/dist/heyhank-mascot-poster.png +0 -0
- package/dist/heyhank-mascot.mp4 +0 -0
- package/dist/heyhank-mascot.webm +0 -0
- package/dist/icon-192-original.png +0 -0
- package/dist/icon-192.png +0 -0
- package/dist/icon-512-original.png +0 -0
- package/dist/icon-512.png +0 -0
- package/dist/index.html +21 -0
- package/dist/logo-192.png +0 -0
- package/dist/logo-512.png +0 -0
- package/dist/logo-codex.svg +14 -0
- package/dist/logo-docker.svg +4 -0
- package/dist/logo-original.png +0 -0
- package/dist/logo.png +0 -0
- package/dist/logo.svg +14 -0
- package/dist/manifest.json +24 -0
- package/dist/push-sw.js +34 -0
- package/dist/sw.js +1 -0
- package/dist/workbox-d2a0910a.js +1 -0
- package/package.json +109 -0
- package/server/agent-cron-migrator.ts +85 -0
- package/server/agent-executor.ts +357 -0
- package/server/agent-store.ts +185 -0
- package/server/agent-timeout.ts +107 -0
- package/server/agent-types.ts +122 -0
- package/server/ai-validation-settings.ts +37 -0
- package/server/ai-validator.ts +181 -0
- package/server/anthropic-provider-migration.ts +48 -0
- package/server/assistant-store.ts +272 -0
- package/server/auth-manager.ts +150 -0
- package/server/auto-approve.ts +153 -0
- package/server/auto-namer.ts +36 -0
- package/server/backend-adapter.ts +54 -0
- package/server/cache-headers.ts +61 -0
- package/server/calendar-service.ts +434 -0
- package/server/claude-adapter.ts +889 -0
- package/server/claude-container-auth.ts +30 -0
- package/server/claude-session-discovery.ts +157 -0
- package/server/claude-session-history.ts +410 -0
- package/server/cli-launcher.ts +1303 -0
- package/server/codex-adapter.ts +3027 -0
- package/server/codex-container-auth.ts +24 -0
- package/server/codex-home.ts +27 -0
- package/server/codex-ws-proxy.cjs +226 -0
- package/server/commands-discovery.ts +81 -0
- package/server/constants.ts +7 -0
- package/server/container-manager.ts +1053 -0
- package/server/cost-tracker.ts +222 -0
- package/server/cron-scheduler.ts +243 -0
- package/server/cron-store.ts +148 -0
- package/server/cron-types.ts +63 -0
- package/server/email-service.ts +354 -0
- package/server/env-manager.ts +161 -0
- package/server/event-bus-types.ts +75 -0
- package/server/event-bus.ts +124 -0
- package/server/execution-store.ts +170 -0
- package/server/federation/node-connection.ts +190 -0
- package/server/federation/node-manager.ts +366 -0
- package/server/federation/node-store.ts +86 -0
- package/server/federation/node-types.ts +121 -0
- package/server/fs-utils.ts +15 -0
- package/server/git-utils.ts +421 -0
- package/server/github-pr.ts +379 -0
- package/server/google-media.ts +342 -0
- package/server/image-pull-manager.ts +279 -0
- package/server/index.ts +491 -0
- package/server/internal-ai.ts +237 -0
- package/server/kill-switch.ts +99 -0
- package/server/llm-providers.ts +342 -0
- package/server/logger.ts +259 -0
- package/server/mcp-registry.ts +401 -0
- package/server/message-bus.ts +271 -0
- package/server/message-delivery.ts +128 -0
- package/server/metrics-collector.ts +350 -0
- package/server/metrics-types.ts +108 -0
- package/server/middleware/managed-auth.ts +195 -0
- package/server/novnc-proxy.ts +99 -0
- package/server/path-resolver.ts +186 -0
- package/server/paths.ts +13 -0
- package/server/pr-poller.ts +162 -0
- package/server/prompt-manager.ts +211 -0
- package/server/protocol/claude-upstream/README.md +19 -0
- package/server/protocol/claude-upstream/sdk.d.ts.txt +1943 -0
- package/server/protocol/codex-upstream/ClientNotification.ts.txt +5 -0
- package/server/protocol/codex-upstream/ClientRequest.ts.txt +60 -0
- package/server/protocol/codex-upstream/README.md +18 -0
- package/server/protocol/codex-upstream/ServerNotification.ts.txt +41 -0
- package/server/protocol/codex-upstream/ServerRequest.ts.txt +16 -0
- package/server/protocol/codex-upstream/v2/DynamicToolCallParams.ts.txt +6 -0
- package/server/protocol/codex-upstream/v2/DynamicToolCallResponse.ts.txt +6 -0
- package/server/protocol-monitor.ts +50 -0
- package/server/provider-manager.ts +111 -0
- package/server/provider-registry.ts +393 -0
- package/server/push-notifications.ts +221 -0
- package/server/recorder.ts +374 -0
- package/server/recording-hub/compat-validator.ts +284 -0
- package/server/recording-hub/diagnostics.ts +299 -0
- package/server/recording-hub/hub-config.ts +19 -0
- package/server/recording-hub/hub-routes.ts +236 -0
- package/server/recording-hub/hub-store.ts +265 -0
- package/server/recording-hub/replay-adapter.ts +207 -0
- package/server/relay-client.ts +320 -0
- package/server/reminder-scheduler.ts +38 -0
- package/server/replay.ts +78 -0
- package/server/routes/agent-routes.ts +264 -0
- package/server/routes/assistant-routes.ts +90 -0
- package/server/routes/cron-routes.ts +103 -0
- package/server/routes/env-routes.ts +95 -0
- package/server/routes/federation-routes.ts +76 -0
- package/server/routes/fs-routes.ts +622 -0
- package/server/routes/git-routes.ts +97 -0
- package/server/routes/llm-routes.ts +166 -0
- package/server/routes/media-routes.ts +135 -0
- package/server/routes/metrics-routes.ts +13 -0
- package/server/routes/platform-routes.ts +1379 -0
- package/server/routes/prompt-routes.ts +67 -0
- package/server/routes/provider-routes.ts +109 -0
- package/server/routes/sandbox-routes.ts +127 -0
- package/server/routes/settings-routes.ts +285 -0
- package/server/routes/skills-routes.ts +100 -0
- package/server/routes/socialmedia-routes.ts +208 -0
- package/server/routes/system-routes.ts +228 -0
- package/server/routes/tailscale-routes.ts +22 -0
- package/server/routes/telephony-routes.ts +259 -0
- package/server/routes.ts +1379 -0
- package/server/sandbox-manager.ts +168 -0
- package/server/service.ts +718 -0
- package/server/session-creation-service.ts +457 -0
- package/server/session-git-info.ts +104 -0
- package/server/session-names.ts +67 -0
- package/server/session-orchestrator.ts +824 -0
- package/server/session-state-machine.ts +207 -0
- package/server/session-store.ts +146 -0
- package/server/session-types.ts +511 -0
- package/server/settings-manager.ts +149 -0
- package/server/shared-context.ts +157 -0
- package/server/socialmedia/adapter.ts +15 -0
- package/server/socialmedia/adapters/ayrshare-adapter.ts +169 -0
- package/server/socialmedia/adapters/buffer-adapter.ts +299 -0
- package/server/socialmedia/adapters/postiz-adapter.ts +298 -0
- package/server/socialmedia/manager.ts +227 -0
- package/server/socialmedia/store.ts +98 -0
- package/server/socialmedia/types.ts +89 -0
- package/server/tailscale-manager.ts +451 -0
- package/server/telephony/audio-bridge.ts +331 -0
- package/server/telephony/call-manager.ts +457 -0
- package/server/telephony/call-types.ts +108 -0
- package/server/telephony/telephony-store.ts +119 -0
- package/server/terminal-manager.ts +240 -0
- package/server/update-checker.ts +192 -0
- package/server/usage-limits.ts +225 -0
- package/server/web-push.d.ts +51 -0
- package/server/worktree-tracker.ts +84 -0
- package/server/ws-auth.ts +41 -0
- package/server/ws-bridge-browser-ingest.ts +72 -0
- package/server/ws-bridge-browser.ts +112 -0
- package/server/ws-bridge-cli-ingest.ts +81 -0
- package/server/ws-bridge-codex.ts +266 -0
- package/server/ws-bridge-controls.ts +20 -0
- package/server/ws-bridge-persist.ts +66 -0
- package/server/ws-bridge-publish.ts +79 -0
- package/server/ws-bridge-replay.ts +61 -0
- package/server/ws-bridge-types.ts +121 -0
- package/server/ws-bridge.ts +1240 -0
package/server/internal-ai.ts

@@ -0,0 +1,237 @@

```ts
/**
 * Provider-agnostic internal AI caller.
 * Used by auto-namer and ai-validator. Supports any provider from the registry
 * via OpenAI-compatible Chat Completions format, plus Anthropic Messages API.
 */
import { getSettings } from "./settings-manager.js";
import { getProviderConfig, getEnabledProviders } from "./provider-manager.js";
import { getProviderById } from "./provider-registry.js";

interface InternalAiRequest {
  systemPrompt?: string;
  userPrompt: string;
  maxTokens?: number;
  temperature?: number;
  timeoutMs?: number;
}

interface InternalAiResponse {
  text: string;
  ok: boolean;
  error?: string;
}

/** Known provider base URLs for OpenAI-compatible endpoints */
const PROVIDER_BASE_URLS: Record<string, string> = {
  openai: "https://api.openai.com/v1",
  deepseek: "https://api.deepseek.com/v1",
  mistral: "https://api.mistral.ai/v1",
  together: "https://api.together.xyz/v1",
  openrouter: "https://openrouter.ai/api/v1",
  xai: "https://api.x.ai/v1",
  groq: "https://api.groq.com/openai/v1",
  huggingface: "https://api-inference.huggingface.co/v1",
  venice: "https://api.venice.ai/api/v1",
  minimax: "https://api.minimax.chat/v1",
  moonshot: "https://api.moonshot.cn/v1",
  qwen: "https://dashscope.aliyuncs.com/compatible-mode/v1",
  "qwen-alibaba": "https://dashscope.aliyuncs.com/compatible-mode/v1",
  chutes: "https://api.chutes.ai/v1",
  zai: "https://open.bigmodel.cn/api/paas/v4",
};

/** Default small/fast models per provider (for cheap internal tasks) */
const PROVIDER_INTERNAL_MODELS: Record<string, string> = {
  anthropic: "claude-haiku-4-5-20251001",
  openai: "gpt-4o-mini",
  google: "gemini-2.0-flash",
  mistral: "mistral-small-latest",
  deepseek: "deepseek-chat",
  together: "meta-llama/Llama-3.1-8B-Instruct-Turbo",
  openrouter: "meta-llama/llama-3.1-8b-instruct:free",
  xai: "grok-3-mini",
  ollama: "llama3.1",
  qwen: "qwen-turbo",
};

/**
 * Resolve which provider and credentials to use for internal AI calls.
 * Priority: settings.internalAiProvider > first enabled provider > anthropic legacy key
 */
function resolveProvider(): { providerId: string; apiKey: string; baseUrl: string; model: string } | null {
  const settings = getSettings();

  // 1. Check explicit internalAiProvider setting
  const explicitId = settings.internalAiProvider as string | undefined;
  if (explicitId) {
    const result = resolveProviderById(explicitId);
    if (result) return result;
  }

  // 2. Legacy: check anthropicApiKey in settings
  if (settings.anthropicApiKey?.trim()) {
    return {
      providerId: "anthropic",
      apiKey: settings.anthropicApiKey.trim(),
      baseUrl: "https://api.anthropic.com/v1/messages",
      model: settings.anthropicModel?.trim() || "claude-haiku-4-5-20251001",
    };
  }

  // 3. Try first enabled provider
  const enabled = getEnabledProviders();
  for (const cfg of enabled) {
    const result = resolveProviderById(cfg.providerId);
    if (result) return result;
  }

  return null;
}

function resolveProviderById(id: string): { providerId: string; apiKey: string; baseUrl: string; model: string } | null {
  const def = getProviderById(id);
  if (!def) return null;

  const cfg = getProviderConfig(id);
  if (!cfg) return null;

  // Get API key from the first secret envField
  const secretField = def.envFields.find((f) => f.secret && f.required);
  const apiKey = secretField ? (cfg.envValues[secretField.key] || "") : "";

  // Get base URL from non-secret URL field or known defaults
  const urlField = def.envFields.find((f) => f.key.includes("BASE_URL"));
  let baseUrl = urlField ? (cfg.envValues[urlField.key] || "") : "";

  if (!baseUrl) {
    baseUrl = PROVIDER_BASE_URLS[id] || "";
  }

  // Special case: Ollama defaults
  if (id === "ollama" && !baseUrl) {
    baseUrl = "http://localhost:11434/v1";
  }

  const model = cfg.customModel || PROVIDER_INTERNAL_MODELS[id] || def.defaultModel || "";

  if (id === "anthropic" && apiKey) {
    return { providerId: id, apiKey, baseUrl: "https://api.anthropic.com/v1/messages", model };
  }

  if (!baseUrl || (!apiKey && id !== "ollama")) return null;
  return { providerId: id, apiKey, baseUrl, model };
}

/**
 * Call the configured internal AI provider.
 */
export async function callInternalAI(req: InternalAiRequest): Promise<InternalAiResponse> {
  const provider = resolveProvider();
  if (!provider) {
    return { text: "", ok: false, error: "No AI provider configured for internal features" };
  }

  const timeout = req.timeoutMs || 15_000;
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeout);

  try {
    if (provider.providerId === "anthropic") {
      return await callAnthropic(provider, req, controller.signal);
    }
    return await callOpenAICompatible(provider, req, controller.signal);
  } catch (err) {
    const isAbort = err instanceof Error && err.name === "AbortError";
    return {
      text: "",
      ok: false,
      error: isAbort ? "AI request timed out" : `AI request failed: ${err instanceof Error ? err.message : String(err)}`,
    };
  } finally {
    clearTimeout(timer);
  }
}

async function callAnthropic(
  provider: { apiKey: string; baseUrl: string; model: string },
  req: InternalAiRequest,
  signal: AbortSignal,
): Promise<InternalAiResponse> {
  const body: Record<string, unknown> = {
    model: provider.model,
    max_tokens: req.maxTokens || 256,
    messages: [{ role: "user", content: req.userPrompt }],
    temperature: req.temperature ?? 0.2,
  };
  if (req.systemPrompt) body.system = req.systemPrompt;

  const res = await fetch(provider.baseUrl, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-api-key": provider.apiKey,
      "anthropic-version": "2023-06-01",
    },
    body: JSON.stringify(body),
    signal,
  });

  if (!res.ok) {
    return { text: "", ok: false, error: `Anthropic API error: ${res.status}` };
  }

  const data = await res.json() as { content?: Array<{ type: string; text?: string }> };
  const text = data.content?.[0]?.type === "text" ? (data.content[0].text ?? "") : "";
  return { text, ok: true };
}

async function callOpenAICompatible(
  provider: { apiKey: string; baseUrl: string; model: string },
  req: InternalAiRequest,
  signal: AbortSignal,
): Promise<InternalAiResponse> {
  const url = provider.baseUrl.replace(/\/+$/, "") + "/chat/completions";
  const messages: Array<{ role: string; content: string }> = [];
  if (req.systemPrompt) messages.push({ role: "system", content: req.systemPrompt });
  messages.push({ role: "user", content: req.userPrompt });

  const headers: Record<string, string> = { "Content-Type": "application/json" };
  if (provider.apiKey) headers["Authorization"] = `Bearer ${provider.apiKey}`;

  const res = await fetch(url, {
    method: "POST",
    headers,
    body: JSON.stringify({
      model: provider.model,
      max_tokens: req.maxTokens || 256,
      messages,
      temperature: req.temperature ?? 0.2,
    }),
    signal,
  });

  if (!res.ok) {
    return { text: "", ok: false, error: `AI API error: ${res.status}` };
  }

  const data = await res.json() as { choices?: Array<{ message?: { content?: string } }> };
  const text = data.choices?.[0]?.message?.content ?? "";
  return { text, ok: true };
}

/**
 * Check if any AI provider is configured for internal features.
 */
export function hasInternalAI(): boolean {
  return resolveProvider() !== null;
}

/**
 * Get the name of the currently configured internal AI provider.
 */
export function getInternalAIProviderName(): string | null {
  const provider = resolveProvider();
  if (!provider) return null;
  const def = getProviderById(provider.providerId);
  return def?.name ?? provider.providerId;
}
```
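The header comment says this module backs the auto-namer and ai-validator features. As a reading aid, here is a minimal usage sketch that is not part of the package: `suggestSessionName` and its prompts are hypothetical, and only `callInternalAI` and `hasInternalAI` are exports actually shown in the diff above.

```ts
// Hypothetical caller of internal-ai.ts (illustration only).
import { callInternalAI, hasInternalAI } from "./internal-ai.js";

async function suggestSessionName(firstMessage: string): Promise<string | null> {
  if (!hasInternalAI()) return null; // no provider configured; caller falls back

  const res = await callInternalAI({
    systemPrompt: "Reply with a 3-5 word title. No quotes or punctuation.",
    userPrompt: firstMessage,
    maxTokens: 32,      // module default is 256
    temperature: 0.2,   // matches the module's own default
    timeoutMs: 10_000,  // default 15s; enforced via AbortController
  });

  return res.ok ? res.text.trim() : null; // res.error describes any failure
}
```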
package/server/kill-switch.ts

@@ -0,0 +1,99 @@

```ts
// ─── Kill Switch (6-Layer Safety) ────────────────────────────────────────────
// Ported from AgentManager/src/kill-switch.ts — simplified for HeyHank

import {
  existsSync,
  mkdirSync,
  readFileSync,
  writeFileSync,
  unlinkSync,
} from "node:fs";
import { join, dirname } from "node:path";
import { HEYHANK_HOME } from "./paths.js";

// ─── Types ───────────────────────────────────────────────────────────────────

export interface KillSwitchState {
  killed: boolean;
  reason?: string;
  activatedAt?: string;
}

// ─── Constants ───────────────────────────────────────────────────────────────

const KILL_SWITCH_FILE = join(HEYHANK_HOME, "kill-switch.json");

// ─── State ───────────────────────────────────────────────────────────────────

let state: KillSwitchState = { killed: false };

// ─── Core Functions ──────────────────────────────────────────────────────────

/** Fast in-memory check (hot path). */
export function isKilled(): boolean {
  return state.killed;
}

/** Get full kill switch state. */
export function getKillSwitchState(): KillSwitchState {
  return { ...state };
}

/** Activate the kill switch. */
export function activate(reason?: string): KillSwitchState {
  state = {
    killed: true,
    reason: reason || "Manual activation",
    activatedAt: new Date().toISOString(),
  };

  // Persist to disk
  try {
    mkdirSync(dirname(KILL_SWITCH_FILE), { recursive: true });
    writeFileSync(KILL_SWITCH_FILE, JSON.stringify(state, null, 2), "utf-8");
  } catch (err) {
    console.error("[kill-switch] Failed to persist:", err);
  }

  console.warn(
    `[kill-switch] ⛔ ACTIVATED: ${state.reason} at ${state.activatedAt}`,
  );
  return { ...state };
}

/** Deactivate the kill switch. */
export function deactivate(): KillSwitchState {
  state = { killed: false };

  // Remove persisted file
  try {
    if (existsSync(KILL_SWITCH_FILE)) {
      unlinkSync(KILL_SWITCH_FILE);
    }
  } catch (err) {
    console.error("[kill-switch] Failed to remove persisted file:", err);
  }

  console.log("[kill-switch] ✅ Deactivated");
  return { ...state };
}

/** Load persisted state on startup. */
export function loadPersistedState(): void {
  try {
    if (!existsSync(KILL_SWITCH_FILE)) return;
    const raw = readFileSync(KILL_SWITCH_FILE, "utf-8");
    const persisted = JSON.parse(raw) as KillSwitchState;
    if (persisted.killed) {
      state = persisted;
      console.warn(
        `[kill-switch] ⛔ Restored KILLED state: ${persisted.reason} (since ${persisted.activatedAt})`,
      );
    }
  } catch (err) {
    console.error("[kill-switch] Failed to load persisted state:", err);
  }
}

// Load on import
loadPersistedState();
```
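For context, the module keeps a fast in-memory flag, persists activations to `kill-switch.json` under `HEYHANK_HOME`, and reloads that file on import. Below is a small sketch of how a caller might use it; `dispatchAgentRun` is hypothetical, while `isKilled` and `activate` are the module's real exports:

```ts
// Hypothetical dispatcher guarding its hot path with the kill switch.
import { activate, isKilled } from "./kill-switch.js";

async function dispatchAgentRun(run: () => Promise<void>): Promise<void> {
  // Cheap in-memory check; no disk I/O on the hot path.
  if (isKilled()) {
    throw new Error("Kill switch active: refusing new work");
  }
  await run();
}

// e.g. from an admin route: the killed state survives restarts because
// activate() writes kill-switch.json and loadPersistedState() runs on import.
activate("Manual stop from dashboard");
```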
package/server/llm-providers.ts

@@ -0,0 +1,342 @@

```ts
// ─── Multi-LLM Provider System ───────────────────────────────────────────────
// Unified interface for calling different LLM backends
// Used by agents that don't need the full CLI WebSocket bridge

// ─── Types ───────────────────────────────────────────────────────────────────

export interface LLMMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

export interface LLMResponse {
  content: string;
  model: string;
  provider: string;
  tokensIn?: number;
  tokensOut?: number;
  estimatedCost?: number;
}

export interface LLMStreamChunk {
  content: string;
  done: boolean;
}

export interface LLMProviderConfig {
  provider: "ollama" | "openrouter" | "gemini";
  model: string;
  apiKey?: string;
  baseUrl?: string;
  temperature?: number;
  maxTokens?: number;
}

// ─── Cost Estimates (USD per 1M tokens) ──────────────────────────────────────

const COST_TABLE: Record<string, { input: number; output: number }> = {
  // OpenRouter pricing (approximate)
  "gpt-4o": { input: 2.5, output: 10 },
  "gpt-4o-mini": { input: 0.15, output: 0.6 },
  "claude-3-5-sonnet": { input: 3, output: 15 },
  "deepseek-chat": { input: 0.14, output: 0.28 },
  "deepseek-coder": { input: 0.14, output: 0.28 },
  // Ollama is free (local)
  default: { input: 0, output: 0 },
};

function estimateCost(
  model: string,
  tokensIn: number,
  tokensOut: number,
): number {
  const pricing = COST_TABLE[model] ?? COST_TABLE["default"];
  return (
    (tokensIn / 1_000_000) * pricing.input +
    (tokensOut / 1_000_000) * pricing.output
  );
}

// ─── Ollama Provider ─────────────────────────────────────────────────────────

async function callOllama(
  messages: LLMMessage[],
  config: LLMProviderConfig,
): Promise<LLMResponse> {
  const baseUrl = config.baseUrl || "http://localhost:11434";
  const response = await fetch(`${baseUrl}/api/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model: config.model,
      messages: messages.map((m) => ({
        role: m.role,
        content: m.content,
      })),
      stream: false,
      options: {
        temperature: config.temperature ?? 0.7,
        num_predict: config.maxTokens ?? 4096,
      },
    }),
  });

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`Ollama error ${response.status}: ${text}`);
  }

  const data = (await response.json()) as {
    message: { content: string };
    model: string;
    eval_count?: number;
    prompt_eval_count?: number;
  };

  const tokensIn = data.prompt_eval_count ?? 0;
  const tokensOut = data.eval_count ?? 0;

  return {
    content: data.message.content,
    model: data.model,
    provider: "ollama",
    tokensIn,
    tokensOut,
    estimatedCost: 0, // Local = free
  };
}

/** Stream from Ollama. Yields chunks. */
export async function* streamOllama(
  messages: LLMMessage[],
  config: LLMProviderConfig,
): AsyncGenerator<LLMStreamChunk> {
  const baseUrl = config.baseUrl || "http://localhost:11434";
  const response = await fetch(`${baseUrl}/api/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model: config.model,
      messages: messages.map((m) => ({
        role: m.role,
        content: m.content,
      })),
      stream: true,
      options: {
        temperature: config.temperature ?? 0.7,
        num_predict: config.maxTokens ?? 4096,
      },
    }),
  });

  if (!response.ok) {
    throw new Error(`Ollama stream error ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) throw new Error("No response body");

  const decoder = new TextDecoder();
  let buffer = "";

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    buffer = lines.pop() || "";

    for (const line of lines) {
      if (!line.trim()) continue;
      try {
        const data = JSON.parse(line) as {
          message?: { content: string };
          done: boolean;
        };
        yield {
          content: data.message?.content || "",
          done: data.done,
        };
      } catch {
        // Skip malformed lines
      }
    }
  }
}

// ─── OpenRouter Provider ─────────────────────────────────────────────────────

async function callOpenRouter(
  messages: LLMMessage[],
  config: LLMProviderConfig,
): Promise<LLMResponse> {
  const apiKey = config.apiKey || process.env.OPENROUTER_API_KEY;
  if (!apiKey) {
    throw new Error("OpenRouter API key required (set OPENROUTER_API_KEY)");
  }

  const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`,
      "HTTP-Referer": "https://github.com/maxx-agent/platform",
      "X-Title": "Agent Platform",
    },
    body: JSON.stringify({
      model: config.model,
      messages,
      temperature: config.temperature ?? 0.7,
      max_tokens: config.maxTokens ?? 4096,
    }),
  });

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`OpenRouter error ${response.status}: ${text}`);
  }

  const data = (await response.json()) as {
    choices: Array<{ message: { content: string } }>;
    model: string;
    usage?: { prompt_tokens: number; completion_tokens: number };
  };

  const tokensIn = data.usage?.prompt_tokens ?? 0;
  const tokensOut = data.usage?.completion_tokens ?? 0;

  return {
    content: data.choices[0]?.message?.content || "",
    model: data.model,
    provider: "openrouter",
    tokensIn,
    tokensOut,
    estimatedCost: estimateCost(config.model, tokensIn, tokensOut),
  };
}

// ─── Gemini Provider ─────────────────────────────────────────────────────────

async function callGemini(
  messages: LLMMessage[],
  config: LLMProviderConfig,
): Promise<LLMResponse> {
  const apiKey = config.apiKey || process.env.GEMINI_API_KEY;
  if (!apiKey) {
    throw new Error("Gemini API key required (set GEMINI_API_KEY)");
  }

  const model = config.model || "gemini-2.5-flash";

  // Convert messages to Gemini format
  const systemInstruction = messages
    .filter((m) => m.role === "system")
    .map((m) => m.content)
    .join("\n");

  const contents = messages
    .filter((m) => m.role !== "system")
    .map((m) => ({
      role: m.role === "assistant" ? "model" : "user",
      parts: [{ text: m.content }],
    }));

  const response = await fetch(
    `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`,
    {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        contents,
        systemInstruction: systemInstruction
          ? { parts: [{ text: systemInstruction }] }
          : undefined,
        generationConfig: {
          temperature: config.temperature ?? 0.7,
          maxOutputTokens: config.maxTokens ?? 4096,
        },
      }),
    },
  );

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`Gemini error ${response.status}: ${text}`);
  }

  const data = (await response.json()) as {
    candidates: Array<{
      content: { parts: Array<{ text: string }> };
    }>;
    usageMetadata?: {
      promptTokenCount: number;
      candidatesTokenCount: number;
    };
  };

  const tokensIn = data.usageMetadata?.promptTokenCount ?? 0;
  const tokensOut = data.usageMetadata?.candidatesTokenCount ?? 0;
  const content =
    data.candidates?.[0]?.content?.parts?.map((p) => p.text).join("") || "";

  return {
    content,
    model,
    provider: "gemini",
    tokensIn,
    tokensOut,
    estimatedCost: estimateCost(model, tokensIn, tokensOut),
  };
}

// ─── Unified Call Function ───────────────────────────────────────────────────

/** Call any LLM provider with a unified interface. */
export async function callLLM(
  messages: LLMMessage[],
  config: LLMProviderConfig,
): Promise<LLMResponse> {
  switch (config.provider) {
    case "ollama":
      return callOllama(messages, config);
    case "openrouter":
      return callOpenRouter(messages, config);
    case "gemini":
      return callGemini(messages, config);
    default:
      throw new Error(`Unknown provider: ${config.provider}`);
  }
}

/** List available Ollama models. */
export async function listOllamaModels(
  baseUrl = "http://localhost:11434",
): Promise<Array<{ name: string; size: number; modified_at: string }>> {
  try {
    const response = await fetch(`${baseUrl}/api/tags`);
    if (!response.ok) return [];
    const data = (await response.json()) as {
      models: Array<{ name: string; size: number; modified_at: string }>;
    };
    return data.models || [];
  } catch {
    return [];
  }
}

/** Pull an Ollama model (non-blocking). */
export async function pullOllamaModel(
  model: string,
  baseUrl = "http://localhost:11434",
): Promise<void> {
  const response = await fetch(`${baseUrl}/api/pull`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ name: model, stream: false }),
  });
  if (!response.ok) {
    const text = await response.text();
    throw new Error(`Failed to pull model ${model}: ${text}`);
  }
}
```
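To illustrate the unified interface, a short sketch under stated assumptions: the import path and the `llama3.1` model name are examples, and the top-level `await` assumes an ES module context. The cost arithmetic at the end simply applies `estimateCost` to the table above.

```ts
// Example use of the unified LLM interface (illustration only).
import { callLLM, streamOllama, type LLMMessage } from "./llm-providers.js";

const messages: LLMMessage[] = [
  { role: "system", content: "You are a concise assistant." },
  { role: "user", content: "Explain a kill switch in one line." },
];

// One-shot call through a local Ollama model (estimatedCost is 0 for local).
const res = await callLLM(messages, { provider: "ollama", model: "llama3.1" });
console.log(res.content, res.tokensIn, res.tokensOut);

// Streaming variant: Ollama sends newline-delimited JSON chunks.
for await (const chunk of streamOllama(messages, { provider: "ollama", model: "llama3.1" })) {
  process.stdout.write(chunk.content);
}

// Cost example from COST_TABLE: a "gpt-4o-mini" call with 1,000 input and
// 500 output tokens estimates to (1000/1e6)*0.15 + (500/1e6)*0.6 = $0.00045.
```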