bloby-bot 0.46.2 → 0.47.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist-bloby/assets/{bloby-BnHElaWD.js → bloby-E-QLmQDW.js} +4 -4
- package/dist-bloby/assets/globals-Ci0CEj1X.js +18 -0
- package/dist-bloby/assets/globals-DriF_8Q_.css +2 -0
- package/dist-bloby/assets/{highlighted-body-OFNGDK62-B4IKFiNq.js → highlighted-body-OFNGDK62-CTiboTVa.js} +1 -1
- package/dist-bloby/assets/mermaid-GHXKKRXX-CgVqYCFU.js +1 -0
- package/dist-bloby/assets/{onboard-DoRN5jiz.js → onboard-C1uMxuk2.js} +1 -1
- package/dist-bloby/bloby.html +3 -3
- package/dist-bloby/onboard.html +3 -3
- package/package.json +1 -1
- package/supervisor/chat/OnboardWizard.tsx +327 -2
- package/supervisor/chat/src/hooks/useBlobyChat.ts +4 -4
- package/supervisor/harnesses/pi/auth-storage.ts +56 -0
- package/supervisor/harnesses/pi/sub-providers.ts +205 -0
- package/supervisor/harnesses/pi/test-completion.ts +196 -0
- package/supervisor/index.ts +31 -13
- package/worker/db.ts +27 -12
- package/worker/index.ts +88 -2
- package/dist-bloby/assets/globals-BYieEOqL.js +0 -18
- package/dist-bloby/assets/globals-BzeCWV3t.css +0 -2
- package/dist-bloby/assets/mermaid-GHXKKRXX-32SDjrR3.js +0 -1
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Pi auth storage — persistent credentials for the Bloby (pi) harness.
|
|
3
|
+
*
|
|
4
|
+
* Stored in ~/.bloby/pi-auth.json (separate from the main config.json so we
|
|
5
|
+
* can wipe/rotate the LLM credentials without touching the rest of the bot
|
|
6
|
+
* config). Iteration 1: a single active sub-provider at a time.
|
|
7
|
+
*/
|
|
8
|
+
import fs from 'fs';
|
|
9
|
+
import path from 'path';
|
|
10
|
+
import { DATA_DIR } from '../../../shared/paths.js';
|
|
11
|
+
|
|
12
|
+
export interface PiAuth {
  // Sub-provider id from the pi catalog (e.g. 'google', 'deepseek'); the only mandatory field —
  // readPiAuth() rejects any stored record that lacks it.
  subProvider: string;
  // API key; optional because local sub-providers (Ollama / LM Studio) are keyless.
  apiKey?: string;
  // Override for the sub-provider's default base URL.
  baseUrl?: string;
  // Chosen model id; the test-completion path falls back to the catalog defaultModel when absent.
  modelId?: string;
  // Epoch milliseconds, stamped by writePiAuth().
  savedAt: number;
}
|
|
19
|
+
|
|
20
|
+
// Credential file lives under DATA_DIR, deliberately separate from config.json
// so it can be wiped/rotated independently (see file header).
const PI_AUTH_PATH = path.join(DATA_DIR, 'pi-auth.json');
|
|
21
|
+
|
|
22
|
+
export function readPiAuth(): PiAuth | null {
|
|
23
|
+
try {
|
|
24
|
+
if (!fs.existsSync(PI_AUTH_PATH)) return null;
|
|
25
|
+
const raw = fs.readFileSync(PI_AUTH_PATH, 'utf-8');
|
|
26
|
+
const parsed = JSON.parse(raw);
|
|
27
|
+
if (!parsed?.subProvider) return null;
|
|
28
|
+
return parsed as PiAuth;
|
|
29
|
+
} catch {
|
|
30
|
+
return null;
|
|
31
|
+
}
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
export function writePiAuth(auth: Omit<PiAuth, 'savedAt'>): PiAuth {
|
|
35
|
+
fs.mkdirSync(DATA_DIR, { recursive: true });
|
|
36
|
+
const full: PiAuth = { ...auth, savedAt: Date.now() };
|
|
37
|
+
fs.writeFileSync(PI_AUTH_PATH, JSON.stringify(full, null, 2), { mode: 0o600 });
|
|
38
|
+
return full;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
export function clearPiAuth(): void {
|
|
42
|
+
try {
|
|
43
|
+
fs.rmSync(PI_AUTH_PATH, { force: true });
|
|
44
|
+
} catch {}
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
export function getPiAuthStatus(): { configured: boolean; subProvider?: string; modelId?: string; baseUrl?: string } {
|
|
48
|
+
const auth = readPiAuth();
|
|
49
|
+
if (!auth) return { configured: false };
|
|
50
|
+
return {
|
|
51
|
+
configured: true,
|
|
52
|
+
subProvider: auth.subProvider,
|
|
53
|
+
modelId: auth.modelId,
|
|
54
|
+
baseUrl: auth.baseUrl,
|
|
55
|
+
};
|
|
56
|
+
}
|
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Pi sub-provider catalog.
|
|
3
|
+
*
|
|
4
|
+
* The Bloby (pi) harness is a meta-provider: the user picks an underlying LLM
|
|
5
|
+
* vendor and supplies their own credentials. This file enumerates the set we
|
|
6
|
+
* currently support in the onboarding wizard plus enough metadata to drive the
|
|
7
|
+
* test-completion call without per-provider branching at the call site.
|
|
8
|
+
*
|
|
9
|
+
* Iteration 1 scope: API-key flows only. OAuth-based sub-providers (Anthropic
|
|
10
|
+
* Pro/Max, GitHub Copilot, OpenAI Codex) are deliberately out of scope — they
|
|
11
|
+
* duplicate auth flows we already ship under the dedicated Claude and OpenAI
|
|
12
|
+
* Codex harnesses.
|
|
13
|
+
*/
|
|
14
|
+
// Wire protocol the sub-provider speaks; drives dispatch in test-completion.ts
// (callByFlavor) without per-provider branching.
export type PiApiFlavor = 'openai-completions' | 'anthropic-messages' | 'google-gemini';

export interface PiSubProviderModel {
  id: string;    // model id sent verbatim to the API
  label: string; // human-readable name shown in the wizard
}

export interface PiSubProvider {
  id: string;       // stable catalog key, persisted in pi-auth.json
  name: string;     // display name
  subtitle: string; // short descriptor shown under the name
  flavor: PiApiFlavor;
  /** Default base URL — Ollama / LM Studio / custom let the user override it. */
  baseUrl?: string;
  /** Whether the user must supply a base URL (Ollama, LM Studio, custom). */
  needsBaseUrl?: boolean;
  /** Whether the user must supply an API key. Ollama defaults to false. */
  needsApiKey?: boolean;
  /** Optional: where to obtain a key (shown as a help link). */
  apiKeyUrl?: string;
  /** Hand-curated model list. `dynamic` ⇒ free-form ID input. */
  models: PiSubProviderModel[] | 'dynamic';
  /** Default model selection when the user hasn't picked one. */
  defaultModel?: string;
}
|
|
39
|
+
|
|
40
|
+
// The supported sub-provider catalog. NOTE(review): array order presumably
// drives display order in the onboarding wizard — confirm in OnboardWizard.tsx.
export const PI_SUB_PROVIDERS: PiSubProvider[] = [
  {
    id: 'google',
    name: 'Google Gemini',
    subtitle: 'Gemini 2.x via AI Studio',
    flavor: 'google-gemini',
    baseUrl: 'https://generativelanguage.googleapis.com/v1beta',
    needsApiKey: true,
    apiKeyUrl: 'https://aistudio.google.com/apikey',
    models: [
      { id: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro' },
      { id: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash' },
      { id: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash' },
    ],
    defaultModel: 'gemini-2.5-pro',
  },
  {
    id: 'deepseek',
    name: 'DeepSeek',
    subtitle: 'deepseek.com API',
    flavor: 'openai-completions',
    baseUrl: 'https://api.deepseek.com/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://platform.deepseek.com/api_keys',
    models: [
      { id: 'deepseek-chat', label: 'DeepSeek V3 (chat)' },
      { id: 'deepseek-reasoner', label: 'DeepSeek R1 (reasoner)' },
    ],
    defaultModel: 'deepseek-chat',
  },
  {
    id: 'groq',
    name: 'Groq',
    subtitle: 'Fast inference for Llama / Mixtral',
    flavor: 'openai-completions',
    baseUrl: 'https://api.groq.com/openai/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://console.groq.com/keys',
    models: [
      { id: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70B Versatile' },
      { id: 'llama-3.1-8b-instant', label: 'Llama 3.1 8B Instant' },
      { id: 'moonshotai/kimi-k2-instruct', label: 'Kimi K2 Instruct' },
    ],
    defaultModel: 'llama-3.3-70b-versatile',
  },
  {
    id: 'xai',
    name: 'xAI (Grok)',
    subtitle: 'x.ai API',
    flavor: 'openai-completions',
    baseUrl: 'https://api.x.ai/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://console.x.ai/',
    models: [
      { id: 'grok-4', label: 'Grok 4' },
      { id: 'grok-code-fast-1', label: 'Grok Code Fast 1' },
      { id: 'grok-3', label: 'Grok 3' },
    ],
    defaultModel: 'grok-4',
  },
  {
    id: 'cerebras',
    name: 'Cerebras',
    subtitle: 'Wafer-scale inference',
    flavor: 'openai-completions',
    baseUrl: 'https://api.cerebras.ai/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://cloud.cerebras.ai/?tab=api-keys',
    models: [
      { id: 'qwen-3-coder-480b', label: 'Qwen 3 Coder 480B' },
      { id: 'llama-3.3-70b', label: 'Llama 3.3 70B' },
    ],
    defaultModel: 'qwen-3-coder-480b',
  },
  {
    id: 'openrouter',
    name: 'OpenRouter',
    subtitle: 'Aggregator: 300+ models, one key',
    flavor: 'openai-completions',
    baseUrl: 'https://openrouter.ai/api/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://openrouter.ai/keys',
    // Aggregator model list is too large/volatile to curate — free-form input.
    models: 'dynamic',
    defaultModel: 'anthropic/claude-sonnet-4',
  },
  {
    id: 'mistral',
    name: 'Mistral',
    subtitle: 'mistral.ai API',
    flavor: 'openai-completions',
    baseUrl: 'https://api.mistral.ai/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://console.mistral.ai/api-keys/',
    models: [
      { id: 'mistral-large-latest', label: 'Mistral Large' },
      { id: 'codestral-latest', label: 'Codestral' },
    ],
    defaultModel: 'mistral-large-latest',
  },
  {
    id: 'openai-api',
    name: 'OpenAI (API key)',
    subtitle: 'platform.openai.com',
    flavor: 'openai-completions',
    baseUrl: 'https://api.openai.com/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://platform.openai.com/api-keys',
    models: [
      { id: 'gpt-5', label: 'GPT-5' },
      { id: 'gpt-5-mini', label: 'GPT-5 Mini' },
      { id: 'gpt-4.1', label: 'GPT-4.1' },
      { id: 'o3', label: 'o3' },
    ],
    defaultModel: 'gpt-5',
  },
  {
    id: 'anthropic-api',
    name: 'Anthropic (API key)',
    subtitle: 'console.anthropic.com',
    flavor: 'anthropic-messages',
    baseUrl: 'https://api.anthropic.com/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://console.anthropic.com/settings/keys',
    models: [
      { id: 'claude-opus-4-5', label: 'Claude Opus 4.5' },
      { id: 'claude-sonnet-4-5', label: 'Claude Sonnet 4.5' },
      { id: 'claude-haiku-4-5', label: 'Claude Haiku 4.5' },
    ],
    defaultModel: 'claude-sonnet-4-5',
  },
  {
    id: 'ollama',
    name: 'Ollama',
    subtitle: 'Local — http://localhost:11434',
    flavor: 'openai-completions',
    baseUrl: 'http://localhost:11434/v1',
    needsBaseUrl: true,
    // Local daemon: keyless by default.
    needsApiKey: false,
    apiKeyUrl: 'https://ollama.com/library',
    models: 'dynamic',
    defaultModel: 'llama3.1',
  },
  {
    id: 'lm-studio',
    name: 'LM Studio',
    subtitle: 'Local — http://localhost:1234',
    flavor: 'openai-completions',
    baseUrl: 'http://localhost:1234/v1',
    needsBaseUrl: true,
    needsApiKey: false,
    models: 'dynamic',
  },
  {
    id: 'custom',
    name: 'Custom (OpenAI-compatible)',
    subtitle: 'Any /v1/chat/completions endpoint',
    flavor: 'openai-completions',
    // No default baseUrl: the user must supply their endpoint.
    needsBaseUrl: true,
    needsApiKey: true,
    models: 'dynamic',
  },
];
|
|
202
|
+
|
|
203
|
+
export function getPiSubProvider(id: string): PiSubProvider | undefined {
|
|
204
|
+
return PI_SUB_PROVIDERS.find((p) => p.id === id);
|
|
205
|
+
}
|
|
@@ -0,0 +1,196 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Pi test-completion — single-shot, non-streaming completion call.
|
|
3
|
+
*
|
|
4
|
+
* Iteration 1 of the pi harness: just enough to verify the saved sub-provider
|
|
5
|
+
* + credentials actually reach an LLM and return text. Replaces the full
|
|
6
|
+
* pi-ai streaming stack until we vendor it alongside the agent loop.
|
|
7
|
+
*
|
|
8
|
+
* Supported API flavors:
|
|
9
|
+
* - openai-completions → POST {baseUrl}/chat/completions
|
|
10
|
+
* - anthropic-messages → POST {baseUrl}/messages
|
|
11
|
+
* - google-gemini → POST {baseUrl}/models/{modelId}:generateContent
|
|
12
|
+
*/
|
|
13
|
+
import { getPiSubProvider, type PiApiFlavor } from './sub-providers.js';
|
|
14
|
+
|
|
15
|
+
export interface PiTestCompletionInput {
  subProvider: string; // catalog id from sub-providers.ts
  apiKey?: string;     // required iff the catalog entry has needsApiKey
  baseUrl?: string;    // overrides the catalog default when set
  modelId?: string;    // overrides the catalog defaultModel when set
  prompt: string;      // user text sent as the single message
}

export interface PiTestCompletionResult {
  ok: boolean;
  text?: string;        // trimmed completion text on success
  error?: string;       // human-readable failure reason on ok: false
  status?: number;      // upstream HTTP status, when the failure was an HTTP error
  modelId?: string;     // resolved model id actually used
  subProvider?: string; // resolved catalog id actually used
}
|
|
31
|
+
|
|
32
|
+
const REQUEST_TIMEOUT_MS = 30_000;
|
|
33
|
+
|
|
34
|
+
async function timedFetch(url: string, init: RequestInit): Promise<Response> {
|
|
35
|
+
const ctl = new AbortController();
|
|
36
|
+
const timer = setTimeout(() => ctl.abort(), REQUEST_TIMEOUT_MS);
|
|
37
|
+
try {
|
|
38
|
+
return await fetch(url, { ...init, signal: ctl.signal });
|
|
39
|
+
} finally {
|
|
40
|
+
clearTimeout(timer);
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
function pickBaseUrl(input: PiTestCompletionInput): string | undefined {
|
|
45
|
+
if (input.baseUrl?.trim()) return input.baseUrl.replace(/\/+$/, '');
|
|
46
|
+
const def = getPiSubProvider(input.subProvider)?.baseUrl;
|
|
47
|
+
return def?.replace(/\/+$/, '');
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
function pickModelId(input: PiTestCompletionInput): string | undefined {
|
|
51
|
+
if (input.modelId?.trim()) return input.modelId.trim();
|
|
52
|
+
return getPiSubProvider(input.subProvider)?.defaultModel;
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
export async function runPiTestCompletion(input: PiTestCompletionInput): Promise<PiTestCompletionResult> {
|
|
56
|
+
const provider = getPiSubProvider(input.subProvider);
|
|
57
|
+
if (!provider) {
|
|
58
|
+
return { ok: false, error: `Unknown sub-provider: ${input.subProvider}` };
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
const baseUrl = pickBaseUrl(input);
|
|
62
|
+
if (!baseUrl) return { ok: false, error: 'Missing base URL' };
|
|
63
|
+
|
|
64
|
+
const modelId = pickModelId(input);
|
|
65
|
+
if (!modelId) return { ok: false, error: 'Missing model ID' };
|
|
66
|
+
|
|
67
|
+
if (provider.needsApiKey && !input.apiKey?.trim()) {
|
|
68
|
+
return { ok: false, error: 'Missing API key' };
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
try {
|
|
72
|
+
const text = await callByFlavor(provider.flavor, {
|
|
73
|
+
baseUrl,
|
|
74
|
+
modelId,
|
|
75
|
+
apiKey: input.apiKey?.trim() || '',
|
|
76
|
+
prompt: input.prompt,
|
|
77
|
+
});
|
|
78
|
+
return { ok: true, text, modelId, subProvider: provider.id };
|
|
79
|
+
} catch (err: any) {
|
|
80
|
+
return {
|
|
81
|
+
ok: false,
|
|
82
|
+
error: err?.message || String(err),
|
|
83
|
+
status: err?.status,
|
|
84
|
+
modelId,
|
|
85
|
+
subProvider: provider.id,
|
|
86
|
+
};
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
// Fully-resolved arguments handed to the flavor-specific callers: all
// defaults/fallbacks have already been applied by runPiTestCompletion.
interface DispatchArgs {
  baseUrl: string; // trailing slashes already stripped
  modelId: string;
  apiKey: string;  // '' for keyless providers (Ollama / LM Studio)
  prompt: string;
}
|
|
96
|
+
|
|
97
|
+
async function callByFlavor(flavor: PiApiFlavor, args: DispatchArgs): Promise<string> {
|
|
98
|
+
switch (flavor) {
|
|
99
|
+
case 'openai-completions':
|
|
100
|
+
return callOpenAICompletions(args);
|
|
101
|
+
case 'anthropic-messages':
|
|
102
|
+
return callAnthropicMessages(args);
|
|
103
|
+
case 'google-gemini':
|
|
104
|
+
return callGoogleGemini(args);
|
|
105
|
+
}
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
/* ── OpenAI / OpenAI-compatible ── */
|
|
109
|
+
|
|
110
|
+
async function callOpenAICompletions({ baseUrl, modelId, apiKey, prompt }: DispatchArgs): Promise<string> {
|
|
111
|
+
const headers: Record<string, string> = { 'content-type': 'application/json' };
|
|
112
|
+
if (apiKey) headers['authorization'] = `Bearer ${apiKey}`;
|
|
113
|
+
|
|
114
|
+
const res = await timedFetch(`${baseUrl}/chat/completions`, {
|
|
115
|
+
method: 'POST',
|
|
116
|
+
headers,
|
|
117
|
+
body: JSON.stringify({
|
|
118
|
+
model: modelId,
|
|
119
|
+
messages: [{ role: 'user', content: prompt }],
|
|
120
|
+
max_tokens: 256,
|
|
121
|
+
stream: false,
|
|
122
|
+
}),
|
|
123
|
+
});
|
|
124
|
+
|
|
125
|
+
if (!res.ok) throw await httpError(res);
|
|
126
|
+
|
|
127
|
+
const body: any = await res.json();
|
|
128
|
+
const text = body?.choices?.[0]?.message?.content;
|
|
129
|
+
if (typeof text !== 'string' || !text.trim()) {
|
|
130
|
+
throw new Error(`Empty response (${JSON.stringify(body).slice(0, 200)})`);
|
|
131
|
+
}
|
|
132
|
+
return text.trim();
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
/* ── Anthropic Messages API ── */
|
|
136
|
+
|
|
137
|
+
async function callAnthropicMessages({ baseUrl, modelId, apiKey, prompt }: DispatchArgs): Promise<string> {
|
|
138
|
+
const res = await timedFetch(`${baseUrl}/messages`, {
|
|
139
|
+
method: 'POST',
|
|
140
|
+
headers: {
|
|
141
|
+
'content-type': 'application/json',
|
|
142
|
+
'x-api-key': apiKey,
|
|
143
|
+
'anthropic-version': '2023-06-01',
|
|
144
|
+
},
|
|
145
|
+
body: JSON.stringify({
|
|
146
|
+
model: modelId,
|
|
147
|
+
max_tokens: 256,
|
|
148
|
+
messages: [{ role: 'user', content: prompt }],
|
|
149
|
+
}),
|
|
150
|
+
});
|
|
151
|
+
|
|
152
|
+
if (!res.ok) throw await httpError(res);
|
|
153
|
+
|
|
154
|
+
const body: any = await res.json();
|
|
155
|
+
const block = Array.isArray(body?.content)
|
|
156
|
+
? body.content.find((b: any) => b?.type === 'text')
|
|
157
|
+
: null;
|
|
158
|
+
const text = block?.text;
|
|
159
|
+
if (typeof text !== 'string' || !text.trim()) {
|
|
160
|
+
throw new Error(`Empty response (${JSON.stringify(body).slice(0, 200)})`);
|
|
161
|
+
}
|
|
162
|
+
return text.trim();
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
/* ── Google Gemini ── */
|
|
166
|
+
|
|
167
|
+
async function callGoogleGemini({ baseUrl, modelId, apiKey, prompt }: DispatchArgs): Promise<string> {
|
|
168
|
+
const url = `${baseUrl}/models/${encodeURIComponent(modelId)}:generateContent?key=${encodeURIComponent(apiKey)}`;
|
|
169
|
+
const res = await timedFetch(url, {
|
|
170
|
+
method: 'POST',
|
|
171
|
+
headers: { 'content-type': 'application/json' },
|
|
172
|
+
body: JSON.stringify({
|
|
173
|
+
contents: [{ role: 'user', parts: [{ text: prompt }] }],
|
|
174
|
+
generationConfig: { maxOutputTokens: 256 },
|
|
175
|
+
}),
|
|
176
|
+
});
|
|
177
|
+
|
|
178
|
+
if (!res.ok) throw await httpError(res);
|
|
179
|
+
|
|
180
|
+
const body: any = await res.json();
|
|
181
|
+
const parts: any[] = body?.candidates?.[0]?.content?.parts || [];
|
|
182
|
+
const text = parts.map((p) => p?.text).filter(Boolean).join('\n').trim();
|
|
183
|
+
if (!text) throw new Error(`Empty response (${JSON.stringify(body).slice(0, 200)})`);
|
|
184
|
+
return text;
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
/* ── Helpers ── */
|
|
188
|
+
|
|
189
|
+
async function httpError(res: Response): Promise<Error> {
|
|
190
|
+
let detail = '';
|
|
191
|
+
try { detail = await res.text(); } catch {}
|
|
192
|
+
const trimmed = detail.length > 400 ? `${detail.slice(0, 400)}…` : detail;
|
|
193
|
+
const err: any = new Error(`HTTP ${res.status} ${res.statusText}${trimmed ? `: ${trimmed}` : ''}`);
|
|
194
|
+
err.status = res.status;
|
|
195
|
+
return err;
|
|
196
|
+
}
|
package/supervisor/index.ts
CHANGED
|
@@ -351,6 +351,12 @@ export async function startSupervisor() {
|
|
|
351
351
|
'POST /api/auth/codex/start',
|
|
352
352
|
'POST /api/auth/codex/cancel',
|
|
353
353
|
'GET /api/auth/codex/status',
|
|
354
|
+
'GET /api/auth/pi/providers',
|
|
355
|
+
'GET /api/auth/pi/status',
|
|
356
|
+
'POST /api/auth/pi/test',
|
|
357
|
+
'POST /api/auth/pi/save',
|
|
358
|
+
'DELETE /api/auth/pi',
|
|
359
|
+
'POST /api/auth/pi/completion',
|
|
354
360
|
'POST /api/portal/totp/setup',
|
|
355
361
|
'POST /api/portal/totp/verify-setup',
|
|
356
362
|
'POST /api/portal/totp/disable',
|
|
@@ -1327,7 +1333,11 @@ ${!connected ? `<script>
|
|
|
1327
1333
|
const data = msg.data || {};
|
|
1328
1334
|
const content = data.content;
|
|
1329
1335
|
if (!content) return;
|
|
1330
|
-
|
|
1336
|
+
// Note: we intentionally ignore data.conversationId from the client.
|
|
1337
|
+
// The server is the authority on which DB conversation this WS belongs to —
|
|
1338
|
+
// honoring a client-supplied id let stale browser state drive messages into
|
|
1339
|
+
// an orphan conv whose row had been deleted, causing FK failures on every
|
|
1340
|
+
// INSERT. Server resolution below (clientConvs → context.current → create).
|
|
1331
1341
|
|
|
1332
1342
|
// Re-read config on each message so post-onboard changes are picked up
|
|
1333
1343
|
const freshConfig = loadConfig();
|
|
@@ -1396,6 +1406,10 @@ ${!connected ? `<script>
|
|
|
1396
1406
|
});
|
|
1397
1407
|
} catch (err: any) {
|
|
1398
1408
|
log.warn(`[bloby] DB persist error: ${err.message}`);
|
|
1409
|
+
// Surface to all clients so they can flag the missing user bubble
|
|
1410
|
+
// instead of pretending it's saved. addMessage() in worker/db.ts is
|
|
1411
|
+
// self-healing for orphan convIds, so this should now be rare.
|
|
1412
|
+
broadcastBloby('chat:persist-error', { conversationId: convId, role: 'user', error: err.message });
|
|
1399
1413
|
}
|
|
1400
1414
|
|
|
1401
1415
|
// Fetch agent/user names and recent messages in parallel
|
|
@@ -1438,7 +1452,7 @@ ${!connected ? `<script>
|
|
|
1438
1452
|
// the self-chat mirror (the user's own number).
|
|
1439
1453
|
const waState = channelManager.createWaStreamState();
|
|
1440
1454
|
|
|
1441
|
-
await startConversation(convId, freshConfig.ai.model, (type, eventData) => {
|
|
1455
|
+
await startConversation(convId, freshConfig.ai.model, async (type, eventData) => {
|
|
1442
1456
|
// Track stream buffer for reconnecting clients
|
|
1443
1457
|
if (type === 'bot:typing') {
|
|
1444
1458
|
currentStreamConvId = convId;
|
|
@@ -1493,19 +1507,23 @@ ${!connected ? `<script>
|
|
|
1493
1507
|
return;
|
|
1494
1508
|
}
|
|
1495
1509
|
|
|
1496
|
-
// Save assistant response to DB
|
|
1510
|
+
// Save assistant response to DB BEFORE broadcasting so a refresh
|
|
1511
|
+
// immediately after the bubble appears can't race the INSERT and lose
|
|
1512
|
+
// the message. addMessage() in worker/db.ts is self-healing —
|
|
1513
|
+
// it INSERT OR IGNOREs the parent conversation row first, so even an
|
|
1514
|
+
// orphan convId persists cleanly.
|
|
1497
1515
|
if (type === 'bot:response') {
|
|
1498
1516
|
currentStreamBuffer = '';
|
|
1499
|
-
|
|
1500
|
-
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
|
|
1504
|
-
|
|
1505
|
-
|
|
1506
|
-
|
|
1507
|
-
}
|
|
1508
|
-
}
|
|
1517
|
+
try {
|
|
1518
|
+
await workerApi(`/api/conversations/${convId}/messages`, 'POST', {
|
|
1519
|
+
role: 'assistant', content: eventData.content, meta: { model: freshConfig.ai.model },
|
|
1520
|
+
});
|
|
1521
|
+
} catch (err: any) {
|
|
1522
|
+
log.warn(`[bloby] DB persist bot response error: ${err.message}`);
|
|
1523
|
+
// Tell clients the bubble they're about to see is not durable —
|
|
1524
|
+
// they can flag/retry rather than silently losing it on refresh.
|
|
1525
|
+
broadcastBloby('chat:persist-error', { conversationId: convId, role: 'assistant', error: err.message });
|
|
1526
|
+
}
|
|
1509
1527
|
}
|
|
1510
1528
|
|
|
1511
1529
|
// Stream all events to every connected client
|
package/worker/db.ts
CHANGED
|
@@ -93,10 +93,18 @@ export function deleteConversation(id: string) {
|
|
|
93
93
|
|
|
94
94
|
// Messages
|
|
95
95
|
export function addMessage(convId: string, role: string, content: string, meta?: { tokens_in?: number; tokens_out?: number; model?: string; audio_data?: string; attachments?: string }) {
  // Self-heal: if the conversation row is missing (orphan live convId, harness session
  // drift, deleted parent, etc.), create it so the FK constraint never fires.
  // Use the first user message as title; assistant-first stays NULL (filled by UI).
  const tx = db.transaction(() => {
    db.prepare('INSERT OR IGNORE INTO conversations (id, title, model) VALUES (?, ?, ?)')
      .run(convId, role === 'user' ? content.slice(0, 80) : null, meta?.model ?? null);
    // RETURNING * hands back the inserted row so callers get the generated id/timestamps.
    const msg = db.prepare('INSERT INTO messages (conversation_id, role, content, tokens_in, tokens_out, model, audio_data, attachments) VALUES (?, ?, ?, ?, ?, ?, ?, ?) RETURNING *')
      .get(convId, role, content, meta?.tokens_in ?? null, meta?.tokens_out ?? null, meta?.model ?? null, meta?.audio_data ?? null, meta?.attachments ?? null);
    // Touch the parent so conversation lists sort by latest activity.
    db.prepare('UPDATE conversations SET updated_at = CURRENT_TIMESTAMP WHERE id = ?').run(convId);
    return msg;
  });
  // Transaction keeps the heal + insert + touch atomic.
  return tx() as any;
}
|
|
101
109
|
export function getMessages(convId: string) {
|
|
102
110
|
return db.prepare('SELECT * FROM messages WHERE conversation_id = ? ORDER BY created_at ASC').all(convId);
|
|
@@ -177,22 +185,29 @@ export function deleteAllTrustedDevices() {
|
|
|
177
185
|
db.prepare('DELETE FROM trusted_devices').run();
|
|
178
186
|
}
|
|
179
187
|
|
|
180
|
-
// Recent messages (for context injection)
|
|
188
|
+
// Recent messages (for context injection).
// Order by rowid (monotonic insertion order) — created_at has 1-second resolution
// so rapid-fire messages can collide. rowid never does.
// rowid is a hidden column, so `SELECT *` omits it — we must alias it explicitly
// in the inner query for the outer ORDER BY to reach it.
// Inner query takes the newest `limit` rows; outer query flips them back to
// chronological order for the prompt builder.
export function getRecentMessages(convId: string, limit = 20) {
  return db.prepare(`
    SELECT * FROM (
      SELECT messages.*, messages.rowid AS _rid FROM messages
      WHERE conversation_id = ? ORDER BY messages.rowid DESC LIMIT ?
    ) sub ORDER BY _rid ASC
  `).all(convId, limit);
}
|
|
188
201
|
|
|
189
|
-
// Cursor-based pagination: messages before a given
|
|
202
|
+
// Cursor-based pagination: messages before a given message id.
// Use rowid for comparison — message.id is random hex so `id < ?` is meaningless.
// The subselect maps the cursor id to its rowid; rows older than the cursor are
// taken newest-first, then flipped back to chronological order by the outer query.
export function getMessagesBefore(convId: string, beforeId: string, limit = 20) {
  return db.prepare(`
    SELECT * FROM (
      SELECT messages.*, messages.rowid AS _rid FROM messages
      WHERE conversation_id = ?
        AND messages.rowid < (SELECT rowid FROM messages WHERE id = ?)
      ORDER BY messages.rowid DESC LIMIT ?
    ) sub ORDER BY _rid ASC
  `).all(convId, beforeId, limit);
}
|