bloby-bot 0.46.3 → 0.47.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30) hide show
  1. package/bin/cli.js +42 -8
  2. package/dist-bloby/assets/{bloby-BnHElaWD.js → bloby-E-QLmQDW.js} +4 -4
  3. package/dist-bloby/assets/globals-Ci0CEj1X.js +18 -0
  4. package/dist-bloby/assets/globals-DriF_8Q_.css +2 -0
  5. package/dist-bloby/assets/{highlighted-body-OFNGDK62-B4IKFiNq.js → highlighted-body-OFNGDK62-CTiboTVa.js} +1 -1
  6. package/dist-bloby/assets/mermaid-GHXKKRXX-CgVqYCFU.js +1 -0
  7. package/dist-bloby/assets/{onboard-DoRN5jiz.js → onboard-C1uMxuk2.js} +1 -1
  8. package/dist-bloby/bloby.html +3 -3
  9. package/dist-bloby/onboard.html +3 -3
  10. package/package.json +3 -2
  11. package/scripts/postinstall.js +19 -2
  12. package/scripts/sync-pi-models.ts +146 -0
  13. package/shared/config.ts +1 -1
  14. package/supervisor/bloby-agent.ts +2 -0
  15. package/supervisor/chat/OnboardWizard.tsx +327 -2
  16. package/supervisor/harnesses/pi/async-queue.ts +45 -0
  17. package/supervisor/harnesses/pi/auth-storage.ts +56 -0
  18. package/supervisor/harnesses/pi/index.ts +474 -0
  19. package/supervisor/harnesses/pi/models-catalog.generated.ts +579 -0
  20. package/supervisor/harnesses/pi/providers/stream-google.ts +156 -0
  21. package/supervisor/harnesses/pi/providers/stream.ts +21 -0
  22. package/supervisor/harnesses/pi/providers/types.ts +60 -0
  23. package/supervisor/harnesses/pi/session.ts +140 -0
  24. package/supervisor/harnesses/pi/sub-providers.ts +191 -0
  25. package/supervisor/harnesses/pi/test-completion.ts +196 -0
  26. package/supervisor/index.ts +6 -0
  27. package/worker/index.ts +86 -0
  28. package/dist-bloby/assets/globals-BYieEOqL.js +0 -18
  29. package/dist-bloby/assets/globals-BzeCWV3t.css +0 -2
  30. package/dist-bloby/assets/mermaid-GHXKKRXX-32SDjrR3.js +0 -1
@@ -0,0 +1,156 @@
1
+ /**
2
+ * Google Gemini streaming provider.
3
+ *
4
+ * Hand-written equivalent of the slice of pi-ai/providers/google.ts that bloby
5
+ * needs — text streaming via `:streamGenerateContent?alt=sse`. Function-calling
6
+ * is wired up in Phase 2; for now we drop tools and stream text only.
7
+ *
8
+ * Endpoint: POST {baseUrl}/models/{modelId}:streamGenerateContent?alt=sse&key={apiKey}
9
+ * Stream: SSE — each `data: {...}` is one candidate update.
10
+ */
11
+ import type {
12
+ PiStreamRequest,
13
+ PiStreamEvent,
14
+ PiMessage,
15
+ PiContentBlock,
16
+ PiStopReason,
17
+ } from './types.js';
18
+
19
+ /** Walk an SSE byte stream and yield each parsed JSON event. */
20
+ async function* parseSse(res: Response): AsyncIterable<any> {
21
+ if (!res.body) return;
22
+ const reader = res.body.getReader();
23
+ const decoder = new TextDecoder();
24
+ let buffer = '';
25
+ try {
26
+ while (true) {
27
+ const { value, done } = await reader.read();
28
+ if (done) break;
29
+ buffer += decoder.decode(value, { stream: true });
30
+ // SSE event boundary is a blank line. Process every complete event in buffer.
31
+ let idx;
32
+ while ((idx = buffer.indexOf('\n\n')) !== -1) {
33
+ const raw = buffer.slice(0, idx);
34
+ buffer = buffer.slice(idx + 2);
35
+ const dataLines = raw.split('\n').filter((l) => l.startsWith('data:'));
36
+ if (!dataLines.length) continue;
37
+ const data = dataLines.map((l) => l.slice(5).trimStart()).join('\n');
38
+ if (!data || data === '[DONE]') continue;
39
+ try {
40
+ yield JSON.parse(data);
41
+ } catch {
42
+ // Skip malformed chunks rather than killing the whole turn.
43
+ }
44
+ }
45
+ }
46
+ } finally {
47
+ try { reader.releaseLock(); } catch {}
48
+ }
49
+ }
50
+
51
+ function toGeminiRole(role: PiMessage['role']): 'user' | 'model' {
52
+ return role === 'assistant' ? 'model' : 'user';
53
+ }
54
+
55
+ function toGeminiParts(content: PiContentBlock[]): any[] {
56
+ const parts: any[] = [];
57
+ for (const b of content) {
58
+ if (b.type === 'text') parts.push({ text: b.text });
59
+ else if (b.type === 'image') parts.push({ inlineData: { mimeType: b.mediaType, data: b.data } });
60
+ // tool_use / tool_result are Phase 2.
61
+ }
62
+ return parts;
63
+ }
64
+
65
+ function mapStopReason(reason?: string): PiStopReason {
66
+ switch (reason) {
67
+ case 'STOP':
68
+ case 'FINISH_REASON_STOP':
69
+ return 'end_turn';
70
+ case 'MAX_TOKENS':
71
+ return 'max_tokens';
72
+ case 'SAFETY':
73
+ case 'RECITATION':
74
+ case 'OTHER':
75
+ return 'error';
76
+ default:
77
+ return 'end_turn';
78
+ }
79
+ }
80
+
81
/**
 * Stream one Gemini completion and translate SSE chunks into PiStreamEvents.
 *
 * Emission order: zero or more `text_delta`, then — if any text arrived — one
 * `text_end` carrying the full accumulated text, then a terminal `done`.
 * Failures yield a terminal `error` instead; aborting `req.signal` mid-stream
 * yields `done` with stopReason `aborted`.
 *
 * Phase 1: `req.tools` is ignored — tool calling is wired up in Phase 2.
 */
export async function* streamGoogle(req: PiStreamRequest): AsyncIterable<PiStreamEvent> {
  // AI Studio authenticates via a `key` query parameter, so the API key ends
  // up embedded in the URL — NOTE(review): make sure this URL is never logged.
  const url =
    `${req.baseUrl.replace(/\/+$/, '')}/models/${encodeURIComponent(req.modelId)}:streamGenerateContent` +
    `?alt=sse&key=${encodeURIComponent(req.apiKey)}`;

  // Filter out empty messages — Gemini rejects requests with no user content.
  const contents = req.messages
    .filter((m) => m.content.length > 0)
    .map((m) => ({ role: toGeminiRole(m.role), parts: toGeminiParts(m.content) }))
    // toGeminiParts drops tool blocks, so a message can still come out empty here.
    .filter((m) => m.parts.length > 0);

  const body: any = {
    contents,
    generationConfig: {
      // Default cap when the caller doesn't specify one.
      maxOutputTokens: req.maxOutputTokens ?? 4096,
    },
  };
  // Only attach a system instruction when there is non-whitespace content.
  if (req.systemPrompt?.trim()) {
    body.systemInstruction = { parts: [{ text: req.systemPrompt }] };
  }

  let res: Response;
  try {
    res = await fetch(url, {
      method: 'POST',
      headers: { 'content-type': 'application/json' },
      body: JSON.stringify(body),
      signal: req.signal,
    });
  } catch (err: any) {
    // Network-level failure (DNS, refused connection, abort before headers).
    yield { type: 'error', error: err?.message || String(err) };
    return;
  }

  if (!res.ok) {
    // Surface a truncated slice of the error body to aid diagnosis without
    // flooding the event with a huge payload.
    let detail = '';
    try { detail = await res.text(); } catch {}
    yield { type: 'error', error: `Google ${res.status} ${res.statusText}${detail ? `: ${detail.slice(0, 400)}` : ''}` };
    return;
  }

  let accumulated = ''; // running concatenation of every text part seen so far
  let lastFinish: string | undefined; // most recent candidate.finishReason
  let usage: { inputTokens?: number; outputTokens?: number } | undefined;

  try {
    for await (const chunk of parseSse(res)) {
      // Only the first candidate is consumed; additional candidates (if any)
      // are ignored.
      const candidate = chunk?.candidates?.[0];
      const parts: any[] = candidate?.content?.parts || [];
      for (const part of parts) {
        if (typeof part?.text === 'string' && part.text.length > 0) {
          accumulated += part.text;
          yield { type: 'text_delta', delta: part.text };
        }
      }
      if (candidate?.finishReason) lastFinish = candidate.finishReason;
      // Keep the latest usageMetadata we see — presumably the final chunk
      // carries the totals; NOTE(review): verify against the Gemini API docs.
      const usageMeta = chunk?.usageMetadata;
      if (usageMeta) {
        usage = {
          inputTokens: usageMeta.promptTokenCount,
          outputTokens: usageMeta.candidatesTokenCount,
        };
      }
    }
  } catch (err: any) {
    // A user abort surfaces as AbortError from the fetch body stream.
    if (err?.name === 'AbortError') {
      yield { type: 'done', stopReason: 'aborted' };
      return;
    }
    yield { type: 'error', error: err?.message || String(err) };
    return;
  }

  if (accumulated) yield { type: 'text_end', text: accumulated };
  yield { type: 'done', stopReason: mapStopReason(lastFinish), usage };
}
@@ -0,0 +1,21 @@
1
+ /**
2
+ * Provider dispatcher.
3
+ *
4
+ * One function that turns a `(flavor, request)` into a `PiStreamEvent`
5
+ * AsyncIterable. The session loop only knows this entry point — provider
6
+ * choice happens here based on the sub-provider's `flavor` field.
7
+ */
8
+ import type { PiApiFlavor } from '../sub-providers.js';
9
+ import type { PiStreamRequest, PiStreamEvent } from './types.js';
10
+ import { streamGoogle } from './stream-google.js';
11
+
12
+ export function streamProvider(flavor: PiApiFlavor, req: PiStreamRequest): AsyncIterable<PiStreamEvent> {
13
+ switch (flavor) {
14
+ case 'google-gemini':
15
+ return streamGoogle(req);
16
+ case 'openai-completions':
17
+ throw new Error('openai-completions streaming is not implemented yet (Phase 3).');
18
+ case 'anthropic-messages':
19
+ throw new Error('anthropic-messages streaming is not implemented yet (Phase 3).');
20
+ }
21
+ }
@@ -0,0 +1,60 @@
1
/**
 * Provider-shared types for the pi harness.
 *
 * One unified message + event shape regardless of which underlying LLM API
 * (Google Gemini, OpenAI-compatible /v1/chat/completions, Anthropic Messages)
 * is handling the request. Each provider exposes a stream function selected
 * by the `streamProvider(flavor, req)` dispatcher, and the session loop
 * consumes the resulting events without knowing the flavor.
 *
 * Modelled after pi-ai's event vocabulary (text_start/delta/end, toolcall_*,
 * done, error) so we can copy fixes from upstream if needed, but only the
 * fields bloby actually consumes are kept.
 */

/** Who authored a message; 'tool' carries tool_result blocks back to the model. */
export type PiRole = 'user' | 'assistant' | 'tool';

/** A single content block inside a message. */
export type PiContentBlock =
  | { type: 'text'; text: string }
  | { type: 'image'; mediaType: string; data: string } // data is base64-encoded bytes
  | { type: 'tool_use'; id: string; name: string; input: any }
  | { type: 'tool_result'; toolUseId: string; content: string; isError?: boolean };

/** One conversation turn: a role plus an ordered list of content blocks. */
export interface PiMessage {
  role: PiRole;
  content: PiContentBlock[];
}

/** Schema for one tool the model can call. Plain JSON Schema for input. */
export interface PiToolDef {
  name: string;
  description: string;
  inputSchema: Record<string, any>;
}

/** Everything a provider needs to make one streaming completion call. */
export interface PiStreamRequest {
  modelId: string;
  /** Provider API root; the Google provider strips trailing slashes. */
  baseUrl: string;
  apiKey: string;
  /** Sent as the system instruction when non-blank. */
  systemPrompt: string;
  messages: PiMessage[];
  /** Ignored by Phase 1 providers (tool calling lands in Phase 2). */
  tools?: PiToolDef[];
  /** Hard cap on output tokens for a single turn. */
  maxOutputTokens?: number;
  /** Optional abort signal so the session can interrupt in-flight requests. */
  signal?: AbortSignal;
}

/** Why the provider stopped emitting events for this turn. */
export type PiStopReason = 'end_turn' | 'tool_use' | 'max_tokens' | 'error' | 'aborted';

/**
 * Events yielded by a provider stream, in arrival order.
 * The Google provider ends the stream after 'done' or 'error'.
 */
export type PiStreamEvent =
  | { type: 'text_delta'; delta: string }
  | { type: 'text_end'; text: string } // full accumulated text for the turn
  | { type: 'tool_use'; id: string; name: string; input: any }
  | { type: 'done'; stopReason: PiStopReason; usage?: PiUsage }
  | { type: 'error'; error: string };

/** Token accounting as reported by the provider; fields absent when not reported. */
export interface PiUsage {
  inputTokens?: number;
  outputTokens?: number;
}
@@ -0,0 +1,140 @@
1
+ /**
2
+ * Pi agent session — the live conversation loop.
3
+ *
4
+ * Mirrors the *shape* of the Claude harness loop in `harnesses/claude.ts`:
5
+ * - one long-lived session per conversation
6
+ * - user messages arrive via an `AsyncQueue<PiMessage>` input
7
+ * - the loop drains the queue one turn at a time
8
+ * - each turn streams provider events back through a single `onEvent`
9
+ * callback the caller hooked up
10
+ *
11
+ * Phase 1 scope: text-only, no tools. Each user turn = one provider call.
12
+ * Phase 2 will plug tools into the inner loop (model emits `tool_use` →
13
+ * execute → append `tool_result` → re-stream → repeat until `end_turn`).
14
+ *
15
+ * Phase 1 explicitly does NOT spawn sub-agents — Bruno will add those later.
16
+ */
17
+ import { log } from '../../../shared/logger.js';
18
+ import type { PiApiFlavor } from './sub-providers.js';
19
+ import { streamProvider } from './providers/stream.js';
20
+ import type { PiMessage, PiStreamEvent, PiToolDef } from './providers/types.js';
21
+ import type { AsyncQueue } from './async-queue.js';
22
+
23
+ export type PiSessionEvent =
24
+ | { type: 'turn_started' }
25
+ | { type: 'text_delta'; delta: string }
26
+ | { type: 'text_end'; text: string }
27
+ | { type: 'tool_use'; id: string; name: string; input: any } // Phase 2
28
+ | { type: 'turn_complete'; usedFileTools: boolean }
29
+ | { type: 'error'; error: string };
30
+
31
+ export interface PiSessionInit {
32
+ flavor: PiApiFlavor;
33
+ modelId: string;
34
+ baseUrl: string;
35
+ apiKey: string;
36
+ systemPrompt: string;
37
+ /** Pre-loaded history before the first new user turn. */
38
+ initialMessages?: PiMessage[];
39
+ /** Phase 2 wires this through. Empty for Phase 1. */
40
+ tools?: PiToolDef[];
41
+ maxOutputTokens?: number;
42
+ /** Used to interrupt in-flight provider calls when the session ends. */
43
+ abortController: AbortController;
44
+ /** Caller's event sink — translated to bloby's `bot:*` events one layer up. */
45
+ onEvent: (evt: PiSessionEvent) => void;
46
+ }
47
+
48
+ export interface PiSession {
49
+ /** Resolves when the loop exits (queue closed or aborted). */
50
+ run(input: AsyncQueue<PiMessage>): Promise<void>;
51
+ /** Cumulative history including prefilled context and live turns. */
52
+ getMessages(): PiMessage[];
53
+ }
54
+
55
+ const FILE_TOOL_NAMES = new Set(['Write', 'Edit', 'write', 'edit']);
56
+
57
/**
 * Create a pi session around an immutable init config.
 *
 * The returned object owns the message history. `run` drains user messages
 * from the queue one at a time; each drained message becomes one provider
 * turn whose events are forwarded through `init.onEvent`. Turn failures are
 * reported as `error` events and do not stop the loop.
 */
export function createPiSession(init: PiSessionInit): PiSession {
  // Full conversation history; copied so the caller's array is never mutated.
  const messages: PiMessage[] = init.initialMessages ? [...init.initialMessages] : [];

  // Run one user turn: record the message, stream the provider response, and
  // forward events. `turn_complete` is only emitted for non-errored turns.
  async function runOneTurn(userMsg: PiMessage): Promise<void> {
    if (init.abortController.signal.aborted) return;
    messages.push(userMsg);
    init.onEvent({ type: 'turn_started' });

    let accumulated = ''; // assistant text assembled from deltas / text_end
    const usedTools = new Set<string>(); // tool names seen this turn (Phase 2)
    let errored = false;

    try {
      const stream = streamProvider(init.flavor, {
        modelId: init.modelId,
        baseUrl: init.baseUrl,
        apiKey: init.apiKey,
        systemPrompt: init.systemPrompt,
        messages,
        tools: init.tools,
        maxOutputTokens: init.maxOutputTokens,
        signal: init.abortController.signal,
      });

      for await (const evt of stream as AsyncIterable<PiStreamEvent>) {
        // NOTE(review): an abort mid-stream returns here without recording the
        // partial assistant text into `messages` — confirm that is intended.
        if (init.abortController.signal.aborted) return;
        switch (evt.type) {
          case 'text_delta':
            accumulated += evt.delta;
            init.onEvent({ type: 'text_delta', delta: evt.delta });
            break;
          case 'text_end':
            // Provider gives us the final accumulated text; trust the deltas
            // we already forwarded and reconcile state from here.
            accumulated = evt.text;
            init.onEvent({ type: 'text_end', text: evt.text });
            break;
          case 'tool_use':
            // Phase 2: execute the tool, append a tool_result message, re-stream.
            usedTools.add(evt.name);
            init.onEvent({ type: 'tool_use', id: evt.id, name: evt.name, input: evt.input });
            break;
          case 'error':
            // Remember the failure but keep draining — the provider decides
            // when the stream actually ends.
            errored = true;
            init.onEvent({ type: 'error', error: evt.error });
            break;
          case 'done':
            // Loop back if the model is waiting on a tool result (Phase 2);
            // for now `tool_use` is impossible since we don't pass tools.
            break;
        }
      }
    } catch (err: any) {
      // Swallow the error silently on abort; otherwise surface it.
      if (init.abortController.signal.aborted) return;
      errored = true;
      init.onEvent({ type: 'error', error: err?.message || String(err) });
    }

    // Record whatever assistant text arrived — even on an errored turn the
    // partial text stays in history so the next turn has context.
    if (accumulated) {
      messages.push({ role: 'assistant', content: [{ type: 'text', text: accumulated }] });
    }
    if (!errored) {
      const usedFileTools = Array.from(usedTools).some((t) => FILE_TOOL_NAMES.has(t));
      init.onEvent({ type: 'turn_complete', usedFileTools });
    }
  }

  return {
    async run(input) {
      for await (const userMsg of input) {
        if (init.abortController.signal.aborted) break;
        try {
          await runOneTurn(userMsg);
        } catch (err: any) {
          // runOneTurn reports provider errors itself; this catches bugs in
          // the turn plumbing so one bad turn can't kill the whole loop.
          log.warn(`[pi/session] Turn failed: ${err?.message || err}`);
          init.onEvent({ type: 'error', error: err?.message || String(err) });
        }
      }
    },
    getMessages() {
      // Live reference, not a copy — callers must treat it as read-only.
      return messages;
    },
  };
}
@@ -0,0 +1,191 @@
1
+ /**
2
+ * Pi sub-provider catalog.
3
+ *
4
+ * The Bloby (pi) harness is a meta-provider: the user picks an underlying LLM
5
+ * vendor and supplies their own credentials. This file enumerates the set we
6
+ * currently support in the onboarding wizard plus enough metadata to drive the
7
+ * test-completion call without per-provider branching at the call site.
8
+ *
9
+ * Iteration 1 scope: API-key flows only. OAuth-based sub-providers (Anthropic
10
+ * Pro/Max, GitHub Copilot, OpenAI Codex) are deliberately out of scope — they
11
+ * duplicate auth flows we already ship under the dedicated Claude and OpenAI
12
+ * Codex harnesses.
13
+ *
14
+ * Per-provider model lists come from `models-catalog.generated.ts`, which is
15
+ * synced from upstream pi via `npm run sync:pi-models`. Sub-providers without
16
+ * a pi mapping (Ollama, LM Studio, custom) stay `'dynamic'` — free-form ID.
17
+ */
18
+ import { PI_MODELS_CATALOG } from './models-catalog.generated.js';
19
+
20
/** Wire protocol a sub-provider speaks; selects the streaming implementation. */
export type PiApiFlavor = 'openai-completions' | 'anthropic-messages' | 'google-gemini';

/** One selectable model: stable API id plus a human-readable label. */
export interface PiSubProviderModel {
  id: string;
  label: string;
}

/** Metadata for one underlying LLM vendor selectable in the onboarding wizard. */
export interface PiSubProvider {
  /** Stable identifier used for lookup via getPiSubProvider. */
  id: string;
  /** Display name shown in the wizard. */
  name: string;
  /** Short secondary line shown under the name. */
  subtitle: string;
  flavor: PiApiFlavor;
  /** Default base URL — Ollama / LM Studio / custom let the user override it. */
  baseUrl?: string;
  /** Whether the user must supply a base URL (Ollama, LM Studio, custom). */
  needsBaseUrl?: boolean;
  /** Whether the user must supply an API key. Ollama defaults to false. */
  needsApiKey?: boolean;
  /** Optional: where to obtain a key (shown as a help link). */
  apiKeyUrl?: string;
  /** Hand-curated model list. `dynamic` ⇒ free-form ID input. */
  models: PiSubProviderModel[] | 'dynamic';
  /** Default model selection when the user hasn't picked one. */
  defaultModel?: string;
}
45
+
46
+ function fromCatalog(key: string): PiSubProviderModel[] | 'dynamic' {
47
+ const list = PI_MODELS_CATALOG[key];
48
+ return list && list.length > 0 ? list : 'dynamic';
49
+ }
50
+
51
+ function defaultFor(key: string): string | undefined {
52
+ return PI_MODELS_CATALOG[key]?.[0]?.id;
53
+ }
54
+
55
/**
 * All sub-providers the onboarding wizard offers, in display order:
 * hosted API-key vendors first, then local runtimes, then the free-form
 * custom endpoint. Model lists come from the generated catalog where a
 * mapping exists; 'dynamic' means the user types a model ID.
 */
export const PI_SUB_PROVIDERS: PiSubProvider[] = [
  // --- Hosted, Google Gemini protocol ---
  {
    id: 'google',
    name: 'Google Gemini',
    subtitle: 'AI Studio API key',
    flavor: 'google-gemini',
    baseUrl: 'https://generativelanguage.googleapis.com/v1beta',
    needsApiKey: true,
    apiKeyUrl: 'https://aistudio.google.com/apikey',
    models: fromCatalog('google'),
    defaultModel: defaultFor('google'),
  },
  // --- Hosted, OpenAI-compatible /v1/chat/completions protocol ---
  {
    id: 'deepseek',
    name: 'DeepSeek',
    subtitle: 'deepseek.com API',
    flavor: 'openai-completions',
    baseUrl: 'https://api.deepseek.com/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://platform.deepseek.com/api_keys',
    models: fromCatalog('deepseek'),
    defaultModel: defaultFor('deepseek'),
  },
  {
    id: 'groq',
    name: 'Groq',
    subtitle: 'Fast inference (Llama / Kimi / Qwen)',
    flavor: 'openai-completions',
    baseUrl: 'https://api.groq.com/openai/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://console.groq.com/keys',
    models: fromCatalog('groq'),
    defaultModel: defaultFor('groq'),
  },
  {
    id: 'xai',
    name: 'xAI (Grok)',
    subtitle: 'x.ai API',
    flavor: 'openai-completions',
    baseUrl: 'https://api.x.ai/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://console.x.ai/',
    models: fromCatalog('xai'),
    defaultModel: defaultFor('xai'),
  },
  {
    id: 'cerebras',
    name: 'Cerebras',
    subtitle: 'Wafer-scale inference',
    flavor: 'openai-completions',
    baseUrl: 'https://api.cerebras.ai/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://cloud.cerebras.ai/?tab=api-keys',
    models: fromCatalog('cerebras'),
    defaultModel: defaultFor('cerebras'),
  },
  {
    id: 'openrouter',
    name: 'OpenRouter',
    subtitle: 'Aggregator: 300+ models, one key',
    flavor: 'openai-completions',
    baseUrl: 'https://openrouter.ai/api/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://openrouter.ai/keys',
    // OpenRouter has 270+ entries — too many to list. Free-form ID input instead.
    models: 'dynamic',
    defaultModel: 'anthropic/claude-sonnet-4',
  },
  {
    id: 'mistral',
    name: 'Mistral',
    subtitle: 'mistral.ai API',
    flavor: 'openai-completions',
    baseUrl: 'https://api.mistral.ai/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://console.mistral.ai/api-keys/',
    models: fromCatalog('mistral'),
    defaultModel: defaultFor('mistral'),
  },
  {
    id: 'openai-api',
    name: 'OpenAI (API key)',
    subtitle: 'platform.openai.com',
    flavor: 'openai-completions',
    baseUrl: 'https://api.openai.com/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://platform.openai.com/api-keys',
    models: fromCatalog('openai-api'),
    defaultModel: defaultFor('openai-api'),
  },
  // --- Hosted, Anthropic Messages protocol ---
  {
    id: 'anthropic-api',
    name: 'Anthropic (API key)',
    subtitle: 'console.anthropic.com',
    flavor: 'anthropic-messages',
    baseUrl: 'https://api.anthropic.com/v1',
    needsApiKey: true,
    apiKeyUrl: 'https://console.anthropic.com/settings/keys',
    models: fromCatalog('anthropic-api'),
    defaultModel: defaultFor('anthropic-api'),
  },
  // --- Local runtimes (user supplies the base URL; no key required) ---
  {
    id: 'ollama',
    name: 'Ollama',
    subtitle: 'Local — http://localhost:11434',
    flavor: 'openai-completions',
    baseUrl: 'http://localhost:11434/v1',
    needsBaseUrl: true,
    needsApiKey: false,
    // Repurposed as a "browse models" help link for Ollama.
    apiKeyUrl: 'https://ollama.com/library',
    models: 'dynamic',
    defaultModel: 'llama3.1',
  },
  {
    id: 'lm-studio',
    name: 'LM Studio',
    subtitle: 'Local — http://localhost:1234',
    flavor: 'openai-completions',
    baseUrl: 'http://localhost:1234/v1',
    needsBaseUrl: true,
    needsApiKey: false,
    models: 'dynamic',
  },
  // --- Escape hatch: any OpenAI-compatible endpoint ---
  {
    id: 'custom',
    name: 'Custom (OpenAI-compatible)',
    subtitle: 'Any /v1/chat/completions endpoint',
    flavor: 'openai-completions',
    needsBaseUrl: true,
    needsApiKey: true,
    models: 'dynamic',
  },
];
188
+
189
+ export function getPiSubProvider(id: string): PiSubProvider | undefined {
190
+ return PI_SUB_PROVIDERS.find((p) => p.id === id);
191
+ }