swarm-code 0.1.6 → 0.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/interactive-swarm.js +13 -79
- package/dist/routing/model-resolver.d.ts +25 -0
- package/dist/routing/model-resolver.js +177 -0
- package/dist/swarm.js +12 -79
- package/dist/ui/onboarding.js +325 -83
- package/package.json +1 -1

package/dist/interactive-swarm.js
CHANGED

@@ -21,7 +21,7 @@ import * as fs from "node:fs";
 import * as path from "node:path";
 import * as readline from "node:readline";
 // Dynamic imports — ensures env.js has set process.env BEFORE pi-ai loads
-
+await import("@mariozechner/pi-ai");
 const { PythonRepl } = await import("./core/repl.js");
 const { runRlmLoop } = await import("./core/rlm.js");
 const { loadConfig } = await import("./config.js");
@@ -34,6 +34,7 @@ await import("./agents/aider.js");
 import { randomBytes } from "node:crypto";
 import { EpisodicMemory } from "./memory/episodic.js";
 import { buildSwarmSystemPrompt } from "./prompts/orchestrator.js";
+import { resolveModel } from "./routing/model-resolver.js";
 import { classifyTaskComplexity, describeAvailableAgents, FailureTracker, routeTask } from "./routing/model-router.js";
 import { ThreadManager } from "./threads/manager.js";
 import { ThreadDashboard } from "./ui/dashboard.js";
@@ -183,81 +184,6 @@ function scanDirectory(dir, maxFiles = 200, maxTotalSize = 2 * 1024 * 1024) {
     }
     return parts.join("\n");
 }
-// ── Model resolution (mirrored from swarm.ts) ───────────────────────────────
-function resolveModel(modelId) {
-    const providerKeys = {
-        anthropic: "ANTHROPIC_API_KEY",
-        openai: "OPENAI_API_KEY",
-        google: "GEMINI_API_KEY",
-    };
-    const defaultModels = {
-        anthropic: "claude-sonnet-4-6",
-        openai: "gpt-4o",
-        google: "gemini-2.5-flash",
-    };
-    const knownProviders = new Set(Object.keys(providerKeys));
-    let model;
-    let resolvedProvider = "";
-    for (const provider of getProviders()) {
-        if (!knownProviders.has(provider))
-            continue;
-        const key = providerKeys[provider];
-        if (!process.env[key])
-            continue;
-        for (const m of getModels(provider)) {
-            if (m.id === modelId) {
-                model = m;
-                resolvedProvider = provider;
-                break;
-            }
-        }
-        if (model)
-            break;
-    }
-    if (!model) {
-        for (const provider of getProviders()) {
-            if (knownProviders.has(provider))
-                continue;
-            for (const m of getModels(provider)) {
-                if (m.id === modelId) {
-                    model = m;
-                    resolvedProvider = provider;
-                    break;
-                }
-            }
-            if (model)
-                break;
-        }
-    }
-    if (!model) {
-        for (const [prov, envKey] of Object.entries(providerKeys)) {
-            if (!process.env[envKey])
-                continue;
-            const fallbackId = defaultModels[prov];
-            if (!fallbackId)
-                continue;
-            for (const p of getProviders()) {
-                if (p !== prov)
-                    continue;
-                for (const m of getModels(p)) {
-                    if (m.id === fallbackId) {
-                        model = m;
-                        resolvedProvider = prov;
-                        logWarn(`Using ${fallbackId} (${prov}) — model "${modelId}" not found`);
-                        break;
-                    }
-                }
-                if (model)
-                    break;
-            }
-            if (model)
-                break;
-        }
-    }
-    if (!model)
-        return null;
-    return { model, provider: resolvedProvider };
-}
 // ── Formatting helpers ──────────────────────────────────────────────────────
 function formatDuration(ms) {
     if (ms < 1000)
@@ -663,10 +589,18 @@ export async function runInteractiveSwarm(rawArgs) {
         config.max_session_budget_usd = args.maxBudget;
     if (args.autoRoute)
        config.auto_model_selection = true;
-    // Resolve orchestrator model
-    const
+    // Resolve orchestrator model — prefer CLI arg, then config's default_model
+    const orchestratorModelId = args.orchestratorModel !== "claude-sonnet-4-6"
+        ? args.orchestratorModel
+        : config.default_model || args.orchestratorModel;
+    // For standard pi-ai models, strip provider prefix (e.g. "anthropic/claude-sonnet-4-6" → "claude-sonnet-4-6")
+    // Ollama/OpenRouter prefixes are kept as-is (handled by resolveModel)
+    const modelLookupId = orchestratorModelId.startsWith("ollama/") || orchestratorModelId.startsWith("openrouter/")
+        ? orchestratorModelId
+        : orchestratorModelId.replace(/^(anthropic|openai|google)\//, "");
+    const resolved = resolveModel(modelLookupId, logWarn);
     if (!resolved) {
-        logError(`Could not find model "${
+        logError(`Could not find model "${orchestratorModelId}"`, "Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or GEMINI_API_KEY in your .env file, or use Ollama/OpenRouter");
         process.exit(1);
     }
     // Initialize episodic memory and failure tracker
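
Note on the hunk above: only the three first-party provider prefixes are stripped before lookup, while "ollama/" and "openrouter/" IDs pass through untouched so resolveModel can build synthetic models for them. A minimal sketch of that normalization (illustrative only; toLookupId is not a name from the package):

    // Mirrors the shipped prefix-stripping logic
    function toLookupId(id) {
        return id.startsWith("ollama/") || id.startsWith("openrouter/")
            ? id // synthetic backends keep their prefix
            : id.replace(/^(anthropic|openai|google)\//, "");
    }
    console.log(toLookupId("anthropic/claude-sonnet-4-6")); // "claude-sonnet-4-6"
    console.log(toLookupId("ollama/deepseek-coder-v2"));    // "ollama/deepseek-coder-v2"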

package/dist/routing/model-resolver.d.ts
ADDED

@@ -0,0 +1,25 @@
+/**
+ * Model resolver — resolves model IDs to pi-ai Model objects.
+ *
+ * Handles three cases:
+ * 1. Standard pi-ai models (anthropic, openai, google) — looked up from pi-ai registry
+ * 2. Ollama models (ollama/*) — creates synthetic Model<"openai-completions"> pointing at localhost:11434
+ * 3. OpenRouter models (openrouter/*) — creates synthetic Model<"openai-completions"> pointing at openrouter.ai
+ *
+ * This preserves the RLM loop for all backends — the orchestrator always uses pi-ai's completeSimple().
+ */
+import type { Api, Model } from "@mariozechner/pi-ai";
+export interface ResolvedModel {
+    model: Model<Api>;
+    provider: string;
+}
+/**
+ * Resolve a model ID to a pi-ai Model object.
+ *
+ * Supports:
+ * - "ollama/deepseek-coder-v2" → Ollama local model
+ * - "openrouter/auto" → OpenRouter cloud model
+ * - "claude-sonnet-4-6" → standard pi-ai model lookup
+ * - Falls back to any available provider's default model
+ */
+export declare function resolveModel(modelId: string, warnFn?: (msg: string) => void): ResolvedModel | null;
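
Based on the declared signature, calling code would look roughly like this (a hypothetical consumer sketch; the deep-import specifier is an assumption, since the package's exports map is not shown in this diff):

    import { resolveModel } from "swarm-code/dist/routing/model-resolver.js";

    const resolved = resolveModel("ollama/deepseek-coder-v2", (msg) => console.warn(msg));
    if (resolved) {
        // resolved.model is a pi-ai Model<Api>; resolved.provider is e.g. "ollama"
        console.log(resolved.provider, resolved.model.id);
    }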

package/dist/routing/model-resolver.js
ADDED

@@ -0,0 +1,177 @@
+/**
+ * Model resolver — resolves model IDs to pi-ai Model objects.
+ *
+ * Handles three cases:
+ * 1. Standard pi-ai models (anthropic, openai, google) — looked up from pi-ai registry
+ * 2. Ollama models (ollama/*) — creates synthetic Model<"openai-completions"> pointing at localhost:11434
+ * 3. OpenRouter models (openrouter/*) — creates synthetic Model<"openai-completions"> pointing at openrouter.ai
+ *
+ * This preserves the RLM loop for all backends — the orchestrator always uses pi-ai's completeSimple().
+ */
+const { getModels, getProviders } = await import("@mariozechner/pi-ai");
+const PROVIDER_KEYS = {
+    anthropic: "ANTHROPIC_API_KEY",
+    openai: "OPENAI_API_KEY",
+    google: "GEMINI_API_KEY",
+};
+const DEFAULT_MODELS = {
+    anthropic: "claude-sonnet-4-6",
+    openai: "gpt-4o",
+    google: "gemini-2.5-flash",
+};
+/**
+ * Create a synthetic pi-ai Model for Ollama (OpenAI-compatible API at localhost:11434).
+ */
+function createOllamaModel(modelId) {
+    const shortId = modelId.replace("ollama/", "");
+    return {
+        id: shortId,
+        name: shortId,
+        api: "openai-completions",
+        provider: "ollama",
+        baseUrl: "http://localhost:11434/v1",
+        reasoning: false,
+        input: ["text"],
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+        contextWindow: 32768,
+        maxTokens: 4096,
+        compat: {
+            supportsStore: false,
+            supportsDeveloperRole: false,
+            supportsReasoningEffort: false,
+            supportsUsageInStreaming: false,
+            maxTokensField: "max_tokens",
+            requiresToolResultName: false,
+            requiresAssistantAfterToolResult: false,
+            requiresThinkingAsText: false,
+            requiresMistralToolIds: false,
+            thinkingFormat: "openai",
+            supportsStrictMode: false,
+        },
+    };
+}
+/**
+ * Create a synthetic pi-ai Model for OpenRouter (OpenAI-compatible API).
+ */
+function createOpenRouterModel(modelId) {
+    const shortId = modelId.replace("openrouter/", "");
+    const apiKey = process.env.OPENROUTER_API_KEY || "";
+    return {
+        id: shortId,
+        name: shortId,
+        api: "openai-completions",
+        provider: "openrouter",
+        baseUrl: "https://openrouter.ai/api/v1",
+        reasoning: false,
+        input: ["text"],
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+        contextWindow: 128000,
+        maxTokens: 4096,
+        headers: {
+            Authorization: `Bearer ${apiKey}`,
+            "HTTP-Referer": "https://github.com/kingjulio8238/swarm-code",
+            "X-Title": "swarm-code",
+        },
+        compat: {
+            supportsStore: false,
+            supportsDeveloperRole: false,
+            supportsReasoningEffort: false,
+            supportsUsageInStreaming: true,
+            maxTokensField: "max_tokens",
+            requiresToolResultName: false,
+            requiresAssistantAfterToolResult: false,
+            requiresThinkingAsText: false,
+            requiresMistralToolIds: false,
+            thinkingFormat: "openai",
+            supportsStrictMode: false,
+            openRouterRouting: undefined,
+        },
+    };
+}
+/**
+ * Resolve a model ID to a pi-ai Model object.
+ *
+ * Supports:
+ * - "ollama/deepseek-coder-v2" → Ollama local model
+ * - "openrouter/auto" → OpenRouter cloud model
+ * - "claude-sonnet-4-6" → standard pi-ai model lookup
+ * - Falls back to any available provider's default model
+ */
+export function resolveModel(modelId, warnFn) {
+    // Ollama models — create synthetic model
+    if (modelId.startsWith("ollama/")) {
+        return { model: createOllamaModel(modelId), provider: "ollama" };
+    }
+    // OpenRouter models — create synthetic model
+    if (modelId.startsWith("openrouter/")) {
+        return { model: createOpenRouterModel(modelId), provider: "openrouter" };
+    }
+    // Standard pi-ai model lookup
+    const knownProviders = new Set(Object.keys(PROVIDER_KEYS));
+    let model;
+    let resolvedProvider = "";
+    // Try known providers with API keys first
+    for (const provider of getProviders()) {
+        if (!knownProviders.has(provider))
+            continue;
+        const key = PROVIDER_KEYS[provider];
+        if (!process.env[key])
+            continue;
+        for (const m of getModels(provider)) {
+            if (m.id === modelId) {
+                model = m;
+                resolvedProvider = provider;
+                break;
+            }
+        }
+        if (model)
+            break;
+    }
+    // Try unknown providers
+    if (!model) {
+        for (const provider of getProviders()) {
+            if (knownProviders.has(provider))
+                continue;
+            for (const m of getModels(provider)) {
+                if (m.id === modelId) {
+                    model = m;
+                    resolvedProvider = provider;
+                    break;
+                }
+            }
+            if (model)
+                break;
+        }
+    }
+    // Fallback: try default model for any provider that has a key
+    if (!model) {
+        for (const [prov, envKey] of Object.entries(PROVIDER_KEYS)) {
+            if (!process.env[envKey])
+                continue;
+            const fallbackId = DEFAULT_MODELS[prov];
+            if (!fallbackId)
+                continue;
+            for (const p of getProviders()) {
+                if (p !== prov)
+                    continue;
+                for (const m of getModels(p)) {
+                    if (m.id === fallbackId) {
+                        model = m;
+                        resolvedProvider = prov;
+                        if (warnFn)
+                            warnFn(`Using ${fallbackId} (${prov}) — model "${modelId}" not found`);
+                        break;
+                    }
+                }
+                if (model)
+                    break;
+            }
+            if (model)
+                break;
+        }
+    }
+    if (!model)
+        return null;
+    return { model, provider: resolvedProvider };
+}
+//# sourceMappingURL=model-resolver.js.map
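
The synthetic models simply point pi-ai's "openai-completions" API at an OpenAI-compatible baseUrl, so the endpoint itself can be sanity-checked without pi-ai. A sketch, assuming Ollama is serving locally and the model has been pulled (the endpoint shape is Ollama's OpenAI-compatible /v1 API):

    const res = await fetch("http://localhost:11434/v1/chat/completions", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            model: "deepseek-coder-v2", // the "ollama/" prefix is stripped by createOllamaModel
            messages: [{ role: "user", content: "Say hello" }],
            max_tokens: 64,
        }),
    });
    const data = await res.json();
    console.log(data.choices[0].message.content);

Note that contextWindow (32768) and maxTokens (4096) are fixed on the synthetic Ollama model regardless of the underlying model's actual limits.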

package/dist/swarm.js
CHANGED

@@ -14,7 +14,7 @@ import "./env.js";
 import * as fs from "node:fs";
 import * as path from "node:path";
 // Dynamic imports — ensures env.js has set process.env BEFORE pi-ai loads
-
+await import("@mariozechner/pi-ai");
 const { PythonRepl } = await import("./core/repl.js");
 const { runRlmLoop } = await import("./core/rlm.js");
 const { loadConfig } = await import("./config.js");
@@ -25,9 +25,11 @@ await import("./agents/claude-code.js");
 await import("./agents/codex.js");
 await import("./agents/aider.js");
 import { randomBytes } from "node:crypto";
+// Api/Model types used via resolveModel from model-resolver
 import { loadHooks, runHooks } from "./hooks/runner.js";
 import { EpisodicMemory } from "./memory/episodic.js";
 import { buildSwarmSystemPrompt } from "./prompts/orchestrator.js";
+import { resolveModel } from "./routing/model-resolver.js";
 import { classifyTaskComplexity, describeAvailableAgents, FailureTracker, routeTask } from "./routing/model-router.js";
 import { ThreadManager } from "./threads/manager.js";
 import { renderBanner } from "./ui/banner.js";
@@ -218,81 +220,6 @@ function scanDirectory(dir, maxFiles = 200, maxTotalSize = 2 * 1024 * 1024) {
     }
     return parts.join("\n");
 }
-// ── Model resolution ────────────────────────────────────────────────────────
-function resolveModel(modelId) {
-    const providerKeys = {
-        anthropic: "ANTHROPIC_API_KEY",
-        openai: "OPENAI_API_KEY",
-        google: "GEMINI_API_KEY",
-    };
-    const defaultModels = {
-        anthropic: "claude-sonnet-4-6",
-        openai: "gpt-4o",
-        google: "gemini-2.5-flash",
-    };
-    const knownProviders = new Set(Object.keys(providerKeys));
-    let model;
-    let resolvedProvider = "";
-    for (const provider of getProviders()) {
-        if (!knownProviders.has(provider))
-            continue;
-        const key = providerKeys[provider];
-        if (!process.env[key])
-            continue;
-        for (const m of getModels(provider)) {
-            if (m.id === modelId) {
-                model = m;
-                resolvedProvider = provider;
-                break;
-            }
-        }
-        if (model)
-            break;
-    }
-    if (!model) {
-        for (const provider of getProviders()) {
-            if (knownProviders.has(provider))
-                continue;
-            for (const m of getModels(provider)) {
-                if (m.id === modelId) {
-                    model = m;
-                    resolvedProvider = provider;
-                    break;
-                }
-            }
-            if (model)
-                break;
-        }
-    }
-    if (!model) {
-        for (const [prov, envKey] of Object.entries(providerKeys)) {
-            if (!process.env[envKey])
-                continue;
-            const fallbackId = defaultModels[prov];
-            if (!fallbackId)
-                continue;
-            for (const p of getProviders()) {
-                if (p !== prov)
-                    continue;
-                for (const m of getModels(p)) {
-                    if (m.id === fallbackId) {
-                        model = m;
-                        resolvedProvider = prov;
-                        logWarn(`Using ${fallbackId} (${prov}) — model "${modelId}" not found`);
-                        break;
-                    }
-                }
-                if (model)
-                    break;
-            }
-            if (model)
-                break;
-        }
-    }
-    if (!model)
-        return null;
-    return { model, provider: resolvedProvider };
-}
 // ── Main ────────────────────────────────────────────────────────────────────
 export async function runSwarmMode(rawArgs) {
     const args = parseSwarmArgs(rawArgs);
@@ -319,10 +246,16 @@ export async function runSwarmMode(rawArgs) {
         config.max_session_budget_usd = args.maxBudget;
     if (args.autoRoute)
        config.auto_model_selection = true;
-    // Resolve orchestrator model
-    const
+    // Resolve orchestrator model — prefer CLI arg, then config's default_model
+    const orchestratorModelId = args.orchestratorModel !== "claude-sonnet-4-6"
+        ? args.orchestratorModel
+        : config.default_model || args.orchestratorModel;
+    const modelLookupId = orchestratorModelId.startsWith("ollama/") || orchestratorModelId.startsWith("openrouter/")
+        ? orchestratorModelId
+        : orchestratorModelId.replace(/^(anthropic|openai|google)\//, "");
+    const resolved = resolveModel(modelLookupId, logWarn);
     if (!resolved) {
-        logError(`Could not find model "${
+        logError(`Could not find model "${orchestratorModelId}"`, "Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or GEMINI_API_KEY in your .env file, or use Ollama/OpenRouter");
         process.exit(1);
     }
     // Initialize episodic memory if enabled
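
The same precedence change lands in both entry points. Spelled out as a sketch (an illustrative restatement; pickOrchestratorModel is not a name from the package):

    // Orchestrator model precedence in swarm.js and interactive-swarm.js
    function pickOrchestratorModel(cliModel, configDefault) {
        const BUILT_IN = "claude-sonnet-4-6";
        // A CLI value equal to the built-in default is treated as "not explicitly set",
        // so an explicit --model claude-sonnet-4-6 can still be overridden by config.default_model.
        return cliModel !== BUILT_IN ? cliModel : (configDefault || cliModel);
    }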

package/dist/ui/onboarding.js
CHANGED

@@ -10,11 +10,12 @@
  *
  * Triggered once on first `swarm --dir` invocation. Saves ~/.swarm/.initialized marker.
  */
-import { spawn } from "node:child_process";
+import { execFileSync, execSync, spawn, spawn as spawnChild } from "node:child_process";
 import * as fs from "node:fs";
 import * as os from "node:os";
 import * as path from "node:path";
 import * as readline from "node:readline";
+import { fileURLToPath } from "node:url";
 import { getLogLevel, isJsonMode } from "./log.js";
 import { bold, coral, cyan, dim, green, isTTY, red, stripAnsi, symbols, termWidth, yellow } from "./theme.js";
 // ── Constants ─────────────────────────────────────────────────────────────────
@@ -22,7 +23,19 @@ const SWARM_DIR = path.join(os.homedir(), ".swarm");
 const MARKER_FILE = path.join(SWARM_DIR, ".initialized");
 const CRED_FILE = path.join(SWARM_DIR, "credentials");
 const USER_CONFIG_FILE = path.join(SWARM_DIR, "config.yaml");
-
+// Read version from package.json instead of hardcoding
+function getVersion() {
+    try {
+        const __dir = path.dirname(fileURLToPath(import.meta.url));
+        const pkgPath = path.join(__dir, "..", "..", "package.json");
+        const pkg = JSON.parse(fs.readFileSync(pkgPath, "utf-8"));
+        return pkg.version || "0.0.0";
+    }
+    catch {
+        return "0.0.0";
+    }
+}
+const VERSION = getVersion();
 /** Which API key each agent backend requires (or supports). */
 const AGENT_PROVIDERS = {
     opencode: {
@@ -127,6 +140,147 @@ async function checkAgentBackends() {
     results.push({ name: "direct-llm", ok: true, detail: "built-in" });
     return results;
 }
+// ── Ollama helpers ────────────────────────────────────────────────────────────
+function isOllamaInstalled() {
+    try {
+        execFileSync("ollama", ["--version"], { stdio: ["ignore", "pipe", "pipe"], timeout: 5000 });
+        return true;
+    }
+    catch {
+        return false;
+    }
+}
+function isOllamaModelAvailable(model) {
+    try {
+        const output = execFileSync("ollama", ["list"], {
+            encoding: "utf-8",
+            stdio: ["ignore", "pipe", "pipe"],
+            timeout: 10000,
+        });
+        return output.includes(model);
+    }
+    catch {
+        return false;
+    }
+}
+async function installOllama() {
+    process.stderr.write(`\n ${bold("Installing Ollama...")}\n\n`);
+    if (process.platform === "darwin") {
+        try {
+            execFileSync("brew", ["--version"], { stdio: "ignore", timeout: 5000 });
+            process.stderr.write(` ${dim("Using Homebrew...")}\n`);
+            try {
+                execSync("brew install ollama", { stdio: "inherit", timeout: 120000 });
+                return true;
+            }
+            catch {
+                process.stderr.write(` ${dim("Homebrew install failed, trying curl installer...")}\n`);
+            }
+        }
+        catch {
+            // No brew — fall through to curl
+        }
+    }
+    if (process.platform === "linux" || process.platform === "darwin") {
+        try {
+            execSync("curl -fsSL https://ollama.com/install.sh | sh", { stdio: "inherit", timeout: 180000 });
+            return true;
+        }
+        catch {
+            return false;
+        }
+    }
+    process.stderr.write(` ${dim("Download Ollama from: https://ollama.com/download")}\n`);
+    return false;
+}
+function isOllamaServing() {
+    try {
+        execFileSync("curl", ["-sf", "http://127.0.0.1:11434/api/tags"], {
+            stdio: ["ignore", "pipe", "pipe"],
+            timeout: 3000,
+        });
+        return true;
+    }
+    catch {
+        return false;
+    }
+}
+function startOllamaServe() {
+    const child = spawnChild("ollama", ["serve"], { stdio: "ignore", detached: true });
+    child.unref();
+}
+async function pullOllamaModel(model) {
+    process.stderr.write(`\n ${bold(`Pulling ${model}...`)} ${dim("(this may take a few minutes)")}\n\n`);
+    return new Promise((resolve) => {
+        const child = spawnChild("ollama", ["pull", model], { stdio: "inherit" });
+        child.on("close", (code) => resolve(code === 0));
+        child.on("error", () => resolve(false));
+    });
+}
+async function ensureOllamaSetup(promptFn, model) {
+    const shortModel = model.replace("ollama/", "");
+    if (!isOllamaInstalled()) {
+        process.stderr.write(`\n ${dim("Ollama is not installed. It's needed to run open-source models locally.")}\n`);
+        const prompt = promptFn();
+        const install = await prompt.ask(` ${coral(symbols.arrow)} Install Ollama now? [Y/n]: `);
+        prompt.close();
+        if (install.toLowerCase() !== "n" && install.toLowerCase() !== "no") {
+            const ok = await installOllama();
+            if (!ok) {
+                process.stderr.write(`\n ${red("Failed to install Ollama.")}\n`);
+                process.stderr.write(` ${dim("Install manually from https://ollama.com/download")}\n\n`);
+                return false;
+            }
+            process.stderr.write(` ${green(symbols.check)} Ollama installed\n`);
+        }
+        else {
+            process.stderr.write(`\n ${dim("Install later from https://ollama.com/download")}\n\n`);
+            return false;
+        }
+    }
+    else {
+        process.stderr.write(` ${green(symbols.check)} Ollama installed\n`);
+    }
+    if (!isOllamaServing()) {
+        process.stderr.write(` ${dim("Starting Ollama server...")}\n`);
+        startOllamaServe();
+        let retries = 10;
+        while (retries > 0 && !isOllamaServing()) {
+            await new Promise((r) => setTimeout(r, 1000));
+            retries--;
+        }
+        if (isOllamaServing()) {
+            process.stderr.write(` ${green(symbols.check)} Ollama server running\n`);
+        }
+        else {
+            process.stderr.write(` ${yellow(symbols.warn)} Could not start Ollama server. Run ${bold("ollama serve")} manually.\n`);
+            return false;
+        }
+    }
+    else {
+        process.stderr.write(` ${green(symbols.check)} Ollama server running\n`);
+    }
+    if (isOllamaModelAvailable(shortModel)) {
+        process.stderr.write(` ${green(symbols.check)} Model ${bold(shortModel)} ready\n\n`);
+        return true;
+    }
+    process.stderr.write(` ${dim(`Model ${shortModel} not found locally.`)}\n`);
+    const prompt = promptFn();
+    const pull = await prompt.ask(`\n ${coral(symbols.arrow)} Pull ${bold(shortModel)} now? [Y/n]: `);
+    prompt.close();
+    if (pull.toLowerCase() !== "n" && pull.toLowerCase() !== "no") {
+        const ok = await pullOllamaModel(shortModel);
+        if (!ok) {
+            process.stderr.write(`\n ${red(`Failed to pull ${shortModel}.`)}\n`);
+            process.stderr.write(` ${dim(`Try manually: ollama pull ${shortModel}`)}\n\n`);
+            return false;
+        }
+        process.stderr.write(` ${green(symbols.check)} Model ${bold(shortModel)} ready\n\n`);
+        return true;
+    }
+    process.stderr.write(`\n ${dim(`Run later: ollama pull ${shortModel}`)}\n\n`);
+    return false;
+}
 // ── Interactive helpers ───────────────────────────────────────────────────────
 function createPrompt() {
     const rl = readline.createInterface({ input: process.stdin, output: process.stderr });
@@ -396,93 +550,174 @@ export async function runOnboarding() {
         process.stderr.write(`\n ${dim("Using direct-llm (no coding agent) as fallback.")}\n`);
         chosenAgent = "direct-llm";
     }
-    // ── Step 2: Configure API keys
+    // ── Step 2: Configure backend / API keys ────────────────────────────
     const agentInfo = AGENT_PROVIDERS[chosenAgent];
     const neededKeys = agentInfo?.required ?? ["ANTHROPIC_API_KEY"];
-    // For agents that accept any provider (opencode, aider), at least one key is needed
     const needsAnyKey = neededKeys.length > 1;
-    const missingKeys = neededKeys.filter((k) => !apiKeys.has(k));
     const hasAnyNeeded = neededKeys.some((k) => apiKeys.has(k));
-
-
-
-
+    let chosenModel = "anthropic/claude-sonnet-4-6";
+    let usesOllama = false;
+    let usesOpenRouter = false;
+    // OpenCode with no API keys → offer Ollama (default) or OpenRouter
+    if (chosenAgent === "opencode" && !hasAnyNeeded) {
+        sectionHeader("Backend", w);
+        process.stderr.write(` ${bold("OpenCode")} ${dim("— choose your backend:")}\n\n`);
+        process.stderr.write(` ${cyan("1")} Ollama ${dim("Run models locally (free, requires download)")}\n`);
+        process.stderr.write(` ${cyan("2")} OpenRouter ${dim("Cloud API for 200+ models (requires API key)")}\n`);
+        process.stderr.write(` ${cyan("3")} API Key ${dim("Configure Anthropic/OpenAI/Google directly")}\n`);
+        process.stderr.write("\n");
+        const prompt = createPrompt();
+        const backendChoice = await prompt.ask(` ${coral(symbols.arrow)} Backend [1]: `);
+        prompt.close();
+        const choice = backendChoice.trim();
+        if (choice === "2") {
+            // OpenRouter setup
+            process.stderr.write("\n");
+            const keyPrompt = createPrompt();
+            const orKey = await keyPrompt.ask(` ${coral(symbols.arrow)} OPENROUTER_API_KEY: `);
+            keyPrompt.close();
+            if (orKey?.trim()) {
+                process.env.OPENROUTER_API_KEY = orKey.trim();
+                saveCredential("OPENROUTER_API_KEY", orKey.trim());
+                process.stderr.write(` ${green(symbols.check)} OpenRouter configured\n`);
+                chosenModel = "openrouter/auto";
+                usesOpenRouter = true;
+            }
+            else {
+                process.stderr.write(` ${dim("No key provided — you can set OPENROUTER_API_KEY in .env later")}\n`);
+            }
+        }
+        else if (choice === "3") {
+            // Fall through to standard API key setup below
         }
         else {
-
+            // Default: Ollama setup
+            process.stderr.write("\n");
+            const ok = await ensureOllamaSetup(createPrompt, "ollama/deepseek-coder-v2");
+            if (ok) {
+                usesOllama = true;
+                chosenModel = "ollama/deepseek-coder-v2";
+            }
         }
-
-
-
-
-        const
-        const
-
-
-
-
+        // If choice was "3", do the standard API key flow
+        if (choice === "3") {
+            sectionHeader("API Keys", w);
+            process.stderr.write(` ${bold(chosenAgent)} supports multiple providers. Configure at least one:\n\n`);
+            const missingKeys = neededKeys.filter((k) => !apiKeys.has(k));
+            for (const envVar of missingKeys) {
+                const provider = PROVIDERS.find((p) => p.envVar === envVar);
+                if (!provider)
+                    continue;
+                const kp = createPrompt();
+                const yn = await kp.ask(` ${coral(symbols.arrow)} Configure ${bold(provider.name)} (${dim(envVar)})? [y/n]: `);
+                kp.close();
+                if (yn.toLowerCase() !== "y" && yn.toLowerCase() !== "yes") {
+                    process.stderr.write(` ${dim(symbols.dash)} Skipped ${provider.name}\n`);
+                    continue;
+                }
+                process.stderr.write(` ${dim(`Paste your ${provider.name} API key (input hidden):`)}\n`);
+                const key = await readHiddenInput(` ${coral(symbols.arrow)} `);
+                if (key && key.length >= 10) {
+                    saveCredential(envVar, key);
+                    apiKeys.set(envVar, key);
+                    process.stderr.write(` ${green(symbols.check)} Saved ${provider.name} key to ${dim("~/.swarm/credentials")}\n\n`);
+                }
+                else {
+                    process.stderr.write(` ${dim("Skipped — set later in .env or ~/.swarm/credentials")}\n\n`);
+                }
+                if (apiKeys.has(envVar))
+                    break;
             }
-
-
-
-
-
+        }
+    }
+    else if (!hasAnyNeeded) {
+        // Non-opencode agent with missing keys — standard API key flow
+        const missingKeys = neededKeys.filter((k) => !apiKeys.has(k));
+        if (missingKeys.length > 0) {
+            sectionHeader("API Keys", w);
+            if (needsAnyKey) {
+                process.stderr.write(` ${bold(chosenAgent)} supports multiple providers. Configure at least one:\n\n`);
             }
             else {
-                process.stderr.write(` ${
+                process.stderr.write(` ${bold(chosenAgent)} needs the following API key(s):\n\n`);
+            }
+            for (const envVar of missingKeys) {
+                const provider = PROVIDERS.find((p) => p.envVar === envVar);
+                if (!provider)
+                    continue;
+                const prompt = createPrompt();
+                const yn = await prompt.ask(` ${coral(symbols.arrow)} Configure ${bold(provider.name)} (${dim(envVar)})? [y/n]: `);
+                prompt.close();
+                if (yn.toLowerCase() !== "y" && yn.toLowerCase() !== "yes") {
+                    process.stderr.write(` ${dim(symbols.dash)} Skipped ${provider.name}\n`);
+                    continue;
+                }
+                process.stderr.write(` ${dim(`Paste your ${provider.name} API key (input hidden):`)}\n`);
+                const key = await readHiddenInput(` ${coral(symbols.arrow)} `);
+                if (key && key.length >= 10) {
+                    saveCredential(envVar, key);
+                    apiKeys.set(envVar, key);
+                    process.stderr.write(` ${green(symbols.check)} Saved ${provider.name} key to ${dim("~/.swarm/credentials")}\n\n`);
+                }
+                else {
+                    process.stderr.write(` ${dim("Skipped — set later in .env or ~/.swarm/credentials")}\n\n`);
+                }
+                if (needsAnyKey && apiKeys.has(envVar))
+                    break;
             }
-            // For multi-provider agents, stop after first successful key
-            if (needsAnyKey && apiKeys.has(envVar))
-                break;
         }
     }
-    else
-        // Keys already configured — just confirm
+    else {
        process.stderr.write(` ${green(symbols.check)} API keys already configured\n`);
     }
     // ── Step 3: Choose default model ─────────────────────────────────────
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if (!usesOllama && !usesOpenRouter) {
+        const configuredProviders = PROVIDERS.filter((p) => apiKeys.has(p.envVar));
+        if (configuredProviders.length > 0) {
+            sectionHeader("Default Model", w);
+            const modelOptions = configuredProviders.flatMap((p) => {
+                const models = [];
+                if (p.envVar === "ANTHROPIC_API_KEY") {
+                    models.push({
+                        label: `claude-sonnet-4-6 ${dim("(fast, capable)")}`,
+                        value: "anthropic/claude-sonnet-4-6",
+                        recommended: true,
+                    }, {
+                        label: `claude-opus-4-6 ${dim("(most capable)")}`,
+                        value: "anthropic/claude-opus-4-6",
+                    });
+                }
+                else if (p.envVar === "OPENAI_API_KEY") {
+                    models.push({ label: `gpt-4o ${dim("(fast, versatile)")}`, value: "openai/gpt-4o" }, { label: `o3 ${dim("(reasoning)")}`, value: "openai/o3" });
+                }
+                else if (p.envVar === "GEMINI_API_KEY") {
+                    models.push({
+                        label: `gemini-2.5-flash ${dim("(fast, cheap)")}`,
+                        value: "google/gemini-2.5-flash",
+                    }, { label: `gemini-2.5-pro ${dim("(capable)")}`, value: "google/gemini-2.5-pro" });
+                }
+                return models;
+            });
+            if (modelOptions.length > 0) {
+                process.stderr.write(` ${bold("Pick a default model for coding threads:")}\n\n`);
+                for (let i = 0; i < modelOptions.length; i++) {
+                    const opt = modelOptions[i];
+                    const rec = opt.recommended ? ` ${coral("(recommended)")}` : "";
+                    process.stderr.write(` ${cyan(String(i + 1))} ${opt.label}${rec}\n`);
+                }
+                process.stderr.write("\n");
+                const prompt = createPrompt();
+                const modelChoice = await prompt.ask(` ${coral(symbols.arrow)} Choice [1]: `);
+                prompt.close();
+                const idx = parseInt(modelChoice, 10);
+                if (idx >= 1 && idx <= modelOptions.length) {
+                    chosenModel = modelOptions[idx - 1].value;
+                }
+                else {
+                    chosenModel = modelOptions[0].value;
+                }
+                process.stderr.write(` ${green(symbols.check)} Default model: ${bold(chosenModel)}\n`);
             }
-            process.stderr.write(` ${green(symbols.check)} Default model: ${bold(chosenModel)}\n`);
         }
     }
     // ── Step 4: Save config ──────────────────────────────────────────────
@@ -491,22 +726,29 @@
     sectionHeader("Ready", w);
     process.stderr.write(` ${green(symbols.check)} Agent: ${bold(chosenAgent)}\n`);
     process.stderr.write(` ${green(symbols.check)} Model: ${bold(chosenModel)}\n`);
-
-
-
-
-
-
+    if (usesOllama) {
+        process.stderr.write(` ${green(symbols.check)} Backend: ${bold("Ollama")} ${dim("(local)")}\n`);
+    }
+    else if (usesOpenRouter) {
+        process.stderr.write(` ${green(symbols.check)} Backend: ${bold("OpenRouter")}\n`);
+    }
+    else {
+        const keyNames = [...apiKeys.keys()].map((k) => {
+            const p = PROVIDERS.find((pr) => pr.envVar === k);
+            return p?.name ?? k;
+        });
+        if (keyNames.length > 0) {
+            process.stderr.write(` ${green(symbols.check)} Keys: ${bold(keyNames.join(", "))}\n`);
+        }
     }
     process.stderr.write(` ${green(symbols.check)} Config: ${dim("~/.swarm/config.yaml")}\n`);
-    process.stderr.write(
-    process.stderr.write(` ${dim("Edit")} ${cyan("~/.swarm/config.yaml")} ${dim("to change these settings anytime.")}\n`);
+    process.stderr.write("\n");
     // If still missing critical deps, show warnings
     if (!gitVer) {
-        process.stderr.write(
+        process.stderr.write(` ${red(symbols.cross)} ${bold("git is required.")} Install it before using swarm.\n`);
     }
-    if (apiKeys.size === 0) {
-        process.stderr.write(
+    if (apiKeys.size === 0 && !usesOllama && !usesOpenRouter) {
+        process.stderr.write(` ${yellow(symbols.warn)} ${bold("No API keys configured.")}\n`);
         process.stderr.write(` ${dim("Add keys to")} ${cyan("~/.swarm/credentials")} ${dim("or")} ${cyan(".env")}${dim(":")}\n`);
         process.stderr.write(` ${dim("ANTHROPIC_API_KEY=sk-ant-...")}\n`);
         process.stderr.write(` ${dim("OPENAI_API_KEY=sk-...")}\n`);
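
Per its own comment, getVersion() reads the version from package.json instead of hardcoding it, resolving the manifest relative to the compiled module so it works wherever the package is installed. A standalone sketch of the same resolution (the paths in the comments are illustrative):

    import * as fs from "node:fs";
    import * as path from "node:path";
    import { fileURLToPath } from "node:url";

    // import.meta.url resembles file:///.../node_modules/swarm-code/dist/ui/onboarding.js
    const here = path.dirname(fileURLToPath(import.meta.url)); // .../dist/ui
    const pkgPath = path.join(here, "..", "..", "package.json"); // climbs dist/ui to the package root
    console.log(JSON.parse(fs.readFileSync(pkgPath, "utf-8")).version);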
package/package.json
CHANGED