swarm-code 0.1.7 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,7 +21,7 @@ import * as fs from "node:fs";
  import * as path from "node:path";
  import * as readline from "node:readline";
  // Dynamic imports — ensures env.js has set process.env BEFORE pi-ai loads
- const { getModels, getProviders } = await import("@mariozechner/pi-ai");
+ await import("@mariozechner/pi-ai");
  const { PythonRepl } = await import("./core/repl.js");
  const { runRlmLoop } = await import("./core/rlm.js");
  const { loadConfig } = await import("./config.js");
@@ -34,6 +34,7 @@ await import("./agents/aider.js");
  import { randomBytes } from "node:crypto";
  import { EpisodicMemory } from "./memory/episodic.js";
  import { buildSwarmSystemPrompt } from "./prompts/orchestrator.js";
+ import { resolveModel } from "./routing/model-resolver.js";
  import { classifyTaskComplexity, describeAvailableAgents, FailureTracker, routeTask } from "./routing/model-router.js";
  import { ThreadManager } from "./threads/manager.js";
  import { ThreadDashboard } from "./ui/dashboard.js";
@@ -183,81 +184,6 @@ function scanDirectory(dir, maxFiles = 200, maxTotalSize = 2 * 1024 * 1024) {
  }
  return parts.join("\n");
  }
- // ── Model resolution (mirrored from swarm.ts) ───────────────────────────────
- function resolveModel(modelId) {
- const providerKeys = {
- anthropic: "ANTHROPIC_API_KEY",
- openai: "OPENAI_API_KEY",
- google: "GEMINI_API_KEY",
- };
- const defaultModels = {
- anthropic: "claude-sonnet-4-6",
- openai: "gpt-4o",
- google: "gemini-2.5-flash",
- };
- const knownProviders = new Set(Object.keys(providerKeys));
- let model;
- let resolvedProvider = "";
- for (const provider of getProviders()) {
- if (!knownProviders.has(provider))
- continue;
- const key = providerKeys[provider];
- if (!process.env[key])
- continue;
- for (const m of getModels(provider)) {
- if (m.id === modelId) {
- model = m;
- resolvedProvider = provider;
- break;
- }
- }
- if (model)
- break;
- }
- if (!model) {
- for (const provider of getProviders()) {
- if (knownProviders.has(provider))
- continue;
- for (const m of getModels(provider)) {
- if (m.id === modelId) {
- model = m;
- resolvedProvider = provider;
- break;
- }
- }
- if (model)
- break;
- }
- }
- if (!model) {
- for (const [prov, envKey] of Object.entries(providerKeys)) {
- if (!process.env[envKey])
- continue;
- const fallbackId = defaultModels[prov];
- if (!fallbackId)
- continue;
- for (const p of getProviders()) {
- if (p !== prov)
- continue;
- for (const m of getModels(p)) {
- if (m.id === fallbackId) {
- model = m;
- resolvedProvider = prov;
- logWarn(`Using ${fallbackId} (${prov}) — model "${modelId}" not found`);
- break;
- }
- }
- if (model)
- break;
- }
- if (model)
- break;
- }
- }
- if (!model)
- return null;
- return { model, provider: resolvedProvider };
- }
  // ── Formatting helpers ──────────────────────────────────────────────────────
  function formatDuration(ms) {
  if (ms < 1000)
@@ -663,10 +589,18 @@ export async function runInteractiveSwarm(rawArgs) {
  config.max_session_budget_usd = args.maxBudget;
  if (args.autoRoute)
  config.auto_model_selection = true;
- // Resolve orchestrator model
- const resolved = resolveModel(args.orchestratorModel);
+ // Resolve orchestrator model — prefer CLI arg, then config's default_model
+ const orchestratorModelId = args.orchestratorModel !== "claude-sonnet-4-6"
+ ? args.orchestratorModel
+ : config.default_model || args.orchestratorModel;
+ // For standard pi-ai models, strip provider prefix (e.g. "anthropic/claude-sonnet-4-6" → "claude-sonnet-4-6")
+ // Ollama/OpenRouter prefixes are kept as-is (handled by resolveModel)
+ const modelLookupId = orchestratorModelId.startsWith("ollama/") || orchestratorModelId.startsWith("openrouter/")
+ ? orchestratorModelId
+ : orchestratorModelId.replace(/^(anthropic|openai|google)\//, "");
+ const resolved = resolveModel(modelLookupId, logWarn);
  if (!resolved) {
- logError(`Could not find model "${args.orchestratorModel}"`, "Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or GEMINI_API_KEY in your .env file");
+ logError(`Could not find model "${orchestratorModelId}"`, "Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or GEMINI_API_KEY in your .env file, or use Ollama/OpenRouter");
  process.exit(1);
  }
  // Initialize episodic memory and failure tracker
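The new resolution flow above applies two steps before calling resolveModel: pick the model ID (an explicitly passed CLI model wins over config.default_model), then strip a standard provider prefix while leaving Ollama/OpenRouter prefixes untouched. A minimal TypeScript sketch of that rule; the helper names and sample IDs are illustrative only, not part of the package:

// Sketch only; mirrors the precedence and prefix handling shown in the diff above.
function pickOrchestratorModelId(cliModel: string, configDefault?: string): string {
  // A non-default CLI value wins; otherwise fall back to config.default_model, then the CLI default.
  return cliModel !== "claude-sonnet-4-6" ? cliModel : configDefault || cliModel;
}

function toLookupId(id: string): string {
  // "ollama/" and "openrouter/" prefixes are passed through; standard provider prefixes are stripped.
  return id.startsWith("ollama/") || id.startsWith("openrouter/")
    ? id
    : id.replace(/^(anthropic|openai|google)\//, "");
}

// Hypothetical examples:
// toLookupId("anthropic/claude-sonnet-4-6") -> "claude-sonnet-4-6"
// toLookupId("ollama/deepseek-coder-v2")    -> "ollama/deepseek-coder-v2"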
package/dist/routing/model-resolver.d.ts ADDED
@@ -0,0 +1,25 @@
+ /**
+ * Model resolver — resolves model IDs to pi-ai Model objects.
+ *
+ * Handles three cases:
+ * 1. Standard pi-ai models (anthropic, openai, google) — looked up from pi-ai registry
+ * 2. Ollama models (ollama/*) — creates synthetic Model<"openai-completions"> pointing at localhost:11434
+ * 3. OpenRouter models (openrouter/*) — creates synthetic Model<"openai-completions"> pointing at openrouter.ai
+ *
+ * This preserves the RLM loop for all backends — the orchestrator always uses pi-ai's completeSimple().
+ */
+ import type { Api, Model } from "@mariozechner/pi-ai";
+ export interface ResolvedModel {
+ model: Model<Api>;
+ provider: string;
+ }
+ /**
+ * Resolve a model ID to a pi-ai Model object.
+ *
+ * Supports:
+ * - "ollama/deepseek-coder-v2" → Ollama local model
+ * - "openrouter/auto" → OpenRouter cloud model
+ * - "claude-sonnet-4-6" → standard pi-ai model lookup
+ * - Falls back to any available provider's default model
+ */
+ export declare function resolveModel(modelId: string, warnFn?: (msg: string) => void): ResolvedModel | null;
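A rough usage sketch of the declared API, assuming the compiled module is imported from dist and the relevant API key is already in process.env; the model IDs are the examples from the doc comment above:

import { resolveModel } from "./routing/model-resolver.js";

// Local Ollama model: no API key needed, a synthetic openai-completions Model is returned.
const local = resolveModel("ollama/deepseek-coder-v2");

// Standard pi-ai lookup: requires e.g. ANTHROPIC_API_KEY; if the exact ID is not found,
// the resolver falls back to an available provider's default model and reports via warnFn.
const cloud = resolveModel("claude-sonnet-4-6", (msg) => console.warn(msg));

if (!cloud) {
  // null means no configured provider could supply the model.
  throw new Error("no orchestrator model available");
}
console.log(cloud.provider, cloud.model.id);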
package/dist/routing/model-resolver.js ADDED
@@ -0,0 +1,177 @@
+ /**
+ * Model resolver — resolves model IDs to pi-ai Model objects.
+ *
+ * Handles three cases:
+ * 1. Standard pi-ai models (anthropic, openai, google) — looked up from pi-ai registry
+ * 2. Ollama models (ollama/*) — creates synthetic Model<"openai-completions"> pointing at localhost:11434
+ * 3. OpenRouter models (openrouter/*) — creates synthetic Model<"openai-completions"> pointing at openrouter.ai
+ *
+ * This preserves the RLM loop for all backends — the orchestrator always uses pi-ai's completeSimple().
+ */
+ const { getModels, getProviders } = await import("@mariozechner/pi-ai");
+ const PROVIDER_KEYS = {
+ anthropic: "ANTHROPIC_API_KEY",
+ openai: "OPENAI_API_KEY",
+ google: "GEMINI_API_KEY",
+ };
+ const DEFAULT_MODELS = {
+ anthropic: "claude-sonnet-4-6",
+ openai: "gpt-4o",
+ google: "gemini-2.5-flash",
+ };
+ /**
+ * Create a synthetic pi-ai Model for Ollama (OpenAI-compatible API at localhost:11434).
+ */
+ function createOllamaModel(modelId) {
+ const shortId = modelId.replace("ollama/", "");
+ return {
+ id: shortId,
+ name: shortId,
+ api: "openai-completions",
+ provider: "ollama",
+ baseUrl: "http://localhost:11434/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+ contextWindow: 32768,
+ maxTokens: 4096,
+ compat: {
+ supportsStore: false,
+ supportsDeveloperRole: false,
+ supportsReasoningEffort: false,
+ supportsUsageInStreaming: false,
+ maxTokensField: "max_tokens",
+ requiresToolResultName: false,
+ requiresAssistantAfterToolResult: false,
+ requiresThinkingAsText: false,
+ requiresMistralToolIds: false,
+ thinkingFormat: "openai",
+ supportsStrictMode: false,
+ },
+ };
+ }
+ /**
+ * Create a synthetic pi-ai Model for OpenRouter (OpenAI-compatible API).
+ */
+ function createOpenRouterModel(modelId) {
+ const shortId = modelId.replace("openrouter/", "");
+ const apiKey = process.env.OPENROUTER_API_KEY || "";
+ return {
+ id: shortId,
+ name: shortId,
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+ contextWindow: 128000,
+ maxTokens: 4096,
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "HTTP-Referer": "https://github.com/kingjulio8238/swarm-code",
+ "X-Title": "swarm-code",
+ },
+ compat: {
+ supportsStore: false,
+ supportsDeveloperRole: false,
+ supportsReasoningEffort: false,
+ supportsUsageInStreaming: true,
+ maxTokensField: "max_tokens",
+ requiresToolResultName: false,
+ requiresAssistantAfterToolResult: false,
+ requiresThinkingAsText: false,
+ requiresMistralToolIds: false,
+ thinkingFormat: "openai",
+ supportsStrictMode: false,
+ openRouterRouting: undefined,
+ },
+ };
+ }
+ /**
+ * Resolve a model ID to a pi-ai Model object.
+ *
+ * Supports:
+ * - "ollama/deepseek-coder-v2" → Ollama local model
+ * - "openrouter/auto" → OpenRouter cloud model
+ * - "claude-sonnet-4-6" → standard pi-ai model lookup
+ * - Falls back to any available provider's default model
+ */
+ export function resolveModel(modelId, warnFn) {
+ // Ollama models — create synthetic model
+ if (modelId.startsWith("ollama/")) {
+ return { model: createOllamaModel(modelId), provider: "ollama" };
+ }
+ // OpenRouter models — create synthetic model
+ if (modelId.startsWith("openrouter/")) {
+ return { model: createOpenRouterModel(modelId), provider: "openrouter" };
+ }
+ // Standard pi-ai model lookup
+ const knownProviders = new Set(Object.keys(PROVIDER_KEYS));
+ let model;
+ let resolvedProvider = "";
+ // Try known providers with API keys first
+ for (const provider of getProviders()) {
+ if (!knownProviders.has(provider))
+ continue;
+ const key = PROVIDER_KEYS[provider];
+ if (!process.env[key])
+ continue;
+ for (const m of getModels(provider)) {
+ if (m.id === modelId) {
+ model = m;
+ resolvedProvider = provider;
+ break;
+ }
+ }
+ if (model)
+ break;
+ }
+ // Try unknown providers
+ if (!model) {
+ for (const provider of getProviders()) {
+ if (knownProviders.has(provider))
+ continue;
+ for (const m of getModels(provider)) {
+ if (m.id === modelId) {
+ model = m;
+ resolvedProvider = provider;
+ break;
+ }
+ }
+ if (model)
+ break;
+ }
+ }
+ // Fallback: try default model for any provider that has a key
+ if (!model) {
+ for (const [prov, envKey] of Object.entries(PROVIDER_KEYS)) {
+ if (!process.env[envKey])
+ continue;
+ const fallbackId = DEFAULT_MODELS[prov];
+ if (!fallbackId)
+ continue;
+ for (const p of getProviders()) {
+ if (p !== prov)
+ continue;
+ for (const m of getModels(p)) {
+ if (m.id === fallbackId) {
+ model = m;
+ resolvedProvider = prov;
+ if (warnFn)
+ warnFn(`Using ${fallbackId} (${prov}) — model "${modelId}" not found`);
+ break;
+ }
+ }
+ if (model)
+ break;
+ }
+ if (model)
+ break;
+ }
+ }
+ if (!model)
+ return null;
+ return { model, provider: resolvedProvider };
+ }
+ //# sourceMappingURL=model-resolver.js.map
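For the OpenRouter path specifically, the synthetic model's Authorization header is built from OPENROUTER_API_KEY at resolution time, so the key must be present before resolveModel is called. An illustrative snippet; the key value is a placeholder, "openrouter/auto" is the example slug from the doc comment, and in the package the key would normally arrive via env.js / .env:

// Illustrative only; relies on createOpenRouterModel reading process.env.OPENROUTER_API_KEY.
process.env.OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY || "sk-or-placeholder";
const { resolveModel } = await import("./routing/model-resolver.js");

const resolved = resolveModel("openrouter/auto");
if (resolved) {
  // provider is "openrouter"; the synthetic model targets https://openrouter.ai/api/v1.
  console.log(resolved.provider, resolved.model.id);
}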
package/dist/swarm.js CHANGED
@@ -14,7 +14,7 @@ import "./env.js";
  import * as fs from "node:fs";
  import * as path from "node:path";
  // Dynamic imports — ensures env.js has set process.env BEFORE pi-ai loads
- const { getModels, getProviders } = await import("@mariozechner/pi-ai");
+ await import("@mariozechner/pi-ai");
  const { PythonRepl } = await import("./core/repl.js");
  const { runRlmLoop } = await import("./core/rlm.js");
  const { loadConfig } = await import("./config.js");
@@ -25,9 +25,11 @@ await import("./agents/claude-code.js");
  await import("./agents/codex.js");
  await import("./agents/aider.js");
  import { randomBytes } from "node:crypto";
+ // Api/Model types used via resolveModel from model-resolver
  import { loadHooks, runHooks } from "./hooks/runner.js";
  import { EpisodicMemory } from "./memory/episodic.js";
  import { buildSwarmSystemPrompt } from "./prompts/orchestrator.js";
+ import { resolveModel } from "./routing/model-resolver.js";
  import { classifyTaskComplexity, describeAvailableAgents, FailureTracker, routeTask } from "./routing/model-router.js";
  import { ThreadManager } from "./threads/manager.js";
  import { renderBanner } from "./ui/banner.js";
@@ -218,81 +220,6 @@ function scanDirectory(dir, maxFiles = 200, maxTotalSize = 2 * 1024 * 1024) {
  }
  return parts.join("\n");
  }
- // ── Model resolution ────────────────────────────────────────────────────────
- function resolveModel(modelId) {
- const providerKeys = {
- anthropic: "ANTHROPIC_API_KEY",
- openai: "OPENAI_API_KEY",
- google: "GEMINI_API_KEY",
- };
- const defaultModels = {
- anthropic: "claude-sonnet-4-6",
- openai: "gpt-4o",
- google: "gemini-2.5-flash",
- };
- const knownProviders = new Set(Object.keys(providerKeys));
- let model;
- let resolvedProvider = "";
- for (const provider of getProviders()) {
- if (!knownProviders.has(provider))
- continue;
- const key = providerKeys[provider];
- if (!process.env[key])
- continue;
- for (const m of getModels(provider)) {
- if (m.id === modelId) {
- model = m;
- resolvedProvider = provider;
- break;
- }
- }
- if (model)
- break;
- }
- if (!model) {
- for (const provider of getProviders()) {
- if (knownProviders.has(provider))
- continue;
- for (const m of getModels(provider)) {
- if (m.id === modelId) {
- model = m;
- resolvedProvider = provider;
- break;
- }
- }
- if (model)
- break;
- }
- }
- if (!model) {
- for (const [prov, envKey] of Object.entries(providerKeys)) {
- if (!process.env[envKey])
- continue;
- const fallbackId = defaultModels[prov];
- if (!fallbackId)
- continue;
- for (const p of getProviders()) {
- if (p !== prov)
- continue;
- for (const m of getModels(p)) {
- if (m.id === fallbackId) {
- model = m;
- resolvedProvider = prov;
- logWarn(`Using ${fallbackId} (${prov}) — model "${modelId}" not found`);
- break;
- }
- }
- if (model)
- break;
- }
- if (model)
- break;
- }
- }
- if (!model)
- return null;
- return { model, provider: resolvedProvider };
- }
  // ── Main ────────────────────────────────────────────────────────────────────
  export async function runSwarmMode(rawArgs) {
  const args = parseSwarmArgs(rawArgs);
@@ -319,10 +246,16 @@ export async function runSwarmMode(rawArgs) {
  config.max_session_budget_usd = args.maxBudget;
  if (args.autoRoute)
  config.auto_model_selection = true;
- // Resolve orchestrator model
- const resolved = resolveModel(args.orchestratorModel);
+ // Resolve orchestrator model — prefer CLI arg, then config's default_model
+ const orchestratorModelId = args.orchestratorModel !== "claude-sonnet-4-6"
+ ? args.orchestratorModel
+ : config.default_model || args.orchestratorModel;
+ const modelLookupId = orchestratorModelId.startsWith("ollama/") || orchestratorModelId.startsWith("openrouter/")
+ ? orchestratorModelId
+ : orchestratorModelId.replace(/^(anthropic|openai|google)\//, "");
+ const resolved = resolveModel(modelLookupId, logWarn);
  if (!resolved) {
- logError(`Could not find model "${args.orchestratorModel}"`, "Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or GEMINI_API_KEY in your .env file");
+ logError(`Could not find model "${orchestratorModelId}"`, "Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or GEMINI_API_KEY in your .env file, or use Ollama/OpenRouter");
  process.exit(1);
  }
  // Initialize episodic memory if enabled
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "swarm-code",
- "version": "0.1.7",
+ "version": "0.1.8",
  "description": "Open-source swarm-native coding agent orchestrator — spawns parallel coding agents in isolated git worktrees, built on RLM (arXiv:2512.24601)",
  "type": "module",
  "bin": {