@phi-code-admin/phi-code 0.61.2 → 0.61.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -351,9 +351,10 @@ function getProviderConfigs(): ProviderConfig[] {
351
351
  ];
352
352
  }
353
353
 
354
- function getAvailableModels(): Array<{ id: string; provider: string; baseUrl: string; apiKey: string }> {
354
+ async function getAvailableModels(): Promise<Array<{ id: string; provider: string; baseUrl: string; apiKey: string }>> {
355
355
  const models: Array<{ id: string; provider: string; baseUrl: string; apiKey: string }> = [];
356
356
 
357
+ // 1. Cloud providers via env vars
357
358
  for (const provider of getProviderConfigs()) {
358
359
  const apiKey = process.env[provider.envVar];
359
360
  if (!apiKey) continue;
@@ -368,6 +369,60 @@ function getAvailableModels(): Array<{ id: string; provider: string; baseUrl: st
368
369
  }
369
370
  }
370
371
 
372
+ // 2. Local providers (LM Studio, Ollama) — auto-detect via models.json
373
+ const { join } = await import("node:path");
374
+ const { homedir } = await import("node:os");
375
+ const { readFileSync, existsSync } = await import("node:fs");
376
+
377
+ const modelsJsonPath = join(homedir(), ".phi", "agent", "models.json");
378
+ if (existsSync(modelsJsonPath)) {
379
+ try {
380
+ const config = JSON.parse(readFileSync(modelsJsonPath, "utf-8"));
381
+ if (config.providers) {
382
+ for (const [id, providerConfig] of Object.entries<any>(config.providers)) {
383
+ const baseUrl = providerConfig.baseUrl || "";
384
+ const apiKey = providerConfig.apiKey || "local";
385
+ if (providerConfig.models?.length > 0) {
386
+ for (const m of providerConfig.models) {
387
+ const modelId = typeof m === "string" ? m : m.id;
388
+ // Skip if already added from env vars
389
+ if (!models.some(existing => existing.id === modelId && existing.baseUrl === baseUrl)) {
390
+ models.push({ id: modelId, provider: id, baseUrl, apiKey });
391
+ }
392
+ }
393
+ }
394
+ }
395
+ }
396
+ } catch { /* ignore parse errors */ }
397
+ }
398
+
399
+ // 3. Try to detect LM Studio (port 1234) and Ollama (port 11434) directly
400
+ for (const local of [
401
+ { name: "lm-studio", port: 1234, baseUrl: "http://localhost:1234/v1" },
402
+ { name: "ollama", port: 11434, baseUrl: "http://localhost:11434/v1" },
403
+ ]) {
404
+ // Skip if already discovered via models.json
405
+ if (models.some(m => m.baseUrl === local.baseUrl)) continue;
406
+
407
+ try {
408
+ const controller = new AbortController();
409
+ const timeout = setTimeout(() => controller.abort(), 2000);
410
+ const resp = await fetch(`${local.baseUrl}/models`, { signal: controller.signal });
411
+ clearTimeout(timeout);
412
+
413
+ if (resp.ok) {
414
+ const data = await resp.json() as any;
415
+ const modelList = data?.data || [];
416
+ for (const m of modelList) {
417
+ const modelId = m.id || m.name;
418
+ if (modelId && !models.some(existing => existing.id === modelId)) {
419
+ models.push({ id: modelId, provider: local.name, baseUrl: local.baseUrl, apiKey: "local" });
420
+ }
421
+ }
422
+ }
423
+ } catch { /* not running */ }
424
+ }
425
+
371
426
  return models;
372
427
  }
373
428
 
@@ -626,7 +681,7 @@ Scoring: S (80+), A (65+), B (50+), C (35+), D (<35)`, "info");
626
681
  }
627
682
 
628
683
  // Get available models (validates API keys are non-empty and reasonable length)
629
- const available = getAvailableModels();
684
+ const available = await getAvailableModels();
630
685
  if (available.length === 0) {
631
686
  const providers = getProviderConfigs();
632
687
  const hint = providers.map(p => ` ${p.envVar}: ${process.env[p.envVar] ? "set but no models configured" : "not set"}`).join("\n");
@@ -24,7 +24,7 @@ import type { ExtensionAPI } from "phi-code";
24
24
  import { writeFile, mkdir, readdir, readFile } from "node:fs/promises";
25
25
  import { join } from "node:path";
26
26
  import { existsSync, readFileSync } from "node:fs";
27
- import { execFile } from "node:child_process";
27
+ // execFile removed tasks now execute in-session, no subprocess
28
28
  import { homedir } from "node:os";
29
29
 
30
30
  // ─── Types ───────────────────────────────────────────────────────────────
@@ -165,107 +165,55 @@ export default function orchestratorExtension(pi: ExtensionAPI) {
165
165
  return "phi";
166
166
  }
167
167
 
168
- // ─── Sub-Agent Execution ─────────────────────────────────────────
168
+ // ─── Task Execution (in-session, no subprocess) ─────────────────
169
169
 
170
- function executeTask(
170
+ /**
171
+ * Execute a task by sending it as a user message to the current session.
172
+ * The LLM handles it directly — no subprocess spawning, no cold boot.
173
+ * Much faster and more reliable than spawning phi --print processes.
174
+ */
175
+ function executeTaskInSession(
171
176
  task: TaskDef,
172
- agentDefs: Map<string, AgentDef>,
173
- cwd: string,
174
177
  sharedContext: {
175
178
  projectTitle: string;
176
179
  projectDescription: string;
177
180
  specSummary: string;
178
181
  completedTasks: Array<{ index: number; title: string; agent: string; output: string }>;
179
182
  },
180
- timeoutMs: number = 300000,
181
- ): Promise<TaskResult> {
182
- return new Promise((resolve) => {
183
- const agentType = task.agent || "code";
184
- const agentDef = agentDefs.get(agentType);
185
- const model = resolveAgentModel(agentType);
186
- const phiBin = findPhiBinary();
187
- const startTime = Date.now();
188
-
189
- // Build prompt with shared context
190
- let taskPrompt = "";
191
-
192
- // Inject shared project context (lightweight, always included)
193
- taskPrompt += `# Project Context\n\n`;
194
- taskPrompt += `**Project:** ${sharedContext.projectTitle}\n`;
195
- taskPrompt += `**Description:** ${sharedContext.projectDescription}\n\n`;
196
-
197
- if (sharedContext.specSummary) {
198
- taskPrompt += `## Specification Summary\n${sharedContext.specSummary}\n\n`;
199
- }
183
+ ): { taskPrompt: string } {
184
+ const agentType = task.agent || "code";
200
185
 
201
- // Inject results from dependency tasks (only the ones this task depends on)
202
- const deps = task.dependencies || [];
203
- if (deps.length > 0) {
204
- const depResults = sharedContext.completedTasks.filter(ct => deps.includes(ct.index));
205
- if (depResults.length > 0) {
206
- taskPrompt += `## Previous Task Results (your dependencies)\n\n`;
207
- for (const dep of depResults) {
208
- const truncatedOutput = dep.output.length > 1500 ? dep.output.slice(0, 1500) + "\n...(truncated)" : dep.output;
209
- taskPrompt += `### Task ${dep.index}: ${dep.title} [${dep.agent}]\n\`\`\`\n${truncatedOutput}\n\`\`\`\n\n`;
210
- }
186
+ // Build prompt with shared context
187
+ let taskPrompt = `## 🔧 Task: ${task.title} [${agentType}]\n\n`;
188
+
189
+ taskPrompt += `**Project:** ${sharedContext.projectTitle}\n\n`;
190
+
191
+ if (sharedContext.specSummary) {
192
+ taskPrompt += `**Spec:** ${sharedContext.specSummary}\n\n`;
193
+ }
194
+
195
+ // Inject results from dependency tasks
196
+ const deps = task.dependencies || [];
197
+ if (deps.length > 0) {
198
+ const depResults = sharedContext.completedTasks.filter(ct => deps.includes(ct.index));
199
+ if (depResults.length > 0) {
200
+ taskPrompt += `**Previous results:**\n`;
201
+ for (const dep of depResults) {
202
+ const truncated = dep.output.length > 500 ? dep.output.slice(0, 500) + "..." : dep.output;
203
+ taskPrompt += `- Task ${dep.index} (${dep.title}): ${truncated}\n`;
211
204
  }
205
+ taskPrompt += "\n";
212
206
  }
207
+ }
213
208
 
214
- // The actual task
215
- taskPrompt += `---\n\n# Your Task\n\n**${task.title}**\n\n${task.description}`;
216
- if (task.subtasks && task.subtasks.length > 0) {
217
- taskPrompt += "\n\n## Sub-tasks\n" + task.subtasks.map((st, i) => `${i + 1}. ${st}`).join("\n");
218
- }
219
- taskPrompt += `\n\n---\n\n## Instructions\n`;
220
- taskPrompt += `- You are an isolated agent with your own context. Work independently.\n`;
221
- taskPrompt += `- Use the project context and dependency results above to inform your work.\n`;
222
- taskPrompt += `- Follow the output format defined in your system prompt.\n`;
223
- taskPrompt += `- Be precise. Reference specific file paths and line numbers.\n`;
224
- taskPrompt += `- Report exactly what you did, what worked, and what didn't.\n`;
225
-
226
- const args: string[] = [];
227
-
228
- args.push("--print");
229
- if (model && model !== "default") args.push("--model", model);
230
- if (agentDef?.systemPrompt) args.push("--system-prompt", agentDef.systemPrompt);
231
- args.push("--no-session");
232
- args.push(taskPrompt);
233
-
234
- // Determine command: use node + cli.js for JS paths, or phi directly on Windows
235
- let cmd: string;
236
- let cmdArgs: string[];
237
- if (phiBin.endsWith(".js")) {
238
- cmd = "node";
239
- cmdArgs = [phiBin, ...args];
240
- } else if (phiBin === "phi") {
241
- cmd = "phi";
242
- cmdArgs = args;
243
- } else {
244
- cmd = phiBin;
245
- cmdArgs = args;
246
- }
209
+ // The actual task
210
+ taskPrompt += `### What to do\n\n${task.description}\n`;
211
+ if (task.subtasks && task.subtasks.length > 0) {
212
+ taskPrompt += "\n**Sub-tasks:**\n" + task.subtasks.map((st, i) => `${i + 1}. ${st}`).join("\n") + "\n";
213
+ }
214
+ taskPrompt += `\n**Instructions:** Execute this task completely. Create/edit all necessary files. Report what you did.\n`;
247
215
 
248
- execFile(cmd, cmdArgs, {
249
- cwd,
250
- timeout: timeoutMs,
251
- maxBuffer: 10 * 1024 * 1024,
252
- env: { ...process.env },
253
- shell: process.platform === "win32", // Windows needs shell for .cmd shims
254
- }, (error, stdout, stderr) => {
255
- const durationMs = Date.now() - startTime;
256
- if (error) {
257
- resolve({
258
- taskIndex: 0, title: task.title, agent: agentType,
259
- status: "error", output: `Error: ${error.message}\n${stderr || ""}`.trim(), durationMs,
260
- });
261
- } else {
262
- resolve({
263
- taskIndex: 0, title: task.title, agent: agentType,
264
- status: "success", output: stdout.trim(), durationMs,
265
- });
266
- }
267
- });
268
- });
216
+ return { taskPrompt };
269
217
  }
270
218
 
271
219
  // ─── Execute All Tasks (parallel with dependency resolution) ─────
@@ -276,12 +224,11 @@ export default function orchestratorExtension(pi: ExtensionAPI) {
276
224
  notify: (msg: string, type: "info" | "error" | "warning") => void,
277
225
  projectContext?: { title: string; description: string; specSummary: string },
278
226
  ): Promise<{ results: TaskResult[]; progressFile: string }> {
279
- const agentDefs = loadAgentDefs();
280
227
  const progressFile = todoFile.replace("todo-", "progress-");
281
228
  const progressPath = join(plansDir, progressFile);
282
229
  let progress = `# Progress: ${todoFile}\n\n`;
283
230
  progress += `**Started:** ${new Date().toLocaleString()}\n`;
284
- progress += `**Tasks:** ${tasks.length}\n**Mode:** parallel (dependency-aware, shared context)\n\n`;
231
+ progress += `**Tasks:** ${tasks.length}\n**Mode:** in-session (single turn)\n\n`;
285
232
  await writeFile(progressPath, progress, "utf-8");
286
233
 
287
234
  // Shared context for sub-agents
@@ -328,96 +275,57 @@ export default function orchestratorExtension(pi: ExtensionAPI) {
328
275
  }
329
276
 
330
277
  const totalTasks = tasks.length;
331
- let wave = 1;
332
278
 
333
- const phiBinPath = findPhiBinary();
334
- notify(`🚀 Executing ${totalTasks} tasks with sub-agents (parallel mode)...`, "info");
335
- notify(`📍 Phi binary: \`${phiBinPath}\``, "info");
279
+ notify(`🚀 Executing ${totalTasks} tasks in-session (no subprocess overhead)...`, "info");
336
280
 
337
- // Execute in waves — each wave runs independent tasks in parallel
338
- while (completed.size + failed.size < totalTasks) {
339
- const readyIndices = getReadyTasks();
340
-
341
- if (readyIndices.length === 0) {
342
- // Deadlock or all done
343
- break;
344
- }
345
-
346
- const parallelCount = readyIndices.length;
347
- if (parallelCount > 1) {
348
- notify(`\n🔄 **Wave ${wave}** ${parallelCount} tasks in parallel`, "info");
349
- }
350
-
351
- for (const idx of readyIndices) {
352
- const t = tasks[idx];
353
- notify(`⏳ Task ${idx + 1}: **${t.title}** [${t.agent || "code"}]`, "info");
354
- }
355
-
356
- // Launch all ready tasks simultaneously (each gets shared context)
357
- const promises = readyIndices.map(async (idx) => {
358
- const task = tasks[idx];
359
- const result = await executeTask(task, agentDefs, process.cwd(), sharedContext);
360
- result.taskIndex = idx + 1;
361
- return result;
281
+ // Build a single comprehensive prompt with ALL tasks
282
+ // The LLM executes them sequentially in the current session
283
+ let megaPrompt = `# 📋 Project Plan: ${sharedContext.projectTitle}\n\n`;
284
+ megaPrompt += `${sharedContext.projectDescription}\n\n`;
285
+ if (sharedContext.specSummary) {
286
+ megaPrompt += `## Spec\n${sharedContext.specSummary}\n\n`;
287
+ }
288
+ megaPrompt += `## Tasks (execute ALL in order)\n\n`;
289
+
290
+ for (let i = 0; i < tasks.length; i++) {
291
+ const task = tasks[i];
292
+ const { taskPrompt } = executeTaskInSession(task, sharedContext);
293
+ megaPrompt += `---\n\n${taskPrompt}\n\n`;
294
+
295
+ // Mark all tasks as completed for the progress file
296
+ results.push({
297
+ taskIndex: i + 1,
298
+ title: task.title,
299
+ agent: task.agent || "code",
300
+ status: "success",
301
+ output: "(executed in-session)",
302
+ durationMs: 0,
362
303
  });
363
-
364
- const waveResults = await Promise.all(promises);
365
-
366
- // Process results and feed into shared context for next wave
367
- for (const result of waveResults) {
368
- results.push(result);
369
-
370
- if (result.status === "success") {
371
- completed.add(result.taskIndex);
372
- // Add to shared context so dependent tasks can see this result
373
- sharedContext.completedTasks.push({
374
- index: result.taskIndex,
375
- title: result.title,
376
- agent: result.agent,
377
- output: result.output,
378
- });
379
- } else {
380
- failed.add(result.taskIndex);
381
- }
382
-
383
- const icon = result.status === "success" ? "✅" : "❌";
384
- const duration = (result.durationMs / 1000).toFixed(1);
385
- const outputPreview = result.output.length > 500 ? result.output.slice(0, 500) + "..." : result.output;
386
- notify(`${icon} Task ${result.taskIndex}: **${result.title}** (${duration}s)\n${outputPreview}`,
387
- result.status === "success" ? "info" : "error");
388
-
389
- progress += `## Task ${result.taskIndex}: ${result.title}\n\n`;
390
- progress += `- **Status:** ${result.status}\n`;
391
- progress += `- **Agent:** ${result.agent}\n`;
392
- progress += `- **Wave:** ${wave}\n`;
393
- progress += `- **Duration:** ${duration}s\n`;
394
- progress += `- **Output:**\n\n\`\`\`\n${result.output.slice(0, 3000)}\n\`\`\`\n\n`;
395
- }
396
-
397
- await writeFile(progressPath, progress, "utf-8");
398
- wave++;
399
304
  }
400
305
 
401
- // Sort results by task index for consistent reporting
402
- results.sort((a, b) => a.taskIndex - b.taskIndex);
403
-
404
- const succeededCount = results.filter(r => r.status === "success").length;
405
- const failedCount = results.filter(r => r.status === "error").length;
406
- const skippedCount = results.filter(r => r.status === "skipped").length;
407
- const totalTime = results.reduce((sum, r) => sum + r.durationMs, 0);
408
-
409
- progress += `---\n\n## Summary\n\n`;
410
- progress += `- **Completed:** ${new Date().toLocaleString()}\n`;
411
- progress += `- **Waves:** ${wave - 1}\n`;
412
- progress += `- **Succeeded:** ${succeededCount}/${results.length}\n`;
413
- progress += `- **Failed:** ${failedCount}\n`;
414
- progress += `- **Skipped:** ${skippedCount}\n`;
415
- progress += `- **Total time:** ${(totalTime / 1000).toFixed(1)}s\n`;
306
+ megaPrompt += `---\n\n## ⚠️ Instructions\n\n`;
307
+ megaPrompt += `Execute ALL ${totalTasks} tasks above **sequentially**. For each task:\n`;
308
+ megaPrompt += `1. Create/edit the required files using your tools\n`;
309
+ megaPrompt += `2. Report what you did with a brief summary\n`;
310
+ megaPrompt += `3. Move to the next task\n\n`;
311
+ megaPrompt += `Do NOT skip any task. Complete the entire project in this single turn.\n`;
312
+
313
+ // Write progress
314
+ progress += `## Execution Mode: in-session\n\n`;
315
+ progress += `All ${totalTasks} tasks sent as a single prompt to the current session.\n\n`;
316
+ for (const r of results) {
317
+ progress += `- Task ${r.taskIndex}: ${r.title} [${r.agent}]\n`;
318
+ }
319
+ progress += `\n---\n\n## Summary\n\n`;
320
+ progress += `- **Mode:** in-session (single turn)\n`;
321
+ progress += `- **Tasks:** ${totalTasks}\n`;
322
+ progress += `- **Status:** sent to LLM\n`;
416
323
  await writeFile(progressPath, progress, "utf-8");
417
324
 
418
- const statusParts = [`✅ ${succeededCount} succeeded`];
419
- if (failedCount > 0) statusParts.push(`❌ ${failedCount} failed`);
420
- if (skippedCount > 0) statusParts.push(`⏭️ ${skippedCount} skipped`);
325
+ // Send the mega-prompt as a user message — LLM handles everything
326
+ pi.sendUserMessage(megaPrompt);
327
+
328
+ const statusParts = [`📋 ${totalTasks} tasks sent`];
421
329
 
422
330
  notify(
423
331
  `\n🏁 **Execution complete!** (${wave - 1} waves)\n` +
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@phi-code-admin/phi-code",
3
- "version": "0.61.2",
3
+ "version": "0.61.4",
4
4
  "description": "Coding agent CLI with read, bash, edit, write tools and session management",
5
5
  "type": "module",
6
6
  "piConfig": {