pi-lens 3.6.7 → 3.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,62 @@
2
2
 
3
3
  All notable changes to pi-lens will be documented in this file.
4
4
 
5
+ ## [3.7.0] - 2026-04-05
6
+
7
+ ### Added
8
+ - **Test runner in pipeline** — After every file write/edit, pi-lens now automatically detects and
9
+ runs the corresponding test file (vitest, jest, pytest). Results surface inline so the agent sees
10
+ failures immediately without a separate test step. Supports TypeScript/JS/Python; file-level
11
+ targeted — only the test for the edited file runs, not the full suite.
12
+
13
+ - **Parallel dispatch groups** — Lint runners now execute in parallel across independent groups
14
+ (e.g. `lsp`, `tree-sitter`, `ast-grep-napi`, `type-safety`, `similarity` all fire at once).
15
+ Typical wall-clock savings: 500–1500ms per file write (`parallelGainMs` logged in latency log).
16
+
17
+ ### Fixed
18
+ - **`semantic: "none"` when 0 diagnostics** — LSP, Pyright, and type-safety runners were returning
19
+ `semantic: "warning"` even when `diagnosticCount` was 0 (clean file). Now correctly returns
20
+ `"none"` when no diagnostics are present, `"warning"` when warnings exist, `"blocking"` on errors.
21
+
22
+ - **`ast_grep_replace` with `apply=true` not writing files** — The replace tool was silently
23
+ discarding the rewritten content instead of persisting it to disk.
24
+
25
+ - **Pipeline event loop blocked during test execution** — `spawnSync` in the test runner was
26
+ blocking the Node.js event loop for the duration of the test run. Switched to async spawn.
27
+
28
+ - **Formatters: venv/vendor/node_modules awareness** — Formatters now skip files inside virtual
29
+ environments, vendor directories, and `node_modules` instead of attempting to format them.
30
+ CSharpier detection also improved.
31
+
32
+ - **Formatter nearest-wins resolution** — When multiple formatter configs exist at different
33
+ directory levels, the one closest to the edited file is now used (was previously using the
34
+ root-level config regardless of nesting).
35
+
36
+ - **Prettier auto-install** — Prettier is now auto-installed when detected as the project
37
+ formatter but not present, consistent with the Biome/Ruff auto-install behaviour.
38
+
39
+ - **6 missing formatters added** — `clang-format` (C/C++/ObjC), `ktlint` (Kotlin), `scalafmt`
40
+ (Scala), `mix format` (Elixir), `dart format` (Dart), `terraform fmt` (HCL) now detected
41
+ and invoked automatically.
42
+
43
+ - **LSP tier-4 install prompts** — Added the missing interactive-install prompts for tier-4
44
+ language servers (less common languages). Users now see the install suggestion instead of a
45
+ silent skip.
46
+
47
+ ### Changed
48
+ - **`startedAt` added to latency log runner entries** — Every runner entry now records when it
49
+ started, making wall-clock vs. sequential comparisons accurate. `dispatch_complete` also logs
50
+ `parallelGainMs = sumMs - wallClockMs` to quantify parallelism benefit.
51
+
52
+ - **Dynamic imports removed from hot path** — Dispatch module no longer uses `await import()`
53
+ for runner loading; all imports are static, eliminating ~50ms warm-up latency on first dispatch.
54
+
55
+ ### Tests
56
+ - Added formatter venv/vendor resolution and interactive-install coverage
57
+ - Added LSP lifecycle test suite with mock LSP server (process spawn, open/change/close, shutdown)
58
+
59
+ ---
60
+
5
61
  ## [3.6.7] - 2026-04-04
6
62
 
7
63
  ### Fixed
@@ -96,21 +96,43 @@ export class AstGrepClient {
96
96
  paths: string[],
97
97
  apply = false,
98
98
  ): Promise<{ matches: AstGrepMatch[]; applied: boolean; error?: string }> {
99
- const args = [
99
+ const baseArgs = ["run", "-p", pattern, "-r", rewrite, "--lang", lang];
100
+
101
+ if (!apply) {
102
+ // Dry-run: --json=compact shows what would change without writing
103
+ const result = await this.runner.exec([
104
+ ...baseArgs,
105
+ "--json=compact",
106
+ ...paths,
107
+ ]);
108
+ return { matches: result.matches, applied: false, error: result.error };
109
+ }
110
+
111
+ // Apply: --update-all and --json are MUTUALLY EXCLUSIVE in sg.
112
+ // Run twice:
113
+ // 1. --update-all to actually write the files
114
+ // 2. --json=compact (without rewrite) to collect matches for display
115
+ const applyResult = await this.runner.exec([
116
+ ...baseArgs,
117
+ "--update-all",
118
+ ...paths,
119
+ ]);
120
+ if (applyResult.error) {
121
+ return { matches: [], applied: false, error: applyResult.error };
122
+ }
123
+
124
+ // Search for what was changed (pattern no longer matches after rewrite,
125
+ // so search for the rewrite pattern to show what was applied)
126
+ const searchResult = await this.runner.exec([
100
127
  "run",
101
128
  "-p",
102
- pattern,
103
- "-r",
104
129
  rewrite,
105
130
  "--lang",
106
131
  lang,
107
132
  "--json=compact",
108
- ];
109
- if (apply) args.push("--update-all");
110
- args.push(...paths);
111
-
112
- const result = await this.runner.exec(args);
113
- return { matches: result.matches, applied: apply, error: result.error };
133
+ ...paths,
134
+ ]);
135
+ return { matches: searchResult.matches, applied: true, error: undefined };
114
136
  }
115
137
 
116
138
  /**
@@ -254,6 +254,135 @@ export function formatLatencyReport(report: DispatchLatencyReport): string {
254
254
  return lines.join("\n");
255
255
  }
256
256
 
257
+ // --- Group runner (used by dispatchForFile for parallel execution) ---
258
+
259
+ interface GroupResult {
260
+ diagnostics: Diagnostic[];
261
+ latencies: RunnerLatency[];
262
+ hadBlocker: boolean;
263
+ }
264
+
265
+ /**
266
+ * Execute all runners in a single group.
267
+ *
268
+ * - mode "fallback": run runners sequentially and stop at the first
269
+ * one that succeeds (returns status !== "skipped").
270
+ * - mode "all" (default): run all runners in the group sequentially
271
+ * and collect every diagnostic.
272
+ *
273
+ * Groups themselves are run in parallel by dispatchForFile, so this
274
+ * function must NOT mutate shared state.
275
+ */
276
+ async function runGroup(
277
+ ctx: DispatchContext,
278
+ group: RunnerGroup,
279
+ ): Promise<GroupResult> {
280
+ const diagnostics: Diagnostic[] = [];
281
+ const latencies: RunnerLatency[] = [];
282
+ let hadBlocker = false;
283
+
284
+ // Filter runners by kind if specified
285
+ const runnerIds = group.filterKinds
286
+ ? group.runnerIds.filter((id) => {
287
+ const runner = getRunner(id);
288
+ return runner && ctx.kind && group.filterKinds?.includes(ctx.kind);
289
+ })
290
+ : group.runnerIds;
291
+
292
+ const semantic = group.semantic ?? "warning";
293
+
294
+ for (const runnerId of runnerIds) {
295
+ const runnerStart = Date.now();
296
+ const runner = getRunner(runnerId);
297
+
298
+ if (!runner) {
299
+ latencies.push({
300
+ runnerId,
301
+ startTime: runnerStart,
302
+ endTime: Date.now(),
303
+ durationMs: 0,
304
+ status: "skipped",
305
+ diagnosticCount: 0,
306
+ semantic: "unknown",
307
+ });
308
+ logLatency({
309
+ type: "runner",
310
+ filePath: ctx.filePath,
311
+ runnerId,
312
+ durationMs: 0,
313
+ status: "not_registered",
314
+ diagnosticCount: 0,
315
+ semantic: "unknown",
316
+ });
317
+ continue;
318
+ }
319
+
320
+ // Check preconditions
321
+ if (runner.when && !(await runner.when(ctx))) {
322
+ latencies.push({
323
+ runnerId,
324
+ startTime: runnerStart,
325
+ endTime: Date.now(),
326
+ durationMs: Date.now() - runnerStart,
327
+ status: "when_skipped",
328
+ diagnosticCount: 0,
329
+ semantic: runner.id,
330
+ });
331
+ logLatency({
332
+ type: "runner",
333
+ filePath: ctx.filePath,
334
+ runnerId,
335
+ durationMs: 0,
336
+ status: "when_skipped",
337
+ diagnosticCount: 0,
338
+ semantic: "when_condition",
339
+ });
340
+ continue;
341
+ }
342
+
343
+ const result = await runRunner(ctx, runner, semantic);
344
+ const runnerEnd = Date.now();
345
+ const duration = runnerEnd - runnerStart;
346
+
347
+ latencies.push({
348
+ runnerId,
349
+ startTime: runnerStart,
350
+ endTime: runnerEnd,
351
+ durationMs: duration,
352
+ status: result.status,
353
+ diagnosticCount: result.diagnostics.length,
354
+ semantic: result.semantic ?? semantic,
355
+ });
356
+ logLatency({
357
+ type: "runner",
358
+ filePath: ctx.filePath,
359
+ runnerId,
360
+ startedAt: new Date(runnerStart).toISOString(),
361
+ durationMs: duration,
362
+ status: result.status,
363
+ diagnosticCount: result.diagnostics.length,
364
+ semantic: result.semantic ?? semantic,
365
+ });
366
+
367
+ diagnostics.push(...result.diagnostics);
368
+
369
+ const resultSemantic = result.semantic ?? semantic;
370
+ if (
371
+ (resultSemantic === "blocking" && result.diagnostics.length > 0) ||
372
+ result.diagnostics.some((d) => d.semantic === "blocking")
373
+ ) {
374
+ hadBlocker = true;
375
+ }
376
+
377
+ // mode:"fallback" — stop at first runner that produced results
378
+ if (group.mode === "fallback" && result.status !== "skipped") {
379
+ break;
380
+ }
381
+ }
382
+
383
+ return { diagnostics, latencies, hadBlocker };
384
+ }
385
+
257
386
  // --- Main Dispatch Function ---
258
387
 
259
388
  export async function dispatchForFile(
@@ -280,124 +409,38 @@ export async function dispatchForFile(
280
409
  },
281
410
  });
282
411
 
283
- for (const group of groups) {
284
- if (stopped && ctx.pi.getFlag("stop-on-error")) {
285
- break;
286
- }
287
-
288
- // Filter runners by kind if specified
289
- const runnerIds = group.filterKinds
290
- ? group.runnerIds.filter((id) => {
291
- const runner = getRunner(id);
292
- return runner && ctx.kind && group.filterKinds?.includes(ctx.kind);
293
- })
294
- : group.runnerIds;
295
-
296
- const semantic = group.semantic ?? "warning";
297
-
298
- for (const runnerId of runnerIds) {
299
- const runnerStart = Date.now();
300
- const runner = getRunner(runnerId);
301
- if (!runner) {
302
- runnerLatencies.push({
303
- runnerId,
304
- startTime: runnerStart,
305
- endTime: Date.now(),
306
- durationMs: 0,
307
- status: "skipped",
308
- diagnosticCount: 0,
309
- semantic: "unknown",
310
- });
311
- logLatency({
312
- type: "runner",
313
- filePath: ctx.filePath,
314
- runnerId,
315
- durationMs: 0,
316
- status: "not_registered",
317
- diagnosticCount: 0,
318
- semantic: "unknown",
319
- });
320
- continue;
321
- }
322
-
323
- // Check preconditions
324
- if (runner.when && !(await runner.when(ctx))) {
325
- runnerLatencies.push({
326
- runnerId,
327
- startTime: runnerStart,
328
- endTime: Date.now(),
329
- durationMs: Date.now() - runnerStart,
330
- status: "when_skipped",
331
- diagnosticCount: 0,
332
- semantic: runner.id,
333
- });
334
- logLatency({
335
- type: "runner",
336
- filePath: ctx.filePath,
337
- runnerId,
338
- durationMs: 0,
339
- status: "when_skipped",
340
- diagnosticCount: 0,
341
- semantic: "when_condition",
342
- });
343
- continue;
344
- }
345
-
346
- const result = await runRunner(ctx, runner, semantic);
347
- const runnerEnd = Date.now();
348
- const duration = runnerEnd - runnerStart;
349
-
350
- // Track latency for this runner
351
- runnerLatencies.push({
352
- runnerId,
353
- startTime: runnerStart,
354
- endTime: runnerEnd,
355
- durationMs: duration,
356
- status: result.status,
357
- diagnosticCount: result.diagnostics.length,
358
- semantic: result.semantic ?? semantic,
359
- });
360
-
361
- // IMMEDIATE LOG: Each runner result (for debugging)
362
- logLatency({
363
- type: "runner",
364
- filePath: ctx.filePath,
365
- runnerId,
366
- durationMs: duration,
367
- status: result.status,
368
- diagnosticCount: result.diagnostics.length,
369
- semantic: result.semantic ?? semantic,
370
- });
371
-
372
- // Apply delta mode filtering
373
- let diagnostics = result.diagnostics;
374
- if (ctx.deltaMode && result.semantic !== "silent") {
375
- const before = ctx.baselines.get(ctx.filePath);
376
- if (before) {
377
- const filtered = filterDelta(
378
- diagnostics,
379
- before as Diagnostic[],
380
- (d) => d.id,
381
- );
382
- diagnostics = filtered.new;
383
- // TODO: Track fixed diagnostics
384
- }
385
- // Update baseline
386
- ctx.baselines.set(ctx.filePath, [...allDiagnostics, ...diagnostics]);
387
- }
388
-
389
- allDiagnostics.push(...diagnostics);
412
+ // Run all groups in parallel — they are independent and don't depend on
413
+ // each other's results. Within each group, mode:"fallback" semantics are
414
+ // preserved (sequential first-success). Results are merged in original
415
+ // group order so output is deterministic.
416
+ const groupResults = await Promise.all(
417
+ groups.map((group) => runGroup(ctx, group)),
418
+ );
390
419
 
391
- // Check for blockers - use result semantic (not group default) and check individual diagnostics
392
- const resultSemantic = result.semantic ?? semantic;
393
- if (resultSemantic === "blocking" && diagnostics.length > 0) {
394
- stopped = true;
395
- }
396
- // Also check if any individual diagnostic is blocking
397
- if (diagnostics.some((d) => d.semantic === "blocking")) {
398
- stopped = true;
420
+ for (const {
421
+ diagnostics: groupDiags,
422
+ latencies,
423
+ hadBlocker,
424
+ } of groupResults) {
425
+ runnerLatencies.push(...latencies);
426
+
427
+ // Apply delta mode filtering across the accumulated set
428
+ let diagnostics = groupDiags;
429
+ if (ctx.deltaMode) {
430
+ const before = ctx.baselines.get(ctx.filePath);
431
+ if (before) {
432
+ const filtered = filterDelta(
433
+ diagnostics,
434
+ before as Diagnostic[],
435
+ (d) => d.id,
436
+ );
437
+ diagnostics = filtered.new;
399
438
  }
439
+ ctx.baselines.set(ctx.filePath, [...allDiagnostics, ...diagnostics]);
400
440
  }
441
+
442
+ allDiagnostics.push(...diagnostics);
443
+ if (hadBlocker) stopped = true;
401
444
  }
402
445
 
403
446
  // Categorize results
@@ -440,14 +483,20 @@ export async function dispatchForFile(
440
483
  // No need to log again here - would create duplicates in the log
441
484
 
442
485
  // Log summary to latency log only (not console - avoid noise)
486
+ const sumMs = runnerLatencies.reduce((s, r) => s + r.durationMs, 0);
487
+ const wallClockMs = latencyReport.totalDurationMs;
443
488
  logLatency({
444
489
  type: "tool_result",
445
490
  filePath: ctx.filePath,
446
- durationMs: latencyReport.totalDurationMs,
491
+ durationMs: wallClockMs,
492
+ wallClockMs,
493
+ sumMs,
494
+ parallelGainMs: Math.max(0, sumMs - wallClockMs),
447
495
  result: "dispatch_complete",
448
496
  metadata: {
449
497
  runners: runnerLatencies.map((r) => ({
450
498
  id: r.runnerId,
499
+ startedAt: new Date(r.startTime).toISOString(),
451
500
  duration: r.durationMs,
452
501
  status: r.status,
453
502
  })),
@@ -11,10 +11,13 @@ import {
11
11
  createBaselineStore,
12
12
  createDispatchContext,
13
13
  type DispatchLatencyReport,
14
+ dispatchForFile,
14
15
  formatLatencyReport,
15
16
  getLatencyReports,
17
+ getRunnersForKind,
16
18
  type RunnerLatency,
17
19
  } from "./dispatcher.js";
20
+ import { TOOL_PLANS } from "./plan.js";
18
21
  import type { BaselineStore, DispatchResult, PiAgentAPI } from "./types.js";
19
22
 
20
23
  export type { DispatchLatencyReport, RunnerLatency };
@@ -57,11 +60,6 @@ export async function dispatchLint(
57
60
  // pre-existing issues after the first write.
58
61
  const ctx = createDispatchContext(filePath, cwd, pi, sessionBaselines, true);
59
62
 
60
- // Import dispatchForFile dynamically to avoid circular deps
61
- const { dispatchForFile } = await import("./dispatcher.js");
62
- const { getRunnersForKind } = await import("./dispatcher.js");
63
- const { TOOL_PLANS } = await import("./plan.js");
64
-
65
63
  const kind = ctx.kind;
66
64
  if (!kind) return "";
67
65
 
@@ -82,10 +80,6 @@ export async function dispatchLintWithResult(
82
80
  ): Promise<DispatchResult> {
83
81
  const ctx = createDispatchContext(filePath, cwd, pi, sessionBaselines, true);
84
82
 
85
- const { dispatchForFile } = await import("./dispatcher.js");
86
- const { getRunnersForKind } = await import("./dispatcher.js");
87
- const { TOOL_PLANS } = await import("./plan.js");
88
-
89
83
  const kind = ctx.kind;
90
84
  if (!kind) {
91
85
  return {
@@ -110,7 +104,7 @@ export async function dispatchLintWithResult(
110
104
  };
111
105
  }
112
106
 
113
- return await dispatchForFile(ctx, plan.groups);
107
+ return dispatchForFile(ctx, plan.groups);
114
108
  }
115
109
 
116
110
  /**
@@ -136,7 +130,6 @@ export async function getAvailableRunners(filePath: string): Promise<string[]> {
136
130
  const kind = detectFileKind(filePath);
137
131
  if (!kind) return [];
138
132
 
139
- const { getRunnersForKind } = await import("./dispatcher.js");
140
133
  const runners = getRunnersForKind(kind);
141
134
  return runners.map((r) => r.id);
142
135
  }
@@ -117,7 +117,11 @@ const lspRunner: RunnerDefinition = {
117
117
  return {
118
118
  status: hasErrors ? "failed" : "succeeded",
119
119
  diagnostics,
120
- semantic: hasErrors ? "blocking" : "warning",
120
+ semantic: hasErrors
121
+ ? "blocking"
122
+ : diagnostics.length > 0
123
+ ? "warning"
124
+ : "none",
121
125
  };
122
126
  },
123
127
  };
@@ -82,7 +82,11 @@ const pyrightRunner: RunnerDefinition = {
82
82
  return {
83
83
  status: hasErrors ? "failed" : "succeeded",
84
84
  diagnostics,
85
- semantic: hasErrors ? "blocking" : "warning",
85
+ semantic: hasErrors
86
+ ? "blocking"
87
+ : diagnostics.length > 0
88
+ ? "warning"
89
+ : "none",
86
90
  };
87
91
  } catch {
88
92
  // JSON parse error
@@ -51,7 +51,11 @@ const typeSafetyRunner: RunnerDefinition = {
51
51
  return {
52
52
  status: hasErrors ? "failed" : "succeeded",
53
53
  diagnostics,
54
- semantic: hasErrors ? "blocking" : "warning",
54
+ semantic: hasErrors
55
+ ? "blocking"
56
+ : diagnostics.length > 0
57
+ ? "warning"
58
+ : "none",
55
59
  };
56
60
  },
57
61
  };