@ryanfw/prompt-orchestration-pipeline 0.15.0 → 0.16.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published in their respective public registries.
@@ -0,0 +1,277 @@
+ /**
+  * Batch Runner - Concurrent job processing with SQLite state management
+  */
+
+ import crypto from "node:crypto";
+ import pLimit from "p-limit";
+
+ /**
+  * Creates the batch_jobs table and index if they don't exist
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  */
+ export function ensureBatchSchema(db) {
+   db.exec(`
+     CREATE TABLE IF NOT EXISTS batch_jobs (
+       id TEXT PRIMARY KEY,
+       batch_id TEXT NOT NULL,
+       status TEXT NOT NULL DEFAULT 'pending',
+       input TEXT NOT NULL,
+       output TEXT,
+       error TEXT,
+       retry_count INTEGER NOT NULL DEFAULT 0,
+       started_at TEXT,
+       completed_at TEXT
+     );
+     CREATE INDEX IF NOT EXISTS idx_batch_jobs_batch_status ON batch_jobs(batch_id, status);
+   `);
+ }
+
+ /**
+  * Inserts jobs into the batch_jobs table
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {string} batchId - Unique batch identifier
+  * @param {Array<Object>} jobs - Array of job objects
+  * @returns {string[]} Array of job IDs inserted
+  */
+ export function insertJobs(db, batchId, jobs) {
+   const insertStmt = db.prepare(
+     `INSERT OR IGNORE INTO batch_jobs (id, batch_id, status, input) VALUES (?, ?, 'pending', ?)`
+   );
+   const selectStatusStmt = db.prepare(
+     `SELECT status FROM batch_jobs WHERE id = ? AND batch_id = ?`
+   );
+
+   const insertMany = db.transaction((jobList) => {
+     const ids = [];
+     for (const job of jobList) {
+       const id = job.id ?? crypto.randomUUID();
+       const input = JSON.stringify(job);
+       const result = insertStmt.run(id, batchId, input);
+
+       // If no row was inserted, the job already exists. Validate its state.
+       if (result.changes === 0) {
+         const existing = selectStatusStmt.get(id, batchId);
+         if (existing && (existing.status === "complete" || existing.status === "permanently_failed")) {
+           throw new Error(
+             `Cannot re-insert job "${id}" for batch "${batchId}": existing job is in terminal state "${existing.status}".`
+           );
+         }
+       }
+       ids.push(id);
+     }
+     return ids;
+   });
+
+   return insertMany(jobs);
+ }
+
+ /**
+  * Marks a job as processing
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {string} jobId - Job identifier
+  */
+ export function markProcessing(db, jobId) {
+   const stmt = db.prepare(
+     `UPDATE batch_jobs SET status = 'processing', started_at = datetime('now') WHERE id = ?`
+   );
+   stmt.run(jobId);
+ }
+
+ /**
+  * Marks a job as complete with output
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {string} jobId - Job identifier
+  * @param {*} output - Job output (will be JSON serialized)
+  */
+ export function markComplete(db, jobId, output) {
+   const stmt = db.prepare(
+     `UPDATE batch_jobs SET status = 'complete', output = ?, completed_at = datetime('now') WHERE id = ?`
+   );
+   stmt.run(JSON.stringify(output), jobId);
+ }
+
+ /**
+  * Marks a job as failed and increments retry count
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {string} jobId - Job identifier
+  * @param {string} error - Error message
+  */
+ export function markFailed(db, jobId, error) {
+   const stmt = db.prepare(
+     `UPDATE batch_jobs SET status = 'failed', error = ?, retry_count = retry_count + 1 WHERE id = ?`
+   );
+   stmt.run(error, jobId);
+ }
+
+ /**
+  * Gets pending and failed jobs that are under the retry limit
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {string} batchId - Unique batch identifier
+  * @param {number} maxRetries - Maximum retry attempts
+  * @returns {Array<{id: string, input: Object, retryCount: number}>} Array of pending jobs
+  */
+ export function getPendingJobs(db, batchId, maxRetries) {
+   const stmt = db.prepare(
+     `SELECT id, input, retry_count FROM batch_jobs WHERE batch_id = ? AND status IN ('pending', 'failed') AND retry_count < ? ORDER BY id`
+   );
+   const rows = stmt.all(batchId, maxRetries);
+   return rows.map((row) => ({
+     id: row.id,
+     input: JSON.parse(row.input),
+     retryCount: row.retry_count,
+   }));
+ }
+
+ /**
+  * Recovers jobs stuck in 'processing' state (from process crash)
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {string} batchId - Unique batch identifier
+  * @returns {number} Number of jobs recovered
+  */
+ export function recoverStaleJobs(db, batchId) {
+   const stmt = db.prepare(
+     `UPDATE batch_jobs SET status = 'pending' WHERE batch_id = ? AND status = 'processing'`
+   );
+   const result = stmt.run(batchId);
+   return result.changes;
+ }
+
+ /**
+  * Gets completed jobs for a batch
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {string} batchId - Unique batch identifier
+  * @returns {Array<{id: string, input: Object, output: *}>} Array of completed jobs
+  */
+ function getCompletedJobs(db, batchId) {
+   const stmt = db.prepare(
+     `SELECT id, input, output FROM batch_jobs WHERE batch_id = ? AND status = 'complete'`
+   );
+   const rows = stmt.all(batchId);
+   return rows.map((row) => ({
+     id: row.id,
+     input: JSON.parse(row.input),
+     output: JSON.parse(row.output),
+   }));
+ }
+
+ /**
+  * Gets failed jobs for a batch (those that exhausted retries)
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {string} batchId - Unique batch identifier
+  * @param {number} maxRetries - Maximum retry attempts
+  * @returns {Array<{id: string, input: Object, error: string, retryCount: number}>} Array of failed jobs
+  */
+ function getFailedJobs(db, batchId, maxRetries) {
+   const stmt = db.prepare(
+     `SELECT id, input, error, retry_count FROM batch_jobs WHERE batch_id = ? AND status = 'failed' AND retry_count >= ?`
+   );
+   const rows = stmt.all(batchId, maxRetries);
+   return rows.map((row) => ({
+     id: row.id,
+     input: JSON.parse(row.input),
+     error: row.error,
+     retryCount: row.retry_count,
+   }));
+ }
+
+ /**
+  * Processes a single job with try/catch and status updates
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {Object} job - Job object with id, input, retryCount
+  * @param {Function} processor - async (input, ctx) => result
+  * @param {string} batchId - Unique batch identifier
+  */
+ async function processOneJob(db, job, processor, batchId) {
+   markProcessing(db, job.id);
+   try {
+     const output = await processor(job.input, {
+       attempt: job.retryCount + 1,
+       batchId,
+       db,
+     });
+     markComplete(db, job.id, output);
+   } catch (err) {
+     markFailed(db, job.id, err.message || String(err));
+   }
+ }
+
+ /**
+  * Validates batch options and throws with descriptive errors if invalid
+  * @param {Object} options - Batch options to validate
+  * @throws {Error} If options are invalid
+  */
+ export function validateBatchOptions(options) {
+   if (!options || typeof options !== "object") {
+     throw new Error(
+       `runBatch: options must be an object, got: ${typeof options}`
+     );
+   }
+   if (!Array.isArray(options.jobs)) {
+     throw new Error(
+       `runBatch: jobs must be an array, got: ${typeof options.jobs}`
+     );
+   }
+   if (options.jobs.length === 0) {
+     throw new Error("runBatch: jobs must be a non-empty array");
+   }
+   if (typeof options.processor !== "function") {
+     throw new Error(
+       `runBatch: processor must be a function, got: ${typeof options.processor}`
+     );
+   }
+   if (options.concurrency !== undefined) {
+     if (!Number.isInteger(options.concurrency) || options.concurrency < 1) {
+       throw new Error(
+         `runBatch: concurrency must be a positive integer, got: ${options.concurrency}`
+       );
+     }
+   }
+   if (options.maxRetries !== undefined) {
+     if (!Number.isInteger(options.maxRetries) || options.maxRetries < 0) {
+       throw new Error(
+         `runBatch: maxRetries must be a non-negative integer, got: ${options.maxRetries}`
+       );
+     }
+   }
+ }
+
+ /**
+  * Executes a batch of jobs concurrently with retry support
+  * @param {import('better-sqlite3').Database} db - SQLite database instance
+  * @param {Object} options - Batch options
+  * @param {Array<Object>} options.jobs - Array of job objects
+  * @param {Function} options.processor - async (input, ctx) => result
+  * @param {number} [options.concurrency=10] - Max concurrent jobs
+  * @param {number} [options.maxRetries=3] - Max retry attempts per job
+  * @param {string} [options.batchId] - Unique batch identifier (auto-generated if omitted)
+  * @returns {Promise<{completed: Array, failed: Array}>} Batch results
+  */
+ export async function executeBatch(db, options) {
+   const {
+     jobs,
+     processor,
+     concurrency = 10,
+     maxRetries = 3,
+     batchId = crypto.randomUUID(),
+   } = options;
+
+   ensureBatchSchema(db);
+   recoverStaleJobs(db, batchId);
+   insertJobs(db, batchId, jobs);
+
+   const limit = pLimit(concurrency);
+
+   let pending = getPendingJobs(db, batchId, maxRetries);
+   while (pending.length > 0) {
+     const promises = pending.map((job) =>
+       limit(() => processOneJob(db, job, processor, batchId))
+     );
+     await Promise.allSettled(promises);
+     pending = getPendingJobs(db, batchId, maxRetries);
+   }
+
+   return {
+     completed: getCompletedJobs(db, batchId),
+     failed: getFailedJobs(db, batchId, maxRetries),
+   };
+ }
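
For orientation, a minimal usage sketch of the new batch runner (not part of the published diff): it assumes better-sqlite3 is installed, imports batch-runner.js by a relative path, and invents the job fields and processor purely for illustration.

// Illustrative sketch only — job shape and processor are assumptions, not from the diff.
import Database from "better-sqlite3";
import { executeBatch } from "./batch-runner.js";

const db = new Database(":memory:");

const { completed, failed } = await executeBatch(db, {
  jobs: [
    { id: "job-1", prompt: "hello" },
    { id: "job-2", prompt: "world" },
  ],
  // Each job object is stored as the row's `input` and handed back to the
  // processor; ctx carries { attempt, batchId, db }.
  processor: async (input, ctx) => ({ echoed: input.prompt, attempt: ctx.attempt }),
  concurrency: 2,
  maxRetries: 1,
});

console.log(`completed=${completed.length} failed=${failed.length}`);
db.close();

Jobs that exhaust maxRetries come back in the `failed` array with their recorded error and retry count rather than throwing.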
@@ -8,6 +8,8 @@ import {
    isValidLogEvent,
    isValidLogFileExtension,
  } from "../config/log-events.js";
+ import Database from "better-sqlite3";
+ import { executeBatch, validateBatchOptions } from "./batch-runner.js";

  /**
   * Creates a task-scoped file I/O interface that manages file operations
@@ -26,7 +28,7 @@ async function ensureDir(dir) {
  }

  function ensureDirSync(dir) {
-   fsSync.mkdir(dir, { recursive: true });
+   fsSync.mkdirSync(dir, { recursive: true });
  }

  export function createTaskFileIO({
@@ -293,6 +295,40 @@ export function createTaskFileIO({
      getCurrentStage() {
        return getStage();
      },
+
+     /**
+      * Get a SQLite database instance for this job run
+      * @param {Object} options - better-sqlite3 options
+      * @returns {Database} better-sqlite3 Database instance
+      */
+     getDB(options = {}) {
+       ensureDirSync(artifactsDir);
+       const dbPath = path.join(artifactsDir, "run.db");
+       const db = new Database(dbPath, options);
+       db.pragma("journal_mode = WAL");
+       updateStatusWithFilesSync("artifacts", "run.db");
+       return db;
+     },
+
+     /**
+      * Execute a batch of jobs concurrently with SQLite state management
+      * @param {Object} options - Batch options
+      * @param {Array<Object>} options.jobs - Array of job objects
+      * @param {Function} options.processor - async (input, ctx) => result
+      * @param {number} [options.concurrency=10] - Max concurrent jobs
+      * @param {number} [options.maxRetries=3] - Max retry attempts per job
+      * @param {string} [options.batchId] - Unique batch identifier (auto-generated if omitted)
+      * @returns {Promise<{completed: Array, failed: Array}>} Batch results
+      */
+     async runBatch(options) {
+       validateBatchOptions(options);
+       const db = this.getDB();
+       try {
+         return await executeBatch(db, options);
+       } finally {
+         db.close();
+       }
+     },
    };
  }

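A rough sketch of how a task might drive this through the object returned by createTaskFileIO (the `io` binding, the stage signature, and the job fields are assumptions for illustration, not taken from the diff):

// Hypothetical task stage — assumes the createTaskFileIO() result is passed in as `io`.
export async function fanOut({ io }) {
  const { completed, failed } = await io.runBatch({
    jobs: [
      { id: "chunk-1", text: "first section" },
      { id: "chunk-2", text: "second section" },
    ],
    // The processor receives the stored job object plus { attempt, batchId, db }.
    processor: async (input) => ({ characters: input.text.length }),
    concurrency: 4,
    maxRetries: 2,
  });
  // runBatch opens artifacts/run.db via getDB(), registers it as an artifact,
  // and closes the handle once the batch settles.
  return { output: { done: completed.length, failed: failed.length } };
}

Inside executeBatch, recoverStaleJobs() runs first, so rows left in 'processing' for the given batchId by a crashed process are reset to 'pending' before any new work starts.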
@@ -123,10 +123,8 @@ export async function startOrchestrator(opts) {
    } catch {}

    // Move seed to current/{jobId}/seed.json
-   logger.log("Moving file", { from: filePath, to: dest });
    try {
      await moveFile(filePath, dest);
-     logger.log("Successfully moved file", { destination: dest });
    } catch (error) {
      logger.error("Failed to move file", {
        from: filePath,
@@ -173,12 +171,6 @@ export async function startOrchestrator(opts) {
      // Apply artifact initialization to the status
      const updatedStatus = applyArtifacts(status);
      await fs.writeFile(statusPath, JSON.stringify(updatedStatus, null, 2));
-
-     logger.log("Initialized status from upload artifacts", {
-       jobId,
-       pipeline: seed?.pipeline,
-       artifactsCount: updatedStatus.files?.artifacts?.length || 0,
-     });
    } catch (artifactError) {
      // Don't fail job startup if artifact initialization fails, just log
      logger.warn("Failed to initialize status from artifacts", {
@@ -233,7 +225,6 @@ export async function startOrchestrator(opts) {

  // Watch pending directory for seeds
  const watchPattern = path.join(dirs.pending, "*.json");
- logger.log("Watching pattern", { pattern: watchPattern });
  const watcher = watcherFactory(watchPattern, {
    ignoreInitial: false,
    awaitWriteFinish: false, // Disable awaitWriteFinish for faster detection
@@ -243,7 +234,6 @@ export async function startOrchestrator(opts) {
  // Wait for watcher to be ready before resolving
  await new Promise((resolve, reject) => {
    watcher.on("ready", () => {
-     logger.log("Watcher is ready");
      resolve();
    });

@@ -254,7 +244,6 @@ export async function startOrchestrator(opts) {
  });

  watcher.on("add", (file) => {
-   logger.log("Detected file add", { file });
    // Return promise so tests awaiting the add handler block until processing completes
    return handleSeedAdd(file);
  });
@@ -330,13 +319,6 @@ function spawnRunner(
  const availablePipelines = Object.keys(configSnapshot?.pipelines ?? {});
  const pipelineSlug = seed?.pipeline;

- logger.log("spawnRunner invoked", {
-   jobId,
-   pipelineSlug: pipelineSlug ?? null,
-   availablePipelines,
-   seedKeys: seed ? Object.keys(seed) : null,
- });
-
  if (!availablePipelines.length) {
    logger.warn(
      "No pipelines registered in config() when spawnRunner invoked"
@@ -143,10 +143,6 @@ try {
    startFromTask &&
    taskNames.indexOf(taskName) < taskNames.indexOf(startFromTask)
  ) {
-   logger.log("Skipping task before restart point", {
-     taskName,
-     startFromTask,
-   });
    continue;
  }

@@ -155,7 +151,6 @@ try {
    const outputPath = path.join(workDir, "tasks", taskName, "output.json");
    const output = JSON.parse(await fs.readFile(outputPath, "utf8"));
    pipelineArtifacts[taskName] = output;
-   logger.log("Task already completed", { taskName });
  } catch {
    logger.warn("Failed to read completed task output", { taskName });
  }
@@ -190,7 +185,6 @@ try {
      }
    }

-   logger.log("Starting task", { taskName });
    await updateStatus(taskName, {
      state: TaskState.RUNNING,
      startedAt: now(),
@@ -265,13 +259,6 @@ try {
        process.exitCode = 1;
        process.exit(1);
      }
-
-     logger.log("Task symlinks repaired successfully", {
-       taskName,
-       taskDir,
-       repairDuration: repairResult.duration,
-       relocatedEntry: repairResult.relocatedEntry,
-     });
    } else {
      logger.debug("Task symlinks validation passed", {
        taskName,
@@ -295,7 +282,6 @@ try {
      statusPath: tasksStatusPath,
    });

-   logger.log("Running task", { taskName, modulePath: absoluteModulePath });
    const result = await runPipeline(relocatedEntry, ctx);

    if (!result.ok) {
@@ -363,13 +349,6 @@ try {
      process.exit(1);
    }

-   logger.log("Task completed successfully", {
-     taskName,
-     executionTimeMs:
-       result.logs?.reduce((total, log) => total + (log.ms || 0), 0) || 0,
-     refinementAttempts: result.refinementAttempts || 0,
-   });
-
    // The file I/O system automatically handles writing outputs and updating tasks-status.json
    // No need to manually write output.json or enumerate artifacts

@@ -396,7 +375,6 @@ try {

    // Check if this is a single task run and we've completed the target task
    if (runSingleTask && taskName === startFromTask) {
-     logger.log("Stopping after single task execution", { taskName });
      break;
    }
  } catch (err) {
@@ -415,19 +393,6 @@ try {
  await fs.mkdir(COMPLETE_DIR, { recursive: true });
  const dest = path.join(COMPLETE_DIR, jobId);

- logger.log("Pipeline completed", {
-   jobId,
-   totalExecutionTime: Object.values(status.tasks).reduce(
-     (total, t) => total + (t.executionTimeMs || 0),
-     0
-   ),
-   totalRefinementAttempts: Object.values(status.tasks).reduce(
-     (total, t) => total + (t.refinementAttempts || 0),
-     0
-   ),
-   finalArtifacts: Object.keys(pipelineArtifacts),
- });
-
  await fs.rename(workDir, dest);
  await appendLine(
    path.join(COMPLETE_DIR, "runs.jsonl"),
@@ -449,8 +414,6 @@ try {

      // Clean up task symlinks to avoid dangling links in archives
      await cleanupTaskSymlinks(dest);
-   } else {
-     logger.log("Single task run completed, job remains in current", { jobId });
    }
  } catch (error) {
    throw error;
@@ -164,13 +164,8 @@ export async function writeJobStatus(jobDir, updateFn) {

  const next = prev
    .then(async () => {
-     logger.group("Status Write Operation");
-     logger.log(`Updating status for job: ${jobId}`);
-     logger.log(`Status file path: ${statusPath}`);
-
      // Read existing status or create default
      const current = await readStatusFile(statusPath, jobId);
-     logger.log("Current status snapshot:", current);

      // Validate basic structure
      const validated = validateStatusSnapshot(current);
@@ -188,11 +183,9 @@ export async function writeJobStatus(jobDir, updateFn) {
  );

  snapshot.lastUpdated = new Date().toISOString();
- logger.log("Status after update function:", snapshot);

  // Atomic write
  await atomicWrite(statusPath, snapshot);
- logger.log("Status file written successfully");

  // Emit SSE event for tasks-status.json change using logger
  try {
@@ -202,7 +195,6 @@ export async function writeJobStatus(jobDir, updateFn) {
      jobId,
    };
    await logger.sse("state:change", eventData);
-   logger.log("SSE event broadcasted successfully");
  } catch (error) {
    // Don't fail the write if SSE emission fails
    logger.error("Failed to emit SSE event:", error);
@@ -218,10 +210,6 @@ export async function writeJobStatus(jobDir, updateFn) {
      reason: snapshot.lifecycleBlockReason,
    };
    await logger.sse("lifecycle_block", lifecycleEventData);
-   logger.log(
-     "lifecycle_block SSE event broadcasted successfully",
-     lifecycleEventData
-   );
  } catch (error) {
    // Don't fail the write if SSE emission fails
    logger.error("Failed to emit lifecycle_block SSE event:", error);
@@ -310,9 +298,6 @@ export async function updateTaskStatus(jobDir, taskId, taskUpdateFn) {

  const next = prev
    .then(async () => {
-     logger.group("Task Status Update Operation");
-     logger.log(`Updating task ${taskId} for job: ${jobId}`);
-
      const statusPath = path.join(jobDir, "tasks-status.json");

      // Read existing status or create default
@@ -336,7 +321,6 @@ export async function updateTaskStatus(jobDir, taskId, taskUpdateFn) {

  // Atomic write
  await atomicWrite(statusPath, validated);
- logger.log("Task status file written successfully");

  // Emit task:updated SSE event after successful write
  try {
@@ -346,13 +330,11 @@ export async function updateTaskStatus(jobDir, taskId, taskUpdateFn) {
        task: validated.tasks[taskId],
      };
      await logger.sse("task:updated", eventData);
-     logger.log("task:updated SSE event broadcasted successfully");
    } catch (error) {
      // Don't fail the write if SSE emission fails
      logger.error("Failed to emit task:updated SSE event:", error);
    }

-   logger.groupEnd();
    resultSnapshot = validated;
  })
  .catch((e) => {
@@ -196,12 +196,6 @@ export async function repairTaskSymlinks(taskDir, poRoot, taskModulePath) {
  const startTime = Date.now();

  try {
-   logger.log("Repairing task symlinks", {
-     taskDir,
-     poRoot,
-     taskModulePath,
-   });
-
    // Use existing ensureTaskSymlinkBridge for repairs
    const relocatedEntry = await ensureTaskSymlinkBridge({
      taskDir,
@@ -211,12 +205,6 @@ export async function repairTaskSymlinks(taskDir, poRoot, taskModulePath) {

  const duration = Date.now() - startTime;

- logger.log("Task symlinks repaired successfully", {
-   taskDir,
-   duration,
-   relocatedEntry,
- });
-
  return {
    success: true,
    relocatedEntry,
@@ -484,10 +484,6 @@ export async function runPipeline(modulePath, initialContext = {}) {

  // Skip stages when skipIf predicate returns true
  if (stageConfig.skipIf && stageConfig.skipIf(context.flags)) {
-   logger.log("Skipping stage", {
-     stage: stageName,
-     reason: "skipIf predicate returned true",
-   });
    context.logs.push({
      stage: stageName,
      action: "skipped",
@@ -499,7 +495,6 @@ export async function runPipeline(modulePath, initialContext = {}) {

  // Skip if handler is not available (not implemented)
  if (typeof stageHandler !== "function") {
-   logger.log("Stage not available, skipping", { stage: stageName });
    logs.push({
      stage: stageName,
      skipped: true,
@@ -526,11 +521,6 @@ export async function runPipeline(modulePath, initialContext = {}) {
  // Set current stage before execution
  context.currentStage = stageName;

- logger.log("Starting stage execution", {
-   stage: stageName,
-   taskName: context.meta.taskName,
- });
-
  // Write stage start status using writeJobStatus
  if (context.meta.workDir && context.meta.taskName) {
    try {
@@ -719,12 +709,6 @@ export async function runPipeline(modulePath, initialContext = {}) {
  );

  const ms = +(performance.now() - start).toFixed(2);
- logger.log("Stage completed successfully", {
-   stage: stageName,
-   executionTimeMs: ms,
-   outputType: typeof stageResult.output,
-   flagKeys: Object.keys(stageResult.flags),
- });
  logs.push({
    stage: stageName,
    ok: true,
@@ -819,13 +803,6 @@ export async function runPipeline(modulePath, initialContext = {}) {

  llmEvents.off("llm:request:complete", onLLMComplete);

- logger.log("Pipeline completed successfully", {
-   taskName: context.meta.taskName,
-   totalStages: PIPELINE_STAGES.length,
-   executedStages: logs.filter((l) => l.ok).length,
-   llmMetricsCount: llmMetrics.length,
- });
-
  // Write final status with currentStage: null to indicate completion
  if (context.meta.workDir && context.meta.taskName) {
    try {