task-summary-extractor 8.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2006 @@
1
+ /**
2
+ * Pipeline orchestrator — main processing flow.
3
+ *
4
+ * Compress → Upload → AI Segment Analysis → AI Final Compilation → JSON + MD output.
5
+ *
6
+ * Architecture: each pipeline phase is a separate function that receives
7
+ * a shared `ctx` (context) object. This makes phases independently testable
8
+ * and allows the main `run()` to read as a clean sequence of steps.
9
+ *
10
+ * v6 improvements:
11
+ * - Confidence Scoring: every extracted item gets HIGH/MEDIUM/LOW confidence
12
+ * - Multi-Pass Focused Re-extraction: targeted second pass for weak areas
13
+ * - Learning Loop: historical analysis to auto-adjust budgets and thresholds
14
+ * - Diff-Aware Compilation: delta report comparing against previous runs
15
+ * - Structured Logging: JSONL structured log with phase spans and metrics
16
+ * - Parallel Segment Analysis: process 2-3 segments concurrently
17
+ * - All v5 features retained: quality gate, adaptive budget, boundary detection, health dashboard
18
+ */
19
+
20
+ 'use strict';
21
+
22
+ const fs = require('fs');
23
+ const path = require('path');
24
+
25
+ // --- Config ---
26
+ const config = require('./config');
27
+ const {
28
+ VIDEO_EXTS, DOC_EXTS, SPEED, SEG_TIME, PRESET,
29
+ LOG_LEVEL, MAX_PARALLEL_UPLOADS, THINKING_BUDGET, COMPILATION_THINKING_BUDGET,
30
+ validateConfig, GEMINI_MODELS, setActiveModel, getActiveModelPricing,
31
+ } = config;
32
+
33
+ // --- Services ---
34
+ const { initFirebase, uploadToStorage, storageExists } = require('./services/firebase');
35
+ const { initGemini, prepareDocsForGemini, processWithGemini, compileFinalResult, analyzeVideoForContext, cleanupGeminiFiles } = require('./services/gemini');
36
+ const { compressAndSegment, probeFormat, verifySegment } = require('./services/video');
37
+ const { isGitAvailable, isGitRepo, initRepo } = require('./services/git');
38
+
39
+ // --- Utils ---
40
+ const { findDocsRecursive } = require('./utils/fs');
41
+ const { fmtDuration, fmtBytes } = require('./utils/format');
42
+ const { promptUser, promptUserText } = require('./utils/prompt');
43
+ const { parseArgs, showHelp, selectFolder, selectModel } = require('./utils/cli');
44
+ const { parallelMap } = require('./utils/retry');
45
+ const Progress = require('./utils/progress');
46
+ const CostTracker = require('./utils/cost-tracker');
47
+ const { assessQuality, formatQualityLine, getConfidenceStats, THRESHOLDS } = require('./utils/quality-gate');
48
+ const { calculateThinkingBudget, calculateCompilationBudget } = require('./utils/adaptive-budget');
49
+ const { detectBoundaryContext, sliceVttForSegment } = require('./utils/context-manager');
50
+ const { buildHealthReport, printHealthDashboard } = require('./utils/health-dashboard');
51
+ const { identifyWeaknesses, runFocusedPass, mergeFocusedResults } = require('./utils/focused-reanalysis');
52
+ const { loadHistory, saveHistory, buildHistoryEntry, analyzeHistory, printLearningInsights } = require('./utils/learning-loop');
53
+ const { loadPreviousCompilation, generateDiff, renderDiffMarkdown } = require('./utils/diff-engine');
54
+ const { detectAllChanges, serializeReport } = require('./utils/change-detector');
55
+ const { assessProgressLocal, assessProgressWithAI, mergeProgressIntoAnalysis, buildProgressSummary, renderProgressMarkdown, STATUS_ICONS } = require('./utils/progress-updater');
56
+ const { discoverTopics, generateAllDocuments, writeDeepDiveOutput } = require('./utils/deep-dive');
57
+ const { planTopics, generateAllDynamicDocuments, writeDynamicOutput } = require('./utils/dynamic-mode');
58
+ const { promptForKey } = require('./utils/global-config');
59
+
60
+ // --- Renderers ---
61
+ const { renderResultsMarkdown } = require('./renderers/markdown');
62
+
63
+ // --- Logger ---
64
+ const Logger = require('./logger');
65
+
66
+ // Global reference — set in run()
67
+ let log = null;
68
+
69
+ // Graceful shutdown flag
70
+ let shuttingDown = false;
71
+
72
+ // ======================== PROJECT ROOT ========================
73
+ // PKG_ROOT = where the package is installed (for reading prompt.json, package.json)
74
+ // PROJECT_ROOT = where the user runs from (CWD) — logs, history, gemini_runs go here
75
+ const PKG_ROOT = path.resolve(__dirname, '..');
76
+ const PROJECT_ROOT = process.cwd();
77
+
78
+ // ======================== PHASE HELPERS ========================
79
+
80
/**
 * Create a timing wrapper for phase profiling — also writes structured log spans.
 *
 * Opens a phase span on the module-level `log` (if it exists and exposes
 * `phaseStart`) and returns a handle whose `end(meta)` closes the span,
 * records the elapsed milliseconds, and emits a human-readable step line.
 *
 * @param {string} phaseName - Name of the pipeline phase being timed.
 * @returns {{ end: (meta?: object) => number }} Handle; `end` returns elapsed ms.
 */
function phaseTimer(phaseName) {
  const startedAt = Date.now();
  if (log && log.phaseStart) log.phaseStart(phaseName);

  // `end` merges caller-supplied metadata with the measured duration before
  // handing it to the structured logger. NOTE(review): `phaseEnd` is not given
  // the phase name here — presumably the Logger tracks the open span itself;
  // confirm against ./logger.
  const end = (meta = {}) => {
    const elapsedMs = Date.now() - startedAt;
    if (log && log.phaseEnd) log.phaseEnd({ ...meta, durationMs: elapsedMs });
    if (log) log.step(`PHASE ${phaseName} completed in ${(elapsedMs / 1000).toFixed(1)}s`);
    return elapsedMs;
  };

  return { end };
}
93
+
94
+ // ======================== PHASE: INIT ========================
95
+
96
+ /**
97
+ * Parse CLI args, validate config, initialize logger, set up shutdown handlers.
98
+ * Returns the pipeline context object shared by all phases.
99
+ */
100
+ async function phaseInit() {
101
+ const { flags, positional } = parseArgs(process.argv.slice(2));
102
+
103
+ if (flags.help || flags.h) showHelp();
104
+ if (flags.version || flags.v) {
105
+ const pkg = JSON.parse(fs.readFileSync(path.join(PKG_ROOT, 'package.json'), 'utf8'));
106
+ process.stdout.write(`v${pkg.version}\n`);
107
+ throw Object.assign(new Error('VERSION_SHOWN'), { code: 'VERSION_SHOWN' });
108
+ }
109
+
110
+ const opts = {
111
+ skipUpload: !!flags['skip-upload'],
112
+ forceUpload: !!flags['force-upload'],
113
+ noStorageUrl: !!flags['no-storage-url'],
114
+ skipCompression: !!flags['skip-compression'],
115
+ skipGemini: !!flags['skip-gemini'],
116
+ resume: !!flags.resume,
117
+ reanalyze: !!flags.reanalyze,
118
+ dryRun: !!flags['dry-run'],
119
+ userName: flags.name || null,
120
+ parallel: parseInt(flags.parallel, 10) || MAX_PARALLEL_UPLOADS,
121
+ logLevel: flags['log-level'] || LOG_LEVEL,
122
+ outputDir: flags.output || null,
123
+ thinkingBudget: parseInt(flags['thinking-budget'], 10) || THINKING_BUDGET,
124
+ compilationThinkingBudget: parseInt(flags['compilation-thinking-budget'], 10) || COMPILATION_THINKING_BUDGET,
125
+ parallelAnalysis: parseInt(flags['parallel-analysis'], 10) || 2, // concurrent segment analysis
126
+ disableFocusedPass: !!flags['no-focused-pass'],
127
+ disableLearning: !!flags['no-learning'],
128
+ disableDiff: !!flags['no-diff'],
129
+ deepDive: !!flags['deep-dive'],
130
+ dynamic: !!flags.dynamic,
131
+ request: typeof flags.request === 'string' ? flags.request : null,
132
+ updateProgress: !!flags['update-progress'],
133
+ repoPath: flags.repo || null,
134
+ model: typeof flags.model === 'string' ? flags.model : null,
135
+ };
136
+
137
+ // --- Resolve folder: positional arg or interactive selection ---
138
+ let folderArg = positional[0];
139
+ if (!folderArg) {
140
+ folderArg = await selectFolder(PROJECT_ROOT);
141
+ if (!folderArg) {
142
+ showHelp();
143
+ }
144
+ }
145
+
146
+ const targetDir = path.resolve(folderArg);
147
+ if (!fs.existsSync(targetDir) || !fs.statSync(targetDir).isDirectory()) {
148
+ throw new Error(`"${targetDir}" is not a valid folder. Check the path and try again.`);
149
+ }
150
+
151
+ // --- Validate configuration (with first-run recovery) ---
152
+ let configCheck = validateConfig({
153
+ skipFirebase: opts.skipUpload,
154
+ skipGemini: opts.skipGemini,
155
+ });
156
+
157
+ // First-run experience: if GEMINI_API_KEY is missing, prompt interactively
158
+ if (!configCheck.valid && !opts.skipGemini && !config.GEMINI_API_KEY) {
159
+ const key = await promptForKey('GEMINI_API_KEY');
160
+ if (key) {
161
+ // Re-validate after user provided the key
162
+ configCheck = validateConfig({
163
+ skipFirebase: opts.skipUpload,
164
+ skipGemini: opts.skipGemini,
165
+ });
166
+ }
167
+ }
168
+
169
+ if (!configCheck.valid) {
170
+ console.error('\n Configuration errors:');
171
+ configCheck.errors.forEach(e => console.error(` ✗ ${e}`));
172
+ console.error('\n Fix these via:');
173
+ console.error(' • taskex config (save globally for all projects)');
174
+ console.error(' • .env file (project-specific config)');
175
+ console.error(' • --gemini-key <key> (one-time inline)\n');
176
+ throw new Error('Invalid configuration. See errors above.');
177
+ }
178
+
179
+ // --- Initialize logger ---
180
+ const logsDir = path.join(PROJECT_ROOT, 'logs');
181
+ log = new Logger(logsDir, path.basename(targetDir), { level: opts.logLevel });
182
+ log.patchConsole();
183
+ log.step(`START processing "${path.basename(targetDir)}"`);
184
+
185
+ // --- Learning Loop: load historical insights ---
186
+ let learningInsights = { hasData: false, budgetAdjustment: 0, compilationBudgetAdjustment: 0 };
187
+ if (!opts.disableLearning) {
188
+ const history = loadHistory(PROJECT_ROOT);
189
+ learningInsights = analyzeHistory(history);
190
+ if (learningInsights.hasData) {
191
+ printLearningInsights(learningInsights);
192
+ // Apply budget adjustments from learning
193
+ if (learningInsights.budgetAdjustment !== 0) {
194
+ opts.thinkingBudget = Math.max(8192, opts.thinkingBudget + learningInsights.budgetAdjustment);
195
+ log.step(`Learning: adjusted thinking budget → ${opts.thinkingBudget}`);
196
+ }
197
+ if (learningInsights.compilationBudgetAdjustment !== 0) {
198
+ opts.compilationThinkingBudget = Math.max(8192, opts.compilationThinkingBudget + learningInsights.compilationBudgetAdjustment);
199
+ log.step(`Learning: adjusted compilation budget → ${opts.compilationThinkingBudget}`);
200
+ }
201
+ }
202
+ }
203
+
204
+ // --- Graceful shutdown handler ---
205
+ const shutdown = (signal) => {
206
+ if (shuttingDown) return;
207
+ shuttingDown = true;
208
+ console.warn(`\n ⚠ Received ${signal} — shutting down gracefully...`);
209
+ log.step(`SHUTDOWN requested (${signal})`);
210
+ log.close();
211
+ };
212
+ process.on('SIGINT', () => shutdown('SIGINT'));
213
+ process.on('SIGTERM', () => shutdown('SIGTERM'));
214
+
215
+ // --- Model selection ---
216
+ if (opts.model) {
217
+ // CLI flag: --model <id> — validate and activate
218
+ setActiveModel(opts.model);
219
+ log.step(`Model set via flag: ${config.GEMINI_MODEL}`);
220
+ } else {
221
+ // Interactive model selection
222
+ const chosenModel = await selectModel(GEMINI_MODELS, config.GEMINI_MODEL);
223
+ setActiveModel(chosenModel);
224
+ log.step(`Model selected: ${config.GEMINI_MODEL}`);
225
+ }
226
+
227
+ // --- Initialize progress tracking ---
228
+ const progress = new Progress(targetDir);
229
+ const costTracker = new CostTracker(getActiveModelPricing());
230
+
231
+ return { opts, targetDir, progress, costTracker };
232
+ }
233
+
234
+ // ======================== PHASE: DISCOVER ========================
235
+
236
+ /**
237
+ * Discover videos and documents, resolve user name, show banner.
238
+ * Returns augmented ctx with videoFiles, allDocFiles, userName.
239
+ */
240
+ async function phaseDiscover(ctx) {
241
+ const timer = phaseTimer('discover');
242
+ const { opts, targetDir, progress } = ctx;
243
+
244
+ console.log('');
245
+ console.log('==============================================');
246
+ console.log(' Video Compress → Upload → AI Process');
247
+ console.log('==============================================');
248
+
249
+ // Show active flags
250
+ const activeFlags = [];
251
+ if (opts.skipUpload) activeFlags.push('skip-upload');
252
+ if (opts.forceUpload) activeFlags.push('force-upload');
253
+ if (opts.noStorageUrl) activeFlags.push('no-storage-url');
254
+ if (opts.skipCompression) activeFlags.push('skip-compression');
255
+ if (opts.skipGemini) activeFlags.push('skip-gemini');
256
+ if (opts.resume) activeFlags.push('resume');
257
+ if (opts.reanalyze) activeFlags.push('reanalyze');
258
+ if (opts.dryRun) activeFlags.push('dry-run');
259
+ if (activeFlags.length > 0) {
260
+ console.log(` Flags: ${activeFlags.join(', ')}`);
261
+ }
262
+ console.log('');
263
+
264
+ // --- Resume check ---
265
+ if (opts.resume && progress.hasResumableState()) {
266
+ progress.printResumeSummary();
267
+ console.log('');
268
+ }
269
+
270
+ // --- Ask for user's name (or use --name flag) ---
271
+ let userName = opts.userName;
272
+ if (!userName) {
273
+ if (opts.resume && progress.state.userName) {
274
+ userName = progress.state.userName;
275
+ console.log(` Using saved name: ${userName}`);
276
+ } else {
277
+ userName = await promptUserText(' Your name (for task assignment detection): ');
278
+ }
279
+ }
280
+ if (!userName) {
281
+ throw new Error('Name is required for personalized analysis. Use --name "Your Name" or enter it when prompted.');
282
+ }
283
+ log.step(`User identified as: ${userName}`);
284
+
285
+ // --- Find video files ---
286
+ let videoFiles = fs.readdirSync(targetDir)
287
+ .filter(f => {
288
+ const stat = fs.statSync(path.join(targetDir, f));
289
+ return stat.isFile() && VIDEO_EXTS.includes(path.extname(f).toLowerCase());
290
+ })
291
+ .map(f => path.join(targetDir, f));
292
+
293
+ if (videoFiles.length === 0) {
294
+ throw new Error('No video files found (mp4/mkv/avi/mov/webm). Check that the folder contains video files.');
295
+ }
296
+
297
+ // --- Find ALL document files recursively ---
298
+ const allDocFiles = findDocsRecursive(targetDir, DOC_EXTS);
299
+
300
+ console.log('');
301
+ console.log(` User : ${userName}`);
302
+ console.log(` Source : ${targetDir}`);
303
+ console.log(` Videos : ${videoFiles.length}`);
304
+ console.log(` Docs : ${allDocFiles.length}`);
305
+ console.log(` Speed : ${SPEED}x`);
306
+ console.log(` Segments: < 5 min each (${SEG_TIME}s)`);
307
+ console.log(` Model : ${config.GEMINI_MODEL}`);
308
+ console.log(` Parallel: ${opts.parallel} concurrent uploads`);
309
+ console.log(` Thinking: ${opts.thinkingBudget} tokens (analysis) / ${opts.compilationThinkingBudget} tokens (compilation)`);
310
+ console.log('');
311
+
312
+ // Save progress init
313
+ progress.init(path.basename(targetDir), userName);
314
+
315
+ console.log(` Found ${videoFiles.length} video file(s):`);
316
+ videoFiles.forEach((f, i) => console.log(` [${i + 1}] ${path.basename(f)}`));
317
+
318
+ // If multiple video files found, let user select which to process
319
+ if (videoFiles.length > 1) {
320
+ console.log('');
321
+ const selectionInput = await promptUserText(` Which files to process? (comma-separated numbers, or "all", default: all): `);
322
+ const trimmed = (selectionInput || '').trim().toLowerCase();
323
+ if (trimmed && trimmed !== 'all') {
324
+ const indices = trimmed.split(',').map(s => parseInt(s.trim(), 10) - 1).filter(n => !isNaN(n) && n >= 0 && n < videoFiles.length);
325
+ if (indices.length > 0) {
326
+ videoFiles = indices.map(i => videoFiles[i]);
327
+ console.log(` → Processing ${videoFiles.length} selected file(s):`);
328
+ videoFiles.forEach(f => console.log(` - ${path.basename(f)}`));
329
+ } else {
330
+ console.log(' → Invalid selection, processing all files');
331
+ }
332
+ } else {
333
+ console.log(' → Processing all video files');
334
+ }
335
+ }
336
+ log.step(`Found ${videoFiles.length} video(s): ${videoFiles.map(f => path.basename(f)).join(', ')}`);
337
+ console.log('');
338
+
339
+ if (allDocFiles.length > 0) {
340
+ console.log(` Found ${allDocFiles.length} document(s) for context (recursive):`);
341
+ allDocFiles.forEach(f => console.log(` - ${f.relPath}`));
342
+ console.log('');
343
+ }
344
+
345
+ timer.end();
346
+ return { ...ctx, videoFiles, allDocFiles, userName };
347
+ }
348
+
349
+ // ======================== PHASE: SERVICES ========================
350
+
351
+ /**
352
+ * Initialize Firebase and Gemini services, prepare context documents.
353
+ * Returns augmented ctx with storage, firebaseReady, ai, contextDocs.
354
+ */
355
+ async function phaseServices(ctx) {
356
+ const timer = phaseTimer('services');
357
+ const { opts, allDocFiles } = ctx;
358
+ const callName = path.basename(ctx.targetDir);
359
+
360
+ console.log('Initializing services...');
361
+
362
+ let storage = null;
363
+ let firebaseReady = false;
364
+ if (!opts.skipUpload && !opts.dryRun) {
365
+ const fb = await initFirebase();
366
+ storage = fb.storage;
367
+ firebaseReady = fb.authenticated;
368
+ } else if (opts.skipUpload) {
369
+ console.log(' Firebase: skipped (--skip-upload)');
370
+ } else {
371
+ console.log(' Firebase: skipped (--dry-run)');
372
+ }
373
+
374
+ let ai = null;
375
+ if (!opts.skipGemini && !opts.dryRun) {
376
+ ai = await initGemini();
377
+ console.log(' Gemini AI: ready');
378
+ } else if (opts.skipGemini) {
379
+ console.log(' Gemini AI: skipped (--skip-gemini)');
380
+ } else {
381
+ console.log(' Gemini AI: skipped (--dry-run)');
382
+ }
383
+
384
+ log.step(`Services: Firebase auth=${firebaseReady}, Gemini=${ai ? 'ready' : 'skipped'}`);
385
+
386
+ // --- Prepare documents for Gemini ---
387
+ let contextDocs = [];
388
+ if (ai) {
389
+ contextDocs = await prepareDocsForGemini(ai, allDocFiles);
390
+ } else if (allDocFiles.length > 0) {
391
+ console.log(` ⚠ Skipping Gemini doc preparation (AI not active)`);
392
+ contextDocs = allDocFiles
393
+ .filter(({ absPath }) => ['.txt', '.md', '.vtt', '.srt', '.csv', '.json', '.xml', '.html']
394
+ .includes(path.extname(absPath).toLowerCase()))
395
+ .map(({ absPath, relPath }) => ({
396
+ type: 'inlineText',
397
+ fileName: relPath,
398
+ content: fs.readFileSync(absPath, 'utf8'),
399
+ }));
400
+ }
401
+
402
+ // --- Upload documents to Firebase Storage for archival ---
403
+ const docStorageUrls = {};
404
+ if (firebaseReady && !opts.skipUpload) {
405
+ await parallelMap(allDocFiles, async ({ absPath: docPath, relPath }) => {
406
+ if (shuttingDown) return;
407
+ const docStoragePath = `calls/${callName}/documents/${relPath}`;
408
+ try {
409
+ if (!opts.forceUpload) {
410
+ const existingUrl = await storageExists(storage, docStoragePath);
411
+ if (existingUrl) {
412
+ docStorageUrls[relPath] = existingUrl;
413
+ console.log(` ✓ Document already in Storage → ${docStoragePath}`);
414
+ return;
415
+ }
416
+ }
417
+ const url = await uploadToStorage(storage, docPath, docStoragePath);
418
+ docStorageUrls[relPath] = url;
419
+ console.log(` ✓ Document ${opts.forceUpload ? '(re-uploaded)' : '→'} ${docStoragePath}`);
420
+ } catch (err) {
421
+ console.warn(` ⚠ Document upload failed (${relPath}): ${err.message}`);
422
+ }
423
+ }, opts.parallel);
424
+ } else if (opts.skipUpload) {
425
+ console.log(' ⚠ Skipping document uploads (--skip-upload)');
426
+ } else {
427
+ console.log(' ⚠ Skipping document uploads (Firebase auth not configured)');
428
+ }
429
+ console.log('');
430
+
431
+ timer.end();
432
+ return { ...ctx, storage, firebaseReady, ai, contextDocs, docStorageUrls, callName };
433
+ }
434
+
435
+ // ======================== PHASE: PROCESS VIDEO ========================
436
+
437
+ /**
438
+ * Process a single video: compress → upload segments → analyze with Gemini.
439
+ * Returns { fileResult, segmentAnalyses }.
440
+ */
441
+ async function phaseProcessVideo(ctx, videoPath, videoIndex) {
442
+ const {
443
+ opts, callName, storage, firebaseReady, ai, contextDocs,
444
+ progress, costTracker, userName,
445
+ } = ctx;
446
+
447
+ const baseName = path.basename(videoPath, path.extname(videoPath));
448
+ const compressedDir = path.join(ctx.targetDir, 'compressed');
449
+
450
+ console.log('──────────────────────────────────────────────');
451
+ console.log(`[${videoIndex + 1}/${ctx.videoFiles.length}] ${path.basename(videoPath)}`);
452
+ console.log('──────────────────────────────────────────────');
453
+
454
+ // ---- Compress & Segment ----
455
+ log.step(`Compressing "${path.basename(videoPath)}"`);
456
+ const segmentDir = path.join(compressedDir, baseName);
457
+ let segments;
458
+ const existingSegments = fs.existsSync(segmentDir)
459
+ ? fs.readdirSync(segmentDir).filter(f => f.startsWith('segment_') && f.endsWith('.mp4')).sort()
460
+ : [];
461
+
462
+ if (opts.skipCompression || opts.dryRun) {
463
+ if (existingSegments.length > 0) {
464
+ segments = existingSegments.map(f => path.join(segmentDir, f));
465
+ console.log(` ✓ Using ${segments.length} existing segment(s) (${opts.dryRun ? '--dry-run' : '--skip-compression'})`);
466
+ } else {
467
+ console.warn(` ⚠ No existing segments found — cannot skip compression for "${baseName}"`);
468
+ if (opts.dryRun) {
469
+ console.log(` [DRY-RUN] Would compress "${path.basename(videoPath)}" into segments`);
470
+ return { fileResult: null, segmentAnalyses: [] };
471
+ }
472
+ segments = compressAndSegment(videoPath, segmentDir);
473
+ log.step(`Compressed → ${segments.length} segment(s)`);
474
+ }
475
+ } else if (existingSegments.length > 0) {
476
+ segments = existingSegments.map(f => path.join(segmentDir, f));
477
+ log.step(`SKIP compression — ${segments.length} segment(s) already on disk`);
478
+ console.log(` ✓ Skipped compression — ${segments.length} segment(s) already exist`);
479
+ } else {
480
+ segments = compressAndSegment(videoPath, segmentDir);
481
+ log.step(`Compressed → ${segments.length} segment(s)`);
482
+ console.log(` → ${segments.length} segment(s) created`);
483
+ }
484
+
485
+ progress.markCompressed(baseName, segments.length);
486
+ const origSize = fs.statSync(videoPath).size;
487
+ log.step(`original=${(origSize / 1048576).toFixed(2)}MB (${fmtBytes(origSize)}) | ${segments.length} segment(s)`);
488
+ console.log('');
489
+
490
+ const fileResult = {
491
+ originalFile: path.basename(videoPath),
492
+ originalSizeMB: (origSize / 1048576).toFixed(2),
493
+ segmentCount: segments.length,
494
+ segments: [],
495
+ };
496
+
497
+ // ---- Pre-validate all segments before sending to Gemini ----
498
+ if (!opts.skipGemini && !opts.dryRun) {
499
+ const invalidSegs = segments.filter(s => !verifySegment(s));
500
+ if (invalidSegs.length > 0) {
501
+ console.warn(` ⚠ Pre-validation: ${invalidSegs.length}/${segments.length} segment(s) are corrupt:`);
502
+ invalidSegs.forEach(s => console.warn(` ✗ ${path.basename(s)}`));
503
+ console.warn(` → Corrupt segments will be skipped during analysis.`);
504
+ console.warn(` → Delete "${segmentDir}" and re-run to re-compress.`);
505
+ log.warn(`Pre-validation: ${invalidSegs.length} corrupt segments in ${baseName}`);
506
+ }
507
+ }
508
+
509
+ // ---- Upload all segments to Firebase (parallel) ----
510
+ progress.setPhase('upload');
511
+ const segmentMeta = [];
512
+
513
+ if (!opts.skipUpload && firebaseReady && !opts.dryRun) {
514
+ const metaList = segments.map((segPath) => {
515
+ const segName = path.basename(segPath);
516
+ const storagePath = `calls/${callName}/segments/${baseName}/${segName}`;
517
+ const durStr = probeFormat(segPath, 'duration');
518
+ const durSec = durStr ? parseFloat(durStr) : null;
519
+ const sizeMB = (fs.statSync(segPath).size / 1048576).toFixed(2);
520
+ return { segPath, segName, storagePath, durSec, sizeMB, storageUrl: null };
521
+ });
522
+
523
+ await parallelMap(metaList, async (meta, j) => {
524
+ if (shuttingDown) return;
525
+ console.log(` ── Segment ${j + 1}/${segments.length}: ${meta.segName} (upload) ──`);
526
+ console.log(` Duration: ${fmtDuration(meta.durSec)} | Size: ${meta.sizeMB} MB`);
527
+
528
+ const resumedUrl = progress.getUploadUrl(meta.storagePath);
529
+ if (resumedUrl && opts.resume) {
530
+ meta.storageUrl = resumedUrl;
531
+ console.log(` ✓ Upload resumed from checkpoint`);
532
+ return;
533
+ }
534
+
535
+ try {
536
+ if (!opts.forceUpload) {
537
+ const existingUrl = await storageExists(storage, meta.storagePath);
538
+ if (existingUrl) {
539
+ meta.storageUrl = existingUrl;
540
+ log.step(`SKIP upload — ${meta.segName} already in Storage`);
541
+ console.log(` ✓ Already in Storage → ${meta.storagePath}`);
542
+ progress.markUploaded(meta.storagePath, meta.storageUrl);
543
+ return;
544
+ }
545
+ }
546
+ console.log(` ${opts.forceUpload ? 'Re-uploading' : 'Uploading'} to Firebase Storage...`);
547
+ meta.storageUrl = await uploadToStorage(storage, meta.segPath, meta.storagePath);
548
+ console.log(` ✓ ${opts.forceUpload ? 'Re-uploaded' : 'Uploaded'} → ${meta.storagePath}`);
549
+ log.step(`Upload OK: ${meta.segName} → ${meta.storagePath}`);
550
+ progress.markUploaded(meta.storagePath, meta.storageUrl);
551
+ } catch (err) {
552
+ console.error(` ✗ Firebase upload failed: ${err.message}`);
553
+ log.error(`Upload FAIL: ${meta.segName} — ${err.message}`);
554
+ }
555
+ }, opts.parallel);
556
+
557
+ segmentMeta.push(...metaList);
558
+ } else {
559
+ for (let j = 0; j < segments.length; j++) {
560
+ const segPath = segments[j];
561
+ const segName = path.basename(segPath);
562
+ const storagePath = `calls/${callName}/segments/${baseName}/${segName}`;
563
+ const durStr = probeFormat(segPath, 'duration');
564
+ const durSec = durStr ? parseFloat(durStr) : null;
565
+ const sizeMB = (fs.statSync(segPath).size / 1048576).toFixed(2);
566
+
567
+ console.log(` ── Segment ${j + 1}/${segments.length}: ${segName} ──`);
568
+ console.log(` Duration: ${fmtDuration(durSec)} | Size: ${sizeMB} MB`);
569
+ if (opts.skipUpload) console.log(` ⚠ Upload skipped (--skip-upload)`);
570
+
571
+ segmentMeta.push({ segPath, segName, storagePath, storageUrl: null, durSec, sizeMB });
572
+ }
573
+ }
574
+
575
+ // Calculate cumulative time offsets for VTT time-slicing
576
+ let cumulativeTimeSec = 0;
577
+ for (const meta of segmentMeta) {
578
+ meta.startTimeSec = cumulativeTimeSec;
579
+ meta.endTimeSec = cumulativeTimeSec + (meta.durSec || 0) * SPEED;
580
+ cumulativeTimeSec = meta.endTimeSec;
581
+ }
582
+
583
+ console.log('');
584
+ log.step(`All ${segments.length} segment(s) processed. Starting Gemini analysis...`);
585
+ console.log('');
586
+
587
+ // ---- Analyze all segments with Gemini ----
588
+ progress.setPhase('analyze');
589
+ const geminiRunsDir = path.join(PROJECT_ROOT, 'gemini_runs', callName, baseName);
590
+ fs.mkdirSync(geminiRunsDir, { recursive: true });
591
+
592
+ let forceReanalyze = opts.reanalyze;
593
+ if (!forceReanalyze && !opts.skipGemini && !opts.dryRun) {
594
+ const allExistingRuns = fs.readdirSync(geminiRunsDir).filter(f => f.endsWith('.json'));
595
+ if (allExistingRuns.length > 0) {
596
+ console.log(` Found ${allExistingRuns.length} existing Gemini run file(s) in:`);
597
+ console.log(` ${geminiRunsDir}`);
598
+ console.log('');
599
+ if (!opts.resume) {
600
+ forceReanalyze = await promptUser(' Re-analyze all segments? (y/n, default: n): ');
601
+ }
602
+ if (forceReanalyze) {
603
+ console.log(' → Will re-analyze all segments (previous runs preserved with timestamps)');
604
+ log.step('User chose to re-analyze all segments');
605
+ } else {
606
+ console.log(' → Using cached results where available');
607
+ }
608
+ console.log('');
609
+ }
610
+ }
611
+
612
+ const previousAnalyses = [];
613
+ const segmentAnalyses = [];
614
+ const segmentReports = []; // Quality reports for health dashboard
615
+
616
+ for (let j = 0; j < segments.length; j++) {
617
+ if (shuttingDown) break;
618
+
619
+ const { segPath, segName, storagePath, storageUrl, durSec, sizeMB } = segmentMeta[j];
620
+
621
+ console.log(` ── Segment ${j + 1}/${segments.length}: ${segName} (AI) ──`);
622
+
623
+ if (opts.skipGemini) {
624
+ console.log(` ⚠ Skipped (--skip-gemini)`);
625
+ fileResult.segments.push({
626
+ segmentFile: segName, segmentIndex: j,
627
+ storagePath, storageUrl,
628
+ duration: fmtDuration(durSec), durationSeconds: durSec,
629
+ fileSizeMB: parseFloat(sizeMB), geminiRunFile: null, analysis: null,
630
+ });
631
+ console.log('');
632
+ continue;
633
+ }
634
+
635
+ if (opts.dryRun) {
636
+ console.log(` [DRY-RUN] Would analyze with ${config.GEMINI_MODEL}`);
637
+ fileResult.segments.push({
638
+ segmentFile: segName, segmentIndex: j,
639
+ storagePath, storageUrl,
640
+ duration: fmtDuration(durSec), durationSeconds: durSec,
641
+ fileSizeMB: parseFloat(sizeMB), geminiRunFile: null, analysis: null,
642
+ });
643
+ console.log('');
644
+ continue;
645
+ }
646
+
647
+ const runPrefix = `segment_${String(j).padStart(2, '0')}_`;
648
+ const existingRuns = fs.readdirSync(geminiRunsDir)
649
+ .filter(f => f.startsWith(runPrefix) && f.endsWith('.json'))
650
+ .sort();
651
+ const latestRunFile = existingRuns.length > 0 ? existingRuns[existingRuns.length - 1] : null;
652
+ const latestRunPath = latestRunFile ? path.join(geminiRunsDir, latestRunFile) : null;
653
+
654
+ let analysis = null;
655
+ let geminiRunFile = null;
656
+
657
+ // Skip if valid run exists and user didn't choose to re-analyze
658
+ if (!forceReanalyze && latestRunPath && fs.existsSync(latestRunPath)) {
659
+ try {
660
+ const existingRun = JSON.parse(fs.readFileSync(latestRunPath, 'utf8'));
661
+ geminiRunFile = path.relative(PROJECT_ROOT, path.join(geminiRunsDir, latestRunFile));
662
+ analysis = existingRun.output.parsed || { rawResponse: existingRun.output.raw };
663
+ analysis._geminiMeta = {
664
+ model: existingRun.run.model,
665
+ processedAt: existingRun.run.timestamp,
666
+ durationMs: existingRun.run.durationMs,
667
+ tokenUsage: existingRun.run.tokenUsage || null,
668
+ runFile: geminiRunFile,
669
+ parseSuccess: existingRun.output.parseSuccess,
670
+ skipped: true,
671
+ };
672
+ previousAnalyses.push(analysis);
673
+ // Track cached run costs too
674
+ if (existingRun.run.tokenUsage) {
675
+ costTracker.addSegment(segName, existingRun.run.tokenUsage, existingRun.run.durationMs, true);
676
+ }
677
+
678
+ // Quality gate on cached results
679
+ const cachedQuality = assessQuality(analysis, {
680
+ parseSuccess: existingRun.output.parseSuccess,
681
+ rawLength: (existingRun.output.raw || '').length,
682
+ });
683
+ segmentReports.push({ segmentName: segName, qualityReport: cachedQuality, retried: false, retryImproved: false });
684
+ console.log(formatQualityLine(cachedQuality, segName));
685
+
686
+ const ticketCount = analysis.tickets ? analysis.tickets.length : 0;
687
+ log.step(`SKIP Gemini — ${segName} already analyzed (${ticketCount} ticket(s), quality: ${cachedQuality.score}/100)`);
688
+ console.log(` ✓ Already analyzed — loaded from ${latestRunFile}`);
689
+ } catch (err) {
690
+ console.warn(` ⚠ Existing run file corrupt, re-analyzing: ${err.message}`);
691
+ analysis = null;
692
+ }
693
+ }
694
+
695
+ if (!analysis) {
696
+ // Pre-flight: verify segment is a valid MP4
697
+ if (!verifySegment(segPath)) {
698
+ console.error(` ✗ Segment "${segName}" is corrupt (missing moov atom / unreadable).`);
699
+ console.error(` → Delete "${path.dirname(segPath)}" and re-run to re-compress.`);
700
+ log.error(`Segment corrupt: ${segName} — skipping Gemini`);
701
+ analysis = { error: `Segment file corrupt: ${segName}` };
702
+ fileResult.segments.push({
703
+ segmentFile: segName, segmentIndex: j,
704
+ storagePath, storageUrl,
705
+ duration: fmtDuration(durSec), durationSeconds: durSec,
706
+ fileSizeMB: parseFloat(sizeMB), geminiRunFile: null, analysis,
707
+ });
708
+ console.log('');
709
+ continue;
710
+ }
711
+
712
+ // === ADAPTIVE THINKING BUDGET ===
713
+ // Find VTT content for this segment for complexity analysis
714
+ let vttContentForAnalysis = '';
715
+ for (const doc of contextDocs) {
716
+ if (doc.type === 'inlineText' && (doc.fileName.endsWith('.vtt') || doc.fileName.endsWith('.srt'))) {
717
+ if (segmentMeta[j].startTimeSec != null && segmentMeta[j].endTimeSec != null) {
718
+ vttContentForAnalysis = sliceVttForSegment(doc.content, segmentMeta[j].startTimeSec, segmentMeta[j].endTimeSec);
719
+ } else {
720
+ vttContentForAnalysis = doc.content;
721
+ }
722
+ break;
723
+ }
724
+ }
725
+
726
+ const budgetResult = calculateThinkingBudget({
727
+ segmentIndex: j,
728
+ totalSegments: segments.length,
729
+ previousAnalyses,
730
+ contextDocs,
731
+ vttContent: vttContentForAnalysis,
732
+ baseBudget: opts.thinkingBudget,
733
+ });
734
+ const adaptiveBudget = budgetResult.budget;
735
+ console.log(` Thinking budget: ${adaptiveBudget.toLocaleString()} tokens (${budgetResult.reason})`);
736
+ if (budgetResult.complexity.complexityScore > 0) {
737
+ log.debug(`Segment ${j} complexity: ${budgetResult.complexity.complexityScore}/100 — words:${budgetResult.complexity.wordCount} speakers:${budgetResult.complexity.speakerCount} tech:${budgetResult.complexity.hasTechnicalTerms}`);
738
+ }
739
+
740
+ // === SMART BOUNDARY CONTEXT ===
741
+ const prevAnalysis = previousAnalyses.length > 0 ? previousAnalyses[previousAnalyses.length - 1] : null;
742
+ const boundaryCtx = detectBoundaryContext(
743
+ vttContentForAnalysis,
744
+ segmentMeta[j].startTimeSec || 0,
745
+ segmentMeta[j].endTimeSec || 0,
746
+ j,
747
+ prevAnalysis
748
+ );
749
+
750
+ // === FIRST ATTEMPT ===
751
+ let retried = false;
752
+ let retryImproved = false;
753
+ let geminiFileUri = null; // Gemini File API URI — reused for retry + focused pass
754
+ let geminiFileMime = null;
755
+ let geminiFileName = null; // Gemini resource name — needed for cleanup
756
+
757
+ try {
758
+ const geminiRun = await processWithGemini(
759
+ ai, segPath,
760
+ `${callName}_${baseName}_seg${String(j).padStart(2, '0')}`,
761
+ contextDocs,
762
+ previousAnalyses,
763
+ userName,
764
+ PKG_ROOT,
765
+ {
766
+ segmentIndex: j,
767
+ totalSegments: segments.length,
768
+ segmentStartSec: segmentMeta[j].startTimeSec,
769
+ segmentEndSec: segmentMeta[j].endTimeSec,
770
+ thinkingBudget: adaptiveBudget,
771
+ boundaryContext: boundaryCtx,
772
+ storageDownloadUrl: opts.noStorageUrl ? null : (storageUrl || null),
773
+ }
774
+ );
775
+
776
+ const ts = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
777
+ const runFileName = `segment_${String(j).padStart(2, '0')}_${ts}.json`;
778
+ const runFilePath = path.join(geminiRunsDir, runFileName);
779
+ fs.writeFileSync(runFilePath, JSON.stringify(geminiRun, null, 2), 'utf8');
780
+ geminiRunFile = path.relative(PROJECT_ROOT, runFilePath);
781
+ log.debug(`Gemini model run saved → ${runFilePath}`);
782
+
783
+ // Capture Gemini File API URI for reuse in retry / focused pass
784
+ // When external URL was used, fileUri IS the storage URL — reuse it the same way
785
+ geminiFileUri = geminiRun.input.videoFile.fileUri;
786
+ geminiFileMime = geminiRun.input.videoFile.mimeType;
787
+ geminiFileName = geminiRun.input.videoFile.geminiFileName || null;
788
+ const usedExternalUrl = geminiRun.input.videoFile.usedExternalUrl || false;
789
+
790
+ analysis = geminiRun.output.parsed || { rawResponse: geminiRun.output.raw };
791
+ analysis._geminiMeta = {
792
+ model: geminiRun.run.model,
793
+ processedAt: geminiRun.run.timestamp,
794
+ durationMs: geminiRun.run.durationMs,
795
+ tokenUsage: geminiRun.run.tokenUsage || null,
796
+ runFile: geminiRunFile,
797
+ parseSuccess: geminiRun.output.parseSuccess,
798
+ };
799
+
800
+ // Track cost
801
+ costTracker.addSegment(segName, geminiRun.run.tokenUsage, geminiRun.run.durationMs, false);
802
+
803
+ // === QUALITY GATE ===
804
+ const qualityReport = assessQuality(analysis, {
805
+ parseSuccess: geminiRun.output.parseSuccess,
806
+ rawLength: (geminiRun.output.raw || '').length,
807
+ segmentIndex: j,
808
+ totalSegments: segments.length,
809
+ });
810
+ console.log(formatQualityLine(qualityReport, segName));
811
+
812
+ // === AUTO-RETRY on FAIL ===
813
+ if (qualityReport.shouldRetry && !shuttingDown) {
814
+ console.log(` ↻ Quality below threshold (${qualityReport.score}/${THRESHOLDS.PASS}) — retrying with enhanced hints...`);
815
+ log.step(`Quality gate FAIL for ${segName} (score: ${qualityReport.score}) — retrying`);
816
+ retried = true;
817
+
818
+ // Boost thinking budget for retry (+25%)
819
+ const retryBudget = Math.min(32768, Math.round(adaptiveBudget * 1.25));
820
+
821
+ try {
822
+ const retryRun = await processWithGemini(
823
+ ai, segPath,
824
+ `${callName}_${baseName}_seg${String(j).padStart(2, '0')}_retry`,
825
+ contextDocs,
826
+ previousAnalyses,
827
+ userName,
828
+ PKG_ROOT,
829
+ {
830
+ segmentIndex: j,
831
+ totalSegments: segments.length,
832
+ segmentStartSec: segmentMeta[j].startTimeSec,
833
+ segmentEndSec: segmentMeta[j].endTimeSec,
834
+ thinkingBudget: retryBudget,
835
+ boundaryContext: boundaryCtx,
836
+ retryHints: qualityReport.retryHints,
837
+ existingFileUri: geminiFileUri,
838
+ existingFileMime: geminiFileMime,
839
+ existingGeminiFileName: geminiFileName,
840
+ }
841
+ );
842
+
843
+ const retryTs = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
844
+ const retryRunFileName = `segment_${String(j).padStart(2, '0')}_retry_${retryTs}.json`;
845
+ const retryRunFilePath = path.join(geminiRunsDir, retryRunFileName);
846
+ fs.writeFileSync(retryRunFilePath, JSON.stringify(retryRun, null, 2), 'utf8');
847
+
848
+ const retryAnalysis = retryRun.output.parsed || { rawResponse: retryRun.output.raw };
849
+ const retryQuality = assessQuality(retryAnalysis, {
850
+ parseSuccess: retryRun.output.parseSuccess,
851
+ rawLength: (retryRun.output.raw || '').length,
852
+ segmentIndex: j,
853
+ totalSegments: segments.length,
854
+ });
855
+
856
+ // Track retry cost
857
+ costTracker.addSegment(`${segName}_retry`, retryRun.run.tokenUsage, retryRun.run.durationMs, false);
858
+
859
+ // Use retry result if better
860
+ if (retryQuality.score > qualityReport.score) {
861
+ retryImproved = true;
862
+ analysis = retryAnalysis;
863
+ analysis._geminiMeta = {
864
+ model: retryRun.run.model,
865
+ processedAt: retryRun.run.timestamp,
866
+ durationMs: retryRun.run.durationMs,
867
+ tokenUsage: retryRun.run.tokenUsage || null,
868
+ runFile: path.relative(PROJECT_ROOT, retryRunFilePath),
869
+ parseSuccess: retryRun.output.parseSuccess,
870
+ retryOf: geminiRunFile,
871
+ };
872
+ geminiRunFile = path.relative(PROJECT_ROOT, retryRunFilePath);
873
+ console.log(` ✓ Retry improved quality: ${qualityReport.score} → ${retryQuality.score}`);
874
+ console.log(formatQualityLine(retryQuality, segName));
875
+ log.step(`Retry improved ${segName}: ${qualityReport.score} → ${retryQuality.score}`);
876
+ segmentReports.push({ segmentName: segName, qualityReport: retryQuality, retried: true, retryImproved: true });
877
+ } else {
878
+ console.log(` ⚠ Retry did not improve (${qualityReport.score} → ${retryQuality.score}), keeping original`);
879
+ segmentReports.push({ segmentName: segName, qualityReport, retried: true, retryImproved: false });
880
+ }
881
+ } catch (retryErr) {
882
+ console.warn(` ⚠ Retry failed: ${retryErr.message} — keeping original result`);
883
+ segmentReports.push({ segmentName: segName, qualityReport, retried: true, retryImproved: false });
884
+ }
885
+ } else {
886
+ segmentReports.push({ segmentName: segName, qualityReport, retried: false, retryImproved: false });
887
+ }
888
+
889
+ // === FOCUSED RE-ANALYSIS (v6) ===
890
+ if (!opts.disableFocusedPass && ai && !shuttingDown) {
891
+ const lastReport = segmentReports[segmentReports.length - 1];
892
+ const weakness = identifyWeaknesses(lastReport.qualityReport, analysis);
893
+ if (weakness.shouldReanalyze) {
894
+ console.log(` 🔍 Focused re-analysis: ${weakness.weakAreas.length} weak area(s) → ${weakness.weakAreas.join(', ')}`);
895
+ log.step(`Focused re-analysis for ${segName}: ${weakness.weakAreas.join(', ')}`);
896
+ try {
897
+ const focusedResult = await runFocusedPass(ai, analysis, weakness.focusPrompt, {
898
+ videoUri: geminiFileUri || null,
899
+ segmentIndex: j,
900
+ totalSegments: segments.length,
901
+ thinkingBudget: 12288,
902
+ });
903
+ if (focusedResult) {
904
+ analysis = mergeFocusedResults(analysis, focusedResult);
905
+ if (focusedResult._focusedPassMeta) {
906
+ costTracker.addSegment(`${segName}_focused`, focusedResult._focusedPassMeta, 0, false);
907
+ }
908
+ console.log(` ✓ Focused pass enhanced ${weakness.weakAreas.length} area(s)`);
909
+ log.step(`Focused re-analysis merged for ${segName}`);
910
+ } else {
911
+ console.log(` ℹ Focused pass found no additional items`);
912
+ }
913
+ } catch (focErr) {
914
+ console.warn(` ⚠ Focused re-analysis error: ${focErr.message}`);
915
+ log.warn(`Focused re-analysis failed for ${segName}: ${focErr.message}`);
916
+ }
917
+ }
918
+ }
919
+
920
+ // === CONFIDENCE STATS (v6) ===
921
+ const confStats = getConfidenceStats(analysis);
922
+ if (confStats.total > 0) {
923
+ console.log(` Confidence: ${confStats.high}H/${confStats.medium}M/${confStats.low}L/${confStats.missing}? (${confStats.coverage}% coverage)`);
924
+ if (log.metric) log.metric('confidence_coverage', confStats.coverage);
925
+ }
926
+
927
+ previousAnalyses.push(analysis);
928
+
929
+ // === CLEANUP: delete Gemini File API upload after all passes ===
930
+ // Skip cleanup when external URL was used — no Gemini file was uploaded
931
+ if (geminiFileName && ai && !usedExternalUrl) {
932
+ cleanupGeminiFiles(ai, geminiFileName).catch(() => {});
933
+ }
934
+
935
+ const ticketCount = analysis.tickets ? analysis.tickets.length : 0;
936
+ const tok = geminiRun.run.tokenUsage || {};
937
+ const sourceLabel = usedExternalUrl ? 'via Storage URL' : (geminiFileName ? 'via File API' : 'direct');
938
+ log.step(`Gemini OK: ${segName} (${sourceLabel}) — ${ticketCount} ticket(s) | ${geminiRun.run.durationMs}ms | tokens: ${tok.inputTokens || 0}in/${tok.outputTokens || 0}out/${tok.thoughtTokens || 0}think/${tok.totalTokens || 0}total`);
939
+ log.debug(`Gemini parsed: ${JSON.stringify(analysis).substring(0, 500)}`);
940
+ console.log(` ✓ AI analysis complete (${(geminiRun.run.durationMs / 1000).toFixed(1)}s)${retried ? (retryImproved ? ' [retry improved]' : ' [retried]') : ''}`);
941
+ progress.markAnalyzed(`${baseName}_seg${j}`, geminiRunFile);
942
+ } catch (err) {
943
+ console.error(` ✗ Gemini failed: ${err.message}`);
944
+ log.error(`Gemini FAIL: ${segName} — ${err.message}`);
945
+ analysis = { error: err.message };
946
+ segmentReports.push({ segmentName: segName, qualityReport: { grade: 'FAIL', score: 0, issues: [err.message] }, retried: false, retryImproved: false });
947
+ }
948
+ }
949
+
950
+ fileResult.segments.push({
951
+ segmentFile: segName,
952
+ segmentIndex: j,
953
+ storagePath,
954
+ storageUrl,
955
+ duration: fmtDuration(durSec),
956
+ durationSeconds: durSec,
957
+ fileSizeMB: parseFloat(sizeMB),
958
+ geminiRunFile,
959
+ analysis,
960
+ });
961
+
962
+ // Collect for final compilation (skip errored)
963
+ if (analysis && !analysis.error) {
964
+ const segNum = j + 1;
965
+ const tagSeg = (arr) => (arr || []).forEach(item => { item.source_segment = segNum; });
966
+ tagSeg(analysis.action_items);
967
+ tagSeg(analysis.change_requests);
968
+ tagSeg(analysis.blockers);
969
+ tagSeg(analysis.scope_changes);
970
+ tagSeg(analysis.file_references);
971
+ if (analysis.tickets) {
972
+ analysis.tickets.forEach(t => {
973
+ t.source_segment = segNum;
974
+ tagSeg(t.comments);
975
+ tagSeg(t.code_changes);
976
+ tagSeg(t.video_segments);
977
+ });
978
+ }
979
+ if (analysis.your_tasks) {
980
+ tagSeg(analysis.your_tasks.tasks_todo);
981
+ tagSeg(analysis.your_tasks.tasks_waiting_on_others);
982
+ tagSeg(analysis.your_tasks.decisions_needed);
983
+ }
984
+ segmentAnalyses.push(analysis);
985
+ }
986
+
987
+ console.log('');
988
+ }
989
+
990
+ // Compute totals for this file
991
+ fileResult.compressedTotalMB = fileResult.segments
992
+ .reduce((sum, s) => sum + s.fileSizeMB, 0).toFixed(2);
993
+ fileResult.compressionRatio = (
994
+ (1 - parseFloat(fileResult.compressedTotalMB) / parseFloat(fileResult.originalSizeMB)) * 100
995
+ ).toFixed(1) + '% reduction';
996
+
997
+ return { fileResult, segmentAnalyses, segmentReports };
998
+ }
999
+
1000
// ======================== PHASE: COMPILE ========================

/**
 * Send all segment analyses to Gemini for final compilation.
 *
 * Skipped when there is nothing to compile, when --skip-gemini or --dry-run
 * is active, or when a shutdown was requested. On success the run artifact is
 * persisted under gemini_runs/<callName>/ and the progress tracker is updated;
 * on failure the caller falls back to a raw segment merge.
 *
 * @param {object} ctx - Shared pipeline context (opts, ai, userName, callName, costTracker, progress).
 * @param {Array<object>} allSegmentAnalyses - Parsed per-segment analyses.
 * @returns {Promise<{compiledAnalysis: object|null, compilationRun: object|null, compilationPayload: object|null, compilationFile: string|null}>}
 */
async function phaseCompile(ctx, allSegmentAnalyses) {
  const timer = phaseTimer('compile');
  const { opts, ai, userName, callName, costTracker, progress } = ctx;

  progress.setPhase('compile');

  let compiledAnalysis = null;
  let compilationRun = null;

  const skipCompilation =
    allSegmentAnalyses.length === 0 || opts.skipGemini || opts.dryRun || shuttingDown;

  if (!skipCompilation) {
    try {
      // Size the thinking budget to the volume of segment data being merged.
      const budget = calculateCompilationBudget(allSegmentAnalyses, opts.compilationThinkingBudget);
      console.log(` Compilation thinking budget: ${budget.budget.toLocaleString()} tokens (${budget.reason})`);

      const result = await compileFinalResult(
        ai, allSegmentAnalyses, userName, callName, PKG_ROOT,
        { thinkingBudget: budget.budget }
      );

      compiledAnalysis = result.compiled;
      compilationRun = result.run;

      // Fold compilation token usage into the running cost estimate.
      if (compilationRun?.tokenUsage) {
        costTracker.addCompilation(compilationRun.tokenUsage, compilationRun.durationMs);
      }

      // Sanity-check the compiled output: a parse that yielded none of the
      // structured collections is flagged so the renderer can fall back.
      if (compiledAnalysis) {
        const nonEmptyList = (v) => Array.isArray(v) && v.length > 0;
        const hasStructuredData =
          nonEmptyList(compiledAnalysis.tickets) ||
          nonEmptyList(compiledAnalysis.action_items) ||
          nonEmptyList(compiledAnalysis.blockers) ||
          nonEmptyList(compiledAnalysis.change_requests);

        if (!hasStructuredData) {
          console.warn(' ⚠ Compilation parsed OK but is missing structured data (no tickets, actions, blockers, or CRs)');
          console.warn(' → Falling back to raw segment merge for full data');
          log.warn('Compilation incomplete — missing all structured fields, using segment merge fallback');
          compiledAnalysis._incomplete = true;
        }
      }

      // Persist the full compilation run for auditing and later diffing.
      const runsDir = path.join(PROJECT_ROOT, 'gemini_runs', callName);
      fs.mkdirSync(runsDir, { recursive: true });
      const stamp = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
      const compilationFile = path.join(runsDir, `compilation_${stamp}.json`);
      const compilationPayload = {
        run: compilationRun,
        output: { raw: result.raw, parsed: compiledAnalysis, parseSuccess: compiledAnalysis !== null },
      };
      fs.writeFileSync(compilationFile, JSON.stringify(compilationPayload, null, 2), 'utf8');
      log.step(`Compilation run saved → ${compilationFile}`);

      progress.markCompilationDone();

      timer.end();
      return { compiledAnalysis, compilationRun, compilationPayload, compilationFile };
    } catch (err) {
      console.error(` ✗ Final compilation failed: ${err.message}`);
      log.error(`Compilation FAIL — ${err.message}`);
      console.warn(' → Falling back to raw segment merge for MD');
    }
  }

  // Reached when compilation was skipped or failed: no persisted payload.
  timer.end();
  return { compiledAnalysis, compilationRun, compilationPayload: null, compilationFile: null };
}
1075
+
1076
// ======================== PHASE: OUTPUT ========================

/**
 * Write results JSON, generate Markdown, append a diff report against the
 * previous run, and upload final artifacts to Firebase Storage.
 *
 * Side effects: creates the run folder, writes results.json / results.md /
 * compilation.json / diff.json, mutates `results` (costSummary, storageUrl).
 *
 * @param {object} ctx - Shared pipeline context.
 * @param {object} results - Accumulated results object (mutated in place).
 * @param {object|null} compiledAnalysis - AI-compiled analysis, if any.
 * @param {object|null} compilationRun - Metadata for the compilation call.
 * @param {object|null} compilationPayload - Full compilation run artifact.
 * @returns {Promise<{runDir: string, jsonPath: string, mdPath: string, runTs: string}>}
 */
async function phaseOutput(ctx, results, compiledAnalysis, compilationRun, compilationPayload) {
  const timer = phaseTimer('output');
  const { opts, targetDir, storage, firebaseReady, callName, progress, costTracker, userName } = ctx;

  progress.setPhase('output');

  // Determine output directory (explicit --output-dir wins over runs/<ts>).
  const runTs = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
  const runDir = opts.outputDir
    ? path.resolve(opts.outputDir)
    : path.join(targetDir, 'runs', runTs);
  fs.mkdirSync(runDir, { recursive: true });
  log.step(`Run folder created → ${runDir}`);

  // Copy compilation JSON into run folder so each run is self-contained.
  if (compilationPayload) {
    const runCompFile = path.join(runDir, 'compilation.json');
    fs.writeFileSync(runCompFile, JSON.stringify(compilationPayload, null, 2), 'utf8');
  }

  // Attach cost summary to results before serializing.
  results.costSummary = costTracker.getSummary();

  // Write results JSON.
  const jsonPath = path.join(runDir, 'results.json');
  fs.writeFileSync(jsonPath, JSON.stringify(results, null, 2), 'utf8');
  log.step(`Results JSON saved → ${jsonPath}`);

  // Generate Markdown: AI-compiled rendering when available and complete,
  // otherwise the legacy raw segment merge.
  const mdPath = path.join(runDir, 'results.md');
  const totalSegs = results.files.reduce((s, f) => s + f.segmentCount, 0);

  if (compiledAnalysis && !compiledAnalysis._incomplete) {
    const mdContent = renderResultsMarkdown({
      compiled: compiledAnalysis,
      meta: {
        callName: results.callName,
        processedAt: results.processedAt,
        geminiModel: config.GEMINI_MODEL,
        userName,
        segmentCount: totalSegs,
        compilation: compilationRun || null,
        costSummary: results.costSummary,
        // Rebuild per-segment wall-clock boundaries: segment durations are
        // post-speedup, so multiply by the speed factor to recover original time.
        segments: results.files.flatMap(f => {
          const speed = results.settings?.speed || 1;
          let cum = 0;
          return (f.segments || []).map(s => {
            const startSec = cum;
            cum += (s.durationSeconds || 0) * speed;
            return {
              file: s.segmentFile,
              duration: s.duration,
              durationSeconds: s.durationSeconds,
              sizeMB: s.fileSizeMB,
              video: f.originalFile,
              startTimeSec: startSec,
              endTimeSec: cum,
              segmentNumber: (s.segmentIndex || 0) + 1,
            };
          });
        }),
        settings: results.settings,
      },
    });
    fs.writeFileSync(mdPath, mdContent, 'utf8');
    log.step(`Results MD saved (compiled) → ${mdPath}`);
    console.log(` ✓ Markdown report (AI-compiled) → ${path.basename(mdPath)}`);
  } else {
    const { renderResultsMarkdownLegacy } = require('./renderers/markdown');
    const mdContent = renderResultsMarkdownLegacy(results);
    fs.writeFileSync(mdPath, mdContent, 'utf8');
    log.step(`Results MD saved (legacy merge) → ${mdPath}`);
    console.log(` ✓ Markdown report (legacy merge) → ${path.basename(mdPath)}`);
  }

  // === DIFF ENGINE (v6) ===
  // Compare this compilation against the most recent previous run; append
  // a human-readable delta to the MD and persist the raw diff as JSON.
  let diffResult = null;
  if (!opts.disableDiff && compiledAnalysis) {
    try {
      const prevComp = loadPreviousCompilation(targetDir, runTs);
      if (prevComp && prevComp.compiled) {
        diffResult = generateDiff(compiledAnalysis, prevComp.compiled);
        // Inject the previous run timestamp into the diff
        if (diffResult.hasDiff) {
          diffResult.previousTimestamp = prevComp.timestamp;
          const diffMd = renderDiffMarkdown(diffResult);
          fs.appendFileSync(mdPath, '\n\n' + diffMd, 'utf8');
          fs.writeFileSync(path.join(runDir, 'diff.json'), JSON.stringify(diffResult, null, 2), 'utf8');
          log.step(`Diff report: ${diffResult.totals.newItems} new, ${diffResult.totals.removedItems} removed, ${diffResult.totals.changedItems} changed`);
          console.log(` ✓ Diff report appended (vs ${prevComp.timestamp})`);
        } else {
          console.log(` ℹ No differences vs previous run (${prevComp.timestamp})`);
        }
      } else {
        console.log(` ℹ No previous compilation found for diff comparison`);
      }
    } catch (diffErr) {
      // Diffing is best-effort: never let it fail the pipeline.
      console.warn(` ⚠ Diff generation failed: ${diffErr.message}`);
      log.warn(`Diff generation error: ${diffErr.message}`);
    }
  }

  // Upload results to Firebase
  if (firebaseReady && !opts.skipUpload && !opts.dryRun) {
    try {
      const resultsStoragePath = `calls/${callName}/runs/${runTs}/results.json`;
      // Results always upload fresh (never skip-existing) — they change every run
      const url = await uploadToStorage(storage, jsonPath, resultsStoragePath);
      results.storageUrl = url;
      // Re-write the local JSON so it embeds its own storage URL.
      fs.writeFileSync(jsonPath, JSON.stringify(results, null, 2), 'utf8');
      console.log(` ✓ Results JSON uploaded → ${resultsStoragePath}`);

      const mdStoragePath = `calls/${callName}/runs/${runTs}/results.md`;
      await uploadToStorage(storage, mdPath, mdStoragePath);
      console.log(` ✓ Results MD uploaded → ${mdStoragePath}`);
    } catch (err) {
      console.warn(` ⚠ Results upload failed: ${err.message}`);
    }
  } else if (opts.skipUpload) {
    console.log(' ⚠ Skipping results upload (--skip-upload)');
  } else if (opts.dryRun) {
    // FIX: previously a dry run (with Firebase configured) fell through to the
    // "auth not configured" message, misreporting why the upload was skipped.
    console.log(' ⚠ Skipping results upload (--dry-run)');
  } else {
    console.log(' ⚠ Skipping results upload (Firebase auth not configured)');
  }

  timer.end();
  return { runDir, jsonPath, mdPath, runTs };
}
1209
+
1210
// ======================== PHASE: SUMMARY ========================

/**
 * Print the final summary with timing, cost, and file locations.
 *
 * Purely presentational: reads the context, results, and cost tracker, emits
 * console output plus the structured log summary, and mutates nothing.
 *
 * @param {object} ctx - Shared pipeline context.
 * @param {object} results - Accumulated pipeline results.
 * @param {object} artifacts - { jsonPath, mdPath, runTs, compilationRun }.
 */
function phaseSummary(ctx, results, { jsonPath, mdPath, runTs, compilationRun }) {
  const { opts, firebaseReady, callName, docStorageUrls, costTracker } = ctx;
  const segmentTotal = results.files.reduce((acc, file) => acc + file.segmentCount, 0);

  console.log('');
  console.log('==============================================');
  console.log(' COMPLETE');
  console.log('==============================================');
  console.log(` Results JSON : ${jsonPath}`);
  console.log(` Results MD : ${mdPath}`);
  console.log(` Files : ${results.files.length}`);
  console.log(` Segments : ${segmentTotal}`);
  console.log(` Elapsed : ${log.elapsed()}`);
  if (compilationRun) {
    console.log(` Compilation : ${(compilationRun.durationMs / 1000).toFixed(1)}s | ${compilationRun.tokenUsage?.totalTokens?.toLocaleString() || '?'} tokens`);
  }
  for (const file of results.files) {
    console.log(` ${file.originalFile}: ${file.originalSizeMB} MB → ${file.compressedTotalMB} MB (${file.compressionRatio})`);
  }

  // Token and dollar breakdown — shown only when at least one AI call ran.
  const spend = costTracker.getSummary();
  if (spend.totalTokens > 0) {
    console.log('');
    console.log(` Cost estimate (${config.GEMINI_MODEL}):`);
    console.log(` Input tokens : ${spend.inputTokens.toLocaleString()} ($${spend.inputCost.toFixed(4)})`);
    console.log(` Output tokens : ${spend.outputTokens.toLocaleString()} ($${spend.outputCost.toFixed(4)})`);
    console.log(` Thinking tokens: ${spend.thinkingTokens.toLocaleString()} ($${spend.thinkingCost.toFixed(4)})`);
    console.log(` Total : ${spend.totalTokens.toLocaleString()} tokens | $${spend.totalCost.toFixed(4)}`);
    console.log(` AI time : ${(spend.totalDurationMs / 1000).toFixed(1)}s`);
  }

  // Storage layout recap mirrors where phaseOutput and the uploaders wrote.
  const uploadsActive = firebaseReady && !opts.skipUpload;
  if (uploadsActive) {
    console.log('');
    console.log(' Firebase Storage:');
    console.log(` calls/${callName}/documents/ → ${Object.keys(docStorageUrls).length} doc(s)`);
    console.log(` calls/${callName}/segments/ → ${segmentTotal} segment(s)`);
    console.log(` calls/${callName}/runs/${runTs}/ → results.json + results.md`);
    if (results.storageUrl) {
      console.log(` Results URL: ${results.storageUrl}`);
    }
  } else {
    console.log('');
    console.log(' ⚠ Firebase Storage: uploads skipped');
  }

  // Mirror the same facts into the structured log summary.
  log.summary([
    `Call: ${callName}`,
    `Videos: ${results.files.length}`,
    `Segments: ${segmentTotal}`,
    `Compiled: ${results.compilation ? 'Yes (AI)' : 'No (fallback merge)'}`,
    `Firebase: ${uploadsActive ? 'OK' : 'skipped'}`,
    `Documents: ${results.contextDocuments.length}`,
    `Cost: $${spend.totalCost.toFixed(4)} (${spend.totalTokens.toLocaleString()} tokens)`,
    `Elapsed: ${log.elapsed()}`,
    ...results.files.map(file => ` ${file.originalFile}: ${file.originalSizeMB}MB → ${file.compressedTotalMB}MB (${file.compressionRatio})`),
    `Results JSON: ${jsonPath}`,
    `Results MD: ${mdPath}`,
    `Logs: ${log.detailedPath}`,
  ]);
  log.step('DONE');

  console.log(` Logs: ${log.detailedPath}`);
  console.log(` ${log.minimalPath}`);
  console.log('');
}
1282
+
1283
// ======================== PHASE: DEEP DIVE ========================

/**
 * Generate explanatory documents for topics discussed in the meeting.
 * Two-phase: discover topics → generate documents in parallel → write output.
 *
 * Best-effort: a topic-discovery failure or an empty topic list ends the
 * phase early without failing the pipeline.
 *
 * @param {object} ctx - Shared pipeline context (ai, callName, userName, costTracker, opts, contextDocs).
 * @param {object} compiledAnalysis - The AI-compiled analysis to mine for topics.
 * @param {string} runDir - Run output folder; documents go under <runDir>/deep-dive/.
 * @returns {Promise<void>}
 */
async function phaseDeepDive(ctx, compiledAnalysis, runDir) {
  const timer = phaseTimer('deep_dive');
  const { ai, callName, userName, costTracker, opts, contextDocs } = ctx;

  console.log('');
  console.log('══════════════════════════════════════════════');
  console.log(' DEEP DIVE — Generating Explanatory Documents');
  console.log('══════════════════════════════════════════════');
  console.log('');

  // FIX: use ?? (not ||) so an explicit budget of 0 is honored, and reuse the
  // module-level `config` binding instead of re-requiring './config'.
  const thinkingBudget = opts.thinkingBudget ?? config.DEEP_DIVE_THINKING_BUDGET;

  // Gather context snippets from inline text docs (for richer AI context),
  // truncating each to 3000 chars to bound prompt size.
  const contextSnippets = [];
  for (const doc of (contextDocs || [])) {
    if (doc.type === 'inlineText' && doc.content) {
      const snippet = doc.content.length > 3000
        ? doc.content.slice(0, 3000) + '\n... (truncated)'
        : doc.content;
      contextSnippets.push(`[${doc.fileName}]\n${snippet}`);
    }
  }

  // Phase 1: Discover topics
  console.log(' Phase 1: Discovering topics...');
  let topicResult;
  try {
    topicResult = await discoverTopics(ai, compiledAnalysis, {
      callName, userName, thinkingBudget, contextSnippets,
    });
  } catch (err) {
    console.error(` ✗ Topic discovery failed: ${err.message}`);
    log.error(`Deep dive topic discovery failed: ${err.message}`);
    timer.end();
    return;
  }

  const topics = topicResult.topics;
  if (!topics || topics.length === 0) {
    console.log(' ℹ No topics identified for deep dive');
    log.step('Deep dive: no topics discovered');
    timer.end();
    return;
  }

  console.log(` ✓ Found ${topics.length} topic(s):`);
  topics.forEach(t => console.log(` ${t.id} [${t.category}] ${t.title}`));
  console.log('');

  // Fold discovery token usage into the running cost estimate.
  if (topicResult.tokenUsage) {
    costTracker.addSegment('deep-dive-discovery', topicResult.tokenUsage, topicResult.durationMs, false);
  }
  log.step(`Deep dive: ${topics.length} topics discovered in ${(topicResult.durationMs / 1000).toFixed(1)}s`);

  // Phase 2: Generate documents
  console.log(` Phase 2: Generating ${topics.length} document(s)...`);
  const documents = await generateAllDocuments(ai, topics, compiledAnalysis, {
    callName,
    userName,
    thinkingBudget,
    contextSnippets,
    concurrency: Math.min(opts.parallelAnalysis || 2, 3), // match pipeline parallelism
    onProgress: (done, total, topic) => {
      console.log(` [${done}/${total}] ✓ ${topic.title}`);
    },
  });

  // Track per-document generation cost.
  for (const doc of documents) {
    if (doc.tokenUsage && doc.tokenUsage.totalTokens > 0) {
      costTracker.addSegment(`deep-dive-${doc.topic.id}`, doc.tokenUsage, doc.durationMs, false);
    }
  }

  // Phase 3: Write output
  const deepDiveDir = path.join(runDir, 'deep-dive');
  const ts = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
  const { indexPath, stats } = writeDeepDiveOutput(deepDiveDir, documents, {
    callName,
    timestamp: ts,
  });

  console.log('');
  console.log(` ✓ Deep dive complete: ${stats.successful}/${stats.total} documents generated`);
  console.log(` Output: ${path.relative(PROJECT_ROOT, deepDiveDir)}/`);
  console.log(` Index: ${path.relative(PROJECT_ROOT, indexPath)}`);
  if (stats.failed > 0) {
    console.log(` ⚠ ${stats.failed} document(s) failed`);
  }
  console.log(` Tokens: ${stats.totalTokens.toLocaleString()} | Time: ${(stats.totalDurationMs / 1000).toFixed(1)}s`);
  console.log('');

  log.step(`Deep dive complete: ${stats.successful} docs, ${stats.totalTokens} tokens, ${(stats.totalDurationMs / 1000).toFixed(1)}s`);
  timer.end();
}
1385
+
1386
// ======================== MAIN PIPELINE ========================

/**
 * Main pipeline entry point.
 *
 * Sequence: init → (optional alternate modes) → discover → services →
 * per-video processing → compile → output → health dashboard → learning
 * history → summary → optional deep dive → cleanup.
 *
 * Each phase receives the shared context object built up by the earlier
 * phases; `results` is the single accumulator serialized to results.json.
 */
async function run() {
  // Phase 1: Init
  const initCtx = await phaseInit();
  if (!initCtx) return; // --version early exit

  // --- Smart Change Detection mode ---
  // Alternate entry: only update progress tracking, skip the full pipeline.
  if (initCtx.opts.updateProgress) {
    return await runProgressUpdate(initCtx);
  }

  // --- Dynamic document-only mode ---
  // Alternate entry: generate documents without processing any video.
  if (initCtx.opts.dynamic) {
    return await runDynamic(initCtx);
  }

  // Phase 2: Discover
  const ctx = await phaseDiscover(initCtx);

  // Phase 3: Services
  const fullCtx = await phaseServices(ctx);

  // Phase 4: Process each video
  const allSegmentAnalyses = [];
  const allSegmentReports = [];
  const pipelineStartMs = Date.now();
  // Accumulator for everything written to results.json at the end of the run.
  const results = {
    processedAt: new Date().toISOString(),
    sourceFolder: fullCtx.targetDir,
    callName: fullCtx.callName,
    userName: fullCtx.userName,
    settings: {
      speed: SPEED,
      segmentTimeSec: SEG_TIME,
      preset: PRESET,
      geminiModel: config.GEMINI_MODEL,
      thinkingBudget: fullCtx.opts.thinkingBudget,
    },
    flags: fullCtx.opts,
    contextDocuments: fullCtx.contextDocs.map(d => d.fileName),
    documentStorageUrls: fullCtx.docStorageUrls,
    firebaseAuthenticated: fullCtx.firebaseReady,
    files: [],
  };

  fullCtx.progress.setPhase('compress');
  if (log && log.phaseStart) log.phaseStart('process_videos');

  // Videos are processed sequentially; a shutdown request stops the loop
  // between videos so already-collected results are still compiled/output.
  for (let i = 0; i < fullCtx.videoFiles.length; i++) {
    if (shuttingDown) break;

    const { fileResult, segmentAnalyses, segmentReports } = await phaseProcessVideo(fullCtx, fullCtx.videoFiles[i], i);
    if (fileResult) {
      results.files.push(fileResult);
      allSegmentAnalyses.push(...segmentAnalyses);
      allSegmentReports.push(...(segmentReports || []));
    }
  }

  if (log && log.phaseEnd) log.phaseEnd({ videoCount: fullCtx.videoFiles.length, segmentCount: allSegmentAnalyses.length });

  // Phase 5: Compile
  const { compiledAnalysis, compilationRun, compilationPayload, compilationFile } = await phaseCompile(fullCtx, allSegmentAnalyses);

  // Quality gate on compilation output
  let compilationQuality = null;
  if (compiledAnalysis && compilationRun) {
    compilationQuality = assessQuality(compiledAnalysis, {
      parseSuccess: compilationRun.parseSuccess,
      rawLength: 0, // not easily accessible but not critical
    });
    log.step(`Compilation quality: ${compilationQuality.score}/100 (${compilationQuality.grade})`);
  }

  if (compilationRun) {
    results.compilation = {
      runFile: compilationFile ? path.relative(PROJECT_ROOT, compilationFile) : null,
      ...compilationRun,
    };
  }
  // NOTE(review): the payload is attached before phaseOutput serializes
  // results.json and removed right after — so the full compilation payload is
  // embedded in the on-disk results.json but not kept on the in-memory object.
  // Confirm that embedding is intentional (it duplicates compilation.json).
  results._compilationPayload = compilationPayload;

  // Phase 6: Output
  const outputResult = await phaseOutput(fullCtx, results, compiledAnalysis, compilationRun, compilationPayload);
  delete results._compilationPayload;

  // Phase 7: Health Dashboard
  const healthReport = buildHealthReport({
    segmentReports: allSegmentReports,
    allSegmentAnalyses,
    costSummary: fullCtx.costTracker.getSummary(),
    compilationQuality,
    totalDurationMs: Date.now() - pipelineStartMs,
  });
  printHealthDashboard(healthReport);

  // Add health report to results
  results.healthReport = healthReport;

  // --- Learning Loop: save run history (v6) ---
  // Best-effort: a history-write failure is logged, never fatal.
  if (!fullCtx.opts.disableLearning) {
    try {
      // assumes the focused-pass merge marks analyses with
      // `_focused_pass_applied` — TODO confirm (merge helper not visible here)
      const hadFocusedPasses = allSegmentAnalyses.some(a => a._focused_pass_applied);
      const entry = buildHistoryEntry({
        callName: fullCtx.callName,
        healthReport,
        costSummary: fullCtx.costTracker.getSummary(),
        segmentCount: allSegmentAnalyses.length,
        compilationQuality,
        baseBudget: fullCtx.opts.thinkingBudget,
        compilationBudget: fullCtx.opts.compilationThinkingBudget,
        hadFocusedPasses,
      });
      saveHistory(PROJECT_ROOT, entry);
      log.step('Learning history saved');
    } catch (histErr) {
      log.warn(`Failed to save learning history: ${histErr.message}`);
    }
  }

  // Phase 8: Summary
  phaseSummary(fullCtx, results, { ...outputResult, compilationRun });

  // Phase 9 (optional): Deep Dive — generate explanatory documents.
  // Requires a successful compilation and a live (non-dry-run) Gemini session.
  if (fullCtx.opts.deepDive && compiledAnalysis && !fullCtx.opts.skipGemini && !fullCtx.opts.dryRun && !shuttingDown) {
    await phaseDeepDive(fullCtx, compiledAnalysis, outputResult.runDir);
  }

  // Cleanup
  fullCtx.progress.cleanup();
  log.close();
}
1519
+
1520
+ // ======================== DYNAMIC DOCUMENT-ONLY MODE ========================
1521
+
1522
/**
 * Alternative pipeline mode: generate documents from context docs + user request.
 * No video required — works purely from documents and the user's request.
 *
 * Triggered by --dynamic flag.
 *
 * Flow: prompt for request → discover docs/videos → (optionally) compress,
 * segment and summarize videos for context → plan topics → generate all
 * documents → write output → optional Firebase upload.
 *
 * @param {object} initCtx - Initialized context ({ opts, targetDir, progress, costTracker }).
 * @returns {Promise<void>} Resolves when the mode finishes (or bails out early).
 */
async function runDynamic(initCtx) {
  const { opts, targetDir } = initCtx;
  const folderName = path.basename(targetDir);
  // Filesystem-safe timestamp, e.g. "2024-05-01T12-30-45".
  const ts = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);

  try {
    console.log('');
    console.log('══════════════════════════════════════════════');
    console.log(' DYNAMIC MODE — AI Document Generation');
    console.log('══════════════════════════════════════════════');
    console.log(` Folder: ${folderName}`);
    console.log(` Source: ${targetDir}`);
    console.log(` Mode: Video + Documents (auto-detect)`);
    console.log('');

    // 1. Get user request (from --request flag or interactive prompt)
    let userRequest = opts.request;
    if (!userRequest) {
      userRequest = await promptUserText(' What do you want to generate?\n (e.g. "Plan migration from X to Y", "Explain this codebase", "Create learning guide for React")\n\n → ');
    }
    if (!userRequest || !userRequest.trim()) {
      console.error('\n ✗ A request is required for dynamic mode.');
      console.error(' Use --request "your request" or enter it when prompted.');
      return; // teardown handled by finally
    }
    console.log(`\n Request: "${userRequest}"`);
    log.step(`Dynamic mode: "${userRequest}"`);

    // 2. Ask for user name (for attribution)
    let userName = opts.userName;
    if (!userName) {
      userName = await promptUserText('\n Your name (optional, press Enter to skip): ');
    }
    if (userName) log.step(`User: ${userName}`);

    // 3. Discover documents AND video files
    console.log('');
    console.log(' Discovering content...');
    const allDocFiles = findDocsRecursive(targetDir, DOC_EXTS);
    const videoFiles = fs.readdirSync(targetDir)
      .filter(f => {
        // FIX: a broken symlink or unreadable entry used to throw from statSync
        // and crash the whole mode — skip entries we cannot stat instead.
        try {
          const stat = fs.statSync(path.join(targetDir, f));
          return stat.isFile() && VIDEO_EXTS.includes(path.extname(f).toLowerCase());
        } catch {
          return false;
        }
      })
      .map(f => path.join(targetDir, f));

    console.log(` Found ${allDocFiles.length} document(s)`);
    if (allDocFiles.length > 0) {
      allDocFiles.forEach(f => console.log(` - ${f.relPath}`));
    }
    console.log(` Found ${videoFiles.length} video file(s)`);
    if (videoFiles.length > 0) {
      videoFiles.forEach(f => console.log(` - ${path.basename(f)}`));
    }
    log.step(`Discovered ${allDocFiles.length} document(s), ${videoFiles.length} video(s)`);

    // 4. Initialize Gemini — this mode cannot run offline
    console.log('');
    console.log(' Initializing AI...');
    if (opts.skipGemini || opts.dryRun) {
      console.error(' ✗ Dynamic mode requires Gemini AI. Remove --skip-gemini / --dry-run.');
      return;
    }

    // Validate config for Gemini (Firebase is optional here)
    const configCheck = validateConfig({ skipFirebase: true, skipGemini: false });
    if (!configCheck.valid) {
      console.error('\n Configuration errors:');
      configCheck.errors.forEach(e => console.error(` ✗ ${e}`));
      return;
    }

    const ai = await initGemini();
    console.log(' ✓ Gemini AI ready');
    const costTracker = initCtx.costTracker;

    // 5. Process video files (compress → segment → analyze for context)
    const videoSummaries = [];
    if (videoFiles.length > 0) {
      console.log('');
      console.log(` ── Video Processing (${videoFiles.length} file${videoFiles.length > 1 ? 's' : ''}) ──`);
      const compressedDir = path.join(targetDir, 'compressed');

      for (let vi = 0; vi < videoFiles.length; vi++) {
        const videoPath = videoFiles[vi];
        const baseName = path.basename(videoPath, path.extname(videoPath));
        const segmentDir = path.join(compressedDir, baseName);

        console.log(`\n [${vi + 1}/${videoFiles.length}] ${path.basename(videoPath)}`);

        // Compress & segment (reuse existing segments if available)
        let segments;
        const existingSegments = fs.existsSync(segmentDir)
          ? fs.readdirSync(segmentDir)
              .filter(f => f.startsWith('segment_') && f.endsWith('.mp4'))
              // FIX: numeric-aware order so segment_2 precedes segment_10 even
              // when names are not zero-padded (identical order for padded names).
              .sort((a, b) => a.localeCompare(b, undefined, { numeric: true }))
          : [];

        if (existingSegments.length > 0) {
          segments = existingSegments.map(f => path.join(segmentDir, f));
          console.log(` ✓ Using ${segments.length} existing segment(s)`);
          log.step(`SKIP compression — ${segments.length} segment(s) already on disk for "${baseName}"`);
        } else {
          console.log(' Compressing & segmenting...');
          segments = compressAndSegment(videoPath, segmentDir);
          console.log(` → ${segments.length} segment(s) created`);
          log.step(`Compressed "${baseName}" → ${segments.length} segment(s)`);
        }

        // Drop segments that fail integrity verification
        const validSegments = segments.filter(s => verifySegment(s));
        if (validSegments.length < segments.length) {
          console.warn(` ⚠ ${segments.length - validSegments.length} corrupt segment(s) skipped`);
        }

        // Analyze each segment with Gemini to extract context
        console.log(` Analyzing ${validSegments.length} segment(s) for content...`);
        for (let si = 0; si < validSegments.length; si++) {
          const segPath = validSegments[si];
          const segName = path.basename(segPath);
          const displayName = `${baseName}/${segName}`;

          try {
            const result = await analyzeVideoForContext(ai, segPath, displayName, {
              thinkingBudget: 8192,
              segmentIndex: si,
              totalSegments: validSegments.length,
            });

            videoSummaries.push({
              videoFile: path.basename(videoPath),
              segment: segName,
              segmentIndex: si,
              totalSegments: validSegments.length,
              summary: result.summary,
            });

            if (result.tokenUsage) {
              costTracker.addSegment(`dynamic-video-${baseName}-${segName}`, result.tokenUsage, result.durationMs, false);
            }
          } catch (err) {
            // Best-effort: one bad segment should not abort the whole run.
            console.error(` ✗ Failed to analyze ${segName}: ${err.message}`);
            log.error(`Dynamic video analysis failed for ${displayName}: ${err.message}`);
          }
        }
      }

      console.log('');
      console.log(` ✓ Video analysis complete: ${videoSummaries.length} segment summary(ies)`);
      log.step(`Dynamic video analysis: ${videoSummaries.length} segment summaries extracted`);
    }

    // 6. Load document contents as snippets for AI (text formats only, truncated)
    const INLINE_EXTS = ['.vtt', '.srt', '.txt', '.md', '.csv', '.json', '.xml', '.html'];
    const docSnippets = [];
    for (const { absPath, relPath } of allDocFiles) {
      if (INLINE_EXTS.includes(path.extname(absPath).toLowerCase())) {
        try {
          let content = fs.readFileSync(absPath, 'utf8');
          if (content.length > 8000) {
            content = content.slice(0, 8000) + '\n... (truncated)';
          }
          docSnippets.push(`[${relPath}]\n${content}`);
        } catch { /* skip unreadable */ }
      }
    }
    console.log(` Loaded ${docSnippets.length} document(s) as context for AI`);
    if (videoSummaries.length > 0) {
      console.log(` Plus ${videoSummaries.length} video segment summary(ies) as context`);
    }
    console.log('');

    const thinkingBudget = opts.thinkingBudget || THINKING_BUDGET;

    // 7. Phase 1: Plan topics
    console.log(' Phase 1: Planning documents...');
    let planResult;
    try {
      planResult = await planTopics(ai, userRequest, docSnippets, {
        folderName, userName, thinkingBudget, videoSummaries,
      });
    } catch (err) {
      console.error(` ✗ Topic planning failed: ${err.message}`);
      log.error(`Dynamic topic planning failed: ${err.message}`);
      return;
    }

    const topics = planResult.topics;
    if (!topics || topics.length === 0) {
      console.log(' ℹ No documents planned — request may be too vague.');
      console.log(' Try a more specific request or add context documents to the folder.');
      return;
    }

    if (planResult.tokenUsage) {
      costTracker.addSegment('dynamic-planning', planResult.tokenUsage, planResult.durationMs, false);
    }

    console.log(` ✓ Planned ${topics.length} document(s) in ${(planResult.durationMs / 1000).toFixed(1)}s:`);
    topics.forEach(t => console.log(` ${t.id} [${t.category}] ${t.title}`));
    if (planResult.projectSummary) {
      console.log(`\n Summary: ${planResult.projectSummary}`);
    }
    console.log('');
    log.step(`Dynamic mode: ${topics.length} topics planned in ${(planResult.durationMs / 1000).toFixed(1)}s`);

    // 8. Phase 2: Generate all documents
    console.log(` Phase 2: Generating ${topics.length} document(s)...`);
    const documents = await generateAllDynamicDocuments(ai, topics, userRequest, docSnippets, {
      folderName,
      userName,
      thinkingBudget,
      videoSummaries,
      concurrency: Math.min(opts.parallelAnalysis || 2, 3), // hard cap of 3 concurrent generations
      onProgress: (done, total, topic) => {
        console.log(` [${done}/${total}] ✓ ${topic.title}`);
      },
    });

    // Track generation cost per document
    for (const doc of documents) {
      if (doc.tokenUsage && doc.tokenUsage.totalTokens > 0) {
        costTracker.addSegment(`dynamic-${doc.topic.id}`, doc.tokenUsage, doc.durationMs, false);
      }
    }

    // 9. Write output
    const runDir = opts.outputDir
      ? path.resolve(opts.outputDir)
      : path.join(targetDir, 'runs', ts);
    const { indexPath, stats } = writeDynamicOutput(runDir, documents, {
      folderName,
      userRequest,
      projectSummary: planResult.projectSummary,
      timestamp: ts,
    });

    console.log('');
    console.log(` ✓ Dynamic generation complete: ${stats.successful}/${stats.total} documents`);
    console.log(` Output: ${path.relative(PROJECT_ROOT, runDir)}/`);
    console.log(` Index: ${path.relative(PROJECT_ROOT, indexPath)}`);
    if (stats.failed > 0) {
      console.log(` ⚠ ${stats.failed} document(s) failed`);
    }
    console.log(` Tokens: ${stats.totalTokens.toLocaleString()} | Time: ${(stats.totalDurationMs / 1000).toFixed(1)}s`);

    // 10. Cost summary
    const cost = costTracker.getSummary();
    if (cost.totalTokens > 0) {
      console.log('');
      console.log(` Cost estimate (${config.GEMINI_MODEL}):`);
      console.log(` Input: ${cost.inputTokens.toLocaleString()} ($${cost.inputCost.toFixed(4)})`);
      console.log(` Output: ${cost.outputTokens.toLocaleString()} ($${cost.outputCost.toFixed(4)})`);
      console.log(` Thinking: ${cost.thinkingTokens.toLocaleString()} ($${cost.thinkingCost.toFixed(4)})`);
      console.log(` Total: ${cost.totalTokens.toLocaleString()} tokens | $${cost.totalCost.toFixed(4)}`);
    }

    // 11. Firebase upload (optional, best-effort)
    if (!opts.skipUpload) {
      try {
        const { storage, authenticated } = await initFirebase();
        if (authenticated && storage) {
          const storagePath = `calls/${folderName}/dynamic/${ts}`;
          const indexStoragePath = `${storagePath}/INDEX.md`;
          await uploadToStorage(storage, indexPath, indexStoragePath);
          console.log(` ✓ Uploaded to Firebase: ${storagePath}/`);
          log.step(`Firebase upload complete: ${storagePath}`);
        }
      } catch (fbErr) {
        // Local output already exists; never fail the run on upload errors.
        console.warn(` ⚠ Firebase upload failed: ${fbErr.message}`);
        log.warn(`Firebase upload failed: ${fbErr.message}`);
      }
    }

    console.log('');
    console.log(' ══════════════════════════════════════');
    console.log(' Dynamic Mode Complete');
    console.log(' ══════════════════════════════════════');
    if (videoSummaries.length > 0) {
      console.log(` Videos: ${videoFiles.length} (${videoSummaries.length} segments analyzed)`);
    }
    console.log(` Documents: ${stats.successful}`);
    console.log(` Output: ${path.relative(PROJECT_ROOT, runDir)}/`);
    console.log(` Elapsed: ${log.elapsed()}`);
    console.log('');

    log.step(`Dynamic mode complete: ${stats.successful} docs, ${stats.totalTokens} tokens`);
    log.step('DONE');
  } finally {
    // FIX: teardown was previously duplicated at five early-return sites and
    // skipped entirely when a phase threw — leaking the progress timer and the
    // structured-log handle. A single finally guarantees it on every path.
    initCtx.progress.cleanup();
    log.close();
  }
}
1826
+
1827
+ // ======================== SMART CHANGE DETECTION ========================
1828
+
1829
/**
 * Alternative pipeline mode: detect what changed since last analysis
 * and assess progress on extracted items via git + AI.
 *
 * Triggered by --update-progress flag.
 *
 * Flow: ensure git repo → load previous compilation → detect changes →
 * local heuristic assessment → optional AI-enhanced assessment → merge
 * progress into the analysis → write JSON/MD → optional Firebase upload.
 *
 * @param {object} initCtx - Initialized context ({ opts, targetDir, progress, costTracker }).
 * @returns {Promise<void>} Resolves when the update finishes (or bails out early).
 */
async function runProgressUpdate(initCtx) {
  const { opts, targetDir } = initCtx;
  const callName = path.basename(targetDir);
  // Filesystem-safe timestamp: colons replaced, fractional seconds + Z stripped.
  const ts = new Date().toISOString().replace(/:/g, '-').replace(/\.\d+Z$/, '');

  try {
    console.log('');
    console.log('==============================================');
    console.log(' Smart Change Detection & Progress Update');
    console.log('==============================================');
    console.log(` Call: ${callName}`);
    console.log(` Folder: ${targetDir}`);
    console.log('');

    // 0. Ensure a git repo exists for change tracking
    if (isGitAvailable() && !isGitRepo(opts.repoPath || targetDir)) {
      try {
        const { root, created } = initRepo(targetDir);
        if (created) {
          console.log(` ✓ Initialized git repository in ${root}`);
          log.step(`Git repo initialized: ${root}`);
        }
      } catch (gitErr) {
        // Git is optional — change detection degrades gracefully without it.
        console.warn(` ⚠ Could not initialize git: ${gitErr.message}`);
        log.warn(`Git init failed: ${gitErr.message}`);
      }
    }

    // 1. Load previous compilation — nothing to diff against without one
    const prev = loadPreviousCompilation(targetDir);
    if (!prev) {
      console.error(' ✗ No previous analysis found. Run the full pipeline first.');
      console.error(` Usage: taskex "${callName}"`);
      return; // teardown handled by finally
    }
    console.log(` ✓ Loaded previous analysis from ${prev.timestamp}`);
    log.step(`Loaded previous compilation: ${prev.timestamp}`);

    // 2. Detect changes since the previous analysis timestamp
    console.log(' Detecting changes...');
    const changeReport = detectAllChanges({
      repoPath: opts.repoPath,
      callDir: targetDir,
      sinceISO: prev.timestamp,
      analysis: prev.compiled,
    });

    console.log(` ✓ Git: ${changeReport.totals.commits} commits, ${changeReport.totals.filesChanged} files changed`);
    console.log(` ✓ Docs: ${changeReport.totals.docsChanged} document(s) updated`);
    console.log(` ✓ Items: ${changeReport.items.length} trackable items found`);
    console.log(` ✓ Correlations: ${changeReport.totals.itemsWithMatches} items with matches`);
    log.step(`Changes detected: ${changeReport.totals.commits} commits, ${changeReport.totals.filesChanged} files, ${changeReport.totals.docsChanged} docs`);
    console.log('');

    // 3. Local assessment (always runs, no AI required)
    const localAssessments = assessProgressLocal(changeReport.items, changeReport.correlations);
    const localSummary = buildProgressSummary(localAssessments);
    console.log(` Local assessment: ${localSummary.done} done, ${localSummary.inProgress} in progress, ${localSummary.notStarted} not started`);

    // 4. AI-enhanced assessment (if Gemini is available)
    let finalAssessments = localAssessments;
    let overallSummary = null;
    let recommendations = [];
    let aiMode = 'local';

    if (!opts.skipGemini) {
      try {
        console.log(' Running AI-enhanced assessment...');
        const ai = await initGemini();
        const aiResult = await assessProgressWithAI(ai, changeReport.items, changeReport, localAssessments, {
          thinkingBudget: opts.thinkingBudget,
        });
        finalAssessments = aiResult.assessments;
        overallSummary = aiResult.overall_summary;
        recommendations = aiResult.recommendations;
        aiMode = 'ai-enhanced';

        const aiSummary = buildProgressSummary(finalAssessments);
        console.log(` ✓ AI assessment: ${aiSummary.done} done, ${aiSummary.inProgress} in progress, ${aiSummary.notStarted} not started`);

        if (aiResult.tokenUsage) {
          initCtx.costTracker.addSegment('progress-assessment', aiResult.tokenUsage, 0, false);
        }
        log.step(`AI assessment complete (model: ${aiResult.model})`);
      } catch (err) {
        // Fall back to the local heuristic assessment on any AI failure.
        console.warn(` ⚠ AI assessment failed, using local assessment: ${err.message}`);
        log.warn(`AI assessment failed: ${err.message}`);
      }
    } else {
      console.log(' Skipping AI assessment (--skip-gemini)');
    }
    console.log('');

    // 5. Merge progress into a deep clone so prev.compiled stays pristine.
    // (The data is JSON-derived, so a JSON round-trip clone is lossless here.)
    const annotatedAnalysis = mergeProgressIntoAnalysis(
      JSON.parse(JSON.stringify(prev.compiled)),
      finalAssessments
    );

    // 6. Create output
    const runDir = path.join(targetDir, 'runs', ts);
    fs.mkdirSync(runDir, { recursive: true });

    // Computed once — reused in progressData and the final console summary
    // (previously built twice from the same assessments).
    const finalSummary = buildProgressSummary(finalAssessments);

    const progressData = {
      timestamp: ts,
      mode: aiMode,
      callName,
      sinceAnalysis: prev.timestamp,
      changeReport: serializeReport(changeReport),
      assessments: finalAssessments,
      summary: finalSummary,
      overallSummary,
      recommendations,
      annotatedAnalysis,
    };

    const progressJsonPath = path.join(runDir, 'progress.json');
    fs.writeFileSync(progressJsonPath, JSON.stringify(progressData, null, 2));
    log.step(`Wrote ${progressJsonPath}`);

    const progressMd = renderProgressMarkdown({
      assessments: finalAssessments,
      changeReport,
      overallSummary,
      recommendations,
      meta: { callName, timestamp: ts, mode: aiMode },
    });
    const progressMdPath = path.join(runDir, 'progress.md');
    fs.writeFileSync(progressMdPath, progressMd);
    log.step(`Wrote ${progressMdPath}`);

    console.log(` ✓ Progress report: ${path.relative(PROJECT_ROOT, progressMdPath)}`);
    console.log(` ✓ Progress data: ${path.relative(PROJECT_ROOT, progressJsonPath)}`);

    // 7. Firebase upload (optional, best-effort)
    if (!opts.skipUpload) {
      try {
        const { storage, authenticated } = await initFirebase();
        if (storage && authenticated) {
          const storagePath = `calls/${callName}/progress/${ts}`;
          await uploadToStorage(storage, progressJsonPath, `${storagePath}/progress.json`);
          await uploadToStorage(storage, progressMdPath, `${storagePath}/progress.md`);
          console.log(` ✓ Uploaded to Firebase: ${storagePath}/`);
          log.step(`Firebase upload complete: ${storagePath}`);
        }
      } catch (fbErr) {
        // Local output already exists; never fail the run on upload errors.
        console.warn(` ⚠ Firebase upload failed: ${fbErr.message}`);
        log.warn(`Firebase upload failed: ${fbErr.message}`);
      }
    }

    // 8. Print summary
    console.log('');
    console.log(' ══════════════════════════════════════');
    console.log(' Progress Update Complete');
    console.log(' ══════════════════════════════════════');
    console.log(` ${STATUS_ICONS.DONE} Completed: ${finalSummary.done}`);
    console.log(` ${STATUS_ICONS.IN_PROGRESS} In Progress: ${finalSummary.inProgress}`);
    console.log(` ${STATUS_ICONS.NOT_STARTED} Not Started: ${finalSummary.notStarted}`);
    console.log(` ${STATUS_ICONS.SUPERSEDED} Superseded: ${finalSummary.superseded}`);
    const completionPct = finalSummary.total > 0 ? ((finalSummary.done / finalSummary.total) * 100).toFixed(0) : 0;
    console.log(` Overall: ${completionPct}% complete (${finalSummary.done}/${finalSummary.total})`);
    console.log('');
  } finally {
    // FIX: teardown was previously duplicated at each early return and skipped
    // on unexpected throws — leaking the progress timer and the log handle.
    initCtx.progress.cleanup();
    log.close();
  }
}
2005
+
2006
+ module.exports = { run, getLog: () => log };