task-summary-extractor 9.6.0 → 9.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ARCHITECTURE.md +51 -0
- package/QUICK_START.md +11 -0
- package/README.md +11 -7
- package/package.json +1 -1
- package/src/modes/deep-summary.js +37 -0
- package/src/modes/focused-reanalysis.js +16 -1
- package/src/phases/init.js +3 -0
- package/src/phases/process-media.js +238 -2
- package/src/phases/summary.js +5 -5
- package/src/pipeline.js +2 -1
- package/src/renderers/docx.js +1 -1
- package/src/renderers/html.js +1 -2
- package/src/services/gemini.js +233 -1
- package/src/services/video.js +9 -9
- package/src/utils/cli.js +7 -3
- package/src/utils/context-manager.js +152 -0
- package/src/utils/diff-engine.js +7 -7
- package/src/utils/interactive.js +50 -4
- package/src/utils/progress-bar.js +11 -10
- package/src/utils/schema-validator.js +33 -2
package/ARCHITECTURE.md
CHANGED
|
@@ -15,6 +15,7 @@
|
|
|
15
15
|
- [Per-Segment Processing](#per-segment-processing)
|
|
16
16
|
- [File Resolution Strategies](#file-resolution-strategies)
|
|
17
17
|
- [Quality Gate Decision Table](#quality-gate-decision-table)
|
|
18
|
+
- [Multi-Segment Batching](#multi-segment-batching)
|
|
18
19
|
- [Smart Change Detection](#smart-change-detection)
|
|
19
20
|
- [Correlation Strategies](#correlation-strategies)
|
|
20
21
|
- [Assessment Thresholds](#assessment-thresholds)
|
|
@@ -249,6 +250,56 @@ After all passes complete, any Gemini File API uploads are cleaned up (fire-and-
|
|
|
249
250
|
|
|
250
251
|
---
|
|
251
252
|
|
|
253
|
+
## Multi-Segment Batching
|
|
254
|
+
|
|
255
|
+
When the Gemini context window has enough headroom, consecutive video segments are grouped into single API calls. This reduces the number of Gemini calls and gives the model better cross-segment awareness.
|
|
256
|
+
|
|
257
|
+
```mermaid
|
|
258
|
+
flowchart TB
|
|
259
|
+
START(["All Segments"]) --> CHECK{"Batching enabled?\n!noBatch && !skipGemini\n&& segments > 1"}
|
|
260
|
+
CHECK -->|No| SINGLE["Single-segment\nprocessing (original)"]
|
|
261
|
+
CHECK -->|Yes| PLAN["planSegmentBatches()\nGreedy bin-packing"]
|
|
262
|
+
|
|
263
|
+
PLAN --> BUDGET["Calculate token budget:\ncontextWindow (1M)\n− promptOverhead (120K)\n− docTokens\n− prevAnalysesTokens\n= available for video"]
|
|
264
|
+
|
|
265
|
+
BUDGET --> FIT{"batchSize > 1?"}
|
|
266
|
+
FIT -->|No| SINGLE
|
|
267
|
+
FIT -->|Yes| BATCH["Process in batches"]
|
|
268
|
+
|
|
269
|
+
BATCH --> B1["Batch 1:\nsegs 1–N"]
|
|
270
|
+
BATCH --> B2["Batch 2:\nsegs N+1–M"]
|
|
271
|
+
BATCH --> BN["..."]
|
|
272
|
+
|
|
273
|
+
B1 --> CALL["processSegmentBatch()\nMultiple fileData parts\nper Gemini call"]
|
|
274
|
+
CALL --> PARSE["Parse + Quality Gate\n+ Schema Validation"]
|
|
275
|
+
PARSE --> TAG["Tag items with\nsource_segment"]
|
|
276
|
+
|
|
277
|
+
CALL -->|Error| FALLBACK["Fall back to\nsingle-segment mode"]
|
|
278
|
+
FALLBACK --> SINGLE
|
|
279
|
+
```
|
|
280
|
+
|
|
281
|
+
### How It Works
|
|
282
|
+
|
|
283
|
+
| Step | Detail |
|
|
284
|
+
| ------ | -------- |
|
|
285
|
+
| **Token budget** | `contextWindow − 120K overhead − docTokens − prevAnalysesTokens = available` |
|
|
286
|
+
| **Video cost** | ~300 tokens/sec × segment duration |
|
|
287
|
+
| **Bin-packing** | Greedy: add consecutive segments until budget or max batch size (8) reached |
|
|
288
|
+
| **Deep summary synergy** | Deep summary frees 60–80% of doc tokens → more room for video → larger batches |
|
|
289
|
+
| **Fallback** | Any batch failure → entire remaining file falls back to single-segment processing |
|
|
290
|
+
| **Cache aware** | Cached segment runs are loaded from disk; only uncached batches hit the API |
|
|
291
|
+
| **Disable** | `--no-batch` forces original single-segment behavior |
|
|
292
|
+
|
|
293
|
+
### Token Math Example
|
|
294
|
+
|
|
295
|
+
| Scenario | Doc Tokens | Available | Seg Duration | Tokens/Seg | Batch Size |
|
|
296
|
+
| ---------- | ----------- | ----------- | ------------- | ----------- | ----------- |
|
|
297
|
+
| No deep summary | 300K | ~580K | 280s | 84K | 6 |
|
|
298
|
+
| With deep summary | 60K | ~820K | 280s | 84K | 8 (capped) |
|
|
299
|
+
| Raw mode | 60K | ~820K | 1200s | 360K | 2 |
|
|
300
|
+
|
|
301
|
+
---
|
|
302
|
+
|
|
252
303
|
## Smart Change Detection
|
|
253
304
|
|
|
254
305
|
The `--update-progress` mode tracks which extracted items have been addressed:
|
package/QUICK_START.md
CHANGED
|
@@ -236,6 +236,17 @@ my-project/runs/{timestamp}/
|
|
|
236
236
|
|
|
237
237
|
---
|
|
238
238
|
|
|
239
|
+
## Advanced Features
|
|
240
|
+
|
|
241
|
+
| Feature | Flag | Description |
|
|
242
|
+
| --------- | ------ | ------------- |
|
|
243
|
+
| **Deep Summary** | `--deep-summary` | Pre-summarizes context docs — saves 60-80% input tokens per segment |
|
|
244
|
+
| **Deep Dive** | `--deep-dive` | Generates explanatory docs for each discussion topic |
|
|
245
|
+
| **Multi-Segment Batching** | enabled by default | When context window has headroom, groups consecutive segments into single API calls — fewer requests, better cross-segment awareness. Use `--no-batch` to disable |
|
|
246
|
+
| **Raw Video Mode** | `--no-compress` | Skip re-encoding — pass video directly to Gemini |
|
|
247
|
+
|
|
248
|
+
---
|
|
249
|
+
|
|
239
250
|
## Troubleshooting
|
|
240
251
|
|
|
241
252
|
| Problem | Fix |
|
package/README.md
CHANGED
|
@@ -1,13 +1,13 @@
|
|
|
1
1
|
# Task Summary Extractor
|
|
2
2
|
|
|
3
|
-
> **v9.
|
|
3
|
+
> **v9.8.0** — AI-powered content analysis CLI — meetings, recordings, documents, or any mix. Install globally, run anywhere.
|
|
4
4
|
|
|
5
5
|
<p align="center">
|
|
6
6
|
<img src="https://img.shields.io/badge/node-%3E%3D18.0.0-green" alt="Node.js" />
|
|
7
7
|
<img src="https://img.shields.io/badge/gemini-2.5--flash-blue" alt="Gemini" />
|
|
8
|
-
<img src="https://img.shields.io/badge/firebase-
|
|
9
|
-
<img src="https://img.shields.io/badge/version-9.
|
|
10
|
-
<img src="https://img.shields.io/badge/tests-
|
|
8
|
+
<img src="https://img.shields.io/badge/firebase-12.x-orange" alt="Firebase" />
|
|
9
|
+
  <img src="https://img.shields.io/badge/version-9.8.0-brightgreen" alt="Version" />
|
|
10
|
+
  <img src="https://img.shields.io/badge/tests-367%20passing-brightgreen" alt="Tests" />
|
|
11
11
|
<img src="https://img.shields.io/badge/npm-task--summary--extractor-red" alt="npm" />
|
|
12
12
|
</p>
|
|
13
13
|
|
|
@@ -183,7 +183,7 @@ These are the ones you'll actually use:
|
|
|
183
183
|
| `--resume` | Continue an interrupted run | `--resume` |
|
|
184
184
|
| `--reanalyze` | Force fresh analysis (ignore cache) | `--reanalyze` |
|
|
185
185
|
| `--dry-run` | Preview what would run, without running | `--dry-run` |
|
|
186
|
-
| `--format <type>` | Output format: `md`, `html`, `json`, `pdf`, `docx`, `all` (default: `
|
|
186
|
+
| `--format <type>` | Output format: `md`, `html`, `json`, `pdf`, `docx`, `all` (default: `all`) | `--format html` |
|
|
187
187
|
| `--min-confidence <level>` | Filter items by confidence: `high`, `medium`, `low` | `--min-confidence high` |
|
|
188
188
|
| `--no-html` | Suppress HTML report generation | `--no-html` |
|
|
189
189
|
| `--deep-summary` | Pre-summarize context docs (60-80% token savings) | `--deep-summary` |
|
|
@@ -273,6 +273,7 @@ Control how video is processed before AI analysis:
|
|
|
273
273
|
| `--no-focused-pass` | enabled | Disable targeted re-analysis of weak segments |
|
|
274
274
|
| `--no-learning` | enabled | Disable auto-tuning from historical run data |
|
|
275
275
|
| `--no-diff` | enabled | Disable diff comparison with the previous run |
|
|
276
|
+
| `--no-batch` | enabled | Disable multi-segment batching (force 1 segment per API call) |
|
|
276
277
|
|
|
277
278
|
### Available Models
|
|
278
279
|
|
|
@@ -304,7 +305,7 @@ DYNAMIC --request <text>
|
|
|
304
305
|
PROGRESS --repo <path>
|
|
305
306
|
TUNING --thinking-budget --compilation-thinking-budget --parallel
|
|
306
307
|
--parallel-analysis --log-level --output
|
|
307
|
-
--no-focused-pass --no-learning --no-diff
|
|
308
|
+
--no-focused-pass --no-learning --no-diff --no-batch
|
|
308
309
|
INFO --help (-h) --version (-v)
|
|
309
310
|
```
|
|
310
311
|
|
|
@@ -472,6 +473,7 @@ GEMINI_API_KEY=your-key-here
|
|
|
472
473
|
| **Deep Summary** | `--deep-summary` pre-summarizes context docs, 60-80% token savings per segment |
|
|
473
474
|
| **Context Window Safety** | Auto-truncation, pre-flight token checks, RESOURCE_EXHAUSTED recovery |
|
|
474
475
|
| **Multi-Format Output** | `--format` flag: Markdown, HTML, JSON, PDF, DOCX, or all formats at once |
|
|
476
|
+
| **Multi-Segment Batching** | Groups consecutive segments into single API calls when context window has headroom — fewer calls, better cross-segment awareness. `--no-batch` to disable |
|
|
475
477
|
| **Interactive CLI** | Run with no args → guided experience |
|
|
476
478
|
| **Resume / Checkpoint** | `--resume` continues interrupted runs |
|
|
477
479
|
| **Firebase Upload** | Team access via cloud (optional) |
|
|
@@ -586,7 +588,7 @@ task-summary-extractor/
|
|
|
586
588
|
| `npm run check` | Validate environment |
|
|
587
589
|
| `npm start` | Run the pipeline |
|
|
588
590
|
| `npm run help` | Show CLI help |
|
|
589
|
-
| `npm test` | Run test suite (
|
|
591
|
+
| `npm test` | Run test suite (367 tests) |
|
|
590
592
|
| `npm run test:watch` | Run tests in watch mode |
|
|
591
593
|
| `npm run test:coverage` | Run tests with coverage report |
|
|
592
594
|
|
|
@@ -596,6 +598,8 @@ task-summary-extractor/
|
|
|
596
598
|
|
|
597
599
|
| Version | Highlights |
|
|
598
600
|
|---------|-----------|
|
|
601
|
+
| **v9.8.0** | **Schema hardening & transcript handling** — VTT/SRT auto-excluded from deep-summary (transcripts routed to workflow, not summarizer), `normalizeAnalysis()` fills missing `summary`/`confidence`/`discussed_state` defaults before validation, batch Storage URL→File API auto-retry on `INVALID_ARGUMENT`, focused re-analysis skips sparse segments (≤2 items + low density), 367 tests |
|
|
602
|
+
| **v9.7.0** | **Multi-segment batching** — groups consecutive video segments into single Gemini API calls when context window has headroom, greedy bin-packing by token budget (`planSegmentBatches`), `processSegmentBatch()` multi-video API calls, automatic fallback to single-segment on failure, `--no-batch` to disable, codebase audit fixes (unused imports, variable shadowing) |
|
|
599
603
|
| **v9.6.0** | **Interactive CLI UX** — arrow-key navigation for all selectors (folder, model, run mode, formats, confidence, doc exclusion), zero-dependency prompt engine (`interactive.js`), `selectOne()` with ↑↓+Enter, `selectMany()` with Space toggle + A all/none, non-TTY fallback to number input |
|
|
600
604
|
| **v9.5.0** | **Video processing flags** — `--no-compress`, `--speed`, `--segment-time` CLI flags, hardcoded 1200s for raw mode, deprecated `--skip-compression` |
|
|
601
605
|
| **v9.4.0** | **Context window safety** — pre-flight token checks, auto-truncation for oversized docs/VTTs, RESOURCE_EXHAUSTED recovery with automatic doc shedding, chunked compilation for large segment sets, P0/P1 hard cap (2× budget) prevents context overflow, improved deep-summary prompt quality |
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "task-summary-extractor",
|
|
3
|
-
"version": "9.
|
|
3
|
+
"version": "9.8.0",
|
|
4
4
|
"description": "AI-powered meeting analysis & document generation CLI — video + document processing, deep dive docs, dynamic mode, interactive CLI with model selection, confidence scoring, learning loop, git progress tracking",
|
|
5
5
|
"main": "process_and_upload.js",
|
|
6
6
|
"bin": {
|
|
@@ -27,6 +27,20 @@ const config = require('../config');
|
|
|
27
27
|
|
|
28
28
|
// ======================== CONSTANTS ========================
|
|
29
29
|
|
|
30
|
+
/**
 * File extensions of timestamped transcript formats (WebVTT / SubRip).
 * These files must never be sent to the summarizer: analysis time-slices
 * them per video segment (see `sliceVttForSegment`), and summarization
 * would destroy the timestamp-indexed structure that slicing depends on.
 * They are therefore always kept at full fidelity.
 */
const TRANSCRIPT_EXTENSIONS = ['.vtt', '.srt'];

/**
 * Check whether a filename is a transcript file (VTT/SRT).
 *
 * @param {string|null|undefined} fileName - File name to test; a nullish
 *   value is treated as a non-match rather than throwing.
 * @returns {boolean} true when the name, compared case-insensitively,
 *   ends with one of TRANSCRIPT_EXTENSIONS.
 */
function isTranscriptFile(fileName) {
  const normalized = (fileName || '').toLowerCase();
  for (const extension of TRANSCRIPT_EXTENSIONS) {
    if (normalized.endsWith(extension)) {
      return true;
    }
  }
  return false;
}
|
|
43
|
+
|
|
30
44
|
/** Max tokens for a single summarization call output */
|
|
31
45
|
const SUMMARY_MAX_OUTPUT = 16384;
|
|
32
46
|
|
|
@@ -262,6 +276,13 @@ async function deepSummarize(ai, contextDocs, opts = {}) {
|
|
|
262
276
|
continue;
|
|
263
277
|
}
|
|
264
278
|
|
|
279
|
+
// Auto-exclude transcript files (VTT/SRT) — they are time-sliced per
|
|
280
|
+
// segment during analysis and must retain their timestamp structure.
|
|
281
|
+
if (isTranscriptFile(doc.fileName)) {
|
|
282
|
+
keepFull.push(doc);
|
|
283
|
+
continue;
|
|
284
|
+
}
|
|
285
|
+
|
|
265
286
|
// Keep excluded docs at full fidelity
|
|
266
287
|
if (excludeSet.has(doc.fileName.toLowerCase())) {
|
|
267
288
|
keepFull.push(doc);
|
|
@@ -294,14 +315,22 @@ async function deepSummarize(ai, contextDocs, opts = {}) {
|
|
|
294
315
|
}
|
|
295
316
|
|
|
296
317
|
// Build focus topics from excluded docs (tell summarizer what to prioritize)
|
|
318
|
+
// NOTE: transcript files (VTT/SRT) are auto-excluded but NOT used as focus
|
|
319
|
+
// topics — they are time-sliced per segment and don't represent "topics".
|
|
297
320
|
const focusTopics = keepFull
|
|
298
321
|
.filter(d => d.type === 'inlineText' && excludeSet.has(d.fileName.toLowerCase()))
|
|
299
322
|
.map(d => d.fileName);
|
|
300
323
|
|
|
324
|
+
// Count auto-excluded transcript files for logging
|
|
325
|
+
const autoExcludedTranscripts = keepFull.filter(d => isTranscriptFile(d.fileName));
|
|
326
|
+
|
|
301
327
|
// Batch documents
|
|
302
328
|
const batches = buildBatches(toSummarize);
|
|
303
329
|
|
|
304
330
|
console.log(` Batched ${c.highlight(toSummarize.length)} doc(s) into ${c.highlight(batches.length)} summarization batch(es)`);
|
|
331
|
+
if (autoExcludedTranscripts.length > 0) {
|
|
332
|
+
console.log(` Auto-excluded ${c.highlight(autoExcludedTranscripts.length)} transcript file(s) (VTT/SRT — time-sliced per segment)`);
|
|
333
|
+
}
|
|
305
334
|
if (focusTopics.length > 0) {
|
|
306
335
|
console.log(` Focus topics from ${c.highlight(focusTopics.length)} excluded doc(s):`);
|
|
307
336
|
focusTopics.forEach(t => console.log(` ${c.dim('•')} ${c.cyan(t)}`));
|
|
@@ -350,6 +379,12 @@ async function deepSummarize(ai, contextDocs, opts = {}) {
|
|
|
350
379
|
continue;
|
|
351
380
|
}
|
|
352
381
|
|
|
382
|
+
// Auto-exclude transcript files (VTT/SRT)
|
|
383
|
+
if (isTranscriptFile(doc.fileName)) {
|
|
384
|
+
resultDocs.push(doc);
|
|
385
|
+
continue;
|
|
386
|
+
}
|
|
387
|
+
|
|
353
388
|
// Check if we have a summary for this doc
|
|
354
389
|
const summaryKey = doc.fileName.toLowerCase();
|
|
355
390
|
const summary = allSummaries.get(summaryKey);
|
|
@@ -399,6 +434,8 @@ module.exports = {
|
|
|
399
434
|
deepSummarize,
|
|
400
435
|
summarizeBatch,
|
|
401
436
|
buildBatches,
|
|
437
|
+
isTranscriptFile,
|
|
438
|
+
TRANSCRIPT_EXTENSIONS,
|
|
402
439
|
SUMMARY_MAX_OUTPUT,
|
|
403
440
|
BATCH_MAX_CHARS,
|
|
404
441
|
MIN_SUMMARIZE_LENGTH,
|
|
@@ -136,9 +136,24 @@ function identifyWeaknesses(qualityReport, analysis) {
|
|
|
136
136
|
);
|
|
137
137
|
}
|
|
138
138
|
|
|
139
|
+
// ── Skip focused pass for simple / sparse segments ──────────────────────
|
|
140
|
+
// When the analysis has very few extracted items AND the density dimension
|
|
141
|
+
// is low, the segment is likely simple (chit-chat, small-talk, intro) or
|
|
142
|
+
// the AI legitimately had nothing to extract. A focused pass won't help.
|
|
143
|
+
const totalItems = [
|
|
144
|
+
...(analysis.tickets || []),
|
|
145
|
+
...(analysis.action_items || []),
|
|
146
|
+
...(analysis.change_requests || []),
|
|
147
|
+
...(analysis.blockers || []),
|
|
148
|
+
...(analysis.scope_changes || []),
|
|
149
|
+
].length;
|
|
150
|
+
|
|
151
|
+
const isSparseSegment = totalItems <= 2 && dims.density && dims.density.score < 30;
|
|
152
|
+
|
|
139
153
|
const shouldReanalyze = focusInstructions.length > 0 &&
|
|
140
154
|
qualityReport.score < 60 && // Only re-analyze if quality is truly lacking
|
|
141
|
-
weakAreas.length >= 2
|
|
155
|
+
weakAreas.length >= 2 && // At least 2 weak areas to justify the cost
|
|
156
|
+
!isSparseSegment; // Don't waste tokens on sparse / simple segments
|
|
142
157
|
|
|
143
158
|
const focusPrompt = focusInstructions.length > 0
|
|
144
159
|
? focusInstructions.join('\n\n')
|
package/src/phases/init.js
CHANGED
|
@@ -66,6 +66,7 @@ async function phaseInit() {
|
|
|
66
66
|
disableLearning: !!flags['no-learning'],
|
|
67
67
|
disableDiff: !!flags['no-diff'],
|
|
68
68
|
noHtml: !!flags['no-html'],
|
|
69
|
+
noBatch: !!flags['no-batch'],
|
|
69
70
|
// Video processing flags
|
|
70
71
|
noCompress: !!flags['no-compress'],
|
|
71
72
|
speed: flags.speed ? parseFloat(flags.speed) : null,
|
|
@@ -355,6 +356,7 @@ function _printRunSummary(opts, modelId, models, targetDir) {
|
|
|
355
356
|
if (opts.deepDive) features.push(c.cyan('deep-dive'));
|
|
356
357
|
if (opts.deepSummary) features.push(c.cyan('deep-summary'));
|
|
357
358
|
if (opts.dynamic) features.push(c.cyan('dynamic'));
|
|
359
|
+
if (!opts.noBatch) features.push(c.green('batch'));
|
|
358
360
|
if (opts.resume) features.push(c.yellow('resume'));
|
|
359
361
|
if (opts.dryRun) features.push(c.yellow('dry-run'));
|
|
360
362
|
if (opts.skipUpload) features.push(c.dim('skip-upload'));
|
|
@@ -363,6 +365,7 @@ function _printRunSummary(opts, modelId, models, targetDir) {
|
|
|
363
365
|
if (opts.disableFocusedPass) disabled.push(c.dim('no-focused'));
|
|
364
366
|
if (opts.disableLearning) disabled.push(c.dim('no-learning'));
|
|
365
367
|
if (opts.disableDiff) disabled.push(c.dim('no-diff'));
|
|
368
|
+
if (opts.noBatch) disabled.push(c.dim('no-batch'));
|
|
366
369
|
|
|
367
370
|
if (features.length > 0) {
|
|
368
371
|
console.log(` ${c.dim('Features:')} ${features.join(c.dim(' · '))}`);
|
|
@@ -9,7 +9,7 @@ const { AUDIO_EXTS, SPEED } = config;
|
|
|
9
9
|
|
|
10
10
|
// --- Services ---
|
|
11
11
|
const { uploadToStorage, storageExists } = require('../services/firebase');
|
|
12
|
-
const { processWithGemini, cleanupGeminiFiles } = require('../services/gemini');
|
|
12
|
+
const { processWithGemini, processSegmentBatch, cleanupGeminiFiles } = require('../services/gemini');
|
|
13
13
|
const { compressAndSegment, compressAndSegmentAudio, splitOnly, probeFormat, verifySegment } = require('../services/video');
|
|
14
14
|
|
|
15
15
|
// --- Utils ---
|
|
@@ -19,7 +19,7 @@ const { parallelMap } = require('../utils/retry');
|
|
|
19
19
|
const { assessQuality, formatQualityLine, getConfidenceStats, THRESHOLDS } = require('../utils/quality-gate');
|
|
20
20
|
const { validateAnalysis, formatSchemaLine, schemaScore, normalizeAnalysis } = require('../utils/schema-validator');
|
|
21
21
|
const { calculateThinkingBudget } = require('../utils/adaptive-budget');
|
|
22
|
-
const { detectBoundaryContext, sliceVttForSegment } = require('../utils/context-manager');
|
|
22
|
+
const { detectBoundaryContext, sliceVttForSegment, planSegmentBatches, estimateTokens, buildProgressiveContext } = require('../utils/context-manager');
|
|
23
23
|
|
|
24
24
|
// --- Modes ---
|
|
25
25
|
const { identifyWeaknesses, runFocusedPass, mergeFocusedResults } = require('../modes/focused-reanalysis');
|
|
@@ -245,6 +245,240 @@ async function phaseProcessVideo(ctx, videoPath, videoIndex) {
|
|
|
245
245
|
const segmentAnalyses = [];
|
|
246
246
|
const segmentReports = []; // Quality reports for health dashboard
|
|
247
247
|
|
|
248
|
+
// ════════════════════════════════════════════════════════════
|
|
249
|
+
// Multi-Segment Batching — pass multiple segments per call
|
|
250
|
+
// when the context window has enough headroom.
|
|
251
|
+
// ════════════════════════════════════════════════════════════
|
|
252
|
+
const useBatching = !opts.noBatch && !opts.skipGemini && !opts.dryRun && segments.length > 1;
|
|
253
|
+
let batchedSuccessfully = false;
|
|
254
|
+
|
|
255
|
+
if (useBatching) {
|
|
256
|
+
const prevTokens = estimateTokens(buildProgressiveContext(previousAnalyses, userName) || '');
|
|
257
|
+
const { batches, batchSize, reason } = planSegmentBatches(
|
|
258
|
+
segmentMeta, contextDocs,
|
|
259
|
+
{
|
|
260
|
+
contextWindow: config.GEMINI_CONTEXT_WINDOW || 1_048_576,
|
|
261
|
+
previousAnalysesTokens: prevTokens,
|
|
262
|
+
}
|
|
263
|
+
);
|
|
264
|
+
|
|
265
|
+
if (batchSize > 1) {
|
|
266
|
+
console.log(` ${c.cyan('⚡ Multi-segment batching:')} ${batches.length} batch(es), up to ${batchSize} segments/batch`);
|
|
267
|
+
console.log(` ${c.dim(reason)}`);
|
|
268
|
+
console.log('');
|
|
269
|
+
batchedSuccessfully = true; // will be set false if we need to fall back
|
|
270
|
+
|
|
271
|
+
for (let bIdx = 0; bIdx < batches.length; bIdx++) {
|
|
272
|
+
if (isShuttingDown()) break;
|
|
273
|
+
const batchIndices = batches[bIdx];
|
|
274
|
+
const batchSegs = batchIndices.map(i => ({
|
|
275
|
+
segPath: segmentMeta[i].segPath,
|
|
276
|
+
segName: segmentMeta[i].segName,
|
|
277
|
+
durSec: segmentMeta[i].durSec,
|
|
278
|
+
storageUrl: segmentMeta[i].storageUrl,
|
|
279
|
+
}));
|
|
280
|
+
const batchTimes = batchIndices.map(i => ({
|
|
281
|
+
startTimeSec: segmentMeta[i].startTimeSec,
|
|
282
|
+
endTimeSec: segmentMeta[i].endTimeSec,
|
|
283
|
+
}));
|
|
284
|
+
|
|
285
|
+
const batchLabel = batchIndices.length === 1
|
|
286
|
+
? `seg ${batchIndices[0] + 1}`
|
|
287
|
+
: `segs ${batchIndices[0] + 1}–${batchIndices[batchIndices.length - 1] + 1}`;
|
|
288
|
+
console.log(` ${c.cyan('══')} Batch ${c.highlight(`${bIdx + 1}/${batches.length}`)} (${batchLabel}) ${c.cyan('══')}`);
|
|
289
|
+
|
|
290
|
+
// Skip batches where all segments have cached runs and user didn't force re-analyze
|
|
291
|
+
if (!forceReanalyze) {
|
|
292
|
+
const allCached = batchIndices.every(i => {
|
|
293
|
+
const prefix = `segment_${String(i).padStart(2, '0')}_`;
|
|
294
|
+
const existing = fs.readdirSync(geminiRunsDir).filter(f => f.startsWith(prefix) && f.endsWith('.json'));
|
|
295
|
+
return existing.length > 0;
|
|
296
|
+
});
|
|
297
|
+
if (allCached) {
|
|
298
|
+
// Load cached results for all segments in this batch
|
|
299
|
+
let cacheOk = true;
|
|
300
|
+
for (const i of batchIndices) {
|
|
301
|
+
const prefix = `segment_${String(i).padStart(2, '0')}_`;
|
|
302
|
+
const existing = fs.readdirSync(geminiRunsDir).filter(f => f.startsWith(prefix) && f.endsWith('.json')).sort();
|
|
303
|
+
const latestFile = existing[existing.length - 1];
|
|
304
|
+
try {
|
|
305
|
+
const cached = JSON.parse(fs.readFileSync(path.join(geminiRunsDir, latestFile), 'utf8'));
|
|
306
|
+
const analysis = normalizeAnalysis(cached.output.parsed || { rawResponse: cached.output.raw });
|
|
307
|
+
analysis._geminiMeta = {
|
|
308
|
+
model: cached.run.model,
|
|
309
|
+
processedAt: cached.run.timestamp,
|
|
310
|
+
durationMs: cached.run.durationMs,
|
|
311
|
+
tokenUsage: cached.run.tokenUsage || null,
|
|
312
|
+
runFile: path.relative(PROJECT_ROOT, path.join(geminiRunsDir, latestFile)),
|
|
313
|
+
parseSuccess: cached.output.parseSuccess,
|
|
314
|
+
skipped: true,
|
|
315
|
+
};
|
|
316
|
+
if (cached.run.tokenUsage) {
|
|
317
|
+
costTracker.addSegment(segmentMeta[i].segName, cached.run.tokenUsage, cached.run.durationMs, true);
|
|
318
|
+
}
|
|
319
|
+
const cachedQuality = assessQuality(analysis, { parseSuccess: cached.output.parseSuccess, rawLength: (cached.output.raw || '').length });
|
|
320
|
+
segmentReports.push({ segmentName: segmentMeta[i].segName, qualityReport: cachedQuality, retried: false, retryImproved: false });
|
|
321
|
+
previousAnalyses.push(analysis);
|
|
322
|
+
segmentAnalyses.push(analysis);
|
|
323
|
+
|
|
324
|
+
fileResult.segments.push({
|
|
325
|
+
segmentFile: segmentMeta[i].segName, segmentIndex: i,
|
|
326
|
+
storagePath: segmentMeta[i].storagePath, storageUrl: segmentMeta[i].storageUrl,
|
|
327
|
+
duration: fmtDuration(segmentMeta[i].durSec), durationSeconds: segmentMeta[i].durSec,
|
|
328
|
+
fileSizeMB: parseFloat(segmentMeta[i].sizeMB),
|
|
329
|
+
geminiRunFile: path.relative(PROJECT_ROOT, path.join(geminiRunsDir, latestFile)),
|
|
330
|
+
analysis,
|
|
331
|
+
});
|
|
332
|
+
console.log(` ${c.success(`seg ${i + 1}: loaded from cache (${latestFile})`)}`);
|
|
333
|
+
} catch (err) {
|
|
334
|
+
console.warn(` ${c.warn(`seg ${i + 1}: cache corrupt — will re-analyze`)}`);
|
|
335
|
+
cacheOk = false;
|
|
336
|
+
break;
|
|
337
|
+
}
|
|
338
|
+
}
|
|
339
|
+
if (cacheOk) {
|
|
340
|
+
console.log('');
|
|
341
|
+
continue; // skip to next batch
|
|
342
|
+
}
|
|
343
|
+
}
|
|
344
|
+
}
|
|
345
|
+
|
|
346
|
+
// Verify all segments in batch
|
|
347
|
+
const invalidInBatch = batchIndices.filter(i => !verifySegment(segmentMeta[i].segPath));
|
|
348
|
+
if (invalidInBatch.length > 0) {
|
|
349
|
+
console.warn(` ${c.warn(`${invalidInBatch.length} corrupt segment(s) in batch — falling back to single-segment mode`)}`);
|
|
350
|
+
batchedSuccessfully = false;
|
|
351
|
+
break;
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
try {
|
|
355
|
+
let batchRun;
|
|
356
|
+
try {
|
|
357
|
+
batchRun = await processSegmentBatch(
|
|
358
|
+
ai, batchSegs,
|
|
359
|
+
`${callName}_${baseName}_batch${bIdx}`,
|
|
360
|
+
contextDocs, previousAnalyses, userName, PKG_ROOT,
|
|
361
|
+
{
|
|
362
|
+
segmentIndices: batchIndices,
|
|
363
|
+
totalSegments: segments.length,
|
|
364
|
+
segmentTimes: batchTimes,
|
|
365
|
+
thinkingBudget: opts.thinkingBudget || 24576,
|
|
366
|
+
noStorageUrl: !!opts.noStorageUrl,
|
|
367
|
+
}
|
|
368
|
+
);
|
|
369
|
+
} catch (batchErr) {
|
|
370
|
+
const msg = batchErr.message || '';
|
|
371
|
+
// If Storage URL was rejected, retry batch with forced File API uploads
|
|
372
|
+
if (!opts.noStorageUrl && msg.includes('INVALID_ARGUMENT') && batchSegs.some(s => s.storageUrl)) {
|
|
373
|
+
console.log(` ${c.warn('Storage URL rejected — retrying batch with File API uploads...')}`);
|
|
374
|
+
log.warn(`Batch ${bIdx} Storage URL rejected — retrying with noStorageUrl=true`);
|
|
375
|
+
batchRun = await processSegmentBatch(
|
|
376
|
+
ai, batchSegs,
|
|
377
|
+
`${callName}_${baseName}_batch${bIdx}`,
|
|
378
|
+
contextDocs, previousAnalyses, userName, PKG_ROOT,
|
|
379
|
+
{
|
|
380
|
+
segmentIndices: batchIndices,
|
|
381
|
+
totalSegments: segments.length,
|
|
382
|
+
segmentTimes: batchTimes,
|
|
383
|
+
thinkingBudget: opts.thinkingBudget || 24576,
|
|
384
|
+
noStorageUrl: true,
|
|
385
|
+
}
|
|
386
|
+
);
|
|
387
|
+
console.log(` ${c.success('File API batch retry succeeded')}`);
|
|
388
|
+
} else {
|
|
389
|
+
throw batchErr;
|
|
390
|
+
}
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
// Save batch run file
|
|
394
|
+
const ts = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
|
|
395
|
+
const batchRunFileName = `batch_${bIdx}_segs_${batchIndices[0]}-${batchIndices[batchIndices.length - 1]}_${ts}.json`;
|
|
396
|
+
const batchRunPath = path.join(geminiRunsDir, batchRunFileName);
|
|
397
|
+
fs.writeFileSync(batchRunPath, JSON.stringify(batchRun, null, 2), 'utf8');
|
|
398
|
+
|
|
399
|
+
const analysis = normalizeAnalysis(batchRun.output.parsed || { rawResponse: batchRun.output.raw });
|
|
400
|
+
analysis._geminiMeta = {
|
|
401
|
+
model: batchRun.run.model,
|
|
402
|
+
processedAt: batchRun.run.timestamp,
|
|
403
|
+
durationMs: batchRun.run.durationMs,
|
|
404
|
+
tokenUsage: batchRun.run.tokenUsage || null,
|
|
405
|
+
runFile: path.relative(PROJECT_ROOT, batchRunPath),
|
|
406
|
+
parseSuccess: batchRun.output.parseSuccess,
|
|
407
|
+
batchMode: true,
|
|
408
|
+
segmentIndices: batchIndices,
|
|
409
|
+
};
|
|
410
|
+
|
|
411
|
+
// Track cost
|
|
412
|
+
costTracker.addSegment(`batch_${bIdx}`, batchRun.run.tokenUsage, batchRun.run.durationMs, false);
|
|
413
|
+
|
|
414
|
+
// Quality gate
|
|
415
|
+
const qualityReport = assessQuality(analysis, {
|
|
416
|
+
parseSuccess: batchRun.output.parseSuccess,
|
|
417
|
+
rawLength: (batchRun.output.raw || '').length,
|
|
418
|
+
});
|
|
419
|
+
console.log(formatQualityLine(qualityReport, `batch ${bIdx + 1}`));
|
|
420
|
+
|
|
421
|
+
// Schema validation
|
|
422
|
+
const schemaReport = validateAnalysis(analysis, 'segment');
|
|
423
|
+
console.log(formatSchemaLine(schemaReport));
|
|
424
|
+
|
|
425
|
+
// Assign batch analysis to each segment in the batch
|
|
426
|
+
for (const i of batchIndices) {
|
|
427
|
+
segmentReports.push({ segmentName: segmentMeta[i].segName, qualityReport, retried: false, retryImproved: false });
|
|
428
|
+
fileResult.segments.push({
|
|
429
|
+
segmentFile: segmentMeta[i].segName, segmentIndex: i,
|
|
430
|
+
storagePath: segmentMeta[i].storagePath, storageUrl: segmentMeta[i].storageUrl,
|
|
431
|
+
duration: fmtDuration(segmentMeta[i].durSec), durationSeconds: segmentMeta[i].durSec,
|
|
432
|
+
fileSizeMB: parseFloat(segmentMeta[i].sizeMB),
|
|
433
|
+
geminiRunFile: path.relative(PROJECT_ROOT, batchRunPath),
|
|
434
|
+
analysis,
|
|
435
|
+
});
|
|
436
|
+
}
|
|
437
|
+
|
|
438
|
+
// Source-segment tagging
|
|
439
|
+
const tagSeg = (arr, segNum) => (arr || []).forEach(item => { if (!item.source_segment) item.source_segment = segNum; });
|
|
440
|
+
for (const i of batchIndices) {
|
|
441
|
+
tagSeg(analysis.action_items, i + 1);
|
|
442
|
+
tagSeg(analysis.change_requests, i + 1);
|
|
443
|
+
tagSeg(analysis.blockers, i + 1);
|
|
444
|
+
tagSeg(analysis.scope_changes, i + 1);
|
|
445
|
+
}
|
|
446
|
+
|
|
447
|
+
previousAnalyses.push(analysis);
|
|
448
|
+
segmentAnalyses.push(analysis);
|
|
449
|
+
|
|
450
|
+
// Cleanup Gemini File API uploads
|
|
451
|
+
if (batchRun._geminiFileNames && batchRun._geminiFileNames.length > 0 && ai) {
|
|
452
|
+
cleanupGeminiFiles(ai, batchRun._geminiFileNames).catch(() => {});
|
|
453
|
+
}
|
|
454
|
+
|
|
455
|
+
const dur = (batchRun.run.durationMs / 1000).toFixed(1);
|
|
456
|
+
console.log(` ${c.success(`Batch analysis complete (${dur}s, ${batchIndices.length} segments)`)}`);
|
|
457
|
+
progress.markAnalyzed(`${baseName}_batch${bIdx}`, path.relative(PROJECT_ROOT, batchRunPath));
|
|
458
|
+
} catch (err) {
|
|
459
|
+
console.error(` ${c.error(`Batch analysis failed: ${err.message}`)}`);
|
|
460
|
+
console.warn(` ${c.warn('Falling back to single-segment processing for remaining segments')}`);
|
|
461
|
+
console.warn(` ${c.dim('Tip: use --no-batch to disable batching if this persists.')}`);
|
|
462
|
+
log.error(`Batch ${bIdx} failed — ${err.message}`);
|
|
463
|
+
batchedSuccessfully = false;
|
|
464
|
+
break;
|
|
465
|
+
}
|
|
466
|
+
console.log('');
|
|
467
|
+
}
|
|
468
|
+
|
|
469
|
+
if (batchedSuccessfully) {
|
|
470
|
+
const totalSegs = batches.reduce((s, b) => s + b.length, 0);
|
|
471
|
+
console.log(` ${c.success(`All ${batches.length} batch(es) complete: ${totalSegs} segments analyzed`)}`);
|
|
472
|
+
console.log('');
|
|
473
|
+
}
|
|
474
|
+
}
|
|
475
|
+
}
|
|
476
|
+
|
|
477
|
+
// ════════════════════════════════════════════════════════════
|
|
478
|
+
// Single-Segment Processing (original path / fallback)
|
|
479
|
+
// ════════════════════════════════════════════════════════════
|
|
480
|
+
if (!batchedSuccessfully) {
|
|
481
|
+
|
|
248
482
|
for (let j = 0; j < segments.length; j++) {
|
|
249
483
|
if (isShuttingDown()) break;
|
|
250
484
|
|
|
@@ -647,6 +881,8 @@ async function phaseProcessVideo(ctx, videoPath, videoIndex) {
|
|
|
647
881
|
console.log('');
|
|
648
882
|
}
|
|
649
883
|
|
|
884
|
+
} // end if (!batchedSuccessfully) — single-segment fallback
|
|
885
|
+
|
|
650
886
|
// Compute totals for this file
|
|
651
887
|
fileResult.compressedTotalMB = fileResult.segments
|
|
652
888
|
.reduce((sum, s) => sum + s.fileSizeMB, 0).toFixed(2);
|
package/src/phases/summary.js
CHANGED
|
@@ -40,11 +40,11 @@ function phaseSummary(ctx, results, { jsonPath, mdPath, runTs, compilationRun })
|
|
|
40
40
|
if (cost.totalTokens > 0) {
|
|
41
41
|
console.log('');
|
|
42
42
|
console.log(` ${c.heading(`Cost estimate (${config.GEMINI_MODEL}):`)}`);
|
|
43
|
-
console.log(` Input
|
|
44
|
-
console.log(` Output
|
|
45
|
-
console.log(` Thinking
|
|
46
|
-
console.log(` Total
|
|
47
|
-
console.log(` AI time
|
|
43
|
+
console.log(` Input: ${c.yellow(cost.inputTokens.toLocaleString())} ${c.dim(`($${cost.inputCost.toFixed(4)})`)}`);
|
|
44
|
+
console.log(` Output: ${c.yellow(cost.outputTokens.toLocaleString())} ${c.dim(`($${cost.outputCost.toFixed(4)})`)}`);
|
|
45
|
+
console.log(` Thinking: ${c.yellow(cost.thinkingTokens.toLocaleString())} ${c.dim(`($${cost.thinkingCost.toFixed(4)})`)}`);
|
|
46
|
+
console.log(` Total: ${c.highlight(cost.totalTokens.toLocaleString() + ' tokens')} | ${c.green('$' + cost.totalCost.toFixed(4))}`);
|
|
47
|
+
console.log(` AI time: ${c.yellow((cost.totalDurationMs / 1000).toFixed(1) + 's')}`);
|
|
48
48
|
}
|
|
49
49
|
|
|
50
50
|
if (firebaseReady && !opts.skipUpload) {
|
package/src/pipeline.js
CHANGED
|
@@ -151,7 +151,7 @@ async function run() {
|
|
|
151
151
|
files: [],
|
|
152
152
|
};
|
|
153
153
|
|
|
154
|
-
fullCtx.progress.setPhase('
|
|
154
|
+
fullCtx.progress.setPhase('analyze');
|
|
155
155
|
bar.setPhase('analyze', mediaFiles.length);
|
|
156
156
|
if (log && log.phaseStart) log.phaseStart('process_videos');
|
|
157
157
|
|
|
@@ -702,6 +702,7 @@ async function runDynamic(initCtx) {
|
|
|
702
702
|
});
|
|
703
703
|
} catch (err) {
|
|
704
704
|
console.error(` ${c.error(`Topic planning failed: ${err.message}`)}`);
|
|
705
|
+
console.error(` ${c.dim('Tip: check your Gemini API key, or try a simpler --request.')}`);
|
|
705
706
|
log.error(`Dynamic topic planning failed: ${err.message}`); bar.finish(); initCtx.progress.cleanup();
|
|
706
707
|
log.close();
|
|
707
708
|
return;
|
package/src/renderers/docx.js
CHANGED
package/src/renderers/html.js
CHANGED