task-summary-extractor 9.6.0 → 9.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ARCHITECTURE.md +51 -0
- package/QUICK_START.md +11 -0
- package/README.md +10 -7
- package/package.json +1 -1
- package/src/phases/init.js +3 -0
- package/src/phases/process-media.js +213 -2
- package/src/phases/summary.js +5 -5
- package/src/pipeline.js +2 -1
- package/src/renderers/docx.js +1 -1
- package/src/renderers/html.js +1 -2
- package/src/services/gemini.js +233 -1
- package/src/services/video.js +9 -9
- package/src/utils/cli.js +2 -1
- package/src/utils/context-manager.js +152 -0
- package/src/utils/diff-engine.js +7 -7
- package/src/utils/interactive.js +50 -4
- package/src/utils/progress-bar.js +11 -10
package/ARCHITECTURE.md
CHANGED
|
@@ -15,6 +15,7 @@
|
|
|
15
15
|
- [Per-Segment Processing](#per-segment-processing)
|
|
16
16
|
- [File Resolution Strategies](#file-resolution-strategies)
|
|
17
17
|
- [Quality Gate Decision Table](#quality-gate-decision-table)
|
|
18
|
+
- [Multi-Segment Batching](#multi-segment-batching)
|
|
18
19
|
- [Smart Change Detection](#smart-change-detection)
|
|
19
20
|
- [Correlation Strategies](#correlation-strategies)
|
|
20
21
|
- [Assessment Thresholds](#assessment-thresholds)
|
|
@@ -249,6 +250,56 @@ After all passes complete, any Gemini File API uploads are cleaned up (fire-and-
|
|
|
249
250
|
|
|
250
251
|
---
|
|
251
252
|
|
|
253
|
+
## Multi-Segment Batching
|
|
254
|
+
|
|
255
|
+
When the Gemini context window has enough headroom, consecutive video segments are grouped into single API calls. This reduces the number of Gemini calls and gives the model better cross-segment awareness.
|
|
256
|
+
|
|
257
|
+
```mermaid
|
|
258
|
+
flowchart TB
|
|
259
|
+
START(["All Segments"]) --> CHECK{"Batching enabled?\n!noBatch && !skipGemini\n&& segments > 1"}
|
|
260
|
+
CHECK -->|No| SINGLE["Single-segment\nprocessing (original)"]
|
|
261
|
+
CHECK -->|Yes| PLAN["planSegmentBatches()\nGreedy bin-packing"]
|
|
262
|
+
|
|
263
|
+
PLAN --> BUDGET["Calculate token budget:\ncontextWindow (1M)\n− promptOverhead (120K)\n− docTokens\n− prevAnalysesTokens\n= available for video"]
|
|
264
|
+
|
|
265
|
+
BUDGET --> FIT{"batchSize > 1?"}
|
|
266
|
+
FIT -->|No| SINGLE
|
|
267
|
+
FIT -->|Yes| BATCH["Process in batches"]
|
|
268
|
+
|
|
269
|
+
BATCH --> B1["Batch 1:\nsegs 1–N"]
|
|
270
|
+
BATCH --> B2["Batch 2:\nsegs N+1–M"]
|
|
271
|
+
BATCH --> BN["..."]
|
|
272
|
+
|
|
273
|
+
B1 --> CALL["processSegmentBatch()\nMultiple fileData parts\nper Gemini call"]
|
|
274
|
+
CALL --> PARSE["Parse + Quality Gate\n+ Schema Validation"]
|
|
275
|
+
PARSE --> TAG["Tag items with\nsource_segment"]
|
|
276
|
+
|
|
277
|
+
CALL -->|Error| FALLBACK["Fall back to\nsingle-segment mode"]
|
|
278
|
+
FALLBACK --> SINGLE
|
|
279
|
+
```
|
|
280
|
+
|
|
281
|
+
### How It Works
|
|
282
|
+
|
|
283
|
+
| Step | Detail |
|
|
284
|
+
| ------ | -------- |
|
|
285
|
+
| **Token budget** | `contextWindow − 120K overhead − docTokens − prevAnalysesTokens = available` |
|
|
286
|
+
| **Video cost** | ~300 tokens/sec × segment duration |
|
|
287
|
+
| **Bin-packing** | Greedy: add consecutive segments until budget or max batch size (8) reached |
|
|
288
|
+
| **Deep summary synergy** | Deep summary frees 60–80% of doc tokens → more room for video → larger batches |
|
|
289
|
+
| **Fallback** | Any batch failure → entire remaining file falls back to single-segment processing |
|
|
290
|
+
| **Cache aware** | Cached segment runs are loaded from disk; only uncached batches hit the API |
|
|
291
|
+
| **Disable** | `--no-batch` forces original single-segment behavior |
|
|
292
|
+
|
|
293
|
+
### Token Math Example
|
|
294
|
+
|
|
295
|
+
| Scenario | Doc Tokens | Available | Seg Duration | Tokens/Seg | Batch Size |
|
|
296
|
+
| ---------- | ----------- | ----------- | ------------- | ----------- | ----------- |
|
|
297
|
+
| No deep summary | 300K | ~580K | 280s | 84K | 6 |
|
|
298
|
+
| With deep summary | 60K | ~820K | 280s | 84K | 9 |
|
|
299
|
+
| Raw mode | 60K | ~820K | 1200s | 360K | 2 |
|
|
300
|
+
|
|
301
|
+
---
|
|
302
|
+
|
|
252
303
|
## Smart Change Detection
|
|
253
304
|
|
|
254
305
|
The `--update-progress` mode tracks which extracted items have been addressed:
|
package/QUICK_START.md
CHANGED
|
@@ -236,6 +236,17 @@ my-project/runs/{timestamp}/
|
|
|
236
236
|
|
|
237
237
|
---
|
|
238
238
|
|
|
239
|
+
## Advanced Features
|
|
240
|
+
|
|
241
|
+
| Feature | Flag | Description |
|
|
242
|
+
| --------- | ------ | ------------- |
|
|
243
|
+
| **Deep Summary** | `--deep-summary` | Pre-summarizes context docs — saves 60-80% input tokens per segment |
|
|
244
|
+
| **Deep Dive** | `--deep-dive` | Generates explanatory docs for each discussion topic |
|
|
245
|
+
| **Multi-Segment Batching** | enabled by default | When context window has headroom, groups consecutive segments into single API calls — fewer requests, better cross-segment awareness. Use `--no-batch` to disable |
|
|
246
|
+
| **Raw Video Mode** | `--no-compress` | Skip re-encoding — pass video directly to Gemini |
|
|
247
|
+
|
|
248
|
+
---
|
|
249
|
+
|
|
239
250
|
## Troubleshooting
|
|
240
251
|
|
|
241
252
|
| Problem | Fix |
|
package/README.md
CHANGED
|
@@ -1,13 +1,13 @@
|
|
|
1
1
|
# Task Summary Extractor
|
|
2
2
|
|
|
3
|
-
> **v9.
|
|
3
|
+
> **v9.7.0** — AI-powered content analysis CLI — meetings, recordings, documents, or any mix. Install globally, run anywhere.
|
|
4
4
|
|
|
5
5
|
<p align="center">
|
|
6
6
|
<img src="https://img.shields.io/badge/node-%3E%3D18.0.0-green" alt="Node.js" />
|
|
7
7
|
<img src="https://img.shields.io/badge/gemini-2.5--flash-blue" alt="Gemini" />
|
|
8
|
-
<img src="https://img.shields.io/badge/firebase-
|
|
9
|
-
<img src="https://img.shields.io/badge/version-9.
|
|
10
|
-
<img src="https://img.shields.io/badge/tests-
|
|
8
|
+
<img src="https://img.shields.io/badge/firebase-12.x-orange" alt="Firebase" />
|
|
9
|
+
<img src="https://img.shields.io/badge/version-9.7.0-brightgreen" alt="Version" />
|
|
10
|
+
<img src="https://img.shields.io/badge/tests-345%20passing-brightgreen" alt="Tests" />
|
|
11
11
|
<img src="https://img.shields.io/badge/npm-task--summary--extractor-red" alt="npm" />
|
|
12
12
|
</p>
|
|
13
13
|
|
|
@@ -183,7 +183,7 @@ These are the ones you'll actually use:
|
|
|
183
183
|
| `--resume` | Continue an interrupted run | `--resume` |
|
|
184
184
|
| `--reanalyze` | Force fresh analysis (ignore cache) | `--reanalyze` |
|
|
185
185
|
| `--dry-run` | Preview what would run, without running | `--dry-run` |
|
|
186
|
-
| `--format <type>` | Output format: `md`, `html`, `json`, `pdf`, `docx`, `all` (default: `
|
|
186
|
+
| `--format <type>` | Output format: `md`, `html`, `json`, `pdf`, `docx`, `all` (default: `all`) | `--format html` |
|
|
187
187
|
| `--min-confidence <level>` | Filter items by confidence: `high`, `medium`, `low` | `--min-confidence high` |
|
|
188
188
|
| `--no-html` | Suppress HTML report generation | `--no-html` |
|
|
189
189
|
| `--deep-summary` | Pre-summarize context docs (60-80% token savings) | `--deep-summary` |
|
|
@@ -273,6 +273,7 @@ Control how video is processed before AI analysis:
|
|
|
273
273
|
| `--no-focused-pass` | enabled | Disable targeted re-analysis of weak segments |
|
|
274
274
|
| `--no-learning` | enabled | Disable auto-tuning from historical run data |
|
|
275
275
|
| `--no-diff` | enabled | Disable diff comparison with the previous run |
|
|
276
|
+
| `--no-batch` | enabled | Disable multi-segment batching (force 1 segment per API call) |
|
|
276
277
|
|
|
277
278
|
### Available Models
|
|
278
279
|
|
|
@@ -304,7 +305,7 @@ DYNAMIC --request <text>
|
|
|
304
305
|
PROGRESS --repo <path>
|
|
305
306
|
TUNING --thinking-budget --compilation-thinking-budget --parallel
|
|
306
307
|
--parallel-analysis --log-level --output
|
|
307
|
-
--no-focused-pass --no-learning --no-diff
|
|
308
|
+
--no-focused-pass --no-learning --no-diff --no-batch
|
|
308
309
|
INFO --help (-h) --version (-v)
|
|
309
310
|
```
|
|
310
311
|
|
|
@@ -472,6 +473,7 @@ GEMINI_API_KEY=your-key-here
|
|
|
472
473
|
| **Deep Summary** | `--deep-summary` pre-summarizes context docs, 60-80% token savings per segment |
|
|
473
474
|
| **Context Window Safety** | Auto-truncation, pre-flight token checks, RESOURCE_EXHAUSTED recovery |
|
|
474
475
|
| **Multi-Format Output** | `--format` flag: Markdown, HTML, JSON, PDF, DOCX, or all formats at once |
|
|
476
|
+
| **Multi-Segment Batching** | Groups consecutive segments into single API calls when context window has headroom — fewer calls, better cross-segment awareness. `--no-batch` to disable |
|
|
475
477
|
| **Interactive CLI** | Run with no args → guided experience |
|
|
476
478
|
| **Resume / Checkpoint** | `--resume` continues interrupted runs |
|
|
477
479
|
| **Firebase Upload** | Team access via cloud (optional) |
|
|
@@ -586,7 +588,7 @@ task-summary-extractor/
|
|
|
586
588
|
| `npm run check` | Validate environment |
|
|
587
589
|
| `npm start` | Run the pipeline |
|
|
588
590
|
| `npm run help` | Show CLI help |
|
|
589
|
-
| `npm test` | Run test suite (
|
|
591
|
+
| `npm test` | Run test suite (345 tests) |
|
|
590
592
|
| `npm run test:watch` | Run tests in watch mode |
|
|
591
593
|
| `npm run test:coverage` | Run tests with coverage report |
|
|
592
594
|
|
|
@@ -596,6 +598,7 @@ task-summary-extractor/
|
|
|
596
598
|
|
|
597
599
|
| Version | Highlights |
|
|
598
600
|
|---------|-----------|
|
|
601
|
+
| **v9.7.0** | **Multi-segment batching** — groups consecutive video segments into single Gemini API calls when context window has headroom, greedy bin-packing by token budget (`planSegmentBatches`), `processSegmentBatch()` multi-video API calls, automatic fallback to single-segment on failure, `--no-batch` to disable, codebase audit fixes (unused imports, variable shadowing) |
|
|
599
602
|
| **v9.6.0** | **Interactive CLI UX** — arrow-key navigation for all selectors (folder, model, run mode, formats, confidence, doc exclusion), zero-dependency prompt engine (`interactive.js`), `selectOne()` with ↑↓+Enter, `selectMany()` with Space toggle + A all/none, non-TTY fallback to number input |
|
|
600
603
|
| **v9.5.0** | **Video processing flags** — `--no-compress`, `--speed`, `--segment-time` CLI flags, hardcoded 1200s for raw mode, deprecated `--skip-compression` |
|
|
601
604
|
| **v9.4.0** | **Context window safety** — pre-flight token checks, auto-truncation for oversized docs/VTTs, RESOURCE_EXHAUSTED recovery with automatic doc shedding, chunked compilation for large segment sets, P0/P1 hard cap (2× budget) prevents context overflow, improved deep-summary prompt quality |
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "task-summary-extractor",
|
|
3
|
-
"version": "9.
|
|
3
|
+
"version": "9.7.0",
|
|
4
4
|
"description": "AI-powered meeting analysis & document generation CLI — video + document processing, deep dive docs, dynamic mode, interactive CLI with model selection, confidence scoring, learning loop, git progress tracking",
|
|
5
5
|
"main": "process_and_upload.js",
|
|
6
6
|
"bin": {
|
package/src/phases/init.js
CHANGED
|
@@ -66,6 +66,7 @@ async function phaseInit() {
|
|
|
66
66
|
disableLearning: !!flags['no-learning'],
|
|
67
67
|
disableDiff: !!flags['no-diff'],
|
|
68
68
|
noHtml: !!flags['no-html'],
|
|
69
|
+
noBatch: !!flags['no-batch'],
|
|
69
70
|
// Video processing flags
|
|
70
71
|
noCompress: !!flags['no-compress'],
|
|
71
72
|
speed: flags.speed ? parseFloat(flags.speed) : null,
|
|
@@ -355,6 +356,7 @@ function _printRunSummary(opts, modelId, models, targetDir) {
|
|
|
355
356
|
if (opts.deepDive) features.push(c.cyan('deep-dive'));
|
|
356
357
|
if (opts.deepSummary) features.push(c.cyan('deep-summary'));
|
|
357
358
|
if (opts.dynamic) features.push(c.cyan('dynamic'));
|
|
359
|
+
if (!opts.noBatch) features.push(c.green('batch'));
|
|
358
360
|
if (opts.resume) features.push(c.yellow('resume'));
|
|
359
361
|
if (opts.dryRun) features.push(c.yellow('dry-run'));
|
|
360
362
|
if (opts.skipUpload) features.push(c.dim('skip-upload'));
|
|
@@ -363,6 +365,7 @@ function _printRunSummary(opts, modelId, models, targetDir) {
|
|
|
363
365
|
if (opts.disableFocusedPass) disabled.push(c.dim('no-focused'));
|
|
364
366
|
if (opts.disableLearning) disabled.push(c.dim('no-learning'));
|
|
365
367
|
if (opts.disableDiff) disabled.push(c.dim('no-diff'));
|
|
368
|
+
if (opts.noBatch) disabled.push(c.dim('no-batch'));
|
|
366
369
|
|
|
367
370
|
if (features.length > 0) {
|
|
368
371
|
console.log(` ${c.dim('Features:')} ${features.join(c.dim(' · '))}`);
|
|
@@ -9,7 +9,7 @@ const { AUDIO_EXTS, SPEED } = config;
|
|
|
9
9
|
|
|
10
10
|
// --- Services ---
|
|
11
11
|
const { uploadToStorage, storageExists } = require('../services/firebase');
|
|
12
|
-
const { processWithGemini, cleanupGeminiFiles } = require('../services/gemini');
|
|
12
|
+
const { processWithGemini, processSegmentBatch, cleanupGeminiFiles } = require('../services/gemini');
|
|
13
13
|
const { compressAndSegment, compressAndSegmentAudio, splitOnly, probeFormat, verifySegment } = require('../services/video');
|
|
14
14
|
|
|
15
15
|
// --- Utils ---
|
|
@@ -19,7 +19,7 @@ const { parallelMap } = require('../utils/retry');
|
|
|
19
19
|
const { assessQuality, formatQualityLine, getConfidenceStats, THRESHOLDS } = require('../utils/quality-gate');
|
|
20
20
|
const { validateAnalysis, formatSchemaLine, schemaScore, normalizeAnalysis } = require('../utils/schema-validator');
|
|
21
21
|
const { calculateThinkingBudget } = require('../utils/adaptive-budget');
|
|
22
|
-
const { detectBoundaryContext, sliceVttForSegment } = require('../utils/context-manager');
|
|
22
|
+
const { detectBoundaryContext, sliceVttForSegment, planSegmentBatches, estimateTokens, buildProgressiveContext } = require('../utils/context-manager');
|
|
23
23
|
|
|
24
24
|
// --- Modes ---
|
|
25
25
|
const { identifyWeaknesses, runFocusedPass, mergeFocusedResults } = require('../modes/focused-reanalysis');
|
|
@@ -245,6 +245,215 @@ async function phaseProcessVideo(ctx, videoPath, videoIndex) {
|
|
|
245
245
|
const segmentAnalyses = [];
|
|
246
246
|
const segmentReports = []; // Quality reports for health dashboard
|
|
247
247
|
|
|
248
|
+
// ════════════════════════════════════════════════════════════
|
|
249
|
+
// Multi-Segment Batching — pass multiple segments per call
|
|
250
|
+
// when the context window has enough headroom.
|
|
251
|
+
// ════════════════════════════════════════════════════════════
|
|
252
|
+
const useBatching = !opts.noBatch && !opts.skipGemini && !opts.dryRun && segments.length > 1;
|
|
253
|
+
let batchedSuccessfully = false;
|
|
254
|
+
|
|
255
|
+
if (useBatching) {
|
|
256
|
+
const prevTokens = estimateTokens(buildProgressiveContext(previousAnalyses, userName) || '');
|
|
257
|
+
const { batches, batchSize, reason } = planSegmentBatches(
|
|
258
|
+
segmentMeta, contextDocs,
|
|
259
|
+
{
|
|
260
|
+
contextWindow: config.GEMINI_CONTEXT_WINDOW || 1_048_576,
|
|
261
|
+
previousAnalysesTokens: prevTokens,
|
|
262
|
+
}
|
|
263
|
+
);
|
|
264
|
+
|
|
265
|
+
if (batchSize > 1) {
|
|
266
|
+
console.log(` ${c.cyan('⚡ Multi-segment batching:')} ${batches.length} batch(es), up to ${batchSize} segments/batch`);
|
|
267
|
+
console.log(` ${c.dim(reason)}`);
|
|
268
|
+
console.log('');
|
|
269
|
+
batchedSuccessfully = true; // will be set false if we need to fall back
|
|
270
|
+
|
|
271
|
+
for (let bIdx = 0; bIdx < batches.length; bIdx++) {
|
|
272
|
+
if (isShuttingDown()) break;
|
|
273
|
+
const batchIndices = batches[bIdx];
|
|
274
|
+
const batchSegs = batchIndices.map(i => ({
|
|
275
|
+
segPath: segmentMeta[i].segPath,
|
|
276
|
+
segName: segmentMeta[i].segName,
|
|
277
|
+
durSec: segmentMeta[i].durSec,
|
|
278
|
+
storageUrl: segmentMeta[i].storageUrl,
|
|
279
|
+
}));
|
|
280
|
+
const batchTimes = batchIndices.map(i => ({
|
|
281
|
+
startTimeSec: segmentMeta[i].startTimeSec,
|
|
282
|
+
endTimeSec: segmentMeta[i].endTimeSec,
|
|
283
|
+
}));
|
|
284
|
+
|
|
285
|
+
const batchLabel = batchIndices.length === 1
|
|
286
|
+
? `seg ${batchIndices[0] + 1}`
|
|
287
|
+
: `segs ${batchIndices[0] + 1}–${batchIndices[batchIndices.length - 1] + 1}`;
|
|
288
|
+
console.log(` ${c.cyan('══')} Batch ${c.highlight(`${bIdx + 1}/${batches.length}`)} (${batchLabel}) ${c.cyan('══')}`);
|
|
289
|
+
|
|
290
|
+
// Skip batches where all segments have cached runs and user didn't force re-analyze
|
|
291
|
+
if (!forceReanalyze) {
|
|
292
|
+
const allCached = batchIndices.every(i => {
|
|
293
|
+
const prefix = `segment_${String(i).padStart(2, '0')}_`;
|
|
294
|
+
const existing = fs.readdirSync(geminiRunsDir).filter(f => f.startsWith(prefix) && f.endsWith('.json'));
|
|
295
|
+
return existing.length > 0;
|
|
296
|
+
});
|
|
297
|
+
if (allCached) {
|
|
298
|
+
// Load cached results for all segments in this batch
|
|
299
|
+
let cacheOk = true;
|
|
300
|
+
for (const i of batchIndices) {
|
|
301
|
+
const prefix = `segment_${String(i).padStart(2, '0')}_`;
|
|
302
|
+
const existing = fs.readdirSync(geminiRunsDir).filter(f => f.startsWith(prefix) && f.endsWith('.json')).sort();
|
|
303
|
+
const latestFile = existing[existing.length - 1];
|
|
304
|
+
try {
|
|
305
|
+
const cached = JSON.parse(fs.readFileSync(path.join(geminiRunsDir, latestFile), 'utf8'));
|
|
306
|
+
const analysis = normalizeAnalysis(cached.output.parsed || { rawResponse: cached.output.raw });
|
|
307
|
+
analysis._geminiMeta = {
|
|
308
|
+
model: cached.run.model,
|
|
309
|
+
processedAt: cached.run.timestamp,
|
|
310
|
+
durationMs: cached.run.durationMs,
|
|
311
|
+
tokenUsage: cached.run.tokenUsage || null,
|
|
312
|
+
runFile: path.relative(PROJECT_ROOT, path.join(geminiRunsDir, latestFile)),
|
|
313
|
+
parseSuccess: cached.output.parseSuccess,
|
|
314
|
+
skipped: true,
|
|
315
|
+
};
|
|
316
|
+
if (cached.run.tokenUsage) {
|
|
317
|
+
costTracker.addSegment(segmentMeta[i].segName, cached.run.tokenUsage, cached.run.durationMs, true);
|
|
318
|
+
}
|
|
319
|
+
const cachedQuality = assessQuality(analysis, { parseSuccess: cached.output.parseSuccess, rawLength: (cached.output.raw || '').length });
|
|
320
|
+
segmentReports.push({ segmentName: segmentMeta[i].segName, qualityReport: cachedQuality, retried: false, retryImproved: false });
|
|
321
|
+
previousAnalyses.push(analysis);
|
|
322
|
+
segmentAnalyses.push(analysis);
|
|
323
|
+
|
|
324
|
+
fileResult.segments.push({
|
|
325
|
+
segmentFile: segmentMeta[i].segName, segmentIndex: i,
|
|
326
|
+
storagePath: segmentMeta[i].storagePath, storageUrl: segmentMeta[i].storageUrl,
|
|
327
|
+
duration: fmtDuration(segmentMeta[i].durSec), durationSeconds: segmentMeta[i].durSec,
|
|
328
|
+
fileSizeMB: parseFloat(segmentMeta[i].sizeMB),
|
|
329
|
+
geminiRunFile: path.relative(PROJECT_ROOT, path.join(geminiRunsDir, latestFile)),
|
|
330
|
+
analysis,
|
|
331
|
+
});
|
|
332
|
+
console.log(` ${c.success(`seg ${i + 1}: loaded from cache (${latestFile})`)}`);
|
|
333
|
+
} catch (err) {
|
|
334
|
+
console.warn(` ${c.warn(`seg ${i + 1}: cache corrupt — will re-analyze`)}`);
|
|
335
|
+
cacheOk = false;
|
|
336
|
+
break;
|
|
337
|
+
}
|
|
338
|
+
}
|
|
339
|
+
if (cacheOk) {
|
|
340
|
+
console.log('');
|
|
341
|
+
continue; // skip to next batch
|
|
342
|
+
}
|
|
343
|
+
}
|
|
344
|
+
}
|
|
345
|
+
|
|
346
|
+
// Verify all segments in batch
|
|
347
|
+
const invalidInBatch = batchIndices.filter(i => !verifySegment(segmentMeta[i].segPath));
|
|
348
|
+
if (invalidInBatch.length > 0) {
|
|
349
|
+
console.warn(` ${c.warn(`${invalidInBatch.length} corrupt segment(s) in batch — falling back to single-segment mode`)}`);
|
|
350
|
+
batchedSuccessfully = false;
|
|
351
|
+
break;
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
try {
|
|
355
|
+
const batchRun = await processSegmentBatch(
|
|
356
|
+
ai, batchSegs,
|
|
357
|
+
`${callName}_${baseName}_batch${bIdx}`,
|
|
358
|
+
contextDocs, previousAnalyses, userName, PKG_ROOT,
|
|
359
|
+
{
|
|
360
|
+
segmentIndices: batchIndices,
|
|
361
|
+
totalSegments: segments.length,
|
|
362
|
+
segmentTimes: batchTimes,
|
|
363
|
+
thinkingBudget: opts.thinkingBudget || 24576,
|
|
364
|
+
noStorageUrl: !!opts.noStorageUrl,
|
|
365
|
+
}
|
|
366
|
+
);
|
|
367
|
+
|
|
368
|
+
// Save batch run file
|
|
369
|
+
const ts = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
|
|
370
|
+
const batchRunFileName = `batch_${bIdx}_segs_${batchIndices[0]}-${batchIndices[batchIndices.length - 1]}_${ts}.json`;
|
|
371
|
+
const batchRunPath = path.join(geminiRunsDir, batchRunFileName);
|
|
372
|
+
fs.writeFileSync(batchRunPath, JSON.stringify(batchRun, null, 2), 'utf8');
|
|
373
|
+
|
|
374
|
+
const analysis = normalizeAnalysis(batchRun.output.parsed || { rawResponse: batchRun.output.raw });
|
|
375
|
+
analysis._geminiMeta = {
|
|
376
|
+
model: batchRun.run.model,
|
|
377
|
+
processedAt: batchRun.run.timestamp,
|
|
378
|
+
durationMs: batchRun.run.durationMs,
|
|
379
|
+
tokenUsage: batchRun.run.tokenUsage || null,
|
|
380
|
+
runFile: path.relative(PROJECT_ROOT, batchRunPath),
|
|
381
|
+
parseSuccess: batchRun.output.parseSuccess,
|
|
382
|
+
batchMode: true,
|
|
383
|
+
segmentIndices: batchIndices,
|
|
384
|
+
};
|
|
385
|
+
|
|
386
|
+
// Track cost
|
|
387
|
+
costTracker.addSegment(`batch_${bIdx}`, batchRun.run.tokenUsage, batchRun.run.durationMs, false);
|
|
388
|
+
|
|
389
|
+
// Quality gate
|
|
390
|
+
const qualityReport = assessQuality(analysis, {
|
|
391
|
+
parseSuccess: batchRun.output.parseSuccess,
|
|
392
|
+
rawLength: (batchRun.output.raw || '').length,
|
|
393
|
+
});
|
|
394
|
+
console.log(formatQualityLine(qualityReport, `batch ${bIdx + 1}`));
|
|
395
|
+
|
|
396
|
+
// Schema validation
|
|
397
|
+
const schemaReport = validateAnalysis(analysis, 'segment');
|
|
398
|
+
console.log(formatSchemaLine(schemaReport));
|
|
399
|
+
|
|
400
|
+
// Assign batch analysis to each segment in the batch
|
|
401
|
+
for (const i of batchIndices) {
|
|
402
|
+
segmentReports.push({ segmentName: segmentMeta[i].segName, qualityReport, retried: false, retryImproved: false });
|
|
403
|
+
fileResult.segments.push({
|
|
404
|
+
segmentFile: segmentMeta[i].segName, segmentIndex: i,
|
|
405
|
+
storagePath: segmentMeta[i].storagePath, storageUrl: segmentMeta[i].storageUrl,
|
|
406
|
+
duration: fmtDuration(segmentMeta[i].durSec), durationSeconds: segmentMeta[i].durSec,
|
|
407
|
+
fileSizeMB: parseFloat(segmentMeta[i].sizeMB),
|
|
408
|
+
geminiRunFile: path.relative(PROJECT_ROOT, batchRunPath),
|
|
409
|
+
analysis,
|
|
410
|
+
});
|
|
411
|
+
}
|
|
412
|
+
|
|
413
|
+
// Source-segment tagging
|
|
414
|
+
const tagSeg = (arr, segNum) => (arr || []).forEach(item => { if (!item.source_segment) item.source_segment = segNum; });
|
|
415
|
+
for (const i of batchIndices) {
|
|
416
|
+
tagSeg(analysis.action_items, i + 1);
|
|
417
|
+
tagSeg(analysis.change_requests, i + 1);
|
|
418
|
+
tagSeg(analysis.blockers, i + 1);
|
|
419
|
+
tagSeg(analysis.scope_changes, i + 1);
|
|
420
|
+
}
|
|
421
|
+
|
|
422
|
+
previousAnalyses.push(analysis);
|
|
423
|
+
segmentAnalyses.push(analysis);
|
|
424
|
+
|
|
425
|
+
// Cleanup Gemini File API uploads
|
|
426
|
+
if (batchRun._geminiFileNames && batchRun._geminiFileNames.length > 0 && ai) {
|
|
427
|
+
cleanupGeminiFiles(ai, batchRun._geminiFileNames).catch(() => {});
|
|
428
|
+
}
|
|
429
|
+
|
|
430
|
+
const dur = (batchRun.run.durationMs / 1000).toFixed(1);
|
|
431
|
+
console.log(` ${c.success(`Batch analysis complete (${dur}s, ${batchIndices.length} segments)`)}`);
|
|
432
|
+
progress.markAnalyzed(`${baseName}_batch${bIdx}`, path.relative(PROJECT_ROOT, batchRunPath));
|
|
433
|
+
} catch (err) {
|
|
434
|
+
console.error(` ${c.error(`Batch analysis failed: ${err.message}`)}`);
|
|
435
|
+
console.warn(` ${c.warn('Falling back to single-segment processing for remaining segments')}`);
|
|
436
|
+
console.warn(` ${c.dim('Tip: use --no-batch to disable batching if this persists.')}`);
|
|
437
|
+
log.error(`Batch ${bIdx} failed — ${err.message}`);
|
|
438
|
+
batchedSuccessfully = false;
|
|
439
|
+
break;
|
|
440
|
+
}
|
|
441
|
+
console.log('');
|
|
442
|
+
}
|
|
443
|
+
|
|
444
|
+
if (batchedSuccessfully) {
|
|
445
|
+
const totalSegs = batches.reduce((s, b) => s + b.length, 0);
|
|
446
|
+
console.log(` ${c.success(`All ${batches.length} batch(es) complete: ${totalSegs} segments analyzed`)}`);
|
|
447
|
+
console.log('');
|
|
448
|
+
}
|
|
449
|
+
}
|
|
450
|
+
}
|
|
451
|
+
|
|
452
|
+
// ════════════════════════════════════════════════════════════
|
|
453
|
+
// Single-Segment Processing (original path / fallback)
|
|
454
|
+
// ════════════════════════════════════════════════════════════
|
|
455
|
+
if (!batchedSuccessfully) {
|
|
456
|
+
|
|
248
457
|
for (let j = 0; j < segments.length; j++) {
|
|
249
458
|
if (isShuttingDown()) break;
|
|
250
459
|
|
|
@@ -647,6 +856,8 @@ async function phaseProcessVideo(ctx, videoPath, videoIndex) {
|
|
|
647
856
|
console.log('');
|
|
648
857
|
}
|
|
649
858
|
|
|
859
|
+
} // end if (!batchedSuccessfully) — single-segment fallback
|
|
860
|
+
|
|
650
861
|
// Compute totals for this file
|
|
651
862
|
fileResult.compressedTotalMB = fileResult.segments
|
|
652
863
|
.reduce((sum, s) => sum + s.fileSizeMB, 0).toFixed(2);
|
package/src/phases/summary.js
CHANGED
|
@@ -40,11 +40,11 @@ function phaseSummary(ctx, results, { jsonPath, mdPath, runTs, compilationRun })
|
|
|
40
40
|
if (cost.totalTokens > 0) {
|
|
41
41
|
console.log('');
|
|
42
42
|
console.log(` ${c.heading(`Cost estimate (${config.GEMINI_MODEL}):`)}`);
|
|
43
|
-
console.log(` Input
|
|
44
|
-
console.log(` Output
|
|
45
|
-
console.log(` Thinking
|
|
46
|
-
console.log(` Total
|
|
47
|
-
console.log(` AI time
|
|
43
|
+
console.log(` Input: ${c.yellow(cost.inputTokens.toLocaleString())} ${c.dim(`($${cost.inputCost.toFixed(4)})`)}`);
|
|
44
|
+
console.log(` Output: ${c.yellow(cost.outputTokens.toLocaleString())} ${c.dim(`($${cost.outputCost.toFixed(4)})`)}`);
|
|
45
|
+
console.log(` Thinking: ${c.yellow(cost.thinkingTokens.toLocaleString())} ${c.dim(`($${cost.thinkingCost.toFixed(4)})`)}`);
|
|
46
|
+
console.log(` Total: ${c.highlight(cost.totalTokens.toLocaleString() + ' tokens')} | ${c.green('$' + cost.totalCost.toFixed(4))}`);
|
|
47
|
+
console.log(` AI time: ${c.yellow((cost.totalDurationMs / 1000).toFixed(1) + 's')}`);
|
|
48
48
|
}
|
|
49
49
|
|
|
50
50
|
if (firebaseReady && !opts.skipUpload) {
|
package/src/pipeline.js
CHANGED
|
@@ -151,7 +151,7 @@ async function run() {
|
|
|
151
151
|
files: [],
|
|
152
152
|
};
|
|
153
153
|
|
|
154
|
-
fullCtx.progress.setPhase('
|
|
154
|
+
fullCtx.progress.setPhase('analyze');
|
|
155
155
|
bar.setPhase('analyze', mediaFiles.length);
|
|
156
156
|
if (log && log.phaseStart) log.phaseStart('process_videos');
|
|
157
157
|
|
|
@@ -702,6 +702,7 @@ async function runDynamic(initCtx) {
|
|
|
702
702
|
});
|
|
703
703
|
} catch (err) {
|
|
704
704
|
console.error(` ${c.error(`Topic planning failed: ${err.message}`)}`);
|
|
705
|
+
console.error(` ${c.dim('Tip: check your Gemini API key, or try a simpler --request.')}`);
|
|
705
706
|
log.error(`Dynamic topic planning failed: ${err.message}`); bar.finish(); initCtx.progress.cleanup();
|
|
706
707
|
log.close();
|
|
707
708
|
return;
|
package/src/renderers/docx.js
CHANGED
package/src/renderers/html.js
CHANGED
package/src/services/gemini.js
CHANGED
|
@@ -26,7 +26,9 @@ const {
|
|
|
26
26
|
sliceVttForSegment,
|
|
27
27
|
buildProgressiveContext,
|
|
28
28
|
buildSegmentFocus,
|
|
29
|
+
buildBatchSegmentFocus,
|
|
29
30
|
estimateTokens,
|
|
31
|
+
estimateDocTokens,
|
|
30
32
|
} = require('../utils/context-manager');
|
|
31
33
|
const { formatHMS } = require('../utils/format');
|
|
32
34
|
const { withRetry } = require('../utils/retry');
|
|
@@ -564,6 +566,230 @@ async function processWithGemini(ai, filePath, displayName, contextDocs = [], pr
|
|
|
564
566
|
};
|
|
565
567
|
}
|
|
566
568
|
|
|
569
|
+
// ======================== MULTI-SEGMENT BATCH ANALYSIS ========================
|
|
570
|
+
|
|
571
|
+
/**
|
|
572
|
+
* Process multiple consecutive video segments in a single Gemini call.
|
|
573
|
+
* This takes advantage of unused context-window headroom (especially after
|
|
574
|
+
* deep summary) to reduce the number of API calls and give the model a
|
|
575
|
+
* more holistic view of the meeting.
|
|
576
|
+
*
|
|
577
|
+
* @param {object} ai – Gemini AI instance
|
|
578
|
+
* @param {Array<{ segPath: string, segName: string, durSec: number, storageUrl?: string }>} batchSegments
|
|
579
|
+
* @param {string} displayName – label for logging (e.g. "call1_video_batch0-2")
|
|
580
|
+
* @param {Array} contextDocs – prepared context docs
|
|
581
|
+
* @param {Array} previousAnalyses – analyses from earlier batches
|
|
582
|
+
* @param {string} userName
|
|
583
|
+
* @param {string} scriptDir – where prompt.json lives
|
|
584
|
+
* @param {object} batchOpts
|
|
585
|
+
* @param {number[]} batchOpts.segmentIndices – 0-based global indices of the segments
|
|
586
|
+
* @param {number} batchOpts.totalSegments – total segment count for the whole file
|
|
587
|
+
* @param {Array<{startTimeSec: number, endTimeSec: number}>} batchOpts.segmentTimes
|
|
588
|
+
* @param {number} [batchOpts.thinkingBudget=24576]
|
|
589
|
+
* @param {boolean} [batchOpts.noStorageUrl=false]
|
|
590
|
+
* @returns {Promise<object>} Run record (same shape as processWithGemini)
|
|
591
|
+
*/
|
|
592
|
+
async function processSegmentBatch(ai, batchSegments, displayName, contextDocs, previousAnalyses, userName, scriptDir, batchOpts = {}) {
  const {
    segmentIndices = batchSegments.map((_, i) => i),
    totalSegments = batchSegments.length,
    segmentTimes = [],
    // NOTE(review): thinkingBudget is accepted (and documented) but never read
    // below — confirm whether it should be forwarded via the request config.
    thinkingBudget = 24576,
    noStorageUrl = false,
  } = batchOpts;

  const { systemInstruction, promptText } = loadPrompt(scriptDir);

  // Inline/external video URLs above this size are uploaded via the File API instead.
  const EXTERNAL_URL_MAX_BYTES = 20 * 1024 * 1024;

  // ── Upload / reference all video files ─────────────────────────────────────
  const fileRefs = []; // { uri, mimeType, name, usedExternalUrl }

  for (const seg of batchSegments) {
    // Missing file → size 0, which forces the File API upload path below.
    const fileSizeBytes = fs.existsSync(seg.segPath) ? fs.statSync(seg.segPath).size : 0;

    if (!noStorageUrl && seg.storageUrl && fileSizeBytes <= EXTERNAL_URL_MAX_BYTES) {
      // Reference the already-hosted Storage URL directly; nothing to clean up later.
      fileRefs.push({ uri: seg.storageUrl, mimeType: 'video/mp4', name: null, usedExternalUrl: true });
      console.log(`  ${seg.segName}: using Storage URL`);
    } else {
      // Upload via Gemini File API
      console.log(`  ${seg.segName}: uploading to Gemini File API...`);
      let uploaded = await withRetry(
        () => ai.files.upload({
          file: seg.segPath,
          config: { mimeType: 'video/mp4', displayName: `${displayName}_${seg.segName}` },
        }),
        { label: `Gemini upload (${seg.segName})`, maxRetries: 3 }
      );

      // Poll every 5s until the server finishes processing, bounded by
      // GEMINI_POLL_TIMEOUT_MS so a stuck file cannot hang the pipeline.
      let waited = 0;
      const pollStart = Date.now();
      while (uploaded.state === 'PROCESSING') {
        if (Date.now() - pollStart > GEMINI_POLL_TIMEOUT_MS) {
          throw new Error(`File "${seg.segName}" still processing after ${(GEMINI_POLL_TIMEOUT_MS / 1000).toFixed(0)}s`);
        }
        process.stdout.write(`  Processing ${seg.segName}${'.'.repeat((waited % 3) + 1)}   \r`);
        await new Promise(r => setTimeout(r, 5000));
        waited++;
        uploaded = await withRetry(
          () => ai.files.get({ name: uploaded.name }),
          { label: 'Gemini file status', maxRetries: 2, baseDelay: 1000 }
        );
      }
      if (uploaded.state === 'FAILED') {
        throw new Error(`Gemini processing failed for ${seg.segName}`);
      }
      // Keep `name` so the caller can delete this upload after the run.
      fileRefs.push({ uri: uploaded.uri, mimeType: uploaded.mimeType || 'video/mp4', name: uploaded.name, usedExternalUrl: false });
      console.log(`  ${seg.segName}: upload complete`);
    }
  }

  // ── Build content parts ────────────────────────────────────────────────────
  const contentParts = [];

  // Video files — one fileData part per segment, in order
  for (let i = 0; i < fileRefs.length; i++) {
    const ref = fileRefs[i];
    const segIdx = segmentIndices[i];
    contentParts.push({ text: `=== VIDEO SEGMENT ${segIdx + 1} of ${totalSegments} ===` });
    contentParts.push({ fileData: { mimeType: ref.mimeType, fileUri: ref.uri } });
  }

  // Context docs — same budget logic as single-segment but account for multiple videos
  // ~300 tokens/sec of video; 280s is the fallback when a segment's duration is unknown.
  const videoTokenEstimate = batchSegments.reduce((sum, s) => sum + Math.ceil((s.durSec || 280) * 300), 0);
  const prevContextEstimate = estimateTokens(buildProgressiveContext(previousAnalyses, userName) || '');
  // Reserve 120K for prompt/output/thinking; never budget below a 50K floor.
  const docBudget = Math.max(50000, config.GEMINI_CONTEXT_WINDOW - videoTokenEstimate - 120000 - prevContextEstimate);
  console.log(`  Doc budget: ${(docBudget / 1000).toFixed(0)}K tokens for ${contextDocs.length} doc(s)`);

  const { selected: selectedDocs, excluded } = selectDocsByBudget(contextDocs, docBudget, { segmentIndex: segmentIndices[0] });
  if (excluded.length > 0) {
    console.log(`  Context: ${selectedDocs.length} docs included, ${excluded.length} excluded`);
  }

  // Attach selected docs with VTT time-slicing across the batch range
  const batchStartSec = segmentTimes.length > 0 ? segmentTimes[0].startTimeSec : null;
  const batchEndSec = segmentTimes.length > 0 ? segmentTimes[segmentTimes.length - 1].endTimeSec : null;

  for (const doc of selectedDocs) {
    if (doc.type === 'inlineText') {
      let content = doc.content;
      const isVtt = doc.fileName.toLowerCase().endsWith('.vtt') || doc.fileName.toLowerCase().endsWith('.srt');
      if (isVtt && batchStartSec != null && batchEndSec != null) {
        // Only ship the transcript window that overlaps this batch of segments.
        content = sliceVttForSegment(content, batchStartSec, batchEndSec);
        console.log(`  VTT sliced to ${formatHMS(batchStartSec)}–${formatHMS(batchEndSec)} range`);
      }
      contentParts.push({ text: `=== Document: ${doc.fileName} ===\n${content}` });
    } else if (doc.type === 'fileData') {
      contentParts.push({ fileData: { mimeType: doc.mimeType, fileUri: doc.fileUri } });
    }
  }

  // Bridge text
  const bridgeText = buildDocBridgeText(selectedDocs);
  if (bridgeText) contentParts.push({ text: bridgeText });

  // Progressive context from previous batches
  const prevText = buildProgressiveContext(previousAnalyses, userName);
  if (prevText) contentParts.push({ text: prevText });

  // Multi-segment focus instructions
  const focusText = buildBatchSegmentFocus(segmentIndices, totalSegments, previousAnalyses, userName);
  contentParts.push({ text: focusText });

  // User identity
  if (userName) {
    contentParts.push({
      text: `CURRENT USER: "${userName}". Tag tasks assigned to or owned by "${userName}". Populate the "your_tasks" section.`
    });
  }

  // The main extraction prompt goes last, after all context.
  contentParts.push({ text: promptText });

  // ── Send request ──────────────────────────────────────────────────────────
  console.log(`  Analyzing batch [segments ${segmentIndices[0] + 1}–${segmentIndices[segmentIndices.length - 1] + 1}] with ${config.GEMINI_MODEL}...`);

  const requestPayload = {
    model: config.GEMINI_MODEL,
    contents: [{ role: 'user', parts: contentParts }],
    config: {
      systemInstruction,
      maxOutputTokens: 65536,
      temperature: 0,
    },
  };

  const t0 = Date.now();
  const response = await withRetry(
    () => ai.models.generateContent(requestPayload),
    { label: `Gemini batch analysis (${displayName})`, maxRetries: 2, baseDelay: 5000 }
  );
  const durationMs = Date.now() - t0;

  const rawText = response.text;

  // Token usage
  const usage = response.usageMetadata || {};
  const tokenUsage = {
    inputTokens: usage.promptTokenCount || 0,
    outputTokens: usage.candidatesTokenCount || 0,
    totalTokens: usage.totalTokenCount || 0,
    thoughtTokens: usage.thoughtsTokenCount || 0,
  };
  const contextRemaining = config.GEMINI_CONTEXT_WINDOW - tokenUsage.inputTokens;
  const contextUsedPct = ((tokenUsage.inputTokens / config.GEMINI_CONTEXT_WINDOW) * 100).toFixed(1);
  tokenUsage.contextWindow = config.GEMINI_CONTEXT_WINDOW;
  tokenUsage.contextRemaining = contextRemaining;
  tokenUsage.contextUsedPct = parseFloat(contextUsedPct);

  console.log(`  Tokens — input: ${tokenUsage.inputTokens.toLocaleString()} | output: ${tokenUsage.outputTokens.toLocaleString()} | thinking: ${tokenUsage.thoughtTokens.toLocaleString()}`);
  console.log(`  Context — used: ${contextUsedPct}% | remaining: ${contextRemaining.toLocaleString()} tokens`);

  // Parse
  const parsed = extractJson(rawText);

  // Input summary
  // Compact description of what was sent (for the run record) — text parts are
  // summarized as length + preview rather than stored in full.
  const inputSummary = contentParts.map(part => {
    if (part.fileData) return { type: 'fileData', mimeType: part.fileData.mimeType, fileUri: part.fileData.fileUri };
    if (part.text) return { type: 'text', chars: part.text.length, preview: part.text.substring(0, 300) };
    return part;
  });

  // ── Cleanup Gemini File API uploads ────────────────────────────────────────
  // Only files we uploaded (not external Storage URLs) need deletion; the
  // caller performs the actual cleanup via _geminiFileNames.
  const geminiFileNames = fileRefs.filter(r => r.name && !r.usedExternalUrl).map(r => r.name);

  return {
    run: {
      model: config.GEMINI_MODEL,
      displayName,
      userName,
      timestamp: new Date().toISOString(),
      durationMs,
      tokenUsage,
      systemInstruction,
      batchMode: true,
      segmentIndices,
    },
    input: {
      videoFiles: fileRefs.map((ref, i) => ({
        mimeType: ref.mimeType,
        fileUri: ref.uri,
        segmentName: batchSegments[i].segName,
        usedExternalUrl: ref.usedExternalUrl,
      })),
      contextDocuments: contextDocs.map(d => ({ fileName: d.fileName, type: d.type })),
      previousSegmentCount: previousAnalyses.length,
      parts: inputSummary,
      promptText,
    },
    output: {
      raw: rawText,
      parsed,
      parseSuccess: parsed !== null,
    },
    _geminiFileNames: geminiFileNames,
  };
}
|
|
792
|
+
|
|
567
793
|
// ======================== FINAL COMPILATION ========================
|
|
568
794
|
|
|
569
795
|
/**
|
|
@@ -945,7 +1171,12 @@ console.log(` ${c.success(`Summary: ${summary.length.toLocaleString()} chars
|
|
|
945
1171
|
*/
|
|
946
1172
|
async function cleanupGeminiFiles(ai, geminiFileName, contextDocs = []) {
|
|
947
1173
|
const toDelete = [];
|
|
948
|
-
|
|
1174
|
+
// Accept a single name string or an array of names
|
|
1175
|
+
if (Array.isArray(geminiFileName)) {
|
|
1176
|
+
toDelete.push(...geminiFileName.filter(Boolean));
|
|
1177
|
+
} else if (geminiFileName) {
|
|
1178
|
+
toDelete.push(geminiFileName);
|
|
1179
|
+
}
|
|
949
1180
|
for (const doc of contextDocs) {
|
|
950
1181
|
if (doc.type === 'fileData' && doc.geminiFileName) {
|
|
951
1182
|
toDelete.push(doc.geminiFileName);
|
|
@@ -970,6 +1201,7 @@ module.exports = {
|
|
|
970
1201
|
prepareDocsForGemini,
|
|
971
1202
|
loadPrompt,
|
|
972
1203
|
processWithGemini,
|
|
1204
|
+
processSegmentBatch,
|
|
973
1205
|
compileFinalResult,
|
|
974
1206
|
buildDocBridgeText,
|
|
975
1207
|
analyzeVideoForContext,
|
package/src/services/video.js
CHANGED
|
@@ -237,7 +237,7 @@ function compressAndSegment(inputFile, outputDir, { segTime = SEG_TIME, speed =
|
|
|
237
237
|
const fbResult = spawnSync(getFFmpeg(), fbArgs, { stdio: 'inherit' });
|
|
238
238
|
if (fbResult.status === 0 && verifySegment(fallbackPath)) {
|
|
239
239
|
// Remove all corrupt segments and replace with the fallback
|
|
240
|
-
for (const seg of corrupt) { try { fs.unlinkSync(seg); } catch {} }
|
|
240
|
+
for (const seg of corrupt) { try { fs.unlinkSync(seg); } catch { /* best-effort cleanup */ } }
|
|
241
241
|
// If this was the only segment, just rename it
|
|
242
242
|
if (segments.length === 1) {
|
|
243
243
|
const dest = path.join(outputDir, 'segment_00.mp4');
|
|
@@ -261,8 +261,8 @@ function compressAndSegment(inputFile, outputDir, { segTime = SEG_TIME, speed =
|
|
|
261
261
|
for (const f of reSegs) {
|
|
262
262
|
fs.renameSync(path.join(reSegDir, f), path.join(outputDir, f));
|
|
263
263
|
}
|
|
264
|
-
try { fs.rmSync(reSegDir, { recursive: true }); } catch {}
|
|
265
|
-
try { fs.unlinkSync(fallbackPath); } catch {}
|
|
264
|
+
try { fs.rmSync(reSegDir, { recursive: true }); } catch { /* best-effort cleanup */ }
|
|
265
|
+
try { fs.unlinkSync(fallbackPath); } catch { /* best-effort cleanup */ }
|
|
266
266
|
// Re-collect
|
|
267
267
|
segments = fs.readdirSync(outputDir)
|
|
268
268
|
.filter(f => f.startsWith('segment_') && f.endsWith('.mp4'))
|
|
@@ -272,13 +272,13 @@ function compressAndSegment(inputFile, outputDir, { segTime = SEG_TIME, speed =
|
|
|
272
272
|
}
|
|
273
273
|
} else {
|
|
274
274
|
console.error(` ${c.error('Fallback re-encode also failed')}`);
|
|
275
|
-
try { fs.unlinkSync(fallbackPath); } catch {}
|
|
275
|
+
try { fs.unlinkSync(fallbackPath); } catch { /* best-effort cleanup */ }
|
|
276
276
|
}
|
|
277
277
|
} else if (corrupt.length > 0 && !needsSegmentation) {
|
|
278
278
|
// Single-output mode also failed — try once more without segment muxer flags
|
|
279
279
|
console.log(` Retrying single-output compression...`);
|
|
280
280
|
const retryPath = path.join(outputDir, 'segment_00.mp4');
|
|
281
|
-
try { fs.unlinkSync(retryPath); } catch {}
|
|
281
|
+
try { fs.unlinkSync(retryPath); } catch { /* best-effort cleanup */ }
|
|
282
282
|
const retryArgs = [
|
|
283
283
|
'-y',
|
|
284
284
|
'-i', inputFile,
|
|
@@ -373,7 +373,7 @@ function compressAndSegmentAudio(inputFile, outputDir, { segTime = SEG_TIME, spe
|
|
|
373
373
|
const fbArgs = ['-y', '-i', inputFile, ...encodingArgs, fallbackPath];
|
|
374
374
|
const fbResult = spawnSync(getFFmpeg(), fbArgs, { stdio: 'inherit' });
|
|
375
375
|
if (fbResult.status === 0 && verifySegment(fallbackPath)) {
|
|
376
|
-
for (const seg of corrupt) { try { fs.unlinkSync(seg); } catch {} }
|
|
376
|
+
for (const seg of corrupt) { try { fs.unlinkSync(seg); } catch { /* best-effort cleanup */ } }
|
|
377
377
|
if (segments.length === 1) {
|
|
378
378
|
const dest = path.join(outputDir, 'segment_00.m4a');
|
|
379
379
|
fs.renameSync(fallbackPath, dest);
|
|
@@ -394,8 +394,8 @@ function compressAndSegmentAudio(inputFile, outputDir, { segTime = SEG_TIME, spe
|
|
|
394
394
|
for (const f of reSegs) {
|
|
395
395
|
fs.renameSync(path.join(reSegDir, f), path.join(outputDir, f));
|
|
396
396
|
}
|
|
397
|
-
try { fs.rmSync(reSegDir, { recursive: true }); } catch {}
|
|
398
|
-
try { fs.unlinkSync(fallbackPath); } catch {}
|
|
397
|
+
try { fs.rmSync(reSegDir, { recursive: true }); } catch { /* best-effort cleanup */ }
|
|
398
|
+
try { fs.unlinkSync(fallbackPath); } catch { /* best-effort cleanup */ }
|
|
399
399
|
segments = fs.readdirSync(outputDir)
|
|
400
400
|
.filter(f => f.startsWith('segment_') && (f.endsWith('.m4a') || f.endsWith('.mp4')))
|
|
401
401
|
.sort()
|
|
@@ -404,7 +404,7 @@ function compressAndSegmentAudio(inputFile, outputDir, { segTime = SEG_TIME, spe
|
|
|
404
404
|
}
|
|
405
405
|
} else {
|
|
406
406
|
console.error(` ${c.error('Fallback audio re-encode failed')}`);
|
|
407
|
-
try { fs.unlinkSync(fallbackPath); } catch {}
|
|
407
|
+
try { fs.unlinkSync(fallbackPath); } catch { /* best-effort cleanup */ }
|
|
408
408
|
}
|
|
409
409
|
}
|
|
410
410
|
|
package/src/utils/cli.js
CHANGED
|
@@ -38,7 +38,7 @@ function parseArgs(argv) {
|
|
|
38
38
|
'resume', 'reanalyze', 'dry-run',
|
|
39
39
|
'dynamic', 'deep-dive', 'deep-summary', 'update-progress',
|
|
40
40
|
'no-focused-pass', 'no-learning', 'no-diff',
|
|
41
|
-
'no-html',
|
|
41
|
+
'no-html', 'no-batch',
|
|
42
42
|
]);
|
|
43
43
|
|
|
44
44
|
for (let i = 0; i < argv.length; i++) {
|
|
@@ -329,6 +329,7 @@ ${f('--compilation-thinking-budget <n>', 'Thinking tokens for compilation (defau
|
|
|
329
329
|
${f('--no-focused-pass', 'Disable focused re-analysis')}
|
|
330
330
|
${f('--no-learning', 'Disable learning loop')}
|
|
331
331
|
${f('--no-diff', 'Disable diff comparison')}
|
|
332
|
+
${f('--no-batch', 'Disable multi-segment batching')}
|
|
332
333
|
${f('--no-html', 'Skip HTML output (Markdown only)')}
|
|
333
334
|
${f('--log-level <level>', 'debug, info, warn, error (default: info)')}
|
|
334
335
|
|
|
@@ -511,12 +511,164 @@ function detectBoundaryContext(vttContent, segmentStartSec, segmentEndSec, segme
|
|
|
511
511
|
return `SEGMENT BOUNDARY CONTEXT:\n${notes.map(n => `• ${n}`).join('\n')}\n→ Pay special attention to continuity — pick up where the previous segment left off. Do NOT re-extract items that were already captured in previous segments unless their status changed.`;
|
|
512
512
|
}
|
|
513
513
|
|
|
514
|
+
// ════════════════════════════════════════════════════════════
// Multi-Segment Batch Planning
// ════════════════════════════════════════════════════════════

/** Tokens per second of video at standard resolution (Google docs: ~300 tok/s). */
const VIDEO_TOKENS_PER_SEC = 300;

/**
 * Plan how to group consecutive segments into batches that fit the context window.
 *
 * Token budget breakdown:
 *   contextWindow
 *   − promptOverhead       (system instruction + prompt template + output buffer + safety)
 *   − docTokens            (context documents, already accounting for deep-summary condensation)
 *   − prevContextTokens    (progressive previous-analysis context, grows with batches)
 *   = available for video segments
 *
 * Each segment costs ~VIDEO_TOKENS_PER_SEC × durationSec (280s fallback when unknown).
 *
 * @param {Array<{durSec: number}>} segmentMetas – per-segment metadata with durations
 * @param {Array} contextDocs – prepared context docs (after deep-summary, if any)
 * @param {object} opts
 * @param {number} [opts.contextWindow=1048576] – model context window
 * @param {number} [opts.promptOverhead=120000] – tokens reserved for prompt/output/thinking
 * @param {number} [opts.previousAnalysesTokens=0] – current progressive context size
 * @param {number} [opts.maxBatchSize=8] – hard cap on segments per batch
 * @returns {{ batches: number[][], batchSize: number, reason: string }}
 */
function planSegmentBatches(segmentMetas, contextDocs, opts = {}) {
  const {
    contextWindow = 1_048_576,
    promptOverhead = 120_000,
    previousAnalysesTokens = 0,
    maxBatchSize = 8,
  } = opts;

  // Guard: no segments → no batches. Without this, Math.max(...[]) below
  // would report batchSize of -Infinity.
  if (!segmentMetas || segmentMetas.length === 0) {
    return { batches: [], batchSize: 0, reason: 'no segments to batch' };
  }

  // Total doc tokens
  const docTokens = contextDocs.reduce((sum, d) => sum + estimateDocTokens(d), 0);

  // Available tokens for video
  const available = contextWindow - promptOverhead - docTokens - previousAnalysesTokens;

  if (available <= 0) {
    return { batches: segmentMetas.map((_, i) => [i]), batchSize: 1, reason: 'no headroom — 1 segment per call' };
  }

  // Greedy batching: pack consecutive segments while they fit (by token cost
  // and by the hard maxBatchSize cap).
  const batches = [];
  let batch = [];
  let batchTokens = 0;

  for (let i = 0; i < segmentMetas.length; i++) {
    const segTokens = Math.ceil((segmentMetas[i].durSec || 280) * VIDEO_TOKENS_PER_SEC);

    if (batch.length > 0 && (batchTokens + segTokens > available || batch.length >= maxBatchSize)) {
      batches.push(batch);
      batch = [];
      batchTokens = 0;
    }

    // An oversized single segment still gets its own batch of one.
    batch.push(i);
    batchTokens += segTokens;
  }
  if (batch.length > 0) batches.push(batch);

  // Effective max batch size across all batches
  const effectiveBatchSize = Math.max(...batches.map(b => b.length));

  const reason = effectiveBatchSize > 1
    ? `${(available / 1000).toFixed(0)}K tokens available → up to ${effectiveBatchSize} segments/batch`
    : 'segments too large for batching — 1 per call';

  return { batches, batchSize: effectiveBatchSize, reason };
}
|
|
588
|
+
|
|
589
|
+
/**
 * Build a segment focus block that covers a RANGE of segments in a batch.
 *
 * @param {number[]} segmentIndices – indices of segments in this batch (0-based)
 * @param {number} totalSegments – total segment count across the whole file
 * @param {Array} previousAnalyses – all analyses from prior batches
 * @param {string} userName
 * @returns {string}
 */
function buildBatchSegmentFocus(segmentIndices, totalSegments, previousAnalyses, userName) {
  const first = segmentIndices[0];
  const last = segmentIndices[segmentIndices.length - 1];
  const out = [];

  // ── Header: where this batch sits within the overall recording ──
  if (segmentIndices.length > 1) {
    let posLabel = 'MIDDLE';
    if (first === 0) {
      posLabel = 'FIRST';
    } else if (last === totalSegments - 1) {
      posLabel = 'LAST';
    }
    out.push(`MULTI-SEGMENT BATCH: segments ${first + 1}–${last + 1} of ${totalSegments} (${posLabel} — analyzing ${segmentIndices.length} consecutive segments together)`);
    out.push(`You are watching ${segmentIndices.length} video segments in sequence. Each segment is a separate video file provided in order.`);
    out.push(`IMPORTANT: Tag every extracted item with its correct source_segment number (${first + 1}–${last + 1}) based on which video it appears in.`);
  } else {
    let role = 'MIDDLE — track changes & new items';
    if (first === 0) {
      role = 'FIRST — establish baseline';
    } else if (first === totalSegments - 1) {
      role = 'LAST — capture final decisions & wrap-up tasks';
    }
    out.push(`SEGMENT POSITION: ${first + 1} of ${totalSegments} (${role})`);
  }

  // ── Focus instructions ──
  if (first === 0) {
    out.push('FOCUS: Identify ALL tickets, participants, and initial task assignments.');
    out.push('Establish the baseline state for each ticket. Cross-reference everything against task documents.');
    out.push(`Pay special attention to tasks assigned to "${userName}".`);
  } else {
    // Summarize what earlier batches already extracted so the model avoids
    // duplicating known items.
    const tickets = new Set();
    const crs = new Set();
    const actions = new Set();
    const blockers = new Set();

    for (const prev of previousAnalyses) {
      for (const t of prev.tickets || []) tickets.add(t.ticket_id);
      for (const cr of prev.change_requests || []) crs.add(cr.id);
      for (const item of prev.action_items || []) actions.add(item.id);
      for (const b of prev.blockers || []) blockers.add(b.id);
    }

    out.push('ALREADY FOUND in previous segments:');
    if (tickets.size > 0) out.push(`  Tickets: ${[...tickets].join(', ')}`);
    if (crs.size > 0) {
      const overflow = crs.size > 20 ? ` (+${crs.size - 20} more)` : '';
      out.push(`  CRs: ${[...crs].slice(0, 20).join(', ')}${overflow}`);
    }
    if (actions.size > 0) out.push(`  Actions: ${[...actions].join(', ')}`);
    if (blockers.size > 0) out.push(`  Blockers: ${[...blockers].join(', ')}`);

    out.push('');
    out.push('FOCUS for this batch:');
    out.push('1. DETECT NEW tickets, CRs, action items, blockers not yet found');
    out.push('2. TRACK STATE CHANGES to already-known items within and across the segments');
    out.push('3. CAPTURE any tasks assigned, re-assigned, or completed');
    out.push(`4. UPDATE ${userName}'s task list — any new assignments, completions, or blockers`);

    if (last === totalSegments - 1) {
      out.push('');
      out.push('LAST SEGMENT SPECIAL:');
      out.push('- Capture all FINAL DECISIONS and wrap-up action items');
      out.push('- Note any "next steps" or "follow-up" items mentioned');
      out.push('- Identify items that were discussed but NOT resolved');
    }
  }

  return out.join('\n');
}
|
|
661
|
+
|
|
514
662
|
module.exports = {
|
|
515
663
|
estimateTokens,
|
|
664
|
+
estimateDocTokens,
|
|
516
665
|
selectDocsByBudget,
|
|
517
666
|
sliceVttForSegment,
|
|
518
667
|
buildProgressiveContext,
|
|
519
668
|
buildSegmentFocus,
|
|
669
|
+
buildBatchSegmentFocus,
|
|
520
670
|
detectBoundaryContext,
|
|
671
|
+
planSegmentBatches,
|
|
521
672
|
VTT_FALLBACK_MAX_CHARS,
|
|
673
|
+
VIDEO_TOKENS_PER_SEC,
|
|
522
674
|
};
|
package/src/utils/diff-engine.js
CHANGED
|
@@ -238,10 +238,10 @@ function renderDiffMarkdown(diff) {
|
|
|
238
238
|
for (const { name, d } of categories) {
|
|
239
239
|
const a = d.added?.length || 0;
|
|
240
240
|
const r = d.removed?.length || 0;
|
|
241
|
-
const
|
|
241
|
+
const ch = d.changed?.length || 0;
|
|
242
242
|
const u = d.unchanged?.length || 0;
|
|
243
|
-
if (a + r +
|
|
244
|
-
ln(`| ${name} | ${a > 0 ? `+${a}` : '-'} | ${r > 0 ? `-${r}` : '-'} | ${
|
|
243
|
+
if (a + r + ch > 0) {
|
|
244
|
+
ln(`| ${name} | ${a > 0 ? `+${a}` : '-'} | ${r > 0 ? `-${r}` : '-'} | ${ch > 0 ? `~${ch}` : '-'} | ${u} |`);
|
|
245
245
|
}
|
|
246
246
|
}
|
|
247
247
|
ln('');
|
|
@@ -293,10 +293,10 @@ function renderDiffMarkdown(diff) {
|
|
|
293
293
|
if (allChanged.length > 0) {
|
|
294
294
|
ln('### 🔀 Changed Items');
|
|
295
295
|
ln('');
|
|
296
|
-
for (const
|
|
297
|
-
const title =
|
|
298
|
-
ln(`- **[${
|
|
299
|
-
for (const ch of
|
|
296
|
+
for (const change of allChanged) {
|
|
297
|
+
const title = change.item.title || change.item.description || change.item.ticket_id || change.id;
|
|
298
|
+
ln(`- **[${change.type}]** ${change.id}: ${title}`);
|
|
299
|
+
for (const ch of change.changes) {
|
|
300
300
|
ln(` - \`${ch.field}\`: ${ch.from || '_empty_'} → **${ch.to || '_empty_'}**`);
|
|
301
301
|
}
|
|
302
302
|
}
|
package/src/utils/interactive.js
CHANGED
|
@@ -34,6 +34,31 @@ const CR = '\r';
|
|
|
34
34
|
|
|
35
35
|
// ── Render helpers ────────────────────────────────────────────────────────────
|
|
36
36
|
|
|
37
|
+
/**
|
|
38
|
+
* Truncate a string that may contain ANSI escape codes to fit within
|
|
39
|
+
* `maxCols` visible characters. Preserves ANSI sequences so colours are
|
|
40
|
+
* not broken, and appends '…' when truncation occurs.
|
|
41
|
+
*/
|
|
42
|
+
function fitToWidth(str, maxCols) {
|
|
43
|
+
if (!maxCols || maxCols <= 0) return str;
|
|
44
|
+
const visible = strip(str);
|
|
45
|
+
if (visible.length <= maxCols) return str;
|
|
46
|
+
|
|
47
|
+
let visCount = 0;
|
|
48
|
+
let i = 0;
|
|
49
|
+
const target = maxCols - 1; // leave room for '…'
|
|
50
|
+
while (i < str.length && visCount < target) {
|
|
51
|
+
if (str[i] === '\x1b') {
|
|
52
|
+
// Skip full ANSI sequence: ESC [ ... m
|
|
53
|
+
const end = str.indexOf('m', i);
|
|
54
|
+
if (end !== -1) { i = end + 1; continue; }
|
|
55
|
+
}
|
|
56
|
+
visCount++;
|
|
57
|
+
i++;
|
|
58
|
+
}
|
|
59
|
+
return str.slice(0, i) + '\x1b[0m…';
|
|
60
|
+
}
|
|
61
|
+
|
|
37
62
|
/**
|
|
38
63
|
* Build display strings for each item.
|
|
39
64
|
*
|
|
@@ -66,11 +91,14 @@ function renderList(items, cursor, selected, multi = false) {
|
|
|
66
91
|
/**
 * Write an array of strings to stdout, one per line.
 * Each line is preceded by CR + CLEAR_LINE so the entire row is wiped first.
 * Lines are truncated to terminal width to prevent wrapping (which breaks
 * cursor-UP repositioning on redraw).
 */
function writeLines(lines) {
  // Fall back to a conventional 80-column width when not attached to a TTY.
  const cols = process.stdout.columns || 80;
  lines.forEach((line, idx) => {
    if (idx > 0) process.stdout.write('\n');
    process.stdout.write(`${CR}${CLEAR_LINE}${fitToWidth(line, cols - 1)}`);
  });
}
|
|
76
104
|
|
|
@@ -89,7 +117,7 @@ function decodeKey(buf) {
|
|
|
89
117
|
}
|
|
90
118
|
if (buf[0] === 0x0d || buf[0] === 0x0a) return 'enter';
|
|
91
119
|
if (buf[0] === 0x20) return 'space';
|
|
92
|
-
if (buf[0] === 0x03) return '
|
|
120
|
+
if (buf[0] === 0x03) return 'ctrl-c';
|
|
93
121
|
if (buf[0] === 0x61 || buf[0] === 0x41) return 'a';
|
|
94
122
|
return null;
|
|
95
123
|
}
|
|
@@ -107,6 +135,10 @@ function decodeKey(buf) {
|
|
|
107
135
|
* @returns {Promise<{index: number, value: any}>}
|
|
108
136
|
*/
|
|
109
137
|
function selectOne({ title, items, default: defaultIdx = 0, footer }) {
|
|
138
|
+
if (!items || items.length === 0) {
|
|
139
|
+
return Promise.resolve({ index: -1, value: undefined });
|
|
140
|
+
}
|
|
141
|
+
|
|
110
142
|
if (!process.stdin.isTTY) {
|
|
111
143
|
return _fallbackSelectOne({ title, items, default: defaultIdx });
|
|
112
144
|
}
|
|
@@ -136,8 +168,9 @@ function selectOne({ title, items, default: defaultIdx = 0, footer }) {
|
|
|
136
168
|
const lines = renderList(items, cursor);
|
|
137
169
|
writeLines(lines);
|
|
138
170
|
if (hasFooter) {
|
|
171
|
+
const cols = process.stdout.columns || 80;
|
|
139
172
|
process.stdout.write('\n');
|
|
140
|
-
process.stdout.write(`${CR}${CLEAR_LINE}${c.dim(` ${footer}`)}`);
|
|
173
|
+
process.stdout.write(`${CR}${CLEAR_LINE}${fitToWidth(c.dim(` ${footer}`), cols - 1)}`);
|
|
141
174
|
}
|
|
142
175
|
// Terminal cursor is now on the LAST rendered line
|
|
143
176
|
firstDraw = false;
|
|
@@ -173,6 +206,10 @@ function selectOne({ title, items, default: defaultIdx = 0, footer }) {
|
|
|
173
206
|
const chosen = items[defaultIdx];
|
|
174
207
|
console.log(c.success(`${strip(chosen.label)}`));
|
|
175
208
|
resolve({ index: defaultIdx, value: chosen.value });
|
|
209
|
+
} else if (key === 'ctrl-c') {
|
|
210
|
+
cleanup();
|
|
211
|
+
console.log('');
|
|
212
|
+
process.exit(130);
|
|
176
213
|
}
|
|
177
214
|
};
|
|
178
215
|
|
|
@@ -193,6 +230,10 @@ function selectOne({ title, items, default: defaultIdx = 0, footer }) {
|
|
|
193
230
|
* @returns {Promise<{indices: number[], values: any[]}>}
|
|
194
231
|
*/
|
|
195
232
|
function selectMany({ title, items, defaultSelected, footer }) {
|
|
233
|
+
if (!items || items.length === 0) {
|
|
234
|
+
return Promise.resolve({ indices: [], values: [] });
|
|
235
|
+
}
|
|
236
|
+
|
|
196
237
|
if (!process.stdin.isTTY) {
|
|
197
238
|
return _fallbackSelectMany({ title, items, defaultSelected });
|
|
198
239
|
}
|
|
@@ -220,8 +261,9 @@ function selectMany({ title, items, defaultSelected, footer }) {
|
|
|
220
261
|
}
|
|
221
262
|
const lines = renderList(items, cursor, selected, true);
|
|
222
263
|
writeLines(lines);
|
|
264
|
+
const cols = process.stdout.columns || 80;
|
|
223
265
|
process.stdout.write('\n');
|
|
224
|
-
process.stdout.write(`${CR}${CLEAR_LINE}${c.dim(` ${footerText}`)}`);
|
|
266
|
+
process.stdout.write(`${CR}${CLEAR_LINE}${fitToWidth(c.dim(` ${footerText}`), cols - 1)}`);
|
|
225
267
|
firstDraw = false;
|
|
226
268
|
};
|
|
227
269
|
|
|
@@ -271,6 +313,10 @@ function selectMany({ title, items, defaultSelected, footer }) {
|
|
|
271
313
|
const indices = [...(defaultSelected || [])].sort((a, b) => a - b);
|
|
272
314
|
const values = indices.map(i => items[i].value);
|
|
273
315
|
resolve({ indices, values });
|
|
316
|
+
} else if (key === 'ctrl-c') {
|
|
317
|
+
cleanup();
|
|
318
|
+
console.log('');
|
|
319
|
+
process.exit(130);
|
|
274
320
|
}
|
|
275
321
|
};
|
|
276
322
|
|
|
@@ -18,16 +18,17 @@ const { fmtDuration } = require('./format');
|
|
|
18
18
|
// ======================== PHASE DEFINITIONS ========================
|
|
19
19
|
|
|
20
20
|
// Ordered pipeline phases shown in the progress bar. `index` is the 1-based
// display position; it is derived from array order so the two can never
// drift apart when phases are added or reordered.
const PHASES = [
  { key: 'init',         label: 'Init' },
  { key: 'discover',     label: 'Discover' },
  { key: 'services',     label: 'Services' },
  { key: 'deep-summary', label: 'Deep Summary' },
  { key: 'compress',     label: 'Compress' },
  { key: 'upload',       label: 'Upload' },
  { key: 'analyze',      label: 'Analyze' },
  { key: 'compile',      label: 'Compile' },
  { key: 'output',       label: 'Output' },
  { key: 'summary',      label: 'Summary' },
  { key: 'deep-dive',    label: 'Deep Dive' },
].map((phase, i) => ({ ...phase, index: i + 1 }));

// Fast lookup of a phase definition by its key.
const PHASE_MAP = Object.fromEntries(PHASES.map(p => [p.key, p]));
|