task-summary-extractor 9.2.2 → 9.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +6 -2
- package/ARCHITECTURE.md +37 -37
- package/QUICK_START.md +1 -1
- package/README.md +32 -13
- package/package.json +2 -3
- package/src/config.js +1 -1
- package/src/modes/deep-summary.js +406 -0
- package/src/phases/discover.js +1 -0
- package/src/phases/init.js +9 -30
- package/src/phases/services.js +61 -1
- package/src/pipeline.js +33 -3
- package/src/services/gemini.js +142 -17
- package/src/utils/cli.js +89 -1
- package/src/utils/context-manager.js +31 -4
- package/EXPLORATION.md +0 -514
|
@@ -0,0 +1,406 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Deep Summary — pre-summarizes context documents before segment analysis
|
|
3
|
+
* to dramatically reduce input tokens per segment.
|
|
4
|
+
*
|
|
5
|
+
* Instead of sending full document content (potentially 500K+ tokens) to
|
|
6
|
+
* every segment, this module:
|
|
7
|
+
* 1. Groups documents by priority tier
|
|
8
|
+
* 2. Sends each group to Gemini for intelligent condensation
|
|
9
|
+
* 3. Replaces full content with condensed summaries
|
|
10
|
+
* 4. Preserves "excluded" docs at full fidelity (user-chosen focus docs)
|
|
11
|
+
* 5. Ensures summaries capture all ticket IDs, action items, statuses
|
|
12
|
+
*
|
|
13
|
+
* The user can pick specific docs to EXCLUDE from summarization — these stay
|
|
14
|
+
* full. The summary pass receives extra instructions to focus on extracting
|
|
15
|
+
* information related to these excluded docs' topics.
|
|
16
|
+
*
|
|
17
|
+
* Token savings: typically 60-80% reduction in per-segment context tokens.
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
'use strict';
|
|
21
|
+
|
|
22
|
+
const { extractJson } = require('../utils/json-parser');
|
|
23
|
+
const { withRetry } = require('../utils/retry');
|
|
24
|
+
const { estimateTokens } = require('../utils/context-manager');
|
|
25
|
+
const { c } = require('../utils/colors');
|
|
26
|
+
const config = require('../config');
|
|
27
|
+
|
|
28
|
+
// ======================== CONSTANTS ========================

/** Max tokens for a single summarization call output */
const SUMMARY_MAX_OUTPUT = 16384;

/** Max input chars to send in one summarization batch (~200K tokens @ 0.3 tok/char) */
const BATCH_MAX_CHARS = 600000;

/** Minimum content length (chars) to bother summarizing — below this, keep full */
const MIN_SUMMARIZE_LENGTH = 500;

/**
 * Hard cap per-document chars before sending to Gemini.
 * Gemini context = 1M tokens; prompt overhead ~50K tokens; at 0.3 tok/char
 * 900K chars ≈ 270K tokens — safe with prompt + thinking overhead.
 */
const MAX_DOC_CHARS = 900000;

// ======================== BATCH BUILDER ========================

/**
 * Group documents into batches that fit within the batch char limit.
 * Each batch will be summarized in a single Gemini call.
 *
 * A doc longer than MAX_DOC_CHARS is truncated (tail dropped) with a warning
 * banner prepended; a doc longer than maxChars always occupies its own batch.
 *
 * @param {Array} docs - Context docs to batch [{type, fileName, content}]
 * @param {number} [maxChars=BATCH_MAX_CHARS] - Max chars per batch
 * @returns {Array<Array>} Batches of docs
 */
function buildBatches(docs, maxChars = BATCH_MAX_CHARS) {
  const batches = [];
  let pending = [];
  let pendingChars = 0;

  // Close out the in-progress batch; no-op when it is empty.
  const flush = () => {
    if (pending.length === 0) return;
    batches.push(pending);
    pending = [];
    pendingChars = 0;
  };

  for (const original of docs) {
    let doc = original;
    let size = doc.content ? doc.content.length : 0;

    // Truncate extremely large docs to avoid exceeding the context window.
    // The tail beyond MAX_DOC_CHARS is dropped and a warning is prepended so
    // the summariser knows the content is incomplete.
    if (size > MAX_DOC_CHARS) {
      const head = doc.content.substring(0, MAX_DOC_CHARS);
      doc = {
        ...doc,
        content: `[TRUNCATED — original ${(size / 1024).toFixed(0)} KB exceeded the ${(MAX_DOC_CHARS / 1024).toFixed(0)} KB limit; only the first ${(MAX_DOC_CHARS / 1024).toFixed(0)} KB is included]\n\n${head}`,
        _truncatedFrom: size,
      };
      size = doc.content.length;
      console.warn(`  ${c.warn(`${doc.fileName} truncated from ${(doc._truncatedFrom / 1024).toFixed(0)} KB to ${(MAX_DOC_CHARS / 1024).toFixed(0)} KB for deep summary`)}`);
    }

    // An oversized doc gets a dedicated single-doc batch.
    if (size > maxChars) {
      flush();
      batches.push([doc]);
      continue;
    }

    // Start a new batch when adding this doc would overflow the current one.
    if (pendingChars + size > maxChars && pending.length > 0) {
      flush();
    }

    pending.push(doc);
    pendingChars += size;
  }

  flush();

  return batches;
}
|
|
105
|
+
|
|
106
|
+
// ======================== SUMMARIZE ONE BATCH ========================

/**
 * Summarize a batch of documents into a condensed representation via a
 * single Gemini call. Only 'inlineText' docs with content are included in
 * the prompt; the model is instructed to return JSON mapping each fileName
 * to its condensed text.
 *
 * @param {object} ai - Gemini AI instance
 * @param {Array} docs - Documents in this batch
 * @param {object} [opts]
 * @param {string[]} [opts.focusTopics=[]] - Topics to focus on (from excluded docs)
 * @param {number} [opts.thinkingBudget=8192] - Thinking token budget
 * @param {number} [opts.batchIndex=0] - Batch number for logging
 * @param {number} [opts.totalBatches=1] - Total batches for logging
 * @returns {Promise<{summaries: Object<string, string>, metadata: object, tokenUsage: object}|null>}
 *   Resolves null when the batch has no summarizable docs, the model output
 *   is not parseable JSON, or the call fails after retries — the caller
 *   treats null as "keep the original docs".
 */
async function summarizeBatch(ai, docs, opts = {}) {
  const {
    focusTopics = [],
    thinkingBudget = 8192,
    batchIndex = 0,
    totalBatches = 1,
  } = opts;

  // Only text docs with actual content are condensable.
  const docEntries = docs
    .filter(d => d.type === 'inlineText' && d.content)
    .map(d => `=== DOCUMENT: ${d.fileName} ===\n${d.content}`);

  if (docEntries.length === 0) return null;

  // Optional prompt section steering the model to be extra thorough about
  // topics from docs the user excluded (kept at full fidelity).
  const focusSection = focusTopics.length > 0
    ? `\n\nFOCUS AREAS — The user has selected certain documents to keep at full fidelity. ` +
      `Your summaries must be especially thorough about information related to these topics:\n` +
      focusTopics.map((t, i) => `  ${i + 1}. ${t}`).join('\n') +
      `\n\nFor every ticket ID, action item, blocker, or status mentioned in relation to these ` +
      `focus areas, include them verbatim in the summary. Do NOT omit any IDs or assignments.`
    : '';

  const promptText = `You are a precision document summarizer for a meeting analysis pipeline.

Your job: read ALL documents below and produce a CONDENSED version of each that preserves every piece of actionable information.

WHAT TO PRESERVE (in order of importance):
1. IDENTIFIERS — Every ticket ID, task ID, CR number, PR number, JIRA key, GitHub issue, reference number, version number. Copy these VERBATIM — do not paraphrase or abbreviate IDs.
2. PEOPLE — All assignees, reviewers, approvers, requesters, and responsible parties. Use full names exactly as they appear.
3. STATUSES & STATES — All statuses (open, closed, in_progress, blocked, deferred, etc.) and state markers (✅, ⬜, ⏸️, 🔲). Preserve the exact status vocabulary used in the document.
4. ACTION ITEMS — Every action item, commitment, and deliverable with its owner, deadline, and dependency chain.
5. BLOCKERS & DEPENDENCIES — What is blocked, by whom, what it blocks downstream.
6. DECISIONS & RATIONALE — Key decisions and WHY they were made (not just what).
7. CROSS-REFERENCES — When Document A references something from Document B, preserve that linkage. If ticket X is mentioned in a code-map entry, keep both the ticket ID and the code-map path.
8. TECHNICAL SPECIFICS — File paths, code references, API endpoints, database tables, configuration keys, environment names (dev/staging/prod).
9. NUMERICAL DATA — Percentages, counts, dates, deadlines, version numbers, sizes.
10. CHECKLISTS & PROGRESS — Preserve checklist items with their completion status markers. Include progress ratios (e.g., "35/74 done, 6 blocked").

WHAT TO REMOVE:
- Verbose explanations of well-known concepts
- Redundant phrasing, filler text, throat-clearing sentences
- Formatting-only content (decorative headers, horizontal rules, empty sections)
- Boilerplate/template text that adds no project-specific information
- Repeated definitions or glossary entries that don't change across documents
${focusSection}

QUALITY REQUIREMENTS:
- Aim for 70-80% size reduction while preserving ALL actionable information.
- Every ID, every name, every status MUST survive the summarization.
- If two documents reference the same entity (ticket, file, person), ensure the summary preserves enough context in BOTH summaries for downstream consumers to make the connection.
- When a document contains a table, preserve the table structure (header + key rows). Omit empty or low-value rows.
- When a document has nested structure (subsections, indented lists), preserve the hierarchy — use indentation or numbering.

OUTPUT FORMAT:
Return valid JSON with this structure:
{
  "summaries": {
    "<fileName>": "<condensed text — plain text, preserving all key info>",
    ...
  },
  "metadata": {
    "originalTokensEstimate": <number>,
    "summaryTokensEstimate": <number>,
    "compressionRatio": <number between 0 and 1>
  }
}

DOCUMENTS TO SUMMARIZE (${docEntries.length} documents):

${docEntries.join('\n\n')}`;

  const requestPayload = {
    model: config.GEMINI_MODEL,
    contents: [{ role: 'user', parts: [{ text: promptText }] }],
    config: {
      systemInstruction: 'You are a lossless information compressor specialized in engineering and business documents. Preserve every ID, name, status, assignment, dependency, file path, decision rationale, and actionable detail. Maintain cross-document references (when doc A mentions entity from doc B, keep both sides). Output valid JSON only.',
      maxOutputTokens: SUMMARY_MAX_OUTPUT,
      // temperature 0: minimize randomness — compression should be faithful,
      // not creative.
      temperature: 0,
      thinkingConfig: { thinkingBudget },
    },
  };

  try {
    const label = totalBatches > 1
      ? `Deep summary batch ${batchIndex + 1}/${totalBatches}`
      : 'Deep summary';

    const response = await withRetry(
      () => ai.models.generateContent(requestPayload),
      { label, maxRetries: 2, baseDelay: 3000 }
    );

    const rawText = response.text;
    const parsed = extractJson(rawText);

    // Unparseable or structurally wrong output — signal failure to caller.
    if (!parsed || !parsed.summaries) return null;

    // Token accounting from the API's usage metadata (zeros when absent).
    const usage = response.usageMetadata || {};
    const tokenUsage = {
      inputTokens: usage.promptTokenCount || 0,
      outputTokens: usage.candidatesTokenCount || 0,
      totalTokens: usage.totalTokenCount || 0,
      thoughtTokens: usage.thoughtsTokenCount || 0,
    };

    return { summaries: parsed.summaries, metadata: parsed.metadata || {}, tokenUsage };
  } catch (err) {
    // Non-fatal: warn and return null so the caller keeps the full docs.
    console.warn(`  ${c.warn(`Deep summary batch ${batchIndex + 1} failed: ${err.message}`)}`);
    return null;
  }
}
|
|
231
|
+
|
|
232
|
+
// ======================== MAIN ENTRY POINT ========================

/**
 * Run deep summarization on context documents.
 *
 * Partitions docs into "summarize" vs "keep full" (non-text docs, docs the
 * user excluded, and tiny docs stay full), batches the rest, condenses each
 * batch via Gemini, then swaps the condensed text into the returned doc
 * list. Docs for which no summary comes back are returned unchanged.
 *
 * @param {object} ai - Gemini AI instance
 * @param {Array} contextDocs - All prepared context docs
 * @param {object} [opts]
 * @param {string[]} [opts.excludeFileNames=[]] - Doc fileNames to keep at full fidelity
 * @param {number} [opts.thinkingBudget=8192] - Thinking budget per batch
 * @param {Function} [opts.onProgress] - Callback(done, total) for progress
 * @returns {Promise<{docs: Array, stats: object}>}
 */
async function deepSummarize(ai, contextDocs, opts = {}) {
  const {
    excludeFileNames = [],
    thinkingBudget = 8192,
    onProgress = null,
  } = opts;

  // Exclusions are matched case-insensitively by file name.
  const excludeSet = new Set(excludeFileNames.map(n => n.toLowerCase()));

  // Partition: docs to summarize vs docs to keep full
  const toSummarize = [];
  const keepFull = [];

  for (const doc of contextDocs) {
    // Keep non-text docs (fileData = PDF etc.) as-is
    if (doc.type !== 'inlineText') {
      keepFull.push(doc);
      continue;
    }

    // Keep excluded docs at full fidelity
    if (excludeSet.has(doc.fileName.toLowerCase())) {
      keepFull.push(doc);
      continue;
    }

    // Skip tiny docs — not worth summarizing
    if (!doc.content || doc.content.length < MIN_SUMMARIZE_LENGTH) {
      keepFull.push(doc);
      continue;
    }

    toSummarize.push(doc);
  }

  // Nothing to summarize — return the input untouched with zeroed stats.
  if (toSummarize.length === 0) {
    return {
      docs: contextDocs,
      stats: {
        summarized: 0,
        keptFull: keepFull.length,
        originalTokens: 0,
        summaryTokens: 0,
        savedTokens: 0,
        savingsPercent: 0,
        totalInputTokens: 0,
        totalOutputTokens: 0,
      },
    };
  }

  // Build focus topics from excluded docs (tell summarizer what to prioritize)
  // NOTE(review): the "topics" passed to the prompt are just the excluded
  // docs' file names — confirm that is the intended signal.
  const focusTopics = keepFull
    .filter(d => d.type === 'inlineText' && excludeSet.has(d.fileName.toLowerCase()))
    .map(d => d.fileName);

  // Batch documents
  const batches = buildBatches(toSummarize);

  console.log(`  Batched ${c.highlight(toSummarize.length)} doc(s) into ${c.highlight(batches.length)} summarization batch(es)`);
  if (focusTopics.length > 0) {
    console.log(`  Focus topics from ${c.highlight(focusTopics.length)} excluded doc(s):`);
    focusTopics.forEach(t => console.log(`    ${c.dim('•')} ${c.cyan(t)}`));
  }

  // Process batches (sequential for now; can add parallelization later)
  const allSummaries = new Map();
  let totalInput = 0;
  let totalOutput = 0;
  let batchesDone = 0;

  for (let i = 0; i < batches.length; i++) {
    const result = await summarizeBatch(ai, batches[i], {
      focusTopics,
      thinkingBudget,
      batchIndex: i,
      totalBatches: batches.length,
    });

    batchesDone++;
    if (onProgress) onProgress(batchesDone, batches.length);

    // A failed batch yields null — its docs keep their full content below.
    if (result && result.summaries) {
      // Keys are lowercased so lookups match regardless of filename casing.
      for (const [fileName, summary] of Object.entries(result.summaries)) {
        allSummaries.set(fileName.toLowerCase(), summary);
      }
      totalInput += result.tokenUsage.inputTokens;
      totalOutput += result.tokenUsage.outputTokens;
    }
  }

  // Replace doc content with summaries
  let originalTokens = 0;
  let summaryTokens = 0;
  const resultDocs = [];

  for (const doc of contextDocs) {
    if (doc.type !== 'inlineText') {
      resultDocs.push(doc);
      continue;
    }

    // Check if this doc was excluded (kept full)
    if (excludeSet.has(doc.fileName.toLowerCase())) {
      resultDocs.push(doc);
      continue;
    }

    // Check if we have a summary for this doc
    const summaryKey = doc.fileName.toLowerCase();
    const summary = allSummaries.get(summaryKey);

    if (summary && summary.length > 0) {
      const origTokens = estimateTokens(doc.content);
      const sumTokens = estimateTokens(summary);
      originalTokens += origTokens;
      summaryTokens += sumTokens;

      // A visible banner records the compression so downstream consumers
      // know the content is condensed, not original.
      resultDocs.push({
        ...doc,
        content: `[Deep Summary — original: ~${origTokens.toLocaleString()} tokens → condensed: ~${sumTokens.toLocaleString()} tokens]\n\n${summary}`,
        _originalLength: doc.content.length,
        _summaryLength: summary.length,
        _deepSummarized: true,
      });
    } else {
      // No summary returned — keep original
      resultDocs.push(doc);
    }
  }

  const savedTokens = originalTokens - summaryTokens;
  const savingsPercent = originalTokens > 0
    ? parseFloat(((savedTokens / originalTokens) * 100).toFixed(1))
    : 0;

  // NOTE(review): `summarized` counts summary entries the model returned,
  // not docs actually replaced — these can differ if the model emits
  // unexpected keys. Confirm whether callers rely on the distinction.
  return {
    docs: resultDocs,
    stats: {
      summarized: allSummaries.size,
      keptFull: keepFull.length,
      originalTokens,
      summaryTokens,
      savedTokens,
      savingsPercent,
      totalInputTokens: totalInput,
      totalOutputTokens: totalOutput,
    },
  };
}
|
|
395
|
+
|
|
396
|
+
// ======================== EXPORTS ========================

// deepSummarize is the entry point used by the pipeline; the batch helpers
// and tuning constants are also exported (presumably for tests/tuning —
// confirm before relying on them from outside this package).
module.exports = {
  deepSummarize,
  summarizeBatch,
  buildBatches,
  SUMMARY_MAX_OUTPUT,
  BATCH_MAX_CHARS,
  MIN_SUMMARIZE_LENGTH,
  MAX_DOC_CHARS,
};
|
package/src/phases/discover.js
CHANGED
|
@@ -85,6 +85,7 @@ async function phaseDiscover(ctx) {
|
|
|
85
85
|
if (opts.resume) activeFlags.push('resume');
|
|
86
86
|
if (opts.reanalyze) activeFlags.push('reanalyze');
|
|
87
87
|
if (opts.dryRun) activeFlags.push('dry-run');
|
|
88
|
+
if (opts.deepSummary) activeFlags.push('deep-summary');
|
|
88
89
|
if (activeFlags.length > 0) {
|
|
89
90
|
console.log(` Flags: ${c.yellow(activeFlags.join(', '))}`);
|
|
90
91
|
}
|
package/src/phases/init.js
CHANGED
|
@@ -67,6 +67,10 @@ async function phaseInit() {
|
|
|
67
67
|
disableDiff: !!flags['no-diff'],
|
|
68
68
|
noHtml: !!flags['no-html'],
|
|
69
69
|
deepDive: !!flags['deep-dive'],
|
|
70
|
+
deepSummary: !!flags['deep-summary'],
|
|
71
|
+
deepSummaryExclude: typeof flags['exclude-docs'] === 'string'
|
|
72
|
+
? flags['exclude-docs'].split(',').map(s => s.trim()).filter(Boolean)
|
|
73
|
+
: [], // populated by CLI flag, interactive picker, or kept empty
|
|
70
74
|
dynamic: !!flags.dynamic,
|
|
71
75
|
request: typeof flags.request === 'string' ? flags.request : null,
|
|
72
76
|
updateProgress: !!flags['update-progress'],
|
|
@@ -94,36 +98,10 @@ async function phaseInit() {
|
|
|
94
98
|
opts.runMode = mode;
|
|
95
99
|
|
|
96
100
|
if (mode !== 'custom') {
|
|
97
|
-
// Apply preset overrides
|
|
98
|
-
const {
|
|
99
|
-
|
|
100
|
-
const
|
|
101
|
-
fast: {
|
|
102
|
-
disableFocusedPass: true,
|
|
103
|
-
disableLearning: true,
|
|
104
|
-
disableDiff: true,
|
|
105
|
-
format: 'md,json',
|
|
106
|
-
formats: new Set(['md', 'json']),
|
|
107
|
-
modelTier: 'economy',
|
|
108
|
-
},
|
|
109
|
-
balanced: {
|
|
110
|
-
disableFocusedPass: false,
|
|
111
|
-
disableLearning: false,
|
|
112
|
-
disableDiff: false,
|
|
113
|
-
format: 'all',
|
|
114
|
-
formats: new Set(['md', 'html', 'json', 'pdf', 'docx']),
|
|
115
|
-
modelTier: 'balanced',
|
|
116
|
-
},
|
|
117
|
-
detailed: {
|
|
118
|
-
disableFocusedPass: false,
|
|
119
|
-
disableLearning: false,
|
|
120
|
-
disableDiff: false,
|
|
121
|
-
format: 'all',
|
|
122
|
-
formats: new Set(['md', 'html', 'json', 'pdf', 'docx']),
|
|
123
|
-
modelTier: 'premium',
|
|
124
|
-
},
|
|
125
|
-
};
|
|
126
|
-
const preset = presetOverrides[mode];
|
|
101
|
+
// Apply preset overrides from the shared RUN_PRESETS definition
|
|
102
|
+
const { RUN_PRESETS } = require('../utils/cli');
|
|
103
|
+
const presetDef = RUN_PRESETS[mode];
|
|
104
|
+
const preset = presetDef ? presetDef.overrides : null;
|
|
127
105
|
if (preset) {
|
|
128
106
|
opts.disableFocusedPass = preset.disableFocusedPass;
|
|
129
107
|
opts.disableLearning = preset.disableLearning;
|
|
@@ -322,6 +300,7 @@ function _printRunSummary(opts, modelId, models, targetDir) {
|
|
|
322
300
|
if (!opts.disableLearning) features.push(c.green('learning'));
|
|
323
301
|
if (!opts.disableDiff) features.push(c.green('diff'));
|
|
324
302
|
if (opts.deepDive) features.push(c.cyan('deep-dive'));
|
|
303
|
+
if (opts.deepSummary) features.push(c.cyan('deep-summary'));
|
|
325
304
|
if (opts.dynamic) features.push(c.cyan('dynamic'));
|
|
326
305
|
if (opts.resume) features.push(c.yellow('resume'));
|
|
327
306
|
if (opts.dryRun) features.push(c.yellow('dry-run'));
|
package/src/phases/services.js
CHANGED
|
@@ -7,6 +7,9 @@ const path = require('path');
|
|
|
7
7
|
const { initFirebase, uploadToStorage, storageExists } = require('../services/firebase');
|
|
8
8
|
const { initGemini, prepareDocsForGemini } = require('../services/gemini');
|
|
9
9
|
|
|
10
|
+
// --- Modes ---
|
|
11
|
+
const { deepSummarize } = require('../modes/deep-summary');
|
|
12
|
+
|
|
10
13
|
// --- Utils ---
|
|
11
14
|
const { parallelMap } = require('../utils/retry');
|
|
12
15
|
|
|
@@ -101,4 +104,61 @@ async function phaseServices(ctx) {
|
|
|
101
104
|
return { ...ctx, storage, firebaseReady, ai, contextDocs, docStorageUrls, callName };
|
|
102
105
|
}
|
|
103
106
|
|
|
104
|
-
|
|
107
|
+
// ======================== PHASE: DEEP SUMMARY ========================

/**
 * Pre-summarize context documents to save input tokens per segment.
 * Runs only when --deep-summary flag is active; otherwise passes ctx
 * through with deepSummaryStats = null.
 *
 * Failures are non-fatal: if deepSummarize throws, the original (full)
 * context docs are kept and a warning is printed.
 *
 * @param {object} ctx - Pipeline context with ai, contextDocs, opts
 * @returns {Promise<object>} Updated ctx with summarized contextDocs and deepSummaryStats
 */
async function phaseDeepSummary(ctx) {
  const log = getLog();
  const { opts, ai, contextDocs } = ctx;

  // Skip unless the feature is on and there is something to summarize.
  if (!opts.deepSummary || !ai || contextDocs.length === 0) {
    return { ...ctx, deepSummaryStats: null };
  }

  console.log('');
  console.log(c.cyan('  ── Deep Summary — Pre-summarizing context documents ──'));
  // Guard every logger call the same way: the original guarded phaseStart/
  // phaseEnd with `if (log && ...)` but called step/metric/warn bare, which
  // crashes when getLog() returns a partial or null logger.
  if (log && log.step) log.step('Deep summary: starting context document pre-summarization');
  if (log && log.phaseStart) log.phaseStart('deep_summary');

  const excludeNames = opts.deepSummaryExclude || [];
  let updatedDocs = contextDocs;
  let deepSummaryStats = null;

  try {
    const result = await deepSummarize(ai, contextDocs, {
      excludeFileNames: excludeNames,
      // Cap the per-batch thinking budget at 8192. `?? 8192` avoids
      // Math.min(8192, undefined) === NaN when opts.thinkingBudget is unset,
      // while still honoring an explicit 0.
      thinkingBudget: Math.min(8192, opts.thinkingBudget ?? 8192),
    });

    updatedDocs = result.docs;
    deepSummaryStats = result.stats;

    if (deepSummaryStats.summarized > 0) {
      console.log(`  ${c.success(`Summarized ${c.highlight(deepSummaryStats.summarized)} doc(s) — saved ~${c.highlight(deepSummaryStats.savedTokens.toLocaleString())} tokens (${c.yellow(deepSummaryStats.savingsPercent + '%')} reduction)`)}`);
      console.log(`  ${c.dim('Original:')} ~${deepSummaryStats.originalTokens.toLocaleString()} tokens → ${c.dim('Condensed:')} ~${deepSummaryStats.summaryTokens.toLocaleString()} tokens`);
      if (deepSummaryStats.keptFull > 0) {
        console.log(`  ${c.dim('Kept full:')} ${deepSummaryStats.keptFull} doc(s) (excluded from summary)`);
      }
      if (log && log.step) log.step(`Deep summary: ${deepSummaryStats.summarized} docs summarized, ${deepSummaryStats.savedTokens} tokens saved (${deepSummaryStats.savingsPercent}%)`);
      if (log && log.metric) log.metric('deep_summary', deepSummaryStats);
    } else {
      console.log(`  ${c.dim('No documents needed summarization')}`);
    }
  } catch (err) {
    // Non-fatal: continue the pipeline with the original full documents.
    console.warn(`  ${c.warn(`Deep summary failed (continuing with full docs): ${err.message}`)}`);
    if (log && log.warn) log.warn(`Deep summary failed: ${err.message}`);
  }

  if (log && log.phaseEnd) log.phaseEnd({ stats: deepSummaryStats });
  console.log('');

  return { ...ctx, contextDocs: updatedDocs, deepSummaryStats };
}
|
|
163
|
+
|
|
164
|
+
module.exports = { phaseServices, phaseDeepSummary };
|
package/src/pipeline.js
CHANGED
|
@@ -32,7 +32,7 @@ const { getLog, isShuttingDown, PKG_ROOT, PROJECT_ROOT } = require('./phases/_sh
|
|
|
32
32
|
// --- Pipeline phases ---
|
|
33
33
|
const phaseInit = require('./phases/init');
|
|
34
34
|
const phaseDiscover = require('./phases/discover');
|
|
35
|
-
const phaseServices
|
|
35
|
+
const { phaseServices, phaseDeepSummary } = require('./phases/services');
|
|
36
36
|
const phaseProcessVideo = require('./phases/process-media');
|
|
37
37
|
const phaseCompile = require('./phases/compile');
|
|
38
38
|
const phaseOutput = require('./phases/output');
|
|
@@ -46,7 +46,7 @@ const phaseDeepDive = require('./phases/deep-dive');
|
|
|
46
46
|
// --- Utils (for run orchestration + alt modes) ---
|
|
47
47
|
const { c } = require('./utils/colors');
|
|
48
48
|
const { findDocsRecursive } = require('./utils/fs');
|
|
49
|
-
const { promptUserText } = require('./utils/cli');
|
|
49
|
+
const { promptUser, promptUserText, selectDocsToExclude } = require('./utils/cli');
|
|
50
50
|
const { createProgressBar } = require('./utils/progress-bar');
|
|
51
51
|
const { buildHealthReport, printHealthDashboard } = require('./utils/health-dashboard');
|
|
52
52
|
const { saveHistory, buildHistoryEntry } = require('./utils/learning-loop');
|
|
@@ -92,9 +92,38 @@ async function run() {
|
|
|
92
92
|
|
|
93
93
|
// Phase 3: Services
|
|
94
94
|
bar.setPhase('services');
|
|
95
|
-
|
|
95
|
+
let fullCtx = await phaseServices(ctx);
|
|
96
96
|
bar.tick('Services ready');
|
|
97
97
|
|
|
98
|
+
// Phase 3.5 (optional): Deep Summary — pre-summarize context docs
|
|
99
|
+
// If user didn't pass --deep-summary but has many context docs, offer it interactively
|
|
100
|
+
if (!fullCtx.opts.deepSummary && process.stdin.isTTY && fullCtx.ai && fullCtx.contextDocs.length >= 3) {
|
|
101
|
+
const inlineDocs = fullCtx.contextDocs.filter(d => d.type === 'inlineText' && d.content);
|
|
102
|
+
const totalChars = inlineDocs.reduce((sum, d) => sum + d.content.length, 0);
|
|
103
|
+
const totalTokensEstimate = Math.ceil(totalChars * 0.3);
|
|
104
|
+
// Only offer when context is large enough to benefit (>100K tokens)
|
|
105
|
+
if (totalTokensEstimate > 100000) {
|
|
106
|
+
console.log('');
|
|
107
|
+
console.log(` ${c.cyan('You have')} ${c.highlight(inlineDocs.length)} ${c.cyan('context docs')} (~${c.highlight((totalTokensEstimate / 1000).toFixed(0) + 'K')} ${c.cyan('tokens)')}`);
|
|
108
|
+
console.log(` ${c.dim('Deep summary can reduce per-segment context by 60-80%, saving time and cost.')}`);
|
|
109
|
+
const wantDeepSummary = await promptUser(` ${c.cyan('Enable deep summary?')} [y/N] `);
|
|
110
|
+
if (wantDeepSummary) {
|
|
111
|
+
fullCtx.opts.deepSummary = true;
|
|
112
|
+
}
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
if (fullCtx.opts.deepSummary && fullCtx.ai && fullCtx.contextDocs.length > 0) {
|
|
117
|
+
// Interactive picker: let user choose docs to keep at full fidelity
|
|
118
|
+
if (process.stdin.isTTY && fullCtx.opts.deepSummaryExclude.length === 0) {
|
|
119
|
+
const excluded = await selectDocsToExclude(fullCtx.contextDocs);
|
|
120
|
+
fullCtx.opts.deepSummaryExclude = excluded;
|
|
121
|
+
}
|
|
122
|
+
bar.setPhase('deep-summary', 1);
|
|
123
|
+
fullCtx = await phaseDeepSummary(fullCtx);
|
|
124
|
+
bar.tick('Docs summarized');
|
|
125
|
+
}
|
|
126
|
+
|
|
98
127
|
// Phase 4: Process each media file (video or audio)
|
|
99
128
|
const allSegmentAnalyses = [];
|
|
100
129
|
const allSegmentReports = [];
|
|
@@ -117,6 +146,7 @@ async function run() {
|
|
|
117
146
|
contextDocuments: fullCtx.contextDocs.map(d => d.fileName),
|
|
118
147
|
documentStorageUrls: fullCtx.docStorageUrls,
|
|
119
148
|
firebaseAuthenticated: fullCtx.firebaseReady,
|
|
149
|
+
deepSummary: fullCtx.deepSummaryStats || null,
|
|
120
150
|
files: [],
|
|
121
151
|
};
|
|
122
152
|
|