task-summary-extractor 8.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ARCHITECTURE.md +605 -0
- package/EXPLORATION.md +451 -0
- package/QUICK_START.md +272 -0
- package/README.md +544 -0
- package/bin/taskex.js +64 -0
- package/package.json +63 -0
- package/process_and_upload.js +107 -0
- package/prompt.json +265 -0
- package/setup.js +505 -0
- package/src/config.js +327 -0
- package/src/logger.js +355 -0
- package/src/pipeline.js +2006 -0
- package/src/renderers/markdown.js +968 -0
- package/src/services/firebase.js +106 -0
- package/src/services/gemini.js +779 -0
- package/src/services/git.js +329 -0
- package/src/services/video.js +305 -0
- package/src/utils/adaptive-budget.js +266 -0
- package/src/utils/change-detector.js +466 -0
- package/src/utils/cli.js +415 -0
- package/src/utils/context-manager.js +499 -0
- package/src/utils/cost-tracker.js +156 -0
- package/src/utils/deep-dive.js +549 -0
- package/src/utils/diff-engine.js +315 -0
- package/src/utils/dynamic-mode.js +567 -0
- package/src/utils/focused-reanalysis.js +317 -0
- package/src/utils/format.js +32 -0
- package/src/utils/fs.js +39 -0
- package/src/utils/global-config.js +315 -0
- package/src/utils/health-dashboard.js +216 -0
- package/src/utils/inject-cli-flags.js +58 -0
- package/src/utils/json-parser.js +245 -0
- package/src/utils/learning-loop.js +301 -0
- package/src/utils/progress-updater.js +451 -0
- package/src/utils/progress.js +166 -0
- package/src/utils/prompt.js +32 -0
- package/src/utils/quality-gate.js +429 -0
- package/src/utils/retry.js +129 -0
|
@@ -0,0 +1,429 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Quality Gate — validates AI analysis output quality and determines
|
|
3
|
+
* whether a segment needs re-analysis.
|
|
4
|
+
*
|
|
5
|
+
* Scoring dimensions:
|
|
6
|
+
* - Structural completeness (required fields present)
|
|
7
|
+
* - Content density (meaningful data extracted)
|
|
8
|
+
* - Parse integrity (JSON parsed successfully, no truncation)
|
|
9
|
+
* - Cross-reference consistency (IDs, timestamps, references make sense)
|
|
10
|
+
*
|
|
11
|
+
* Returns a quality report with a numeric score (0-100) and actionable
|
|
12
|
+
* diagnostics for retry decisions.
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
'use strict';
|
|
16
|
+
|
|
17
|
+
// ======================== QUALITY THRESHOLDS ========================
|
|
18
|
+
|
|
19
|
+
/**
 * Score thresholds governing PASS/WARN/FAIL grading and retry limits.
 * Frozen: this is shared policy — nothing should mutate it at runtime.
 */
const THRESHOLDS = Object.freeze({
  /** Minimum score to PASS without retry (0-100) */
  PASS: 45,
  /** Score range for WARNING — will pass but flag issues (45-65 is typical) */
  WARN: 65,
  /** Maximum retries per segment */
  MAX_RETRIES: 1,
});
|
|
27
|
+
|
|
28
|
+
// Required top-level fields in a valid analysis
|
|
29
|
+
// Required top-level fields in a valid analysis.
// Frozen — shared schema constant; must not be mutated at runtime.
const REQUIRED_FIELDS = Object.freeze([
  'tickets',
  'action_items',
  'change_requests',
  'summary',
]);
|
|
35
|
+
|
|
36
|
+
// Optional but valuable fields (boost score when present)
|
|
37
|
+
// Optional but valuable fields (boost score when present).
// Frozen — shared schema constant; must not be mutated at runtime.
const VALUED_FIELDS = Object.freeze([
  'blockers',
  'scope_changes',
  'file_references',
  'your_tasks',
]);
|
|
43
|
+
|
|
44
|
+
// ======================== SCORING FUNCTIONS ========================
|
|
45
|
+
|
|
46
|
+
/**
 * Score structural completeness: are the required fields present (non-null)?
 * Required fields contribute up to 80 points proportionally; each present
 * optional VALUED_FIELDS entry adds a 3-point bonus (capped at 100 total).
 * @param {object} analysis - Parsed analysis
 * @returns {{ score: number, issues: string[] }} - 0-100 score + issues
 */
function scoreStructure(analysis) {
  if (!analysis || typeof analysis !== 'object') {
    return { score: 0, issues: ['Analysis is null or not an object'] };
  }

  // One issue per absent required field; presence = defined and non-null.
  const issues = REQUIRED_FIELDS
    .filter((field) => analysis[field] === undefined || analysis[field] === null)
    .map((field) => `Missing required field: "${field}"`);
  const presentCount = REQUIRED_FIELDS.length - issues.length;

  // Optional fields sweeten the score but never cause issues (3 pts each).
  const bonus = VALUED_FIELDS
    .filter((field) => analysis[field] !== undefined && analysis[field] !== null)
    .length * 3;

  const baseScore = (presentCount / REQUIRED_FIELDS.length) * 80;
  return { score: Math.min(100, baseScore + bonus), issues };
}
|
|
78
|
+
|
|
79
|
+
/**
 * Score content density: how much meaningful data was extracted?
 * Empty arrays are valid but sparse; rich, well-formed entries earn more.
 * Also scores confidence-field coverage across all extracted items.
 * Point budget (100 total): tickets 25, action items 17, change requests 17,
 * summary 13, your_tasks 13, confidence coverage 15.
 * @param {object} analysis
 * @returns {{ score: number, issues: string[] }}
 */
function scoreDensity(analysis) {
  if (!analysis || typeof analysis !== 'object') {
    return { score: 0, issues: ['No analysis to score'] };
  }

  const issues = [];
  let earned = 0;
  let possible = 0;

  // Shared scoring for list sections: a flat base for having any content,
  // plus a fraction of the "rich" points matching the share of well-formed
  // entries. An empty list earns nothing.
  const listPoints = (list, base, richMax, isRich) => {
    if (list.length === 0) return 0;
    const richCount = list.filter(isRich).length;
    return base + Math.min(richMax, (richCount / Math.max(list.length, 1)) * richMax);
  };

  // Tickets (25 possible)
  possible += 25;
  const tickets = analysis.tickets || [];
  if (tickets.length === 0) {
    issues.push('No tickets extracted — verify if segment discusses tickets');
  } else {
    earned += listPoints(tickets, 12, 13, (t) =>
      t.ticket_id && t.discussed_state && (t.comments?.length > 0 || t.code_changes?.length > 0)
    );
  }

  // Action items (17 possible)
  possible += 17;
  const actions = analysis.action_items || [];
  earned += listPoints(actions, 8, 9, (a) => a.assigned_to && a.description);

  // Change requests (17 possible)
  possible += 17;
  const crs = analysis.change_requests || [];
  earned += listPoints(crs, 8, 9, (cr) => cr.where && cr.what);

  // Summary (13 possible): full credit above 50 chars, partial if non-empty.
  possible += 13;
  const summary = analysis.summary || '';
  if (summary.length > 50) {
    earned += 13;
  } else if (summary.length > 0) {
    earned += 6;
    issues.push('Summary is very short (< 50 chars)');
  } else {
    issues.push('No summary extracted');
  }

  // your_tasks (13 possible): full credit for any task, small credit for an
  // empty-but-present structure, nothing when absent.
  possible += 13;
  const tasks = analysis.your_tasks;
  if (tasks) {
    const taskCount =
      (tasks.tasks_todo?.length || 0) +
      (tasks.tasks_waiting_on_others?.length || 0) +
      (tasks.decisions_needed?.length || 0) +
      (tasks.completed_in_call?.length || 0);
    earned += taskCount > 0 ? 13 : 4;
  }

  // Confidence coverage (15 possible) — reward items carrying valid
  // confidence fields; flag low coverage and suspicious uniformity.
  possible += 15;
  const allItems = [
    ...tickets,
    ...actions,
    ...crs,
    ...(analysis.blockers || []),
    ...(analysis.scope_changes || []),
  ];
  if (allItems.length === 0) {
    earned += 5; // No items to score — neutral
  } else {
    const rated = allItems.filter(
      (item) => item.confidence && ['HIGH', 'MEDIUM', 'LOW'].includes(item.confidence)
    );
    const ratio = rated.length / allItems.length;
    earned += Math.round(ratio * 15);

    if (ratio < 0.5) {
      issues.push(`Low confidence coverage: only ${rated.length}/${allItems.length} items have confidence fields`);
    }

    // Identical confidence on 3+ items suggests the model didn't differentiate.
    if (rated.length >= 3) {
      const distinct = new Set(rated.map((i) => i.confidence));
      if (distinct.size === 1) {
        issues.push(`All items have same confidence (${[...distinct][0]}) — suspicious uniformity`);
      }
    }
  }

  return { score: Math.round((earned / possible) * 100), issues };
}
|
|
188
|
+
|
|
189
|
+
/**
 * Score parse integrity: did the JSON parse cleanly, and was the raw output
 * substantial? A failed parse is an automatic zero; otherwise start at 80 and
 * deduct for truncation and for suspiciously short raw output.
 * @param {object} parseContext - { parseSuccess, rawLength, truncated }
 * @returns {{ score: number, issues: string[] }}
 */
function scoreIntegrity(parseContext) {
  const { parseSuccess, rawLength = 0, truncated = false } = parseContext;

  // Nothing downstream is trustworthy if the output never parsed.
  if (!parseSuccess) {
    return { score: 0, issues: ['JSON parse failed — output could not be parsed'] };
  }

  // Collect (penalty, message) pairs, then apply them against the base of 80.
  const deductions = [];
  if (truncated) {
    deductions.push([30, 'Output was truncated — data may be incomplete']);
  }
  if (rawLength < 500) {
    deductions.push([20, `Raw output very short (${rawLength} chars) — may be minimal`]);
  } else if (rawLength < 2000) {
    deductions.push([10, `Raw output is short (${rawLength} chars)`]);
  }

  const score = deductions.reduce((acc, [penalty]) => acc - penalty, 80);
  return { score: Math.max(0, score), issues: deductions.map(([, message]) => message) };
}
|
|
221
|
+
|
|
222
|
+
/**
 * Score cross-reference consistency within the analysis: unique ticket IDs,
 * unique action-item IDs, and change requests pointing at known tickets.
 * Starts from 100 and accumulates penalties per violation.
 * @param {object} analysis
 * @returns {{ score: number, issues: string[] }}
 */
function scoreCrossReferences(analysis) {
  if (!analysis || typeof analysis !== 'object') {
    return { score: 50, issues: [] }; // neutral if no analysis
  }

  const issues = [];
  let penalty = 0;

  // Ticket IDs must be unique (-15 if any duplicates).
  const tickets = analysis.tickets || [];
  const ticketIds = tickets.map((t) => t.ticket_id).filter(Boolean);
  const knownIds = new Set(ticketIds);
  const dupTickets = ticketIds.length - knownIds.size;
  if (ticketIds.length > 0 && dupTickets > 0) {
    issues.push(`Duplicate ticket IDs found: ${dupTickets} duplicate(s)`);
    penalty += 15;
  }

  // Action item IDs must be unique (-10 if any duplicates).
  const actionIds = (analysis.action_items || []).map((a) => a.id).filter(Boolean);
  const dupActions = actionIds.length - new Set(actionIds).size;
  if (actionIds.length > 0 && dupActions > 0) {
    issues.push(`Duplicate action item IDs: ${dupActions} duplicate(s)`);
    penalty += 10;
  }

  // Change requests should reference tickets we actually extracted (-5 each).
  // Only meaningful when at least one ticket exists.
  for (const cr of analysis.change_requests || []) {
    if (cr.ticket_id && tickets.length > 0 && !knownIds.has(cr.ticket_id)) {
      issues.push(`CR "${cr.id}" references unknown ticket "${cr.ticket_id}"`);
      penalty += 5;
    }
  }

  return { score: Math.max(0, 100 - penalty), issues };
}
|
|
264
|
+
|
|
265
|
+
// ======================== MAIN QUALITY GATE ========================
|
|
266
|
+
|
|
267
|
+
/**
 * Run the full quality gate on a segment analysis.
 *
 * Combines the four scoring dimensions into a weighted composite, tags every
 * issue with its dimension, grades against THRESHOLDS, and (on FAIL) builds
 * retry hints for the re-analysis prompt.
 *
 * @param {object} analysis - The parsed analysis object
 * @param {object} context - Additional context for scoring
 * @param {boolean} context.parseSuccess - Whether JSON parsing succeeded
 * @param {number} context.rawLength - Length of raw AI output
 * @param {boolean} [context.truncated] - Whether output was truncated during repair
 * @param {number} [context.segmentIndex] - Which segment (0-based)
 * @param {number} [context.totalSegments] - Total segments
 * @returns {QualityReport}
 */
function assessQuality(analysis, context = {}) {
  // Dimension weights for the composite score (sum to 1.0).
  const weights = { structure: 0.25, density: 0.35, integrity: 0.25, crossRef: 0.15 };
  // Issue-tag names per dimension (crossRef reports as "consistency").
  const tags = { structure: 'structure', density: 'density', integrity: 'integrity', crossRef: 'consistency' };

  const results = {
    structure: scoreStructure(analysis),
    density: scoreDensity(analysis),
    integrity: scoreIntegrity(context),
    crossRef: scoreCrossReferences(analysis),
  };

  const compositeScore = Math.round(
    Object.entries(weights).reduce((sum, [dim, w]) => sum + results[dim].score * w, 0)
  );

  const allIssues = Object.keys(results).flatMap((dim) =>
    results[dim].issues.map((issue) => `[${tags[dim]}] ${issue}`)
  );

  const grade =
    compositeScore >= THRESHOLDS.WARN ? 'PASS'
    : compositeScore >= THRESHOLDS.PASS ? 'WARN'
    : 'FAIL';
  const failed = grade === 'FAIL';

  return {
    grade,
    score: compositeScore,
    dimensions: {
      structure: { score: results.structure.score, weight: weights.structure },
      density: { score: results.density.score, weight: weights.density },
      integrity: { score: results.integrity.score, weight: weights.integrity },
      crossRef: { score: results.crossRef.score, weight: weights.crossRef },
    },
    issues: allIssues,
    shouldRetry: failed,
    retryHints: failed ? buildRetryHints(analysis, allIssues) : [],
  };
}
|
|
324
|
+
|
|
325
|
+
/**
 * Build retry hints — specific instructions to inject into the retry prompt
 * to address the quality issues found.
 * @param {object} analysis - Unused today; kept for interface stability
 * @param {string[]} issues - Tagged issue strings from assessQuality
 * @returns {string[]} Hints in fixed priority order
 */
function buildRetryHints(analysis, issues) {
  // Ordered (trigger substring → corrective instruction) pairs. A hint is
  // emitted when any reported issue contains its trigger text.
  const rules = [
    ['Missing required field', 'CRITICAL: Your previous response was missing required fields. You MUST include ALL of: tickets, action_items, change_requests, summary. Use empty arrays [] if no items exist.'],
    ['JSON parse failed', 'CRITICAL: Your previous response could not be parsed as JSON. Respond with ONLY valid JSON — no markdown fences, no extra text before or after the JSON object. Start with { and end with }.'],
    ['truncated', 'Your previous response was truncated. Be more concise — use shorter descriptions, fewer comments per ticket (max 3), and compact formatting to fit within the output limit.'],
    ['No tickets extracted', 'Your previous response contained no tickets. Listen carefully to the video — if specific work items, bugs, features, or tasks are discussed, extract them as tickets with IDs.'],
    ['No summary', 'Your previous response was missing a summary. Include a 2-4 sentence executive summary of what was discussed in this segment.'],
    ['very short', 'Your previous response was too brief. Analyze the video more thoroughly — extract ALL tickets, action items, changes discussed, and blockers mentioned.'],
    ['confidence coverage', 'Your previous response was missing confidence fields. Every ticket, action_item, change_request, blocker, and scope_change MUST have "confidence": "HIGH|MEDIUM|LOW" and "confidence_reason" explaining why.'],
    ['suspicious uniformity', 'Your previous response had all items at the same confidence level. Differentiate: use HIGH for items explicitly discussed + corroborated by docs, MEDIUM for partial evidence, LOW for inferred items.'],
  ];

  return rules
    .filter(([trigger]) => issues.some((issue) => issue.includes(trigger)))
    .map(([, hint]) => hint);
}
|
|
369
|
+
|
|
370
|
+
/**
 * Generate a human-readable quality summary line.
 * @param {QualityReport} report
 * @param {string} segmentName - Currently unused; kept for interface stability
 * @returns {string}
 */
function formatQualityLine(report, segmentName) {
  // Grade → status glyph; anything other than PASS/WARN renders as ✗.
  const icons = { PASS: '✓', WARN: '⚠' };
  const icon = icons[report.grade] ?? '✗';
  const { structure, density, integrity, crossRef } = report.dimensions;
  return (
    `  ${icon} Quality: ${report.score}/100 (${report.grade}) — ` +
    `struct:${structure.score} density:${density.score} ` +
    `integrity:${integrity.score} xref:${crossRef.score}`
  );
}
|
|
383
|
+
|
|
384
|
+
/**
 * Extract confidence distribution statistics from an analysis.
 * Items are pooled from tickets, action_items, change_requests, blockers,
 * and scope_changes. Coverage is the percentage of items carrying a valid
 * HIGH/MEDIUM/LOW confidence value (an empty pool counts as full coverage).
 * @param {object} analysis
 * @returns {{ total: number, high: number, medium: number, low: number, missing: number, coverage: number }}
 */
function getConfidenceStats(analysis) {
  if (!analysis || typeof analysis !== 'object') {
    return { total: 0, high: 0, medium: 0, low: 0, missing: 0, coverage: 0 };
  }

  const buckets = ['tickets', 'action_items', 'change_requests', 'blockers', 'scope_changes'];
  const allItems = buckets.flatMap((key) => analysis[key] || []);

  const total = allItems.length;
  if (total === 0) {
    // Nothing to rate — report perfect coverage rather than dividing by zero.
    return { total: 0, high: 0, medium: 0, low: 0, missing: 0, coverage: 1 };
  }

  const counts = { HIGH: 0, MEDIUM: 0, LOW: 0, other: 0 };
  for (const item of allItems) {
    const level = item.confidence;
    if (level === 'HIGH' || level === 'MEDIUM' || level === 'LOW') {
      counts[level] += 1;
    } else {
      counts.other += 1;
    }
  }

  return {
    total,
    high: counts.HIGH,
    medium: counts.MEDIUM,
    low: counts.LOW,
    missing: counts.other,
    coverage: Math.round(((total - counts.other) / total) * 100),
  };
}
|
|
417
|
+
|
|
418
|
+
// Public API: assessQuality is the main entry point; formatQualityLine,
// buildRetryHints, getConfidenceStats, and THRESHOLDS support reporting and
// retry decisions. Individual scorers are exported only for unit tests.
module.exports = {
  assessQuality,
  formatQualityLine,
  buildRetryHints,
  getConfidenceStats,
  THRESHOLDS,
  // Expose for testing
  scoreStructure,
  scoreDensity,
  scoreIntegrity,
  scoreCrossReferences,
};
|
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Retry utility — exponential backoff with jitter for transient failures.
|
|
3
|
+
*
|
|
4
|
+
* Used for Gemini API calls and Firebase operations that may fail
|
|
5
|
+
* due to rate limits, network issues, or temporary outages.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
'use strict';
|
|
9
|
+
|
|
10
|
+
const { MAX_RETRIES, RETRY_BASE_DELAY_MS } = require('../config');
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Known transient error patterns that should be retried.
|
|
14
|
+
*/
|
|
15
|
+
/**
 * Known transient error patterns that should be retried.
 * Matched case-insensitively against "<message> <code>" by isTransientError.
 * Frozen so the shared retry policy cannot be mutated at runtime.
 * Note: none of these carry the /g flag, so RegExp.test is stateless here.
 */
const TRANSIENT_PATTERNS = Object.freeze([
  /429/i, // Rate limited
  /too many requests/i,
  /quota exceeded/i,
  /resource exhausted/i,
  /ECONNRESET/i,
  /ETIMEDOUT/i,
  /ENOTFOUND/i,
  /EPIPE/i,
  /socket hang up/i,
  /network/i,
  /503/i, // Service unavailable
  /502/i, // Bad gateway
  /500/i, // Internal server error (sometimes transient)
  /UNAVAILABLE/i,
  /INTERNAL/i,
  /overloaded/i,
  /capacity/i,
]);
|
|
34
|
+
|
|
35
|
+
/**
 * Determine if an error is likely transient and worth retrying.
 * Checks HTTP status codes first, then matches the error message/code
 * against TRANSIENT_PATTERNS.
 * @param {Error|object|null|undefined} err - Error or error-like rejection value
 * @returns {boolean}
 */
function isTransientError(err) {
  // Fix: promise rejections can surface null/undefined or non-Error values;
  // reading .message off them previously threw. Treat them as non-transient.
  if (!err) return false;

  const msg = err.message || '';
  const code = err.code || '';
  const status = err.status || err.statusCode || 0;

  // HTTP status codes that are transient
  if ([429, 500, 502, 503, 504].includes(status)) return true;

  // Check message against known patterns
  const combined = `${msg} ${code}`;
  return TRANSIENT_PATTERNS.some((p) => p.test(combined));
}
|
|
52
|
+
|
|
53
|
+
/**
 * Execute an async function with exponential backoff retry.
 *
 * Attempts fn() up to maxRetries + 1 times. Between attempts the delay grows
 * as baseDelay * 2^attempt with ±50% jitter, capped at 60s. Non-retryable
 * errors (per shouldRetry) and the final failure are rethrown unchanged.
 *
 * @param {Function} fn - Async function to execute
 * @param {object} [opts]
 * @param {number} [opts.maxRetries] - Max retry attempts (default from config)
 * @param {number} [opts.baseDelay] - Base delay in ms (default from config)
 * @param {string} [opts.label] - Human-readable label for log messages
 * @param {Function} [opts.onRetry] - Called with (attempt, delay, err) before each retry
 * @param {Function} [opts.shouldRetry] - Custom predicate (err) → boolean
 * @returns {Promise<any>} Result of fn()
 */
async function withRetry(fn, opts = {}) {
  const limit = opts.maxRetries ?? MAX_RETRIES;
  const base = opts.baseDelay ?? RETRY_BASE_DELAY_MS;
  const label = opts.label || 'operation';
  const retryable = opts.shouldRetry || isTransientError;
  const notify = opts.onRetry || null;

  for (let attempt = 0; ; attempt++) {
    try {
      return await fn();
    } catch (err) {
      // Out of budget, or a permanent failure — propagate as-is.
      if (attempt >= limit || !retryable(err)) {
        throw err;
      }

      // Exponential backoff with jitter: baseDelay * 2^attempt * (0.5-1.5),
      // capped at one minute.
      const wait = Math.min(base * Math.pow(2, attempt) * (0.5 + Math.random()), 60000);

      if (notify) {
        notify(attempt + 1, wait, err);
      } else {
        const reason = err.message || String(err);
        console.warn(`  ⚠ ${label} failed (attempt ${attempt + 1}/${limit + 1}): ${reason.slice(0, 120)}`);
        console.warn(`    → Retrying in ${(wait / 1000).toFixed(1)}s...`);
      }

      await new Promise((resolve) => setTimeout(resolve, wait));
    }
  }
}
|
|
101
|
+
|
|
102
|
+
/**
 * Run multiple async tasks with a concurrency limit.
 * Spawns up to `concurrency` worker lanes; each lane repeatedly claims the
 * next unprocessed index and awaits fn on it, so at most `concurrency`
 * invocations of fn are in flight at once.
 *
 * @param {Array} items - Items to process
 * @param {Function} fn - Async function (item, index) → result
 * @param {number} [concurrency=3] - Max concurrent tasks
 * @returns {Promise<Array>} Results in original order
 */
async function parallelMap(items, fn, concurrency = 3) {
  const results = new Array(items.length);
  let cursor = 0;

  // Claiming an index (read + increment) is synchronous, so two lanes can
  // never grab the same slot even though each lane awaits between claims.
  const lane = async () => {
    while (cursor < items.length) {
      const i = cursor;
      cursor += 1;
      results[i] = await fn(items[i], i);
    }
  };

  const laneCount = Math.min(concurrency, items.length);
  const lanes = [];
  for (let k = 0; k < laneCount; k++) {
    lanes.push(lane());
  }
  await Promise.all(lanes);
  return results;
}
|
|
128
|
+
|
|
129
|
+
// Public API: backoff retry wrapper, bounded-concurrency map, and the
// transient-error predicate (exported so callers can compose custom policies).
module.exports = { withRetry, parallelMap, isTransientError };
|