@cyber-dash-tech/revela 0.9.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +54 -9
- package/README.zh-CN.md +54 -9
- package/designs/monet/DESIGN.md +9 -9
- package/designs/starter/DESIGN.md +8 -8
- package/designs/summit/DESIGN.md +9 -9
- package/lib/commands/help.ts +2 -0
- package/lib/commands/inspect.ts +23 -0
- package/lib/commands/pdf.ts +33 -5
- package/lib/commands/pptx.ts +14 -9
- package/lib/commands/refine.ts +26 -0
- package/lib/commands/review.ts +8 -2
- package/lib/deck-html/contract.ts +252 -0
- package/lib/decks-state.ts +574 -31
- package/lib/document-materials/extract.ts +20 -0
- package/lib/edit/resolve-deck.ts +13 -2
- package/lib/inspect/open.ts +63 -0
- package/lib/inspect/prompt.ts +32 -0
- package/lib/inspect/request.ts +70 -0
- package/lib/inspect/requests.ts +86 -0
- package/lib/inspect/server.ts +1063 -0
- package/lib/inspect/slide-index.ts +12 -0
- package/lib/inspection-context/compile.ts +346 -0
- package/lib/inspection-context/match.ts +169 -0
- package/lib/inspection-context/project.ts +263 -0
- package/lib/inspection-context/result.ts +160 -0
- package/lib/qa/export-gate.ts +8 -1
- package/lib/refine/open.ts +70 -0
- package/lib/refine/server.ts +1581 -0
- package/lib/workspace-state/actions.ts +71 -0
- package/lib/workspace-state/compat.ts +10 -0
- package/lib/workspace-state/evidence-status.ts +267 -0
- package/lib/workspace-state/graph.ts +426 -0
- package/lib/workspace-state/render-targets.ts +182 -0
- package/lib/workspace-state/rendered-artifacts.ts +43 -0
- package/lib/workspace-state/repository.ts +43 -0
- package/lib/workspace-state/research-attachments.ts +130 -0
- package/lib/workspace-state/review-snapshots.ts +127 -0
- package/lib/workspace-state/types.ts +119 -0
- package/package.json +1 -1
- package/plugin.ts +48 -1
- package/skill/SKILL.md +10 -5
- package/tools/decks.ts +61 -2
- package/tools/inspection-context.ts +22 -0
- package/tools/inspection-result.ts +63 -0
- package/tools/pdf.ts +9 -1
- package/tools/pptx.ts +10 -0
- package/tools/research-save.ts +15 -0
- package/tools/workspace-scan.ts +15 -0
package/lib/decks-state.ts
CHANGED
|
@@ -1,7 +1,24 @@
|
|
|
1
|
-
import { existsSync,
|
|
2
|
-
import {
|
|
3
|
-
|
|
4
|
-
|
|
1
|
+
import { existsSync, readdirSync, readFileSync, statSync } from "fs"
|
|
2
|
+
import { createHash } from "crypto"
|
|
3
|
+
import { basename, join, resolve } from "path"
|
|
4
|
+
import {
|
|
5
|
+
hasWorkspaceState,
|
|
6
|
+
readOrCreateWorkspaceState,
|
|
7
|
+
readWorkspaceState,
|
|
8
|
+
workspaceStatePath,
|
|
9
|
+
writeWorkspaceState,
|
|
10
|
+
} from "./workspace-state/repository"
|
|
11
|
+
import { ensureActiveHtmlDeckRenderTarget } from "./workspace-state/render-targets"
|
|
12
|
+
import {
|
|
13
|
+
activeReviewTargetId,
|
|
14
|
+
appendReviewSnapshot,
|
|
15
|
+
createReviewSnapshot,
|
|
16
|
+
isReviewSnapshotCurrent,
|
|
17
|
+
latestReviewSnapshotForTarget,
|
|
18
|
+
} from "./workspace-state/review-snapshots"
|
|
19
|
+
import { WORKSPACE_STATE_FILE, type RenderTarget, type ReviewSnapshot, type WorkspaceAction } from "./workspace-state/types"
|
|
20
|
+
|
|
21
|
+
export const DECKS_STATE_FILE = WORKSPACE_STATE_FILE
|
|
5
22
|
|
|
6
23
|
export type DeckProductionStatus = "planning" | "blocked" | "ready" | "written"
|
|
7
24
|
export type SlideProductionStatus = "planned" | "ready" | "written" | "qa_passed" | "qa_failed"
|
|
@@ -22,6 +39,9 @@ export interface DecksState {
|
|
|
22
39
|
openQuestions: string[]
|
|
23
40
|
}
|
|
24
41
|
decks: Record<string, DeckSpec>
|
|
42
|
+
actions: WorkspaceAction[]
|
|
43
|
+
renderTargets: RenderTarget[]
|
|
44
|
+
reviews: ReviewSnapshot[]
|
|
25
45
|
}
|
|
26
46
|
|
|
27
47
|
export interface SourceMaterial {
|
|
@@ -161,6 +181,7 @@ export interface DeckStateReadinessResult {
|
|
|
161
181
|
blockers: string[]
|
|
162
182
|
warnings: string[]
|
|
163
183
|
issues: ReadinessIssue[]
|
|
184
|
+
evidenceCandidates?: EvidenceBindingCandidate[]
|
|
164
185
|
}
|
|
165
186
|
|
|
166
187
|
export type ReadinessSeverity = "blocker" | "warning"
|
|
@@ -182,16 +203,76 @@ export interface ReadinessIssue {
|
|
|
182
203
|
slideIndex?: number
|
|
183
204
|
slideTitle?: string
|
|
184
205
|
claimText?: string
|
|
206
|
+
evidenceCandidates?: EvidenceBindingCandidate[]
|
|
207
|
+
evidenceCandidateSearch?: EvidenceCandidateSearchDiagnostic
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
export interface EvidenceBindingCandidate {
|
|
211
|
+
candidateId: string
|
|
212
|
+
slideIndex: number
|
|
213
|
+
slideTitle?: string
|
|
214
|
+
claimText?: string
|
|
215
|
+
source: string
|
|
216
|
+
findingsFile?: string
|
|
217
|
+
sourcePath?: string
|
|
218
|
+
location?: string
|
|
219
|
+
quote?: string
|
|
220
|
+
caveat?: string
|
|
221
|
+
supportScope: string[]
|
|
222
|
+
supportStrength: "partial" | "strong"
|
|
223
|
+
sourceKind?: "researchPlan" | "researchesFallback"
|
|
224
|
+
evidenceDraft?: EvidenceRef
|
|
225
|
+
unsupportedScope?: string[]
|
|
226
|
+
recommendedRewrite?: string
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
export interface EvidenceCandidateSearchDiagnostic {
|
|
230
|
+
queryTokens: string[]
|
|
231
|
+
researchPlanFindingsSearched: string[]
|
|
232
|
+
fallbackResearchFilesSearched: string[]
|
|
233
|
+
fallbackResearchFilesSkipped: string[]
|
|
234
|
+
nearMisses: EvidenceCandidateNearMiss[]
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
export interface EvidenceCandidateNearMiss {
|
|
238
|
+
findingsFile: string
|
|
239
|
+
sourceKind: "researchPlan" | "researchesFallback"
|
|
240
|
+
bestScore: number
|
|
241
|
+
threshold: number
|
|
242
|
+
supportScope: string[]
|
|
243
|
+
quote?: string
|
|
244
|
+
reason: string
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
export interface ApplyEvidenceCandidatesResult {
|
|
248
|
+
applied: AppliedEvidenceCandidate[]
|
|
249
|
+
skipped: SkippedEvidenceCandidate[]
|
|
250
|
+
nextReviewNeeded: boolean
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
export interface AppliedEvidenceCandidate {
|
|
254
|
+
candidateId: string
|
|
255
|
+
slideIndex: number
|
|
256
|
+
evidence: EvidenceRef
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
export interface SkippedEvidenceCandidate {
|
|
260
|
+
candidateId: string
|
|
261
|
+
reason: string
|
|
185
262
|
}
|
|
186
263
|
|
|
187
264
|
const SOURCE_TRACE_ACTION = "Add slide evidence with source plus source trace such as findingsFile or sourcePath, and quote, location, url, or caveat where available; otherwise reframe the claim as an explicit assumption/opinion."
|
|
188
265
|
|
|
266
|
+
export interface ReviewDeckStateOptions {
|
|
267
|
+
workspaceRoot?: string
|
|
268
|
+
}
|
|
269
|
+
|
|
189
270
|
export function decksStatePath(workspaceRoot: string): string {
|
|
190
|
-
return
|
|
271
|
+
return workspaceStatePath(workspaceRoot, DECKS_STATE_FILE)
|
|
191
272
|
}
|
|
192
273
|
|
|
193
274
|
export function hasDecksState(workspaceRoot: string): boolean {
|
|
194
|
-
return
|
|
275
|
+
return hasWorkspaceState(workspaceRoot, DECKS_STATE_FILE)
|
|
195
276
|
}
|
|
196
277
|
|
|
197
278
|
export function createEmptyDecksState(): DecksState {
|
|
@@ -204,6 +285,9 @@ export function createEmptyDecksState(): DecksState {
|
|
|
204
285
|
openQuestions: [],
|
|
205
286
|
},
|
|
206
287
|
decks: {},
|
|
288
|
+
actions: [],
|
|
289
|
+
renderTargets: [],
|
|
290
|
+
reviews: [],
|
|
207
291
|
}
|
|
208
292
|
}
|
|
209
293
|
|
|
@@ -227,6 +311,7 @@ export function normalizeWorkspaceDeckState(state: DecksState, workspaceRoot: st
|
|
|
227
311
|
delete normalized.decks[existingKey]
|
|
228
312
|
normalized.decks[slug] = deck
|
|
229
313
|
normalized.activeDeck = slug
|
|
314
|
+
ensureActiveHtmlDeckRenderTarget(normalized)
|
|
230
315
|
return normalized
|
|
231
316
|
}
|
|
232
317
|
|
|
@@ -264,21 +349,15 @@ export function createDeckSpec(input: Partial<DeckSpec> & { slug: string }): Dec
|
|
|
264
349
|
}
|
|
265
350
|
|
|
266
351
|
export function readDecksState(workspaceRoot: string): DecksState {
|
|
267
|
-
|
|
268
|
-
return normalizeDecksState(parsed)
|
|
352
|
+
return readWorkspaceState(workspaceRoot, { fileName: DECKS_STATE_FILE, normalize: normalizeDecksState })
|
|
269
353
|
}
|
|
270
354
|
|
|
271
355
|
export function writeDecksState(workspaceRoot: string, state: DecksState): void {
|
|
272
|
-
|
|
273
|
-
mkdirSync(dirname(filePath), { recursive: true })
|
|
274
|
-
writeFileSync(filePath, JSON.stringify(normalizeDecksState(state), null, 2) + "\n", "utf-8")
|
|
356
|
+
writeWorkspaceState(workspaceRoot, state, { fileName: DECKS_STATE_FILE, normalize: normalizeDecksState })
|
|
275
357
|
}
|
|
276
358
|
|
|
277
359
|
export function readOrCreateDecksState(workspaceRoot: string): DecksState {
|
|
278
|
-
|
|
279
|
-
const state = createEmptyDecksState()
|
|
280
|
-
writeDecksState(workspaceRoot, state)
|
|
281
|
-
return state
|
|
360
|
+
return readOrCreateWorkspaceState(workspaceRoot, createEmptyDecksState, { fileName: DECKS_STATE_FILE, normalize: normalizeDecksState })
|
|
282
361
|
}
|
|
283
362
|
|
|
284
363
|
export function upsertDeck(state: DecksState, input: Partial<DeckSpec> & { slug: string }): DecksState {
|
|
@@ -292,6 +371,7 @@ export function upsertDeck(state: DecksState, input: Partial<DeckSpec> & { slug:
|
|
|
292
371
|
const next = createDeckSpec({ ...existing, ...input, slug })
|
|
293
372
|
normalized.decks[slug] = next
|
|
294
373
|
normalized.activeDeck = slug
|
|
374
|
+
ensureActiveHtmlDeckRenderTarget(normalized)
|
|
295
375
|
return normalized
|
|
296
376
|
}
|
|
297
377
|
|
|
@@ -308,10 +388,69 @@ export function upsertSlides(state: DecksState, slug: string, slides: SlideSpec[
|
|
|
308
388
|
deck.slides = [...byIndex.values()].sort((a, b) => a.index - b.index)
|
|
309
389
|
normalized.decks[key] = deck
|
|
310
390
|
normalized.activeDeck = key
|
|
391
|
+
ensureActiveHtmlDeckRenderTarget(normalized)
|
|
311
392
|
return normalized
|
|
312
393
|
}
|
|
313
394
|
|
|
314
|
-
export function
|
|
395
|
+
export function applyEvidenceCandidates(state: DecksState, candidateIds: string[], options: ReviewDeckStateOptions = {}): { state: DecksState; result: ApplyEvidenceCandidatesResult } {
|
|
396
|
+
const normalized = normalizeDecksState(state)
|
|
397
|
+
const ids = [...new Set(candidateIds.map((id) => id.trim()).filter(Boolean))]
|
|
398
|
+
const applied: AppliedEvidenceCandidate[] = []
|
|
399
|
+
const skipped: SkippedEvidenceCandidate[] = []
|
|
400
|
+
const key = currentDeckKey(normalized)
|
|
401
|
+
const deck = key ? normalized.decks[key] : undefined
|
|
402
|
+
|
|
403
|
+
if (!deck) {
|
|
404
|
+
return {
|
|
405
|
+
state: normalized,
|
|
406
|
+
result: {
|
|
407
|
+
applied,
|
|
408
|
+
skipped: ids.map((candidateId) => ({ candidateId, reason: `No active deck exists in ${DECKS_STATE_FILE}.` })),
|
|
409
|
+
nextReviewNeeded: false,
|
|
410
|
+
},
|
|
411
|
+
}
|
|
412
|
+
}
|
|
413
|
+
|
|
414
|
+
const review = reviewDeckState(normalized, deck.slug, options)
|
|
415
|
+
const byId = new Map((review.result.evidenceCandidates ?? []).map((candidate) => [candidate.candidateId, candidate]))
|
|
416
|
+
const next = normalizeDecksState(review.state)
|
|
417
|
+
const nextDeck = next.decks[deck.slug]
|
|
418
|
+
|
|
419
|
+
for (const candidateId of ids) {
|
|
420
|
+
const candidate = byId.get(candidateId)
|
|
421
|
+
if (!candidate) {
|
|
422
|
+
skipped.push({ candidateId, reason: "Candidate was not found in the current review result." })
|
|
423
|
+
continue
|
|
424
|
+
}
|
|
425
|
+
if (!candidate.evidenceDraft) {
|
|
426
|
+
skipped.push({ candidateId, reason: "Candidate has no evidenceDraft to apply." })
|
|
427
|
+
continue
|
|
428
|
+
}
|
|
429
|
+
const slide = nextDeck.slides.find((item) => item.index === candidate.slideIndex)
|
|
430
|
+
if (!slide) {
|
|
431
|
+
skipped.push({ candidateId, reason: `Slide ${candidate.slideIndex} no longer exists.` })
|
|
432
|
+
continue
|
|
433
|
+
}
|
|
434
|
+
const evidence = cleanEvidenceRef(candidate.evidenceDraft)
|
|
435
|
+
if (slide.evidence.some((item) => sameEvidenceRef(item, evidence))) {
|
|
436
|
+
skipped.push({ candidateId, reason: `Slide ${candidate.slideIndex} already has this evidence record.` })
|
|
437
|
+
continue
|
|
438
|
+
}
|
|
439
|
+
slide.evidence.push(evidence)
|
|
440
|
+
applied.push({ candidateId, slideIndex: candidate.slideIndex, evidence })
|
|
441
|
+
}
|
|
442
|
+
|
|
443
|
+
return {
|
|
444
|
+
state: next,
|
|
445
|
+
result: {
|
|
446
|
+
applied,
|
|
447
|
+
skipped,
|
|
448
|
+
nextReviewNeeded: applied.length > 0,
|
|
449
|
+
},
|
|
450
|
+
}
|
|
451
|
+
}
|
|
452
|
+
|
|
453
|
+
export function reviewDeckState(state: DecksState, slug?: string, options: ReviewDeckStateOptions = {}): { state: DecksState; result: DeckStateReadinessResult } {
|
|
315
454
|
const normalized = normalizeDecksState(state)
|
|
316
455
|
const key = normalizeSlug(slug || currentDeckKey(normalized) || "")
|
|
317
456
|
const deck = key ? normalized.decks[key] : undefined
|
|
@@ -335,28 +474,33 @@ export function reviewDeckState(state: DecksState, slug?: string): { state: Deck
|
|
|
335
474
|
}
|
|
336
475
|
}
|
|
337
476
|
|
|
338
|
-
const issues = computeDeckReadinessIssues(deck, normalized.workspace)
|
|
477
|
+
const issues = computeDeckReadinessIssues(deck, normalized.workspace, options)
|
|
339
478
|
const blockers = issues.filter((issue) => issue.severity === "blocker").map((issue) => issue.message)
|
|
340
479
|
const warnings = issues.filter((issue) => issue.severity === "warning").map((issue) => issue.message)
|
|
480
|
+
const evidenceCandidates = issues.flatMap((issue) => issue.evidenceCandidates ?? [])
|
|
481
|
+
const reviewedAt = new Date().toISOString()
|
|
341
482
|
deck.writeReadiness = {
|
|
342
483
|
status: blockers.length === 0 ? "ready" : "blocked",
|
|
343
484
|
blockers,
|
|
344
|
-
lastReviewedAt:
|
|
485
|
+
lastReviewedAt: reviewedAt,
|
|
345
486
|
}
|
|
346
487
|
deck.status = blockers.length === 0 ? "ready" : "blocked"
|
|
347
488
|
normalized.decks[deck.slug] = deck
|
|
348
489
|
normalized.activeDeck = deck.slug
|
|
490
|
+
const result: DeckStateReadinessResult = {
|
|
491
|
+
ready: blockers.length === 0,
|
|
492
|
+
slug: deck.slug,
|
|
493
|
+
status: deck.writeReadiness.status,
|
|
494
|
+
blocker: blockers.join("; "),
|
|
495
|
+
blockers,
|
|
496
|
+
warnings,
|
|
497
|
+
issues,
|
|
498
|
+
evidenceCandidates,
|
|
499
|
+
}
|
|
500
|
+
appendReviewSnapshot(normalized, createReviewSnapshot(normalized, { slug: deck.slug, result, reviewedAt }))
|
|
349
501
|
return {
|
|
350
502
|
state: normalized,
|
|
351
|
-
result
|
|
352
|
-
ready: blockers.length === 0,
|
|
353
|
-
slug: deck.slug,
|
|
354
|
-
status: deck.writeReadiness.status,
|
|
355
|
-
blocker: blockers.join("; "),
|
|
356
|
-
blockers,
|
|
357
|
-
warnings,
|
|
358
|
-
issues,
|
|
359
|
-
},
|
|
503
|
+
result,
|
|
360
504
|
}
|
|
361
505
|
}
|
|
362
506
|
|
|
@@ -420,6 +564,38 @@ export function evaluateDeckStateWriteReadiness(state: DecksState, filePath: str
|
|
|
420
564
|
suggestedAction: "Resolve the stored writeReadiness blockers and rerun /revela review.",
|
|
421
565
|
})
|
|
422
566
|
}
|
|
567
|
+
if (normalized.reviews.length > 0) {
|
|
568
|
+
const targetId = activeReviewTargetId(normalized)
|
|
569
|
+
const snapshot = latestReviewSnapshotForTarget(normalized, targetId)
|
|
570
|
+
if (!snapshot) {
|
|
571
|
+
const message = "No review snapshot exists for the active HTML render target"
|
|
572
|
+
blockers.unshift(message)
|
|
573
|
+
issues.unshift({
|
|
574
|
+
type: "missing_slide_spec",
|
|
575
|
+
severity: "blocker",
|
|
576
|
+
message,
|
|
577
|
+
suggestedAction: "Run /revela review so readiness is recorded against the current active render target.",
|
|
578
|
+
})
|
|
579
|
+
} else if (!isReviewSnapshotCurrent(normalized, snapshot, deck.slug)) {
|
|
580
|
+
const message = "Latest review snapshot is stale for the current deck, sources, evidence, narrative state, or render target"
|
|
581
|
+
blockers.unshift(message)
|
|
582
|
+
issues.unshift({
|
|
583
|
+
type: "missing_slide_spec",
|
|
584
|
+
severity: "blocker",
|
|
585
|
+
message,
|
|
586
|
+
suggestedAction: "Run /revela review again after the latest state changes before writing deck HTML.",
|
|
587
|
+
})
|
|
588
|
+
} else if (snapshot.status !== "ready") {
|
|
589
|
+
const message = `Latest review snapshot is ${snapshot.status}, not ready`
|
|
590
|
+
blockers.unshift(message)
|
|
591
|
+
issues.unshift({
|
|
592
|
+
type: "missing_slide_spec",
|
|
593
|
+
severity: "blocker",
|
|
594
|
+
message,
|
|
595
|
+
suggestedAction: "Resolve review blockers and rerun /revela review before writing deck HTML.",
|
|
596
|
+
})
|
|
597
|
+
}
|
|
598
|
+
}
|
|
423
599
|
|
|
424
600
|
return {
|
|
425
601
|
ready: blockers.length === 0,
|
|
@@ -457,10 +633,12 @@ export function buildDecksStatePromptLayer(workspaceRoot: string, maxChars = 140
|
|
|
457
633
|
activeDeck: activeKey,
|
|
458
634
|
workspace: compactWorkspaceForPrompt(state.workspace),
|
|
459
635
|
deck: active ? compactDeckForPrompt(active) : undefined,
|
|
636
|
+
renderTargets: state.renderTargets,
|
|
637
|
+
reviews: compactReviewsForPrompt(state.reviews),
|
|
460
638
|
}
|
|
461
639
|
let text = JSON.stringify(compact, null, 2)
|
|
462
640
|
if (text.length > maxChars) text = text.slice(0, maxChars).trimEnd() + "\n[DECKS.json state truncated for prompt size.]"
|
|
463
|
-
return `---\n\n# Revela Workspace State From ${DECKS_STATE_FILE}\n\n\`\`\`json\n${text}\n\`\`\`\n\nRules for this state layer:\n- Treat ${DECKS_STATE_FILE} as the source of truth for the single current deck's specs, slide plan, and write readiness.\n- The decks map is compatibility storage; operate only on the current workspace deck.\n- Do not edit ${DECKS_STATE_FILE} directly; use the revela-decks tool.\n- Before writing decks/*.html, the current deck must have writeReadiness.status=ready and a complete slide spec, and its outputPath must match the target file.`
|
|
641
|
+
return `---\n\n# Revela Workspace State From ${DECKS_STATE_FILE}\n\n\`\`\`json\n${text}\n\`\`\`\n\nRules for this state layer:\n- Treat ${DECKS_STATE_FILE} as the source of truth for the single current deck's specs, slide plan, evidence, render targets, and write readiness.\n- The decks map is compatibility storage; operate only on the current workspace deck.\n- ${DECKS_STATE_FILE} deck slides use 1-based \`slides[].index\` values. Render every HTML \`<section class="slide">\` with a matching 1-based \`data-slide-index\` attribute, and do not use 0-based \`data-index\` as slide identity.\n- The active HTML deck is represented as a \`renderTarget\` of type \`html_deck\`; PDF/PPTX exports should be recorded as derived render targets, not as separate deck specs.\n- \`writeReadiness\` is a compatibility projection. When review snapshots exist, deck HTML writes require a current non-stale ready review snapshot for the active HTML render target.\n- Do not edit ${DECKS_STATE_FILE} directly; use the revela-decks tool.\n- Before writing decks/*.html, the current deck must have writeReadiness.status=ready and a complete slide spec, and its outputPath must match the target file.`
|
|
464
642
|
}
|
|
465
643
|
|
|
466
644
|
function compactWorkspaceForPrompt(workspace: DecksState["workspace"]): DecksState["workspace"] {
|
|
@@ -493,6 +671,20 @@ function compactDeckForPrompt(deck: DeckSpec): DeckSpec {
|
|
|
493
671
|
}
|
|
494
672
|
}
|
|
495
673
|
|
|
674
|
+
function compactReviewsForPrompt(reviews: ReviewSnapshot[]): ReviewSnapshot[] {
|
|
675
|
+
return reviews.slice(-5).map((review) => ({
|
|
676
|
+
id: review.id,
|
|
677
|
+
targetId: review.targetId,
|
|
678
|
+
inputHash: review.inputHash,
|
|
679
|
+
status: review.status,
|
|
680
|
+
blockers: review.blockers.slice(0, 5),
|
|
681
|
+
warnings: review.warnings.slice(0, 5),
|
|
682
|
+
issues: review.issues.slice(0, 10),
|
|
683
|
+
evidenceCandidates: review.evidenceCandidates?.slice(0, 10),
|
|
684
|
+
reviewedAt: review.reviewedAt,
|
|
685
|
+
}))
|
|
686
|
+
}
|
|
687
|
+
|
|
496
688
|
function compactNarrativeBriefForPrompt(brief: NarrativeBrief | undefined): NarrativeBrief | undefined {
|
|
497
689
|
if (!brief) return undefined
|
|
498
690
|
return {
|
|
@@ -536,6 +728,9 @@ function normalizeDecksState(input: DecksState): DecksState {
|
|
|
536
728
|
openQuestions: input.workspace?.openQuestions ?? [],
|
|
537
729
|
},
|
|
538
730
|
decks: {},
|
|
731
|
+
actions: input.actions ?? [],
|
|
732
|
+
renderTargets: input.renderTargets ?? [],
|
|
733
|
+
reviews: input.reviews ?? [],
|
|
539
734
|
}
|
|
540
735
|
for (const [slug, deck] of Object.entries(input.decks ?? {})) {
|
|
541
736
|
const normalizedSlug = normalizeSlug(deck.slug || slug)
|
|
@@ -546,6 +741,7 @@ function normalizeDecksState(input: DecksState): DecksState {
|
|
|
546
741
|
const keys = Object.keys(state.decks)
|
|
547
742
|
if (keys.length === 1) state.activeDeck = keys[0]
|
|
548
743
|
}
|
|
744
|
+
ensureActiveHtmlDeckRenderTarget(state)
|
|
549
745
|
return state
|
|
550
746
|
}
|
|
551
747
|
|
|
@@ -562,7 +758,7 @@ function currentDeckBlocker(state: DecksState): string {
|
|
|
562
758
|
return `${DECKS_STATE_FILE} contains multiple deck records and no activeDeck. Select one current deck explicitly or move extra decks to separate workspaces.`
|
|
563
759
|
}
|
|
564
760
|
|
|
565
|
-
function computeDeckReadinessIssues(deck: DeckSpec, workspace: DecksState["workspace"]): ReadinessIssue[] {
|
|
761
|
+
function computeDeckReadinessIssues(deck: DeckSpec, workspace: DecksState["workspace"], options: ReviewDeckStateOptions = {}): ReadinessIssue[] {
|
|
566
762
|
const issues: ReadinessIssue[] = []
|
|
567
763
|
if (!deck.goal.trim()) issues.push(blockerIssue("missing_slide_spec", "Deck goal is missing", "Set the deck goal through revela-decks upsertDeck."))
|
|
568
764
|
if (!isDeckHtmlPath(deck.outputPath)) {
|
|
@@ -592,12 +788,18 @@ function computeDeckReadinessIssues(deck: DeckSpec, workspace: DecksState["works
|
|
|
592
788
|
if (!hasSlideContent(slide)) issues.push(blockerIssue("missing_slide_spec", `Slide ${slide.index} content is missing`, "Add structured headline/body/bullets/data content to the slide spec.", slideRef))
|
|
593
789
|
|
|
594
790
|
const claim = findEvidenceSensitiveClaim(slide)
|
|
595
|
-
if (claim && slide.evidence.length === 0) {
|
|
791
|
+
if (claim && slide.evidence.length === 0 && !isNavigationSlide(slide)) {
|
|
792
|
+
const { candidates: evidenceCandidates, search: evidenceCandidateSearch } = findEvidenceBindingCandidates(deck, slide, claim, options)
|
|
596
793
|
issues.push(blockerIssue(
|
|
597
794
|
"missing_evidence",
|
|
598
795
|
`Slide ${slide.index} has an evidence-sensitive claim without evidence: ${claim}`,
|
|
599
796
|
SOURCE_TRACE_ACTION,
|
|
600
|
-
{
|
|
797
|
+
{
|
|
798
|
+
...slideRef,
|
|
799
|
+
claimText: claim,
|
|
800
|
+
evidenceCandidates: evidenceCandidates.length > 0 ? evidenceCandidates : undefined,
|
|
801
|
+
evidenceCandidateSearch,
|
|
802
|
+
},
|
|
601
803
|
))
|
|
602
804
|
} else if (claim && slide.evidence.some((item) => !hasEvidenceDetail(item))) {
|
|
603
805
|
issues.push(warningIssue(
|
|
@@ -624,6 +826,7 @@ function computeDeckReadinessIssues(deck: DeckSpec, workspace: DecksState["works
|
|
|
624
826
|
const hasNeededResearch = deck.researchPlan.some((axis) => axis.needed && axis.status !== "skipped")
|
|
625
827
|
for (const material of workspace.sourceMaterials ?? []) {
|
|
626
828
|
if (material.status !== "discovered") continue
|
|
829
|
+
if (isIgnorableSourceMaterial(material.path)) continue
|
|
627
830
|
const message = `Source material ${material.path} has been identified but not extracted, summarized, or researched`
|
|
628
831
|
if (hasNeededResearch) {
|
|
629
832
|
issues.push(blockerIssue(
|
|
@@ -643,6 +846,297 @@ function computeDeckReadinessIssues(deck: DeckSpec, workspace: DecksState["works
|
|
|
643
846
|
return issues
|
|
644
847
|
}
|
|
645
848
|
|
|
849
|
+
function findEvidenceBindingCandidates(deck: DeckSpec, slide: SlideSpec, claimText: string, options: ReviewDeckStateOptions): { candidates: EvidenceBindingCandidate[]; search?: EvidenceCandidateSearchDiagnostic } {
|
|
850
|
+
if (!options.workspaceRoot) return { candidates: [] }
|
|
851
|
+
const queryText = slideSearchText(slide)
|
|
852
|
+
const queryTokens = meaningfulTokens(queryText)
|
|
853
|
+
if (queryTokens.length === 0) return { candidates: [] }
|
|
854
|
+
|
|
855
|
+
const candidates: EvidenceBindingCandidate[] = []
|
|
856
|
+
const search: EvidenceCandidateSearchDiagnostic = {
|
|
857
|
+
queryTokens,
|
|
858
|
+
researchPlanFindingsSearched: [],
|
|
859
|
+
fallbackResearchFilesSearched: [],
|
|
860
|
+
fallbackResearchFilesSkipped: [],
|
|
861
|
+
nearMisses: [],
|
|
862
|
+
}
|
|
863
|
+
const planFindings = new Set<string>()
|
|
864
|
+
for (const axis of deck.researchPlan) {
|
|
865
|
+
if (!axis.needed || (axis.status !== "done" && axis.status !== "read") || !axis.findingsFile?.trim()) continue
|
|
866
|
+
const normalizedFindingsFile = normalizePath(axis.findingsFile)
|
|
867
|
+
planFindings.add(normalizedFindingsFile)
|
|
868
|
+
search.researchPlanFindingsSearched.push(normalizedFindingsFile)
|
|
869
|
+
const findingsPath = safeWorkspacePath(options.workspaceRoot, axis.findingsFile)
|
|
870
|
+
if (!findingsPath || !existsSync(findingsPath)) continue
|
|
871
|
+
const text = readTextPrefix(findingsPath, 100_000)
|
|
872
|
+
if (!text.trim()) continue
|
|
873
|
+
const result = candidateFromFindingsFile({
|
|
874
|
+
slide,
|
|
875
|
+
claimText,
|
|
876
|
+
queryTokens,
|
|
877
|
+
findingsFile: normalizedFindingsFile,
|
|
878
|
+
text,
|
|
879
|
+
sourceKind: "researchPlan",
|
|
880
|
+
})
|
|
881
|
+
if (result.candidate) candidates.push(result.candidate)
|
|
882
|
+
else if (result.nearMiss) search.nearMisses.push(result.nearMiss)
|
|
883
|
+
}
|
|
884
|
+
|
|
885
|
+
if (candidates.length === 0) {
|
|
886
|
+
for (const findingsFile of listWorkspaceResearchFindings(options.workspaceRoot, planFindings)) {
|
|
887
|
+
search.fallbackResearchFilesSearched.push(findingsFile)
|
|
888
|
+
const findingsPath = safeWorkspacePath(options.workspaceRoot, findingsFile)
|
|
889
|
+
if (!findingsPath || !existsSync(findingsPath)) {
|
|
890
|
+
search.fallbackResearchFilesSkipped.push(findingsFile)
|
|
891
|
+
continue
|
|
892
|
+
}
|
|
893
|
+
const text = readTextPrefix(findingsPath, 100_000)
|
|
894
|
+
if (!text.trim()) {
|
|
895
|
+
search.fallbackResearchFilesSkipped.push(findingsFile)
|
|
896
|
+
continue
|
|
897
|
+
}
|
|
898
|
+
const result = candidateFromFindingsFile({
|
|
899
|
+
slide,
|
|
900
|
+
claimText,
|
|
901
|
+
queryTokens,
|
|
902
|
+
findingsFile,
|
|
903
|
+
text,
|
|
904
|
+
sourceKind: "researchesFallback",
|
|
905
|
+
})
|
|
906
|
+
if (result.candidate) candidates.push(result.candidate)
|
|
907
|
+
else if (result.nearMiss) search.nearMisses.push(result.nearMiss)
|
|
908
|
+
}
|
|
909
|
+
}
|
|
910
|
+
|
|
911
|
+
search.nearMisses = search.nearMisses
|
|
912
|
+
.sort((a, b) => b.bestScore - a.bestScore)
|
|
913
|
+
.slice(0, 5)
|
|
914
|
+
return {
|
|
915
|
+
candidates: candidates
|
|
916
|
+
.sort((a, b) => b.supportScope.length - a.supportScope.length)
|
|
917
|
+
.slice(0, 3),
|
|
918
|
+
search,
|
|
919
|
+
}
|
|
920
|
+
}
|
|
921
|
+
|
|
922
|
+
function candidateFromFindingsFile({
|
|
923
|
+
slide,
|
|
924
|
+
claimText,
|
|
925
|
+
queryTokens,
|
|
926
|
+
findingsFile,
|
|
927
|
+
text,
|
|
928
|
+
sourceKind,
|
|
929
|
+
}: {
|
|
930
|
+
slide: SlideSpec
|
|
931
|
+
claimText: string
|
|
932
|
+
queryTokens: string[]
|
|
933
|
+
findingsFile: string
|
|
934
|
+
text: string
|
|
935
|
+
sourceKind: "researchPlan" | "researchesFallback"
|
|
936
|
+
}): { candidate?: EvidenceBindingCandidate; nearMiss?: EvidenceCandidateNearMiss } {
|
|
937
|
+
const lines = extractFindingsLines(text)
|
|
938
|
+
let best: { line: string; scope: string[]; score: number } | undefined
|
|
939
|
+
for (const line of lines) {
|
|
940
|
+
const normalizedLine = line.toLowerCase()
|
|
941
|
+
const scope = queryTokens.filter((token) => normalizedLine.includes(token))
|
|
942
|
+
const phraseScore = importantPhrases(slide).filter((phrase) => normalizedLine.includes(phrase)).length * 2
|
|
943
|
+
const score = scope.length + phraseScore
|
|
944
|
+
if (!best || score > best.score) best = { line, scope, score }
|
|
945
|
+
}
|
|
946
|
+
if (!best || best.score <= 0) return {}
|
|
947
|
+
|
|
948
|
+
const threshold = 2
|
|
949
|
+
const supportScope = [...new Set(best.scope)].slice(0, 8)
|
|
950
|
+
if (best.score < threshold) {
|
|
951
|
+
return {
|
|
952
|
+
nearMiss: {
|
|
953
|
+
findingsFile,
|
|
954
|
+
sourceKind,
|
|
955
|
+
bestScore: best.score,
|
|
956
|
+
threshold,
|
|
957
|
+
supportScope,
|
|
958
|
+
quote: best.line,
|
|
959
|
+
reason: `Best matching line scored ${best.score}, below binding threshold ${threshold}.`,
|
|
960
|
+
},
|
|
961
|
+
}
|
|
962
|
+
}
|
|
963
|
+
|
|
964
|
+
const sourcePath = extractSourcePath(text)
|
|
965
|
+
const coverage = supportScope.length / Math.max(1, queryTokens.length)
|
|
966
|
+
const supportStrength = best.score >= Math.min(5, Math.max(3, queryTokens.length)) && coverage >= 0.5 ? "strong" : "partial"
|
|
967
|
+
const unsupportedScope = unsupportedClaimScope(slide, best.line).slice(0, 5)
|
|
968
|
+
const caveats = []
|
|
969
|
+
if (supportStrength === "partial") {
|
|
970
|
+
caveats.push("Candidate support is partial. Bind only the matched claim scope; do not use it to support unrelated future-state or recommendation claims on the same slide.")
|
|
971
|
+
}
|
|
972
|
+
if (sourceKind === "researchesFallback") {
|
|
973
|
+
caveats.push("Candidate was discovered from researches/ fallback and is not referenced by researchPlan; confirm relevance before binding it into slide evidence.")
|
|
974
|
+
}
|
|
975
|
+
if (unsupportedScope.length > 0) {
|
|
976
|
+
caveats.push(`Unsupported claim scope: ${unsupportedScope.join("; ")}.`)
|
|
977
|
+
}
|
|
978
|
+
const caveat = caveats.length > 0 ? caveats.join(" ") : undefined
|
|
979
|
+
const evidenceDraft: EvidenceRef = {
|
|
980
|
+
source: sourcePath || findingsFile,
|
|
981
|
+
findingsFile,
|
|
982
|
+
sourcePath,
|
|
983
|
+
location: "research findings excerpt",
|
|
984
|
+
quote: best.line,
|
|
985
|
+
caveat,
|
|
986
|
+
}
|
|
987
|
+
return {
|
|
988
|
+
candidate: {
|
|
989
|
+
candidateId: evidenceCandidateId(slide.index, findingsFile, best.line, supportScope),
|
|
990
|
+
slideIndex: slide.index,
|
|
991
|
+
slideTitle: slide.title,
|
|
992
|
+
claimText,
|
|
993
|
+
source: sourcePath || findingsFile,
|
|
994
|
+
findingsFile,
|
|
995
|
+
sourcePath,
|
|
996
|
+
location: "research findings excerpt",
|
|
997
|
+
quote: best.line,
|
|
998
|
+
caveat,
|
|
999
|
+
supportScope,
|
|
1000
|
+
supportStrength,
|
|
1001
|
+
sourceKind,
|
|
1002
|
+
evidenceDraft,
|
|
1003
|
+
unsupportedScope,
|
|
1004
|
+
recommendedRewrite: recommendedEvidenceRewrite(supportScope, unsupportedScope),
|
|
1005
|
+
},
|
|
1006
|
+
}
|
|
1007
|
+
}
|
|
1008
|
+
|
|
1009
|
+
function unsupportedClaimScope(slide: SlideSpec, supportedLine: string): string[] {
|
|
1010
|
+
const normalizedLine = supportedLine.toLowerCase()
|
|
1011
|
+
const phrases = [slide.purpose, slide.content?.headline, ...(slide.content?.bullets ?? [])]
|
|
1012
|
+
.map((item) => cleanMarkdownText(item ?? ""))
|
|
1013
|
+
.filter((item) => item.length >= 8)
|
|
1014
|
+
|
|
1015
|
+
return [...new Set(phrases.filter((phrase) => {
|
|
1016
|
+
const normalizedPhrase = phrase.toLowerCase()
|
|
1017
|
+
return FUTURE_STATE_SCOPE_PATTERN.test(normalizedPhrase) && !normalizedLine.includes(normalizedPhrase)
|
|
1018
|
+
}))]
|
|
1019
|
+
}
|
|
1020
|
+
|
|
1021
|
+
function recommendedEvidenceRewrite(supportScope: string[], unsupportedScope: string[]): string | undefined {
|
|
1022
|
+
if (unsupportedScope.length === 0) return undefined
|
|
1023
|
+
const supported = supportScope.length > 0 ? supportScope.join(", ") : "the quoted current-state support"
|
|
1024
|
+
return `Bind this evidence only to the supported scope (${supported}). Reframe unsupported scope as internal synthesis, target-state hypothesis, or a separately sourced claim: ${unsupportedScope.join("; ")}.`
|
|
1025
|
+
}
|
|
1026
|
+
|
|
1027
|
+
function evidenceCandidateId(slideIndex: number, findingsFile: string, quote: string, supportScope: string[]): string {
|
|
1028
|
+
const hash = createHash("sha1")
|
|
1029
|
+
.update(JSON.stringify({ slideIndex, findingsFile, quote, supportScope }))
|
|
1030
|
+
.digest("hex")
|
|
1031
|
+
.slice(0, 8)
|
|
1032
|
+
return `s${slideIndex}-${hash}`
|
|
1033
|
+
}
|
|
1034
|
+
|
|
1035
|
+
function cleanEvidenceRef(evidence: EvidenceRef): EvidenceRef {
|
|
1036
|
+
const cleaned: EvidenceRef = { source: cleanMarkdownText(evidence.source) }
|
|
1037
|
+
for (const key of ["quote", "page", "url", "sourcePath", "location", "findingsFile", "caveat", "extractedTextPath", "extractedManifestPath"] as const) {
|
|
1038
|
+
const value = cleanOptionalText(evidence[key])
|
|
1039
|
+
if (value) cleaned[key] = value
|
|
1040
|
+
}
|
|
1041
|
+
return cleaned
|
|
1042
|
+
}
|
|
1043
|
+
|
|
1044
|
+
function sameEvidenceRef(a: EvidenceRef, b: EvidenceRef): boolean {
|
|
1045
|
+
return normalizeEvidenceComparable(a) === normalizeEvidenceComparable(b)
|
|
1046
|
+
}
|
|
1047
|
+
|
|
1048
|
+
function normalizeEvidenceComparable(evidence: EvidenceRef): string {
|
|
1049
|
+
const cleaned = cleanEvidenceRef(evidence)
|
|
1050
|
+
return JSON.stringify({
|
|
1051
|
+
source: cleaned.source,
|
|
1052
|
+
findingsFile: cleaned.findingsFile,
|
|
1053
|
+
sourcePath: cleaned.sourcePath,
|
|
1054
|
+
quote: cleaned.quote,
|
|
1055
|
+
location: cleaned.location,
|
|
1056
|
+
caveat: cleaned.caveat,
|
|
1057
|
+
})
|
|
1058
|
+
}
|
|
1059
|
+
|
|
1060
|
+
function listWorkspaceResearchFindings(workspaceRoot: string, exclude: Set<string>): string[] {
|
|
1061
|
+
const researchRoot = safeWorkspacePath(workspaceRoot, "researches")
|
|
1062
|
+
if (!researchRoot || !existsSync(researchRoot)) return []
|
|
1063
|
+
const files: string[] = []
|
|
1064
|
+
collectMarkdownFiles(researchRoot, files, 0)
|
|
1065
|
+
return files
|
|
1066
|
+
.map((file) => normalizePath(file.slice(resolve(workspaceRoot).length + 1)))
|
|
1067
|
+
.filter((file) => file.startsWith("researches/") && !exclude.has(file))
|
|
1068
|
+
.slice(0, 50)
|
|
1069
|
+
}
|
|
1070
|
+
|
|
1071
|
+
function collectMarkdownFiles(dir: string, output: string[], depth: number): void {
|
|
1072
|
+
if (depth > 4) return
|
|
1073
|
+
let entries: string[]
|
|
1074
|
+
try {
|
|
1075
|
+
entries = readdirSync(dir)
|
|
1076
|
+
} catch {
|
|
1077
|
+
return
|
|
1078
|
+
}
|
|
1079
|
+
for (const entry of entries) {
|
|
1080
|
+
const fullPath = join(dir, entry)
|
|
1081
|
+
let stat
|
|
1082
|
+
try {
|
|
1083
|
+
stat = statSync(fullPath)
|
|
1084
|
+
} catch {
|
|
1085
|
+
continue
|
|
1086
|
+
}
|
|
1087
|
+
if (stat.isDirectory()) {
|
|
1088
|
+
collectMarkdownFiles(fullPath, output, depth + 1)
|
|
1089
|
+
} else if (stat.isFile() && entry.endsWith(".md")) {
|
|
1090
|
+
output.push(fullPath)
|
|
1091
|
+
}
|
|
1092
|
+
}
|
|
1093
|
+
}
|
|
1094
|
+
|
|
1095
|
+
function safeWorkspacePath(workspaceRoot: string, relativePath: string): string | undefined {
|
|
1096
|
+
const root = resolve(workspaceRoot)
|
|
1097
|
+
const target = resolve(root, relativePath)
|
|
1098
|
+
if (target !== root && !target.startsWith(root + "/")) return undefined
|
|
1099
|
+
return target
|
|
1100
|
+
}
|
|
1101
|
+
|
|
1102
|
+
function readTextPrefix(filePath: string, maxChars: number): string {
|
|
1103
|
+
try {
|
|
1104
|
+
return readFileSync(filePath, "utf-8").slice(0, maxChars)
|
|
1105
|
+
} catch {
|
|
1106
|
+
return ""
|
|
1107
|
+
}
|
|
1108
|
+
}
|
|
1109
|
+
|
|
1110
|
+
function extractFindingsLines(text: string): string[] {
|
|
1111
|
+
return text
|
|
1112
|
+
.split(/\r?\n/)
|
|
1113
|
+
.map((line) => line.replace(/^\s*(?:[-*+]\s+|\d+\.\s+|>\s*)/, "").trim())
|
|
1114
|
+
.filter((line) => line.length >= 24 && !/^---$/.test(line) && !/^#/.test(line))
|
|
1115
|
+
.slice(0, 300)
|
|
1116
|
+
}
|
|
1117
|
+
|
|
1118
|
+
function extractSourcePath(text: string): string | undefined {
|
|
1119
|
+
const sourceLine = text.split(/\r?\n/).find((line) => /^\s*(?:[-*+]\s*)?(?:source|来源)\s*:/i.test(line))
|
|
1120
|
+
if (!sourceLine) return undefined
|
|
1121
|
+
return cleanMarkdownText(sourceLine.replace(/^\s*(?:[-*+]\s*)?(?:source|来源)\s*:\s*/i, "")) || undefined
|
|
1122
|
+
}
|
|
1123
|
+
|
|
1124
|
+
function meaningfulTokens(text: string): string[] {
|
|
1125
|
+
const normalized = text.toLowerCase().replace(/[^a-z0-9\u4e00-\u9fa5]+/g, " ")
|
|
1126
|
+
const latin = normalized
|
|
1127
|
+
.split(/\s+/)
|
|
1128
|
+
.map((token) => token.trim())
|
|
1129
|
+
.filter((token) => token.length >= 4 && !EVIDENCE_BINDING_STOPWORDS.has(token))
|
|
1130
|
+
const chinese = Array.from(normalized.matchAll(/[\u4e00-\u9fa5]{2,}/g), (match) => match[0])
|
|
1131
|
+
return [...new Set([...latin, ...chinese])].slice(0, 40)
|
|
1132
|
+
}
|
|
1133
|
+
|
|
1134
|
+
function importantPhrases(slide: SlideSpec): string[] {
|
|
1135
|
+
return [slide.title, slide.content?.headline, ...(slide.content?.bullets ?? [])]
|
|
1136
|
+
.map((item) => item?.trim().toLowerCase())
|
|
1137
|
+
.filter((item): item is string => Boolean(item && item.length >= 8 && item.length <= 80))
|
|
1138
|
+
}
|
|
1139
|
+
|
|
646
1140
|
function computeNarrativeReadinessIssues(deck: DeckSpec): ReadinessIssue[] {
|
|
647
1141
|
const issues: ReadinessIssue[] = []
|
|
648
1142
|
const slides = deck.slides.filter((slide) => slide.index > 0).sort((a, b) => a.index - b.index)
|
|
@@ -842,6 +1336,25 @@ function hasClearEnding(slides: SlideSpec[]): boolean {
|
|
|
842
1336
|
return finalSlides.some((slide) => slide.narrativeRole === "recommendation" || slide.narrativeRole === "ask" || slide.narrativeRole === "close" || /\b(so what|takeaway|recommend(?:ation)?|decision|ask|next step|conclusion|close)\b|结论|建议|决策|请求|下一步|收尾|总结/.test(slideSearchText(slide)))
|
|
843
1337
|
}
|
|
844
1338
|
|
|
1339
|
+
function isNavigationSlide(slide: SlideSpec): boolean {
|
|
1340
|
+
const text = slideSearchText(slide)
|
|
1341
|
+
return slide.layout === "toc" || /\b(table of contents|agenda|contents|outline|section guide)\b|目录|议程|大纲/.test(text)
|
|
1342
|
+
}
|
|
1343
|
+
|
|
1344
|
+
function isIgnorableSourceMaterial(path: string): boolean {
|
|
1345
|
+
const normalized = normalizePath(path).replace(/^\.\//, "")
|
|
1346
|
+
const name = basename(normalized)
|
|
1347
|
+
return Boolean(
|
|
1348
|
+
name.startsWith("~$") ||
|
|
1349
|
+
/^(AGENTS|README(?:\.zh-CN)?|DECKS)\.md$/.test(name) ||
|
|
1350
|
+
name === DECKS_STATE_FILE ||
|
|
1351
|
+
normalized.startsWith("decks/") ||
|
|
1352
|
+
normalized.startsWith("researches/") ||
|
|
1353
|
+
normalized.startsWith("assets/") ||
|
|
1354
|
+
normalized.startsWith(".opencode/"),
|
|
1355
|
+
)
|
|
1356
|
+
}
|
|
1357
|
+
|
|
845
1358
|
function slideSearchText(slide: SlideSpec): string {
|
|
846
1359
|
return [
|
|
847
1360
|
slide.title,
|
|
@@ -918,6 +1431,36 @@ const EVIDENCE_SENSITIVE_TERMS = [
|
|
|
918
1431
|
/可扩展/,
|
|
919
1432
|
]
|
|
920
1433
|
|
|
1434
|
+
// Matches wording that signals future/target-state scope — years (20xx),
// roadmap/architecture/capability language, autonomy terms — in English and
// Chinese. Used to flag slide claims that current-state evidence cannot support.
const FUTURE_STATE_SCOPE_PATTERN = /\b(20\d{2}|future|target-state|end state|roadmap|pathway|architecture|capabilit(?:y|ies)|autonomy|autonomous|self-organizing|ecosystem|ai manufacturing os|ai brain|digital workers|closed-loop|orchestration)\b|未来|目标态|路线图|架构|能力|自治|自组织|生态|智能体|闭环/
|
|
1435
|
+
|
|
1436
|
+
const EVIDENCE_BINDING_STOPWORDS = new Set([
|
|
1437
|
+
"about",
|
|
1438
|
+
"after",
|
|
1439
|
+
"again",
|
|
1440
|
+
"also",
|
|
1441
|
+
"before",
|
|
1442
|
+
"between",
|
|
1443
|
+
"could",
|
|
1444
|
+
"deck",
|
|
1445
|
+
"from",
|
|
1446
|
+
"have",
|
|
1447
|
+
"into",
|
|
1448
|
+
"must",
|
|
1449
|
+
"only",
|
|
1450
|
+
"over",
|
|
1451
|
+
"page",
|
|
1452
|
+
"roadmap",
|
|
1453
|
+
"show",
|
|
1454
|
+
"slide",
|
|
1455
|
+
"that",
|
|
1456
|
+
"their",
|
|
1457
|
+
"there",
|
|
1458
|
+
"this",
|
|
1459
|
+
"through",
|
|
1460
|
+
"with",
|
|
1461
|
+
"would",
|
|
1462
|
+
])
|
|
1463
|
+
|
|
921
1464
|
function normalizeSlides(slides: SlideSpec[]): SlideSpec[] {
|
|
922
1465
|
return slides
|
|
923
1466
|
.map((slide) => ({
|