@useody/detectors 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. package/LICENSE +198 -0
  2. package/README.md +42 -0
  3. package/dist/claim-comparison.d.ts +12 -0
  4. package/dist/claim-comparison.d.ts.map +1 -0
  5. package/dist/claim-comparison.js +108 -0
  6. package/dist/claim-nli.d.ts +15 -0
  7. package/dist/claim-nli.d.ts.map +1 -0
  8. package/dist/claim-nli.js +181 -0
  9. package/dist/consensus.d.ts +26 -0
  10. package/dist/consensus.d.ts.map +1 -0
  11. package/dist/consensus.js +211 -0
  12. package/dist/consultant-analysis.d.ts +20 -0
  13. package/dist/consultant-analysis.d.ts.map +1 -0
  14. package/dist/consultant-analysis.js +69 -0
  15. package/dist/consultant-prompts.d.ts +83 -0
  16. package/dist/consultant-prompts.d.ts.map +1 -0
  17. package/dist/consultant-prompts.js +135 -0
  18. package/dist/contradiction-helpers.d.ts +40 -0
  19. package/dist/contradiction-helpers.d.ts.map +1 -0
  20. package/dist/contradiction-helpers.js +163 -0
  21. package/dist/contradictions.d.ts +20 -0
  22. package/dist/contradictions.d.ts.map +1 -0
  23. package/dist/contradictions.js +235 -0
  24. package/dist/duplicates.d.ts +14 -0
  25. package/dist/duplicates.d.ts.map +1 -0
  26. package/dist/duplicates.js +95 -0
  27. package/dist/health-score.d.ts +13 -0
  28. package/dist/health-score.d.ts.map +1 -0
  29. package/dist/health-score.js +53 -0
  30. package/dist/helpers/index.d.ts +7 -0
  31. package/dist/helpers/index.d.ts.map +1 -0
  32. package/dist/helpers/index.js +7 -0
  33. package/dist/helpers/llm-timeout.d.ts +14 -0
  34. package/dist/helpers/llm-timeout.d.ts.map +1 -0
  35. package/dist/helpers/llm-timeout.js +30 -0
  36. package/dist/helpers/text-utils.d.ts +21 -0
  37. package/dist/helpers/text-utils.d.ts.map +1 -0
  38. package/dist/helpers/text-utils.js +63 -0
  39. package/dist/index.d.ts +17 -0
  40. package/dist/index.d.ts.map +1 -0
  41. package/dist/index.js +14 -0
  42. package/dist/run-detection.d.ts +29 -0
  43. package/dist/run-detection.d.ts.map +1 -0
  44. package/dist/run-detection.js +46 -0
  45. package/dist/staleness.d.ts +21 -0
  46. package/dist/staleness.d.ts.map +1 -0
  47. package/dist/staleness.js +128 -0
  48. package/dist/time-bomb-utils.d.ts +23 -0
  49. package/dist/time-bomb-utils.d.ts.map +1 -0
  50. package/dist/time-bomb-utils.js +161 -0
  51. package/dist/time-bombs.d.ts +14 -0
  52. package/dist/time-bombs.d.ts.map +1 -0
  53. package/dist/time-bombs.js +113 -0
  54. package/dist/undocumented.d.ts +13 -0
  55. package/dist/undocumented.d.ts.map +1 -0
  56. package/dist/undocumented.js +84 -0
  57. package/package.json +48 -0
@@ -0,0 +1,211 @@
1
+ import { analyzeCorpus } from './consultant-analysis.js';
2
+ import { computeDeterministicHealthScore } from './health-score.js';
3
/**
 * Normalize a headline for grouping: lowercase, drop everything except
 * letters/digits/whitespace, collapse whitespace runs, and trim.
 */
function normalizeHeadline(headline) {
    const lowered = headline.toLowerCase();
    const alnumOnly = lowered.replace(/[^a-z0-9\s]/g, '');
    return alnumOnly.replace(/\s+/g, ' ').trim();
}
14
/** Jaccard similarity over the normalized word sets of two strings. */
function wordSimilarity(a, b) {
    const setA = new Set(normalizeHeadline(a).split(' '));
    const setB = new Set(normalizeHeadline(b).split(' '));
    if (setA.size === 0 && setB.size === 0) {
        return 1;
    }
    const shared = [...setA].filter((w) => setB.has(w)).length;
    const unionSize = new Set([...setA, ...setB]).size;
    return unionSize === 0 ? 0 : shared / unionSize;
}
28
/** True when two findings share at least one affected document. */
function documentsOverlap(a, b) {
    if (a.affectedDocuments.length === 0 || b.affectedDocuments.length === 0) {
        return false;
    }
    const docsOfA = new Set(a.affectedDocuments);
    return b.affectedDocuments.some((doc) => docsOfA.has(doc));
}
40
/** Decide whether two findings belong in the same cluster. */
function areSimilar(a, b) {
    if (a.category !== b.category) {
        return false;
    }
    const similarity = wordSimilarity(a.headline, b.headline);
    // A strong headline match alone suffices; a weaker match must be
    // corroborated by overlapping affected documents.
    return similarity >= 0.5 || (documentsOverlap(a, b) && similarity >= 0.3);
}
49
/**
 * Greedy single-pass clustering: each finding joins the first existing
 * cluster whose representative (its first member) it resembles,
 * otherwise it starts a new cluster.
 */
function clusterFindings(allFindings) {
    const clusters = [];
    for (const { finding, passIndex } of allFindings) {
        const home = clusters.find((c) => areSimilar(c.findings[0], finding));
        if (home) {
            home.findings.push(finding);
            home.passIndices.add(passIndex);
        }
        else {
            clusters.push({ findings: [finding], passIndices: new Set([passIndex]) });
        }
    }
    return clusters;
}
71
/** Longest headline in the cluster wins (longer = more specific); ties keep the earliest. */
function bestHeadline(findings) {
    return findings.reduce(
        (winner, f) => (f.headline.length > winner.length ? f.headline : winner),
        findings[0].headline,
    );
}
81
/**
 * Upper-median severity of a cluster. With an even count the higher of
 * the two middle severities is chosen; unknown severities rank as info.
 */
function medianSeverity(findings) {
    const rank = { critical: 2, warning: 1, info: 0 };
    const ranks = findings
        .map((f) => rank[f.severity] ?? 0)
        .sort((x, y) => x - y);
    const pick = ranks[Math.floor(ranks.length / 2)];
    if (pick >= 2) {
        return 'critical';
    }
    return pick >= 1 ? 'warning' : 'info';
}
95
/**
 * Concatenate evidence across a cluster, dropping duplicate
 * (source, quote) pairs. The first occurrence of each pair is kept.
 */
function mergeEvidence(findings) {
    const byKey = new Map();
    for (const f of findings) {
        for (const ev of f.evidence) {
            const dedupeKey = `${ev.source}::${ev.quote}`;
            if (!byKey.has(dedupeKey)) {
                byKey.set(dedupeKey, ev);
            }
        }
    }
    return [...byKey.values()];
}
110
/**
 * Collapse a cluster into one consensus finding. The first member
 * supplies the category/impact/recommendation/effort; severity is the
 * cluster median, the headline the most specific, evidence the
 * deduplicated union, and affected documents the union across members.
 */
function mergeCluster(cluster) {
    const representative = cluster.findings[0];
    const affected = new Set();
    for (const f of cluster.findings) {
        f.affectedDocuments.forEach((d) => affected.add(d));
    }
    return {
        category: representative.category,
        severity: medianSeverity(cluster.findings),
        headline: bestHeadline(cluster.findings),
        evidence: mergeEvidence(cluster.findings),
        businessImpact: representative.businessImpact,
        recommendation: representative.recommendation,
        effort: representative.effort,
        affectedDocuments: [...affected],
    };
}
129
/**
 * Merge per-pass document maps. For each path the entry with the most
 * topics wins; earlier passes win ties.
 */
function mergeDocumentMaps(maps) {
    const byPath = new Map();
    for (const doc of maps.flat()) {
        const kept = byPath.get(doc.path);
        if (!kept || doc.topics.length > kept.topics.length) {
            byPath.set(doc.path, doc);
        }
    }
    return [...byPath.values()];
}
142
/**
 * Median of a numeric array. Returns 0 for an empty array; even-length
 * arrays use the rounded mean of the two middle values.
 */
function median(values) {
    if (values.length === 0) {
        return 0;
    }
    const ordered = [...values].sort((x, y) => x - y);
    const half = Math.floor(ordered.length / 2);
    return ordered.length % 2 === 0
        ? Math.round((ordered[half - 1] + ordered[half]) / 2)
        : ordered[half];
}
153
/**
 * Run consensus analysis: N independent analysis passes with majority
 * voting. A clustered finding survives only when it appeared in at least
 * `minVotes` (default ceil(N/2)) distinct passes. The health score is
 * recomputed deterministically from the surviving findings; document
 * maps and token counts are merged across the successful passes.
 */
export async function runConsensusAnalysis(input, llm, options) {
    const passes = options?.passes ?? 3;
    const log = options?.logger ?? (() => { });
    const maxTokens = options?.maxTokens;
    const requiredVotes = options?.minVotes ?? Math.ceil(passes / 2);
    log(`Running ${String(passes)} passes in parallel...`);
    // Launch every pass concurrently; a failed pass resolves to null so a
    // single failure cannot sink the whole analysis.
    const passPromises = Array.from({ length: passes }, (_, i) => {
        const passLog = (msg) => log(`[Pass ${String(i + 1)}] ${msg}`);
        return analyzeCorpus(input, llm, { maxTokens, logger: passLog })
            .then((result) => {
                passLog(`Complete: ${String(result.findings.length)} findings`);
                return { passIndex: i, result };
            })
            .catch((err) => {
                const msg = err instanceof Error ? err.message : String(err);
                passLog(`Failed: ${msg}`);
                return null;
            });
    });
    const settled = await Promise.all(passPromises);
    const results = [];
    const taggedFindings = [];
    for (const entry of settled) {
        if (!entry) {
            continue;
        }
        results.push(entry.result);
        for (const finding of entry.result.findings) {
            taggedFindings.push({ finding, passIndex: entry.passIndex });
        }
    }
    log('Building consensus...');
    // Keep only clusters seen in enough distinct passes, then merge each.
    const consensusFindings = clusterFindings(taggedFindings)
        .filter((cluster) => cluster.passIndices.size >= requiredVotes)
        .map(mergeCluster);
    const healthScore = computeDeterministicHealthScore(consensusFindings);
    const documentMap = mergeDocumentMaps(results.map((r) => r.documentMap));
    const totalTokens = median(results.map((r) => r.metadata.totalTokens));
    return {
        findings: consensusFindings,
        healthScore,
        documentMap,
        metadata: {
            analyzedAt: new Date().toISOString(),
            documentCount: input.documents.length,
            totalTokens,
            modelUsed: `${llm.getModelId()} (${String(passes)}-pass consensus)`,
        },
    };
}
211
+ //# sourceMappingURL=consensus.js.map
@@ -0,0 +1,20 @@
1
/**
 * Consultant-grade analysis engine.
 * Sends all content to an LLM acting as a management consultant and
 * parses structured findings across 7 categories.
 * Uses streaming to provide progress feedback and avoid apparent hangs.
 * @module consultant-analysis
 */
import type { LLMProvider } from '@useody/platform-core';
export type { FindingCategory, ConsultingFinding, HealthScore, DocumentInfo, AnalysisResult, AnalysisInput, } from './consultant-prompts.js';
import type { AnalysisResult, AnalysisInput } from './consultant-prompts.js';
/**
 * Run consulting-style analysis on a document corpus.
 * Sends all content to the LLM in a single call with a management-
 * consultant system prompt. For corpora that fit in context.
 *
 * @param input - documents to analyze (path, title, content, lastModified)
 * @param llm - provider used for the analysis call
 * @param options.maxTokens - response token cap; when omitted, a default is derived from corpus size
 * @param options.logger - progress callback; defaults to a no-op
 * @returns findings, health score, document map, and run metadata
 */
export declare function analyzeCorpus(input: AnalysisInput, llm: LLMProvider, options?: {
    maxTokens?: number;
    logger?: (msg: string) => void;
}): Promise<AnalysisResult>;
//# sourceMappingURL=consultant-analysis.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"consultant-analysis.d.ts","sourceRoot":"","sources":["../src/consultant-analysis.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AACH,OAAO,KAAK,EAAE,WAAW,EAAe,MAAM,uBAAuB,CAAC;AAYtE,YAAY,EACV,eAAe,EACf,iBAAiB,EACjB,WAAW,EACX,YAAY,EACZ,cAAc,EACd,aAAa,GACd,MAAM,yBAAyB,CAAC;AAEjC,OAAO,KAAK,EAAE,cAAc,EAAE,aAAa,EAAE,MAAM,yBAAyB,CAAC;AAE7E;;;;GAIG;AACH,wBAAsB,aAAa,CACjC,KAAK,EAAE,aAAa,EACpB,GAAG,EAAE,WAAW,EAChB,OAAO,CAAC,EAAE;IAAE,SAAS,CAAC,EAAE,MAAM,CAAC;IAAC,MAAM,CAAC,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,IAAI,CAAA;CAAE,GAC/D,OAAO,CAAC,cAAc,CAAC,CA4DzB"}
@@ -0,0 +1,69 @@
1
+ import { SYSTEM_PROMPT, buildUserMessage, isValidFinding, parseHealthScore, streamToString, parseOrRetry, buildDocumentMap, } from './consultant-prompts.js';
2
/**
 * Run consulting-style analysis on a document corpus.
 * The whole corpus goes to the LLM in one call with a management-
 * consultant system prompt; intended for corpora that fit in context.
 * Returns an empty result when there are no documents or parsing fails,
 * and a perfect-health result for a single document (cross-document
 * analysis needs at least two).
 */
export async function analyzeCorpus(input, llm, options) {
    const log = options?.logger ?? (() => { });
    const docs = input.documents;
    // Scale maxTokens with corpus size: base 1024 + 256 per doc, capped at 4096.
    const maxTokens = options?.maxTokens ?? Math.min(4096, 1024 + docs.length * 256);
    const modelId = llm.getModelId();
    if (docs.length === 0) {
        return emptyResult(modelId);
    }
    if (docs.length < 2) {
        log('Single document — cross-document analysis requires 2+ documents.');
        return {
            findings: [],
            healthScore: { overall: 100, consistency: 100, freshness: 100, ownership: 100, coverage: 100 },
            documentMap: docs.map((d) => ({
                path: d.path, title: d.title, lastModified: d.lastModified, topics: [],
            })),
            metadata: {
                analyzedAt: new Date().toISOString(),
                documentCount: docs.length,
                totalTokens: 0,
                modelUsed: modelId,
            },
        };
    }
    const label = `Analyzing ${String(docs.length)} documents with ${modelId}`;
    log(`${label}...`);
    const messages = [
        { role: 'system', content: SYSTEM_PROMPT },
        { role: 'user', content: buildUserMessage(input) },
    ];
    const raw = await streamToString(llm, messages, maxTokens, log, label);
    const parsed = await parseOrRetry(messages, raw, llm, maxTokens, log, label);
    if (!parsed) {
        log('Failed to parse LLM response after retry.');
        return emptyResult(modelId);
    }
    const findings = (parsed.findings ?? []).filter(isValidFinding);
    const healthScore = parseHealthScore(parsed.healthScore);
    const documentMap = buildDocumentMap(parsed.documentMap, input);
    // Rough token estimate: ~4 characters per token.
    const totalChars = docs.reduce((sum, d) => sum + d.content.length, 0);
    log(`Found ${String(findings.length)} findings. Health: ${String(healthScore.overall)}`);
    return {
        findings, healthScore, documentMap,
        metadata: {
            analyzedAt: new Date().toISOString(),
            documentCount: docs.length,
            totalTokens: Math.ceil(totalChars / 4),
            modelUsed: modelId,
        },
    };
}
60
/** Build a result with no findings and a perfect (100) health score. */
function emptyResult(modelId) {
    const perfect = { overall: 100, consistency: 100, freshness: 100, ownership: 100, coverage: 100 };
    return {
        findings: [],
        healthScore: perfect,
        documentMap: [],
        metadata: { analyzedAt: new Date().toISOString(), documentCount: 0, totalTokens: 0, modelUsed: modelId },
    };
}
69
+ //# sourceMappingURL=consultant-analysis.js.map
@@ -0,0 +1,83 @@
1
/**
 * System prompt, types, and validation helpers for consultant analysis.
 * Extracted from consultant-analysis.ts to keep files under 250 lines.
 * @module consultant-prompts
 */
import type { LLMProvider, ChatMessage } from '@useody/platform-core';
/** The seven finding categories a consultant analysis produces. */
export type FindingCategory = 'contradiction' | 'stale_commitment' | 'ownership_gap' | 'tribal_knowledge' | 'duplicate_truth' | 'commitment_without_followthrough' | 'decision_without_context';
/** A single finding from the consultant analysis. */
export interface ConsultingFinding {
    category: FindingCategory;
    severity: 'critical' | 'warning' | 'info';
    /** Short, scannable summary of the finding. */
    headline: string;
    /** Exact quotes (with their source document) backing the finding. */
    evidence: Array<{
        source: string;
        quote: string;
    }>;
    businessImpact: string;
    recommendation: string;
    /** Estimated effort to address the finding. */
    effort: 'quick_win' | 'medium' | 'major';
    affectedDocuments: string[];
}
/** Multi-dimensional health score for the document corpus (each 0-100). */
export interface HealthScore {
    overall: number;
    consistency: number;
    freshness: number;
    ownership: number;
    coverage: number;
}
/** Metadata about a single document extracted during analysis. */
export interface DocumentInfo {
    path: string;
    title: string;
    lastModified?: string;
    topics: string[];
    owner?: string;
}
/** Complete result of a consultant analysis. */
export interface AnalysisResult {
    findings: ConsultingFinding[];
    healthScore: HealthScore;
    documentMap: DocumentInfo[];
    metadata: {
        analyzedAt: string;
        documentCount: number;
        totalTokens: number;
        modelUsed: string;
    };
}
/** Input documents for analysis. */
export interface AnalysisInput {
    documents: Array<{
        path: string;
        title: string;
        content: string;
        lastModified?: string;
    }>;
}
/** The management consultant system prompt for analysis. */
export declare const SYSTEM_PROMPT: string;
/** Build the user message with document delimiters. */
export declare function buildUserMessage(input: AnalysisInput): string;
/** Validate that a parsed object is a well-formed finding. */
export declare function isValidFinding(f: unknown): f is ConsultingFinding;
/** Parse HealthScore from LLM output, clamping each dimension to 0-100. */
export declare function parseHealthScore(raw: unknown): HealthScore;
/** Shape of the raw JSON the LLM produces. */
export interface LlmOutput {
    findings?: unknown[];
    healthScore?: unknown;
    documentMap?: unknown[];
}
/**
 * Accumulate a streaming LLM response, logging progress periodically.
 * Falls back to llm.complete() if the stream method throws.
 */
export declare function streamToString(llm: LLMProvider, messages: ChatMessage[], maxTokens: number, log: (msg: string) => void, label: string): Promise<string>;
/** Attempt to parse LLM response, retrying once on failure. */
export declare function parseOrRetry(messages: ChatMessage[], firstResponse: string, llm: LLMProvider, maxTokens: number, log: (msg: string) => void, label: string): Promise<LlmOutput | null>;
/** Merge LLM document analysis with input metadata; output order follows the input documents. */
export declare function buildDocumentMap(raw: unknown[] | undefined, input: AnalysisInput): DocumentInfo[];
//# sourceMappingURL=consultant-prompts.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"consultant-prompts.d.ts","sourceRoot":"","sources":["../src/consultant-prompts.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH,OAAO,KAAK,EAAE,WAAW,EAAE,WAAW,EAAE,MAAM,uBAAuB,CAAC;AAGtE,mEAAmE;AACnE,MAAM,MAAM,eAAe,GACvB,eAAe,GACf,kBAAkB,GAClB,eAAe,GACf,kBAAkB,GAClB,iBAAiB,GACjB,kCAAkC,GAClC,0BAA0B,CAAC;AAE/B,qDAAqD;AACrD,MAAM,WAAW,iBAAiB;IAChC,QAAQ,EAAE,eAAe,CAAC;IAC1B,QAAQ,EAAE,UAAU,GAAG,SAAS,GAAG,MAAM,CAAC;IAC1C,QAAQ,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,KAAK,CAAC;QAAE,MAAM,EAAE,MAAM,CAAC;QAAC,KAAK,EAAE,MAAM,CAAA;KAAE,CAAC,CAAC;IACnD,cAAc,EAAE,MAAM,CAAC;IACvB,cAAc,EAAE,MAAM,CAAC;IACvB,MAAM,EAAE,WAAW,GAAG,QAAQ,GAAG,OAAO,CAAC;IACzC,iBAAiB,EAAE,MAAM,EAAE,CAAC;CAC7B;AAED,8DAA8D;AAC9D,MAAM,WAAW,WAAW;IAC1B,OAAO,EAAE,MAAM,CAAC;IAChB,WAAW,EAAE,MAAM,CAAC;IACpB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,QAAQ,EAAE,MAAM,CAAC;CAClB;AAED,kEAAkE;AAClE,MAAM,WAAW,YAAY;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,KAAK,EAAE,MAAM,CAAC;IACd,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,MAAM,EAAE,MAAM,EAAE,CAAC;IACjB,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,gDAAgD;AAChD,MAAM,WAAW,cAAc;IAC7B,QAAQ,EAAE,iBAAiB,EAAE,CAAC;IAC9B,WAAW,EAAE,WAAW,CAAC;IACzB,WAAW,EAAE,YAAY,EAAE,CAAC;IAC5B,QAAQ,EAAE;QACR,UAAU,EAAE,MAAM,CAAC;QACnB,aAAa,EAAE,MAAM,CAAC;QACtB,WAAW,EAAE,MAAM,CAAC;QACpB,SAAS,EAAE,MAAM,CAAC;KACnB,CAAC;CACH;AAED,oCAAoC;AACpC,MAAM,WAAW,aAAa;IAC5B,SAAS,EAAE,KAAK,CAAC;QACf,IAAI,EAAE,MAAM,CAAC;QACb,KAAK,EAAE,MAAM,CAAC;QACd,OAAO,EAAE,MAAM,CAAC;QAChB,YAAY,CAAC,EAAE,MAAM,CAAC;KACvB,CAAC,CAAC;CACJ;AASD,4DAA4D;AAC5D,eAAO,MAAM,aAAa,QA6Bd,CAAC;AAEb,uDAAuD;AACvD,wBAAgB,gBAAgB,CAAC,KAAK,EAAE,aAAa,GAAG,MAAM,CAK7D;AAED,8DAA8D;AAC9D,wBAAgB,cAAc,CAAC,CAAC,EAAE,OAAO,GAAG,CAAC,IAAI,iBAAiB,CAWjE;AAOD,kEAAkE;AAClE,wBAAgB,gBAAgB,CAAC,GAAG,EAAE,OAAO,GAAG,WAAW,CAM1D;AAED,8CAA8C;AAC9C,MAAM,WAAW,SAAS;IACxB,QAAQ,CAAC,EAAE,OAAO,EAAE,CAAC;IACrB,WAAW,CAAC,EAAE,OAAO,CAAC;IACtB,WAAW,CAAC,EAAE,OAAO,EAAE,CAAC;CACzB;AAED;;;GAGG;AACH,wBAAsB,cAAc,CAClC,GAAG,EAAE,WAAW,EAChB,QAAQ,EAAE,WAAW,EAAE,EACvB,SAAS,EAAE,MAAM,EACjB,GAAG,EAAE,CAAC,G
AAG,EAAE,MAAM,KAAK,IAAI,EAC1B,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,MAAM,CAAC,CAoBjB;AAED,+DAA+D;AAC/D,wBAAsB,YAAY,CAChC,QAAQ,EAAE,WAAW,EAAE,EACvB,aAAa,EAAE,MAAM,EACrB,GAAG,EAAE,WAAW,EAChB,SAAS,EAAE,MAAM,EACjB,GAAG,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,IAAI,EAC1B,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,SAAS,GAAG,IAAI,CAAC,CAW3B;AAED,uDAAuD;AACvD,wBAAgB,gBAAgB,CAAC,GAAG,EAAE,OAAO,EAAE,GAAG,SAAS,EAAE,KAAK,EAAE,aAAa,GAAG,YAAY,EAAE,CAejG"}
@@ -0,0 +1,135 @@
1
+ import { parseLlmJsonResponse } from '@useody/platform-core';
2
// Allow-lists used by isValidFinding to validate fields of LLM-produced findings.
const VALID_CATEGORIES = new Set([
    'contradiction', 'stale_commitment', 'ownership_gap', 'tribal_knowledge',
    'duplicate_truth', 'commitment_without_followthrough', 'decision_without_context',
]);
const VALID_SEVERITIES = new Set(['critical', 'warning', 'info']);
const VALID_EFFORTS = new Set(['quick_win', 'medium', 'major']);
8
/**
 * The management consultant system prompt for analysis.
 * Kept as a joined string array for readability. Instructs the model to
 * cross-reference all document pairs and emit strict JSON with no
 * fencing or preamble; the JSON shape is spelled out inside the prompt.
 */
export const SYSTEM_PROMPT = [
    'You are a senior management consultant conducting a knowledge health assessment for an executive team.',
    'Analyze every document. Cite exact passages as evidence. Identify both problems AND coverage gaps.',
    '',
    '## Systematic Cross-Reference (MANDATORY)',
    'Before writing findings: extract every policy, number, deadline, and rule from EACH document. Then cross-reference each assertion against EVERY other document. Check all document pairs — do not skip any.',
    'Pay special attention to: numerical disagreements (different numbers for the same policy), cases where one doc allows what another forbids, and conflicting dates/deadlines/timelines.',
    '',
    '## 7 Categories',
    '1. contradiction — documents making conflicting claims. Numbers that don\'t match. Policies that disagree. One doc allows what another forbids.',
    '2. stale_commitment — deadlines passed, pending decisions never resolved, outdated references.',
    '3. ownership_gap — no clear process owner, overlapping responsibilities, orphaned projects. Also flag under-documented areas.',
    '4. tribal_knowledge — critical info in only one document, single points of failure. Also flag areas that seem under-documented.',
    '5. duplicate_truth — same topic in multiple places with divergent content.',
    '6. commitment_without_followthrough — stated commitments, timelines, or action items with no evidence of completion.',
    '7. decision_without_context — decisions stated without rationale, creating reversal risk if the decision-maker leaves.',
    '',
    '## Writing Rules',
    '- Headlines must name the SPECIFIC policy, number, or commitment. Never generic ("potentially outdated"). Instead: "Vacation policy: 20 days (HR handbook) vs 15 days (onboarding guide)."',
    '- Lead with business impact, use "This creates risk of..." framing. Zero jargon.',
    '- Every contradiction MUST cite both conflicting claims as exact quotes in the evidence array.',
    '',
    '## Severity: critical=money/customers/compliance/safety/passed deadlines. warning=internal conflicts, >6mo stale, ambiguous ownership. info=not-yet-divergent duplicates, non-critical single-source.',
    '',
    '## JSON Output (no fencing, no preamble)',
    '{"findings":[{"category":"<id>","severity":"<level>","headline":"<CEO-scannable>","evidence":[{"source":"<doc path>","quote":"<exact passage>"}],"businessImpact":"<what happens if ignored>","recommendation":"<specific fix>","effort":"<quick_win|medium|major>","affectedDocuments":["<paths>"]}],"healthScore":{"overall":<0-100>,"consistency":<0-100>,"freshness":<0-100>,"ownership":<0-100>,"coverage":<0-100>},"documentMap":[{"path":"<path>","title":"<title>","topics":["<topic>"],"owner":"<if identifiable>"}]}',
    '',
    'Dimensions: consistency=cross-doc agreement, freshness=currency, ownership=responsibility clarity, coverage=breadth.',
    'Write in the documents\' language. Only JSON keys and category IDs use English.',
].join('\n');
39
/** Build the user message: each document wrapped in a `--- DOCUMENT: path ---` delimiter line. */
export function buildUserMessage(input) {
    const sections = input.documents.map((doc) => {
        const modified = doc.lastModified ? ` (Last modified: ${doc.lastModified})` : '';
        return `--- DOCUMENT: ${doc.path}${modified} ---\n${doc.content}`;
    });
    return sections.join('\n\n');
}
46
/**
 * Validate that a parsed object is a well-formed finding.
 * category, severity, headline, and evidence are required; effort,
 * affectedDocuments, businessImpact, and recommendation are repaired
 * IN PLACE to safe defaults when missing or malformed.
 */
export function isValidFinding(f) {
    if (!f || typeof f !== 'object') {
        return false;
    }
    const candidate = f;
    const categoryOk = typeof candidate.category === 'string' && VALID_CATEGORIES.has(candidate.category);
    const severityOk = typeof candidate.severity === 'string' && VALID_SEVERITIES.has(candidate.severity);
    if (!categoryOk || !severityOk) {
        return false;
    }
    if (typeof candidate.headline !== 'string' || !Array.isArray(candidate.evidence)) {
        return false;
    }
    // Optional fields are coerced rather than causing rejection.
    if (typeof candidate.effort !== 'string' || !VALID_EFFORTS.has(candidate.effort)) {
        candidate.effort = 'medium';
    }
    if (!Array.isArray(candidate.affectedDocuments)) {
        candidate.affectedDocuments = [];
    }
    if (typeof candidate.businessImpact !== 'string') {
        candidate.businessImpact = '';
    }
    if (typeof candidate.recommendation !== 'string') {
        candidate.recommendation = '';
    }
    return true;
}
67
/** Clamp a value to the integer range 0-100; non-numbers become 0. */
function clamp(v) {
    const rounded = Math.round(typeof v === 'number' ? v : 0);
    return Math.max(0, Math.min(100, rounded));
}
/** Parse HealthScore from LLM output, clamping each dimension to 0-100. */
export function parseHealthScore(raw) {
    const src = (raw && typeof raw === 'object' ? raw : {});
    return {
        overall: clamp(src.overall),
        consistency: clamp(src.consistency),
        freshness: clamp(src.freshness),
        ownership: clamp(src.ownership),
        coverage: clamp(src.coverage),
    };
}
79
/**
 * Accumulate a streaming LLM response into one string, logging progress
 * roughly every 3 seconds. Falls back to a non-streaming llm.complete()
 * call if the stream method throws.
 */
export async function streamToString(llm, messages, maxTokens, log, label) {
    const PROGRESS_EVERY_MS = 3_000;
    try {
        const pieces = [];
        let received = 0;
        let lastReportAt = Date.now();
        for await (const token of llm.stream(messages, { temperature: 0, maxTokens })) {
            pieces.push(token);
            received += 1;
            const now = Date.now();
            if (now - lastReportAt >= PROGRESS_EVERY_MS) {
                log(`${label} (${String(received)} tokens...)`);
                lastReportAt = now;
            }
        }
        return pieces.join('');
    }
    catch {
        // Streaming unavailable or failed mid-flight — one blocking call instead.
        return llm.complete(messages, { temperature: 0, maxTokens });
    }
}
104
/**
 * Attempt to parse the LLM response as JSON; on failure, ask the model
 * once more to return only the JSON object and parse that.
 *
 * @returns the parsed output, or null when both attempts fail.
 */
export async function parseOrRetry(messages, firstResponse, llm, maxTokens, log, label) {
    const first = parseLlmJsonResponse(firstResponse);
    if (first.data) {
        return first.data;
    }
    log(`${label} — retrying (response was not valid JSON)...`);
    const retry = [
        ...messages,
        { role: 'assistant', content: firstResponse },
        { role: 'user', content: 'That was not valid JSON. Return ONLY the JSON object, no other text.' },
    ];
    const second = await streamToString(llm, retry, maxTokens, log, `${label} retry`);
    // Normalize a missing `.data` (undefined) to null so the declared
    // Promise<LlmOutput | null> contract holds for callers checking === null.
    return parseLlmJsonResponse(second).data ?? null;
}
118
/**
 * Merge LLM document analysis with input metadata. Output order follows
 * the input documents; topics and owner come from the LLM entry with the
 * matching path when present and well-formed, otherwise safe defaults.
 */
export function buildDocumentMap(raw, input) {
    const llmByPath = new Map();
    const entries = Array.isArray(raw) ? raw : [];
    for (const entry of entries) {
        if (entry && typeof entry === 'object' && typeof entry.path === 'string') {
            llmByPath.set(entry.path, entry);
        }
    }
    return input.documents.map((doc) => {
        const match = llmByPath.get(doc.path);
        return {
            path: doc.path,
            title: doc.title,
            lastModified: doc.lastModified,
            topics: Array.isArray(match?.topics) ? match.topics : [],
            owner: typeof match?.owner === 'string' ? match.owner : undefined,
        };
    });
}
135
+ //# sourceMappingURL=consultant-prompts.js.map
@@ -0,0 +1,40 @@
1
/**
 * Heuristic helpers for the contradiction detector.
 * Extracted to keep contradictions.ts under 250 lines.
 * @module detectors/contradiction-helpers
 */
import type { KnowledgeNode, Detection } from '@useody/platform-core';
/** A number+unit fact extracted from text. */
export interface NumberFact {
    /** Parsed numeric value. */
    value: number;
    /** Unit as matched in the text (normalizeUnit gives the comparison form). */
    unit: string;
    /** The raw matched text. */
    raw: string;
    /** Position of the match in the source text — presumably a character offset; verify against extractSentence. */
    index: number;
}
/** Extract number+unit pairs from text. */
export declare function extractNumbers(text: string): NumberFact[];
/** Normalize unit names for comparison. */
export declare function normalizeUnit(u: string): string;
/** Get raw text from a node for content-based heuristics. */
export declare function getNodeText(node: KnowledgeNode): string;
/** Extract the sentence containing the match at matchIndex. */
export declare function extractSentence(text: string, matchIndex: number): string;
/** Capitalize the first letter of a string. */
export declare function capitalizeFirst(s: string): string;
/** Extract topic keywords from a node's raw content only. */
export declare function rawKeywords(node: KnowledgeNode): Set<string>;
/** Canonical pair key for deduplication. */
export declare function pairKey(id1: string, id2: string): string;
/** Return true if two nodes plausibly discuss the same topic. */
export declare function areSameTopic(a: KnowledgeNode, b: KnowledgeNode): boolean;
/** Return true if both texts share an identical long sentence. */
export declare function shareExactSentence(textA: string, textB: string): boolean;
/** Build set of entities appearing in >50% of nodes. */
export declare function buildHighFreqEntities(nodes: KnowledgeNode[]): Set<string>;
/** @internal Tuples of two regexes plus two strings; semantics live in the implementation. */
export declare const BOOLEAN_PAIRS: [RegExp, RegExp, string, string][];
/** Words near enabled/disabled or required/optional that indicate a spec, not a policy. */
export declare const CONFIG_CONTEXT: RegExp;
/** Detect contradictions from shared entities with different facts; results are delivered via `out` (void return). */
export declare function detectFactContradiction(a: KnowledgeNode, b: KnowledgeNode, out: Detection[], highFreqEntities: Set<string>): void;
//# sourceMappingURL=contradiction-helpers.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"contradiction-helpers.d.ts","sourceRoot":"","sources":["../src/contradiction-helpers.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH,OAAO,KAAK,EACV,aAAa,EACb,SAAS,EACV,MAAM,uBAAuB,CAAC;AAkB/B,8CAA8C;AAC9C,MAAM,WAAW,UAAU;IAAG,KAAK,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,MAAM,CAAC;IAAC,GAAG,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAA;CAAE;AAEvF,2CAA2C;AAC3C,wBAAgB,cAAc,CAAC,IAAI,EAAE,MAAM,GAAG,UAAU,EAAE,CAUzD;AAED,2CAA2C;AAC3C,wBAAgB,aAAa,CAAC,CAAC,EAAE,MAAM,GAAG,MAAM,CAS/C;AAED,6DAA6D;AAC7D,wBAAgB,WAAW,CAAC,IAAI,EAAE,aAAa,GAAG,MAAM,CAIvD;AAED,+DAA+D;AAC/D,wBAAgB,eAAe,CAAC,IAAI,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,GAAG,MAAM,CAMxE;AAED,+CAA+C;AAC/C,wBAAgB,eAAe,CAAC,CAAC,EAAE,MAAM,GAAG,MAAM,CAEjD;AAED,6DAA6D;AAC7D,wBAAgB,WAAW,CAAC,IAAI,EAAE,aAAa,GAAG,GAAG,CAAC,MAAM,CAAC,CAM5D;AAED,4CAA4C;AAC5C,wBAAgB,OAAO,CAAC,GAAG,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,GAAG,MAAM,CAExD;AAED,iEAAiE;AACjE,wBAAgB,YAAY,CAAC,CAAC,EAAE,aAAa,EAAE,CAAC,EAAE,aAAa,GAAG,OAAO,CAUxE;AAED,kEAAkE;AAClE,wBAAgB,kBAAkB,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,GAAG,OAAO,CAIxE;AAED,wDAAwD;AACxD,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,aAAa,EAAE,GAAG,GAAG,CAAC,MAAM,CAAC,CAezE;AAGD,gBAAgB;AAChB,eAAO,MAAM,aAAa,EAAE,CAAC,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,CAAC,EAe3D,CAAC;AAEF,2FAA2F;AAC3F,eAAO,MAAM,cAAc,QAA6M,CAAC;AAEzO,uEAAuE;AACvE,wBAAgB,uBAAuB,CACrC,CAAC,EAAE,aAAa,EAAE,CAAC,EAAE,aAAa,EAAE,GAAG,EAAE,SAAS,EAAE,EACpD,gBAAgB,EAAE,GAAG,CAAC,MAAM,CAAC,GAC5B,IAAI,CAqCN"}