@bryan-thompson/inspector-assessment-client 1.34.2 → 1.35.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/dist/assets/{OAuthCallback-CBcYNwyM.js → OAuthCallback-DC1cIXHT.js} +1 -1
  2. package/dist/assets/{OAuthDebugCallback-B0zFGlM8.js → OAuthDebugCallback-C3gqJjgQ.js} +1 -1
  3. package/dist/assets/{index-Djm_oTDV.js → index-Dn2w887x.js} +5 -4
  4. package/dist/index.html +1 -1
  5. package/lib/lib/assessment/resultTypes.d.ts +42 -0
  6. package/lib/lib/assessment/resultTypes.d.ts.map +1 -1
  7. package/lib/lib/assessment/sharedSchemas.d.ts +13 -0
  8. package/lib/lib/assessment/sharedSchemas.d.ts.map +1 -1
  9. package/lib/lib/assessment/sharedSchemas.js +9 -0
  10. package/lib/lib/assessment/summarizer/AssessmentSummarizer.d.ts +112 -0
  11. package/lib/lib/assessment/summarizer/AssessmentSummarizer.d.ts.map +1 -0
  12. package/lib/lib/assessment/summarizer/AssessmentSummarizer.js +452 -0
  13. package/lib/lib/assessment/summarizer/index.d.ts +19 -0
  14. package/lib/lib/assessment/summarizer/index.d.ts.map +1 -0
  15. package/lib/lib/assessment/summarizer/index.js +19 -0
  16. package/lib/lib/assessment/summarizer/stageBEnrichmentBuilder.d.ts +36 -0
  17. package/lib/lib/assessment/summarizer/stageBEnrichmentBuilder.d.ts.map +1 -0
  18. package/lib/lib/assessment/summarizer/stageBEnrichmentBuilder.js +282 -0
  19. package/lib/lib/assessment/summarizer/stageBTypes.d.ts +154 -0
  20. package/lib/lib/assessment/summarizer/stageBTypes.d.ts.map +1 -0
  21. package/lib/lib/assessment/summarizer/stageBTypes.js +24 -0
  22. package/lib/lib/assessment/summarizer/tokenEstimator.d.ts +103 -0
  23. package/lib/lib/assessment/summarizer/tokenEstimator.d.ts.map +1 -0
  24. package/lib/lib/assessment/summarizer/tokenEstimator.js +225 -0
  25. package/lib/lib/assessment/summarizer/types.d.ts +187 -0
  26. package/lib/lib/assessment/summarizer/types.d.ts.map +1 -0
  27. package/lib/lib/assessment/summarizer/types.js +20 -0
  28. package/lib/lib/moduleScoring.d.ts +2 -1
  29. package/lib/lib/moduleScoring.d.ts.map +1 -1
  30. package/lib/lib/moduleScoring.js +2 -1
  31. package/lib/services/assessment/modules/securityTests/TestValidityAnalyzer.d.ts +28 -0
  32. package/lib/services/assessment/modules/securityTests/TestValidityAnalyzer.d.ts.map +1 -1
  33. package/lib/services/assessment/modules/securityTests/TestValidityAnalyzer.js +180 -0
  34. package/package.json +1 -1
@@ -0,0 +1,282 @@
1
+ /**
2
+ * Stage B Enrichment Builder
3
+ *
4
+ * Functions to build Stage B enrichment data from assessment results.
5
+ * Extracts evidence, correlations, and confidence details for Claude
6
+ * semantic analysis.
7
+ *
8
+ * Issue #137: Stage A data enrichment for Stage B Claude analysis
9
+ *
10
+ * @module assessment/summarizer/stageBEnrichmentBuilder
11
+ */
12
+ import { DEFAULT_TIER2_MAX_SAMPLES, DEFAULT_TIER3_MAX_CORRELATIONS, MAX_RESPONSE_LENGTH, MAX_CONTEXT_WINDOW, } from "./stageBTypes.js";
13
+ // ============================================================================
14
+ // Helper Functions
15
+ // ============================================================================
16
/**
 * Truncate a string to a maximum length, adding an ellipsis if truncated.
 *
 * @param str - String to truncate; nullish/empty input yields "".
 * @param maxLength - Maximum length of the result, including the "..." suffix.
 * @returns The original string when it fits, otherwise a truncated copy ending in "...".
 */
function truncate(str, maxLength) {
    if (!str)
        return "";
    if (str.length <= maxLength)
        return str;
    // Reserve 3 chars for the ellipsis. Clamp at 0 so a maxLength < 3 does
    // not produce a negative slice end (which would keep nearly the whole
    // string and return output longer than the requested limit).
    return str.slice(0, Math.max(0, maxLength - 3)) + "...";
}
26
/**
 * Classify a single security test result.
 *
 * Connection failures and tests whose reliability is "failed" are reported
 * as "error"; otherwise the vulnerable flag decides between "vulnerable"
 * and "safe".
 */
function classifyTestResult(test) {
    const isError = test.connectionError || test.testReliability === "failed";
    if (isError) {
        return "error";
    }
    return test.vulnerable ? "vulnerable" : "safe";
}
38
/**
 * Convert a SecurityTestResult into a PayloadCorrelation record that links
 * the input payload to the observed response for cause-effect analysis.
 */
function testToCorrelation(test) {
    // Only vulnerable results carry a matched pattern (the test's own name).
    const matchedPatterns = test.vulnerable ? [test.testName] : [];
    return {
        inputPayload: truncate(test.payload, MAX_RESPONSE_LENGTH),
        outputResponse: truncate(test.response, MAX_RESPONSE_LENGTH),
        classification: classifyTestResult(test),
        matchedPatterns,
        toolName: test.toolName || "unknown",
        testName: test.testName,
        confidence: test.confidence,
    };
}
52
/**
 * Convert a test result into finding evidence: the raw payload plus the
 * surrounding context and a label saying where that context came from.
 */
function testToEvidence(test) {
    // Prefer explicit evidence; fall back to the raw response.
    const contextSource = test.evidence || test.response;
    let location = "unknown";
    if (test.evidence) {
        location = "evidence";
    } else if (test.response) {
        location = "response";
    }
    return {
        raw: truncate(test.payload, MAX_RESPONSE_LENGTH),
        context: truncate(contextSource, MAX_CONTEXT_WINDOW),
        location,
    };
}
68
/**
 * Tally vulnerable tests by confidence level.
 * Tests without an explicit confidence are counted as "medium".
 */
function calculateConfidenceBreakdown(tests) {
    const counts = { high: 0, medium: 0, low: 0 };
    for (const t of tests) {
        if (!t.vulnerable)
            continue;
        counts[t.confidence || "medium"]++;
    }
    return counts;
}
81
/**
 * Count vulnerable tests per test name, producing a pattern -> count map.
 */
function calculatePatternDistribution(tests) {
    const distribution = {};
    for (const t of tests) {
        if (t.vulnerable) {
            distribution[t.testName] = (distribution[t.testName] ?? 0) + 1;
        }
    }
    return distribution;
}
94
/**
 * Pick the single most concerning vulnerable test: ranked first by risk
 * level (CRITICAL > HIGH > MEDIUM > LOW, unknown last), then by detection
 * confidence (high > medium > low, defaulting to medium). Ties keep the
 * earliest test in the input. Returns undefined when nothing is vulnerable.
 */
function findHighestRiskTest(tests) {
    const RISK_RANK = { CRITICAL: 0, HIGH: 1, MEDIUM: 2, LOW: 3 };
    const CONFIDENCE_RANK = { high: 0, medium: 1, low: 2 };
    const riskOf = (t) => RISK_RANK[t.riskLevel] ?? 4;
    const confOf = (t) => CONFIDENCE_RANK[t.confidence || "medium"] ?? 1;
    let best;
    for (const t of tests) {
        if (!t.vulnerable)
            continue;
        if (best === undefined) {
            best = t;
            continue;
        }
        // Strict "<" comparisons keep the first occurrence on ties, matching
        // the stability of a stable sort-then-take-first.
        const betterRisk = riskOf(t) < riskOf(best);
        const sameRiskBetterConf = riskOf(t) === riskOf(best) && confOf(t) < confOf(best);
        if (betterRisk || sameRiskBetterConf) {
            best = t;
        }
    }
    return best;
}
112
+ // ============================================================================
113
+ // Tier 2: Tool Summary Enrichment Builder
114
+ // ============================================================================
115
/**
 * Build Stage B enrichment for Tier 2 tool summaries.
 *
 * @param toolName - Name of the tool
 * @param tests - Security test results (all tools; filtered internally)
 * @param maxSamples - Maximum evidence samples to include
 * @returns Tool summary Stage B enrichment
 */
export function buildToolSummaryStageBEnrichment(toolName, tests, maxSamples = DEFAULT_TIER2_MAX_SAMPLES) {
    const riskRank = { CRITICAL: 0, HIGH: 1, MEDIUM: 2, LOW: 3 };
    // Only consider tests that targeted this tool.
    const toolTests = tests.filter((t) => t.toolName === toolName);
    // Evidence samples come from the highest-risk vulnerabilities first.
    const vulnerableByRisk = toolTests
        .filter((t) => t.vulnerable)
        .sort((a, b) => (riskRank[a.riskLevel] ?? 4) - (riskRank[b.riskLevel] ?? 4));
    const sampleEvidence = vulnerableByRisk.slice(0, maxSamples).map(testToEvidence);
    // Single most concerning correlation, if any test was vulnerable.
    const topTest = findHighestRiskTest(toolTests);
    // The first test reporting an auth failure mode supplies that field.
    const authTest = toolTests.find((t) => t.authFailureMode);
    return {
        sampleEvidence,
        confidenceBreakdown: calculateConfidenceBreakdown(toolTests),
        highestRiskCorrelation: topTest ? testToCorrelation(topTest) : undefined,
        patternDistribution: calculatePatternDistribution(toolTests),
        // Emit undefined (omitted in JSON) rather than false when not detected.
        sanitizationDetected: toolTests.some((t) => t.sanitizationDetected) || undefined,
        authFailureMode: authTest ? authTest.authFailureMode : undefined,
    };
}
159
+ // ============================================================================
160
+ // Tier 3: Tool Detail Enrichment Builder
161
+ // ============================================================================
162
/**
 * Build Stage B enrichment for Tier 3 per-tool detail files.
 *
 * @param toolName - Name of the tool
 * @param tests - Security test results (all tools; filtered internally)
 * @param annotationResult - Tool annotation result (if available)
 * @param aupViolations - AUP violations (filtered to this tool's location)
 * @param maxCorrelations - Maximum payload correlations to include
 * @returns Tool detail Stage B enrichment
 */
export function buildToolDetailStageBEnrichment(toolName, tests, annotationResult, aupViolations, maxCorrelations = DEFAULT_TIER3_MAX_CORRELATIONS) {
    const toolTests = tests.filter((t) => t.toolName === toolName);

    // Correlation ordering: (vulnerable, connectionError) ranked
    // lexicographically — vulnerable tests first, and within each group
    // connection errors first; stable otherwise.
    const orderRank = (t) => (t.vulnerable ? 0 : 2) + (t.connectionError ? 0 : 1);
    const ordered = [...toolTests].sort((a, b) => orderRank(a) - orderRank(b));
    const payloadCorrelations = ordered.slice(0, maxCorrelations).map(testToCorrelation);

    const patternDistribution = calculatePatternDistribution(toolTests);

    // Context windows keyed by test name plus a payload prefix; the first
    // vulnerable test with evidence wins for each key.
    const contextWindows = {};
    for (const t of toolTests) {
        if (!t.vulnerable || !t.evidence)
            continue;
        const key = `${t.testName}:${t.payload.slice(0, 30)}`;
        if (!contextWindows[key]) {
            contextWindows[key] = truncate(t.evidence, MAX_CONTEXT_WINDOW);
        }
    }

    // Weighted confidence score: high=100, medium=70, low=40.
    // Reports 100 when no vulnerabilities were found.
    const breakdown = calculateConfidenceBreakdown(toolTests);
    const totalVulnerable = breakdown.high + breakdown.medium + breakdown.low;
    const weighted = breakdown.high * 100 + breakdown.medium * 70 + breakdown.low * 40;
    const overall = totalVulnerable > 0
        ? Math.round((weighted / totalVulnerable / 100) * 100)
        : 100;
    const confidenceDetails = {
        overall,
        byCategory: patternDistribution,
        requiresManualReview: toolTests.filter((t) => t.requiresManualReview).length,
    };

    const securityDetails = {
        vulnerableCount: toolTests.filter((t) => t.vulnerable).length,
        safeCount: toolTests.filter((t) => !t.vulnerable && !t.connectionError).length,
        errorCount: toolTests.filter((t) => t.connectionError).length,
        // Deduplicated union of sanitization libraries across all tests.
        sanitizationLibraries: [
            ...new Set(toolTests.flatMap((t) => t.sanitizationLibraries || []).filter(Boolean)),
        ],
        authBypassEvidence: toolTests.find((t) => t.authBypassDetected)?.authBypassEvidence,
    };

    // Annotation alignment details, only when an annotation result exists.
    let annotationDetails;
    if (annotationResult) {
        const { inferredBehavior, descriptionPoisoning } = annotationResult;
        annotationDetails = {
            hasAnnotations: annotationResult.hasAnnotations,
            alignmentStatus: annotationResult.alignmentStatus,
            inferredBehavior: inferredBehavior
                ? {
                    expectedReadOnly: inferredBehavior.expectedReadOnly,
                    expectedDestructive: inferredBehavior.expectedDestructive,
                    reason: inferredBehavior.reason,
                }
                : undefined,
            descriptionPoisoning: descriptionPoisoning
                ? {
                    detected: descriptionPoisoning.detected,
                    patterns: descriptionPoisoning.patterns.map((p) => ({
                        name: p.name,
                        evidence: truncate(p.evidence, MAX_CONTEXT_WINDOW),
                        severity: p.severity,
                    })),
                }
                : undefined,
        };
    }

    // AUP violations whose location mentions this tool.
    const toolAupViolations = aupViolations
        ?.filter((v) => v.location?.includes(toolName))
        .map((v) => ({
            pattern: v.pattern,
            matchedText: truncate(v.matchedText, MAX_CONTEXT_WINDOW),
            severity: v.severity,
            location: v.location,
        }));

    return {
        payloadCorrelations,
        patternDistribution,
        contextWindows,
        confidenceDetails,
        securityDetails,
        annotationDetails,
        aupViolations: toolAupViolations?.length ? toolAupViolations : undefined,
    };
}
@@ -0,0 +1,154 @@
1
+ /**
2
+ * Stage B Enrichment Types
3
+ *
4
+ * Type definitions for Stage B (Claude semantic analysis) data enrichment.
5
+ * These types extend the tiered output with evidence, correlations, and
6
+ * confidence details for better LLM semantic analysis.
7
+ *
8
+ * Issue #137: Stage A data enrichment for Stage B Claude analysis
9
+ *
10
+ * @module assessment/summarizer/stageBTypes
11
+ */
12
+ /**
13
+ * Evidence structure for individual findings.
14
+ * Provides raw data and context for Claude to analyze.
15
+ */
16
+ export interface FindingEvidence {
17
+ /** Actual data that triggered the finding (payload or matched text) */
18
+ raw: string;
19
+ /** Surrounding context for better understanding */
20
+ context: string;
21
+ /** Location in response (e.g., "response.content[0].text", "description") */
22
+ location: string;
23
+ }
24
+ /**
25
+ * Payload correlation linking input to output.
26
+ * Enables Claude to understand cause-effect relationships.
27
+ */
28
+ export interface PayloadCorrelation {
29
+ /** The test payload that was sent */
30
+ inputPayload: string;
31
+ /** The response received (may be truncated) */
32
+ outputResponse: string;
33
+ /** Classification of the result */
34
+ classification: "vulnerable" | "safe" | "error" | "timeout";
35
+ /** Patterns that matched this response */
36
+ matchedPatterns: string[];
37
+ /** Tool this correlation belongs to */
38
+ toolName: string;
39
+ /** Test name/pattern that triggered this */
40
+ testName: string;
41
+ /** Confidence level of the detection */
42
+ confidence?: "high" | "medium" | "low";
43
+ }
44
+ /**
45
+ * Stage B enrichment for Tier 2 tool summaries.
46
+ * Provides sampled evidence for quick Claude analysis.
47
+ */
48
+ export interface ToolSummaryStageBEnrichment {
49
+ /** Top evidence samples for this tool (limited for token efficiency) */
50
+ sampleEvidence: FindingEvidence[];
51
+ /** Confidence breakdown by pattern type */
52
+ confidenceBreakdown: {
53
+ high: number;
54
+ medium: number;
55
+ low: number;
56
+ };
57
+ /** Highest risk correlation for this tool (if vulnerable) */
58
+ highestRiskCorrelation?: PayloadCorrelation;
59
+ /** Pattern distribution showing which attack types were detected */
60
+ patternDistribution: Record<string, number>;
61
+ /** Whether this tool has sanitization detected */
62
+ sanitizationDetected?: boolean;
63
+ /** Auth bypass mode if detected */
64
+ authFailureMode?: "FAIL_OPEN" | "FAIL_CLOSED" | "UNKNOWN";
65
+ }
66
+ /**
67
+ * Stage B enrichment for Tier 3 per-tool detail files.
68
+ * Provides comprehensive evidence for deep-dive analysis.
69
+ */
70
+ export interface ToolDetailStageBEnrichment {
71
+ /** All payload correlations for this tool */
72
+ payloadCorrelations: PayloadCorrelation[];
73
+ /** Full pattern distribution with counts */
74
+ patternDistribution: Record<string, number>;
75
+ /** Context windows for key locations */
76
+ contextWindows: Record<string, string>;
77
+ /** Detailed confidence breakdown */
78
+ confidenceDetails: {
79
+ /** Overall confidence score (0-100) */
80
+ overall: number;
81
+ /** Confidence by attack category */
82
+ byCategory: Record<string, number>;
83
+ /** Number of tests with manual review recommended */
84
+ requiresManualReview: number;
85
+ };
86
+ /** Security-specific details */
87
+ securityDetails: {
88
+ /** Total vulnerabilities found */
89
+ vulnerableCount: number;
90
+ /** Total safe tests */
91
+ safeCount: number;
92
+ /** Tests with connection errors */
93
+ errorCount: number;
94
+ /** Sanitization libraries detected */
95
+ sanitizationLibraries: string[];
96
+ /** Auth bypass evidence if detected */
97
+ authBypassEvidence?: string;
98
+ };
99
+ /** Annotation alignment details (if available) */
100
+ annotationDetails?: {
101
+ /** Whether tool has annotations */
102
+ hasAnnotations: boolean;
103
+ /** Alignment status */
104
+ alignmentStatus?: "ALIGNED" | "MISALIGNED" | "MISSING";
105
+ /** Inferred behavior from patterns */
106
+ inferredBehavior?: {
107
+ expectedReadOnly: boolean;
108
+ expectedDestructive: boolean;
109
+ reason: string;
110
+ };
111
+ /** Description poisoning if detected */
112
+ descriptionPoisoning?: {
113
+ detected: boolean;
114
+ patterns: Array<{
115
+ name: string;
116
+ evidence: string;
117
+ severity: "LOW" | "MEDIUM" | "HIGH";
118
+ }>;
119
+ };
120
+ };
121
+ /** AUP violations for this tool (if any) */
122
+ aupViolations?: Array<{
123
+ pattern: string;
124
+ matchedText: string;
125
+ severity: string;
126
+ location: string;
127
+ }>;
128
+ }
129
+ /**
130
+ * Combined Stage B enrichment that can be attached to results.
131
+ */
132
+ export interface StageBEnrichment {
133
+ /** Enrichment version for compatibility tracking */
134
+ version: number;
135
+ /** Whether enrichment was enabled */
136
+ enabled: boolean;
137
+ /** Generation timestamp */
138
+ generatedAt: string;
139
+ /** Tier 2 enrichment (tool summary level) */
140
+ tier2?: ToolSummaryStageBEnrichment;
141
+ /** Tier 3 enrichment (tool detail level) */
142
+ tier3?: ToolDetailStageBEnrichment;
143
+ }
144
+ /** Current Stage B enrichment version */
145
+ export declare const STAGE_B_ENRICHMENT_VERSION = 1;
146
+ /** Default maximum samples for Tier 2 evidence */
147
+ export declare const DEFAULT_TIER2_MAX_SAMPLES = 3;
148
+ /** Default maximum correlations for Tier 3 */
149
+ export declare const DEFAULT_TIER3_MAX_CORRELATIONS = 50;
150
+ /** Maximum response length to include (prevents token explosion) */
151
+ export declare const MAX_RESPONSE_LENGTH = 500;
152
+ /** Maximum context window size (chars before/after) */
153
+ export declare const MAX_CONTEXT_WINDOW = 200;
154
+ //# sourceMappingURL=stageBTypes.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"stageBTypes.d.ts","sourceRoot":"","sources":["../../../../src/lib/assessment/summarizer/stageBTypes.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AAMH;;;GAGG;AACH,MAAM,WAAW,eAAe;IAC9B,uEAAuE;IACvE,GAAG,EAAE,MAAM,CAAC;IACZ,mDAAmD;IACnD,OAAO,EAAE,MAAM,CAAC;IAChB,6EAA6E;IAC7E,QAAQ,EAAE,MAAM,CAAC;CAClB;AAED;;;GAGG;AACH,MAAM,WAAW,kBAAkB;IACjC,qCAAqC;IACrC,YAAY,EAAE,MAAM,CAAC;IACrB,+CAA+C;IAC/C,cAAc,EAAE,MAAM,CAAC;IACvB,mCAAmC;IACnC,cAAc,EAAE,YAAY,GAAG,MAAM,GAAG,OAAO,GAAG,SAAS,CAAC;IAC5D,0CAA0C;IAC1C,eAAe,EAAE,MAAM,EAAE,CAAC;IAC1B,uCAAuC;IACvC,QAAQ,EAAE,MAAM,CAAC;IACjB,4CAA4C;IAC5C,QAAQ,EAAE,MAAM,CAAC;IACjB,wCAAwC;IACxC,UAAU,CAAC,EAAE,MAAM,GAAG,QAAQ,GAAG,KAAK,CAAC;CACxC;AAMD;;;GAGG;AACH,MAAM,WAAW,2BAA2B;IAC1C,wEAAwE;IACxE,cAAc,EAAE,eAAe,EAAE,CAAC;IAElC,2CAA2C;IAC3C,mBAAmB,EAAE;QACnB,IAAI,EAAE,MAAM,CAAC;QACb,MAAM,EAAE,MAAM,CAAC;QACf,GAAG,EAAE,MAAM,CAAC;KACb,CAAC;IAEF,6DAA6D;IAC7D,sBAAsB,CAAC,EAAE,kBAAkB,CAAC;IAE5C,oEAAoE;IACpE,mBAAmB,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAE5C,kDAAkD;IAClD,oBAAoB,CAAC,EAAE,OAAO,CAAC;IAE/B,mCAAmC;IACnC,eAAe,CAAC,EAAE,WAAW,GAAG,aAAa,GAAG,SAAS,CAAC;CAC3D;AAMD;;;GAGG;AACH,MAAM,WAAW,0BAA0B;IACzC,6CAA6C;IAC7C,mBAAmB,EAAE,kBAAkB,EAAE,CAAC;IAE1C,4CAA4C;IAC5C,mBAAmB,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAE5C,wCAAwC;IACxC,cAAc,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAEvC,oCAAoC;IACpC,iBAAiB,EAAE;QACjB,uCAAuC;QACvC,OAAO,EAAE,MAAM,CAAC;QAChB,oCAAoC;QACpC,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QACnC,qDAAqD;QACrD,oBAAoB,EAAE,MAAM,CAAC;KAC9B,CAAC;IAEF,gCAAgC;IAChC,eAAe,EAAE;QACf,kCAAkC;QAClC,eAAe,EAAE,MAAM,CAAC;QACxB,uBAAuB;QACvB,SAAS,EAAE,MAAM,CAAC;QAClB,mCAAmC;QACnC,UAAU,EAAE,MAAM,CAAC;QACnB,sCAAsC;QACtC,qBAAqB,EAAE,MAAM,EAAE,CAAC;QAChC,uCAAuC;QACvC,kBAAkB,CAAC,EAAE,MAAM,CAAC;KAC7B,CAAC;IAEF,kDAAkD;IAClD,iBAAiB,CAAC,EAAE;QAClB,mCAAmC;QACnC,cAAc,EAAE,OAAO,CAAC;QACxB,uBAAuB;QACvB,eAAe,CAAC,EAAE,SAAS,GAAG,YAAY,GAAG,SAAS,CAAC;QACvD,sCAAsC;QACtC,gBAAgB,CAAC,EAAE;YACjB,gBAAgB,EAAE,OAAO,CAAC;YAC1B,mBAAmB,EAAE,OAAO,CAAC;YAC7
B,MAAM,EAAE,MAAM,CAAC;SAChB,CAAC;QACF,wCAAwC;QACxC,oBAAoB,CAAC,EAAE;YACrB,QAAQ,EAAE,OAAO,CAAC;YAClB,QAAQ,EAAE,KAAK,CAAC;gBACd,IAAI,EAAE,MAAM,CAAC;gBACb,QAAQ,EAAE,MAAM,CAAC;gBACjB,QAAQ,EAAE,KAAK,GAAG,QAAQ,GAAG,MAAM,CAAC;aACrC,CAAC,CAAC;SACJ,CAAC;KACH,CAAC;IAEF,4CAA4C;IAC5C,aAAa,CAAC,EAAE,KAAK,CAAC;QACpB,OAAO,EAAE,MAAM,CAAC;QAChB,WAAW,EAAE,MAAM,CAAC;QACpB,QAAQ,EAAE,MAAM,CAAC;QACjB,QAAQ,EAAE,MAAM,CAAC;KAClB,CAAC,CAAC;CACJ;AAMD;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAC/B,oDAAoD;IACpD,OAAO,EAAE,MAAM,CAAC;IAEhB,qCAAqC;IACrC,OAAO,EAAE,OAAO,CAAC;IAEjB,2BAA2B;IAC3B,WAAW,EAAE,MAAM,CAAC;IAEpB,6CAA6C;IAC7C,KAAK,CAAC,EAAE,2BAA2B,CAAC;IAEpC,4CAA4C;IAC5C,KAAK,CAAC,EAAE,0BAA0B,CAAC;CACpC;AAMD,yCAAyC;AACzC,eAAO,MAAM,0BAA0B,IAAI,CAAC;AAE5C,kDAAkD;AAClD,eAAO,MAAM,yBAAyB,IAAI,CAAC;AAE3C,8CAA8C;AAC9C,eAAO,MAAM,8BAA8B,KAAK,CAAC;AAEjD,oEAAoE;AACpE,eAAO,MAAM,mBAAmB,MAAM,CAAC;AAEvC,uDAAuD;AACvD,eAAO,MAAM,kBAAkB,MAAM,CAAC"}
@@ -0,0 +1,24 @@
1
/**
 * Stage B Enrichment Types
 *
 * Type definitions for Stage B (Claude semantic analysis) data enrichment.
 * These types extend the tiered output with evidence, correlations, and
 * confidence details for better LLM semantic analysis.
 *
 * Issue #137: Stage A data enrichment for Stage B Claude analysis
 *
 * @module assessment/summarizer/stageBTypes
 */

// ---------------------------------------------------------------------------
// Constants controlling enrichment size and versioning
// ---------------------------------------------------------------------------

/** Current Stage B enrichment version */
export const STAGE_B_ENRICHMENT_VERSION = 1;

/** Default maximum samples for Tier 2 evidence */
export const DEFAULT_TIER2_MAX_SAMPLES = 3;

/** Default maximum correlations for Tier 3 */
export const DEFAULT_TIER3_MAX_CORRELATIONS = 50;

/** Maximum response length to include (prevents token explosion) */
export const MAX_RESPONSE_LENGTH = 500;

/** Maximum context window size (chars before/after) */
export const MAX_CONTEXT_WINDOW = 200;
@@ -0,0 +1,103 @@
1
+ /**
2
+ * Token Estimation Utilities
3
+ *
4
+ * Provides token counting and threshold detection for tiered output strategy.
5
+ * Uses industry-standard approximation of ~4 characters per token.
6
+ *
7
+ * Issue #136: Tiered output strategy for large assessments
8
+ *
9
+ * @module assessment/summarizer/tokenEstimator
10
+ */
11
+ import type { MCPDirectoryAssessment } from "../resultTypes.js";
12
+ /**
13
+ * Estimate the number of tokens for any content.
14
+ *
15
+ * Uses the industry-standard approximation of ~4 characters per token.
16
+ * For JSON content, applies a buffer for formatting overhead.
17
+ *
18
+ * @param content - Content to estimate (string, object, or array)
19
+ * @returns Estimated token count
20
+ *
21
+ * @example
22
+ * ```typescript
23
+ * // String content
24
+ * estimateTokens("Hello world"); // ~3 tokens
25
+ *
26
+ * // Object content (will be JSON stringified)
27
+ * estimateTokens({ name: "test", value: 123 }); // ~10 tokens
28
+ *
29
+ * // Large assessment results
30
+ * estimateTokens(assessmentResults); // ~50,000+ tokens
31
+ * ```
32
+ */
33
+ export declare function estimateTokens(content: unknown): number;
34
+ /**
35
+ * Estimate tokens for a JSON file that would be written.
36
+ * Accounts for pretty-printing with indent=2.
37
+ *
38
+ * @param content - Content that would be JSON.stringify'd
39
+ * @returns Estimated token count
40
+ */
41
+ export declare function estimateJsonFileTokens(content: unknown): number;
42
+ /**
43
+ * Determine if assessment results should automatically use tiered output.
44
+ *
45
+ * Returns true when estimated token count exceeds the threshold,
46
+ * indicating the full output would not fit in typical LLM context windows.
47
+ *
48
+ * @param results - Full assessment results
49
+ * @param threshold - Token threshold (default: 100,000)
50
+ * @returns true if results should be tiered
51
+ *
52
+ * @example
53
+ * ```typescript
54
+ * const results = await runAssessment(server);
55
+ *
56
+ * if (shouldAutoTier(results)) {
57
+ * // Use tiered output
58
+ * saveTieredResults(serverName, results, options);
59
+ * } else {
60
+ * // Use standard full output
61
+ * saveResults(serverName, results, options);
62
+ * }
63
+ * ```
64
+ */
65
+ export declare function shouldAutoTier(results: MCPDirectoryAssessment, threshold?: number): boolean;
66
+ /**
67
+ * Get a human-readable token estimate with size category.
68
+ *
69
+ * @param tokenCount - Number of tokens
70
+ * @returns Object with formatted token count and size category
71
+ *
72
+ * @example
73
+ * ```typescript
74
+ * formatTokenEstimate(5000);
75
+ * // { tokens: "5,000", category: "small", fitsContext: true }
76
+ *
77
+ * formatTokenEstimate(500000);
78
+ * // { tokens: "500,000", category: "very-large", fitsContext: false }
79
+ * ```
80
+ */
81
+ export declare function formatTokenEstimate(tokenCount: number): {
82
+ tokens: string;
83
+ category: "small" | "medium" | "large" | "very-large" | "oversized";
84
+ fitsContext: boolean;
85
+ recommendation: string;
86
+ };
87
+ /**
88
+ * Estimate tokens for each major section of assessment results.
89
+ * Useful for understanding which modules contribute most to output size.
90
+ *
91
+ * @param results - Assessment results to analyze
92
+ * @returns Map of section name to estimated token count
93
+ */
94
+ export declare function estimateSectionTokens(results: MCPDirectoryAssessment): Record<string, number>;
95
+ /**
96
+ * Get the top N largest sections by token count.
97
+ *
98
+ * @param results - Assessment results
99
+ * @param topN - Number of sections to return (default: 5)
100
+ * @returns Array of [sectionName, tokenCount] sorted by size descending
101
+ */
102
+ export declare function getTopSections(results: MCPDirectoryAssessment, topN?: number): Array<[string, number]>;
103
+ //# sourceMappingURL=tokenEstimator.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"tokenEstimator.d.ts","sourceRoot":"","sources":["../../../../src/lib/assessment/summarizer/tokenEstimator.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG;AAEH,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,gBAAgB,CAAC;AAuB7D;;;;;;;;;;;;;;;;;;;;GAoBG;AACH,wBAAgB,cAAc,CAAC,OAAO,EAAE,OAAO,GAAG,MAAM,CAmBvD;AAED;;;;;;GAMG;AACH,wBAAgB,sBAAsB,CAAC,OAAO,EAAE,OAAO,GAAG,MAAM,CAW/D;AAED;;;;;;;;;;;;;;;;;;;;;;GAsBG;AACH,wBAAgB,cAAc,CAC5B,OAAO,EAAE,sBAAsB,EAC/B,SAAS,GAAE,MAAoD,GAC9D,OAAO,CAGT;AAED;;;;;;;;;;;;;;GAcG;AACH,wBAAgB,mBAAmB,CAAC,UAAU,EAAE,MAAM,GAAG;IACvD,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,OAAO,GAAG,QAAQ,GAAG,OAAO,GAAG,YAAY,GAAG,WAAW,CAAC;IACpE,WAAW,EAAE,OAAO,CAAC;IACrB,cAAc,EAAE,MAAM,CAAC;CACxB,CA8BA;AAED;;;;;;GAMG;AACH,wBAAgB,qBAAqB,CACnC,OAAO,EAAE,sBAAsB,GAC9B,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CA8CxB;AAED;;;;;;GAMG;AACH,wBAAgB,cAAc,CAC5B,OAAO,EAAE,sBAAsB,EAC/B,IAAI,GAAE,MAAU,GACf,KAAK,CAAC,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CAOzB"}