@harness-engineering/core 0.7.0 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/index.d.mts +2023 -365
- package/dist/index.d.ts +2023 -365
- package/dist/index.js +4785 -271
- package/dist/index.mjs +4708 -280
- package/package.json +4 -3
package/dist/index.d.mts
CHANGED
|
@@ -1,7 +1,245 @@
|
|
|
1
|
-
import { Result, WorkflowStep, WorkflowStepResult, Workflow, WorkflowResult, SkillLifecycleHooks, SkillContext, SkillResult, TurnContext, CICheckName, CIFailOnSeverity, CICheckReport } from '@harness-engineering/types';
|
|
1
|
+
import { Result, WorkflowStep, WorkflowStepResult, Workflow, WorkflowResult, SkillLifecycleHooks, SkillContext, SkillResult, TurnContext, CICheckName, CIFailOnSeverity, CICheckReport, Roadmap, FeatureStatus } from '@harness-engineering/types';
|
|
2
2
|
export * from '@harness-engineering/types';
|
|
3
3
|
import { z } from 'zod';
|
|
4
4
|
|
|
5
|
+
type ErrorCode = string;
|
|
6
|
+
interface BaseError {
|
|
7
|
+
code: ErrorCode;
|
|
8
|
+
message: string;
|
|
9
|
+
details: Record<string, unknown>;
|
|
10
|
+
suggestions: string[];
|
|
11
|
+
}
|
|
12
|
+
interface ValidationError extends BaseError {
|
|
13
|
+
code: 'INVALID_TYPE' | 'MISSING_FIELD' | 'VALIDATION_FAILED' | 'PARSE_ERROR';
|
|
14
|
+
}
|
|
15
|
+
interface ContextError extends BaseError {
|
|
16
|
+
code: 'PARSE_ERROR' | 'SCHEMA_VIOLATION' | 'MISSING_SECTION' | 'BROKEN_LINK';
|
|
17
|
+
}
|
|
18
|
+
interface ConstraintError extends BaseError {
|
|
19
|
+
code: 'WRONG_LAYER' | 'CIRCULAR_DEP' | 'FORBIDDEN_IMPORT' | 'BOUNDARY_ERROR' | 'PARSER_UNAVAILABLE';
|
|
20
|
+
}
|
|
21
|
+
interface EntropyError extends BaseError {
|
|
22
|
+
code: 'SNAPSHOT_BUILD_FAILED' | 'PARSE_ERROR' | 'ENTRY_POINT_NOT_FOUND' | 'INVALID_CONFIG' | 'CONFIG_VALIDATION_ERROR' | 'FIX_FAILED' | 'BACKUP_FAILED';
|
|
23
|
+
details: {
|
|
24
|
+
file?: string;
|
|
25
|
+
reason?: string;
|
|
26
|
+
issues?: unknown[];
|
|
27
|
+
originalError?: Error;
|
|
28
|
+
};
|
|
29
|
+
}
|
|
30
|
+
interface FeedbackError$1 extends BaseError {
|
|
31
|
+
code: 'AGENT_SPAWN_ERROR' | 'AGENT_TIMEOUT' | 'TELEMETRY_ERROR' | 'TELEMETRY_UNAVAILABLE' | 'REVIEW_ERROR' | 'DIFF_PARSE_ERROR' | 'SINK_ERROR';
|
|
32
|
+
details: {
|
|
33
|
+
agentId?: string;
|
|
34
|
+
service?: string;
|
|
35
|
+
reason?: string;
|
|
36
|
+
originalError?: Error;
|
|
37
|
+
};
|
|
38
|
+
}
|
|
39
|
+
declare function createError<T extends BaseError>(code: T['code'], message: string, details?: Record<string, unknown>, suggestions?: string[]): T;
|
|
40
|
+
|
|
41
|
+
interface Convention {
|
|
42
|
+
pattern: string;
|
|
43
|
+
required: boolean;
|
|
44
|
+
description: string;
|
|
45
|
+
examples: string[];
|
|
46
|
+
}
|
|
47
|
+
interface StructureValidation {
|
|
48
|
+
valid: boolean;
|
|
49
|
+
missing: string[];
|
|
50
|
+
unexpected: string[];
|
|
51
|
+
conformance: number;
|
|
52
|
+
}
|
|
53
|
+
interface ConfigError extends ValidationError {
|
|
54
|
+
code: 'INVALID_TYPE' | 'MISSING_FIELD' | 'VALIDATION_FAILED';
|
|
55
|
+
details: {
|
|
56
|
+
zodError?: unknown;
|
|
57
|
+
path?: string[];
|
|
58
|
+
};
|
|
59
|
+
}
|
|
60
|
+
type CommitFormat = 'conventional' | 'angular' | 'custom';
|
|
61
|
+
interface CommitValidation {
|
|
62
|
+
valid: boolean;
|
|
63
|
+
type?: string;
|
|
64
|
+
scope?: string;
|
|
65
|
+
breaking: boolean;
|
|
66
|
+
issues: string[];
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
declare function validateFileStructure(projectPath: string, conventions: Convention[]): Promise<Result<StructureValidation, ValidationError>>;
|
|
70
|
+
|
|
71
|
+
/**
|
|
72
|
+
* Validates configuration data against a Zod schema
|
|
73
|
+
* Returns a Result type with validated data or ConfigError
|
|
74
|
+
*
|
|
75
|
+
* @template T - The type of data being validated
|
|
76
|
+
* @param data - The configuration data to validate
|
|
77
|
+
* @param schema - Zod schema to validate against
|
|
78
|
+
* @returns Result<T, ConfigError> - Success with validated data or error
|
|
79
|
+
*/
|
|
80
|
+
declare function validateConfig<T>(data: unknown, schema: z.ZodSchema<T>): Result<T, ConfigError>;
|
|
81
|
+
|
|
82
|
+
/**
|
|
83
|
+
* Validates a commit message according to the specified format
|
|
84
|
+
* Returns a Result type with validation details
|
|
85
|
+
*
|
|
86
|
+
* @param message - The commit message to validate
|
|
87
|
+
* @param format - The commit format to validate against ('conventional', 'angular', 'custom')
|
|
88
|
+
* @returns Result<CommitValidation, ValidationError> - Success with validation details or error
|
|
89
|
+
*/
|
|
90
|
+
declare function validateCommitMessage(message: string, format?: CommitFormat): Result<CommitValidation, ValidationError>;
|
|
91
|
+
|
|
92
|
+
interface AgentMapLink {
|
|
93
|
+
text: string;
|
|
94
|
+
path: string;
|
|
95
|
+
exists: boolean;
|
|
96
|
+
line: number;
|
|
97
|
+
error?: ContextError;
|
|
98
|
+
}
|
|
99
|
+
interface AgentMapSection {
|
|
100
|
+
title: string;
|
|
101
|
+
level: number;
|
|
102
|
+
links: AgentMapLink[];
|
|
103
|
+
description?: string;
|
|
104
|
+
line: number;
|
|
105
|
+
}
|
|
106
|
+
interface AgentMapValidation {
|
|
107
|
+
valid: boolean;
|
|
108
|
+
sections: AgentMapSection[];
|
|
109
|
+
totalLinks: number;
|
|
110
|
+
brokenLinks: AgentMapLink[];
|
|
111
|
+
missingSections: string[];
|
|
112
|
+
errors?: ContextError[];
|
|
113
|
+
}
|
|
114
|
+
interface DocumentationGap {
|
|
115
|
+
file: string;
|
|
116
|
+
suggestedSection: string;
|
|
117
|
+
importance: 'high' | 'medium' | 'low';
|
|
118
|
+
}
|
|
119
|
+
interface CoverageReport {
|
|
120
|
+
domain: string;
|
|
121
|
+
documented: string[];
|
|
122
|
+
undocumented: string[];
|
|
123
|
+
coveragePercentage: number;
|
|
124
|
+
gaps: DocumentationGap[];
|
|
125
|
+
}
|
|
126
|
+
interface GraphCoverageData {
|
|
127
|
+
documented: string[];
|
|
128
|
+
undocumented: string[];
|
|
129
|
+
coveragePercentage: number;
|
|
130
|
+
}
|
|
131
|
+
interface CoverageOptions {
|
|
132
|
+
docsDir?: string;
|
|
133
|
+
sourceDir?: string;
|
|
134
|
+
excludePatterns?: string[];
|
|
135
|
+
graphCoverage?: GraphCoverageData;
|
|
136
|
+
}
|
|
137
|
+
interface BrokenLink {
|
|
138
|
+
text: string;
|
|
139
|
+
path: string;
|
|
140
|
+
line: number;
|
|
141
|
+
section: string;
|
|
142
|
+
reason: 'NOT_FOUND' | 'PERMISSION_DENIED' | 'INVALID_PATH';
|
|
143
|
+
suggestion: string;
|
|
144
|
+
}
|
|
145
|
+
interface IntegrityReport {
|
|
146
|
+
totalLinks: number;
|
|
147
|
+
brokenLinks: BrokenLink[];
|
|
148
|
+
validLinks: number;
|
|
149
|
+
integrity: number;
|
|
150
|
+
}
|
|
151
|
+
interface GenerationSection {
|
|
152
|
+
name: string;
|
|
153
|
+
pattern: string;
|
|
154
|
+
description: string;
|
|
155
|
+
}
|
|
156
|
+
interface AgentsMapConfig {
|
|
157
|
+
rootDir: string;
|
|
158
|
+
includePaths: string[];
|
|
159
|
+
excludePaths: string[];
|
|
160
|
+
template?: string;
|
|
161
|
+
sections?: GenerationSection[];
|
|
162
|
+
}
|
|
163
|
+
declare const REQUIRED_SECTIONS: readonly ["Project Overview", "Repository Structure", "Development Workflow"];
|
|
164
|
+
|
|
165
|
+
interface ExtractedLink {
|
|
166
|
+
text: string;
|
|
167
|
+
path: string;
|
|
168
|
+
line: number;
|
|
169
|
+
}
|
|
170
|
+
/**
|
|
171
|
+
* Extract markdown links from content
|
|
172
|
+
* Pattern: [text](path)
|
|
173
|
+
*/
|
|
174
|
+
declare function extractMarkdownLinks(content: string): ExtractedLink[];
|
|
175
|
+
/**
|
|
176
|
+
* Extract sections from markdown content
|
|
177
|
+
* Pattern: # Heading or ## Heading etc.
|
|
178
|
+
*/
|
|
179
|
+
declare function extractSections(content: string): AgentMapSection[];
|
|
180
|
+
/**
|
|
181
|
+
* Validate an AGENTS.md file
|
|
182
|
+
* - Parses sections and links
|
|
183
|
+
* - Checks for required sections
|
|
184
|
+
* - Verifies all links point to existing files
|
|
185
|
+
*/
|
|
186
|
+
declare function validateAgentsMap(path?: string): Promise<Result<AgentMapValidation, ContextError>>;
|
|
187
|
+
|
|
188
|
+
/**
|
|
189
|
+
* Check documentation coverage for a domain
|
|
190
|
+
*/
|
|
191
|
+
declare function checkDocCoverage(domain: string, options?: CoverageOptions): Promise<Result<CoverageReport, ContextError>>;
|
|
192
|
+
|
|
193
|
+
/**
|
|
194
|
+
* Validate knowledge map integrity (all links in AGENTS.md and docs)
|
|
195
|
+
*/
|
|
196
|
+
declare function validateKnowledgeMap(rootDir?: string): Promise<Result<IntegrityReport, ContextError>>;
|
|
197
|
+
|
|
198
|
+
/**
|
|
199
|
+
* Generate AGENTS.md content from project structure
|
|
200
|
+
*/
|
|
201
|
+
declare function generateAgentsMap(config: AgentsMapConfig, graphSections?: Array<{
|
|
202
|
+
name: string;
|
|
203
|
+
files: string[];
|
|
204
|
+
description?: string;
|
|
205
|
+
}>): Promise<Result<string, ContextError>>;
|
|
206
|
+
|
|
207
|
+
interface TokenBudget {
|
|
208
|
+
total: number;
|
|
209
|
+
systemPrompt: number;
|
|
210
|
+
projectManifest: number;
|
|
211
|
+
taskSpec: number;
|
|
212
|
+
activeCode: number;
|
|
213
|
+
interfaces: number;
|
|
214
|
+
reserve: number;
|
|
215
|
+
}
|
|
216
|
+
interface TokenBudgetOverrides {
|
|
217
|
+
systemPrompt?: number;
|
|
218
|
+
projectManifest?: number;
|
|
219
|
+
taskSpec?: number;
|
|
220
|
+
activeCode?: number;
|
|
221
|
+
interfaces?: number;
|
|
222
|
+
reserve?: number;
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
declare function contextBudget(totalTokens: number, overrides?: TokenBudgetOverrides, graphDensity?: Record<string, number>): TokenBudget;
|
|
226
|
+
|
|
227
|
+
type WorkflowPhase = 'implement' | 'review' | 'debug' | 'plan';
|
|
228
|
+
interface FileCategory {
|
|
229
|
+
category: string;
|
|
230
|
+
patterns: string[];
|
|
231
|
+
priority: number;
|
|
232
|
+
}
|
|
233
|
+
interface ContextFilterResult {
|
|
234
|
+
phase: WorkflowPhase;
|
|
235
|
+
includedCategories: string[];
|
|
236
|
+
excludedCategories: string[];
|
|
237
|
+
filePatterns: string[];
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
declare function contextFilter(phase: WorkflowPhase, maxCategories?: number, graphFilePaths?: string[]): ContextFilterResult;
|
|
241
|
+
declare function getPhaseCategories(phase: WorkflowPhase): FileCategory[];
|
|
242
|
+
|
|
5
243
|
/**
|
|
6
244
|
* Abstract Syntax Tree representation
|
|
7
245
|
*/
|
|
@@ -95,6 +333,7 @@ interface LayerConfig {
|
|
|
95
333
|
rootDir: string;
|
|
96
334
|
parser: LanguageParser;
|
|
97
335
|
fallbackBehavior?: 'skip' | 'error' | 'warn';
|
|
336
|
+
graphDependencyData?: GraphDependencyData;
|
|
98
337
|
}
|
|
99
338
|
interface DependencyEdge {
|
|
100
339
|
from: string;
|
|
@@ -154,26 +393,67 @@ interface BoundaryValidator<T> {
|
|
|
154
393
|
validate(input: unknown): Result<boolean, ConstraintError>;
|
|
155
394
|
schema: z.ZodSchema<T>;
|
|
156
395
|
}
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
type: 'function' | 'class' | 'variable' | 'type';
|
|
170
|
-
line: number;
|
|
171
|
-
references: number;
|
|
172
|
-
calledBy: string[];
|
|
396
|
+
/**
|
|
397
|
+
* Pre-computed dependency data from graph — avoids file parsing.
|
|
398
|
+
* Compatible with DependencyGraph shape.
|
|
399
|
+
*/
|
|
400
|
+
interface GraphDependencyData {
|
|
401
|
+
nodes: string[];
|
|
402
|
+
edges: Array<{
|
|
403
|
+
from: string;
|
|
404
|
+
to: string;
|
|
405
|
+
importType: 'static' | 'dynamic' | 'type-only';
|
|
406
|
+
line: number;
|
|
407
|
+
}>;
|
|
173
408
|
}
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
409
|
+
|
|
410
|
+
/**
|
|
411
|
+
* Create a layer definition
|
|
412
|
+
*/
|
|
413
|
+
declare function defineLayer(name: string, patterns: string[], allowedDependencies: string[]): Layer;
|
|
414
|
+
/**
|
|
415
|
+
* Resolve a file path to its layer
|
|
416
|
+
*/
|
|
417
|
+
declare function resolveFileToLayer(file: string, layers: Layer[]): Layer | undefined;
|
|
418
|
+
|
|
419
|
+
/**
|
|
420
|
+
* Build a dependency graph from a list of files
|
|
421
|
+
* Note: buildDependencyGraph is exported as an addition beyond spec for advanced use cases
|
|
422
|
+
*/
|
|
423
|
+
declare function buildDependencyGraph(files: string[], parser: LanguageParser, graphDependencyData?: GraphDependencyData): Promise<Result<DependencyGraph, ConstraintError>>;
|
|
424
|
+
/**
|
|
425
|
+
* Validate dependencies against layer rules
|
|
426
|
+
*/
|
|
427
|
+
declare function validateDependencies(config: LayerConfig): Promise<Result<DependencyValidation, ConstraintError>>;
|
|
428
|
+
|
|
429
|
+
/**
|
|
430
|
+
* Detect circular dependencies in a dependency graph
|
|
431
|
+
*/
|
|
432
|
+
declare function detectCircularDeps(graph: DependencyGraph): Result<CircularDepsResult, ConstraintError>;
|
|
433
|
+
/**
|
|
434
|
+
* Detect circular dependencies from a list of files
|
|
435
|
+
*/
|
|
436
|
+
declare function detectCircularDepsInFiles(files: string[], parser: LanguageParser, graphDependencyData?: GraphDependencyData): Promise<Result<CircularDepsResult, ConstraintError>>;
|
|
437
|
+
|
|
438
|
+
/**
|
|
439
|
+
* Create a boundary validator from a Zod schema
|
|
440
|
+
*/
|
|
441
|
+
declare function createBoundaryValidator<T>(schema: z.ZodSchema<T>, name: string): BoundaryValidator<T>;
|
|
442
|
+
/**
|
|
443
|
+
* Validate multiple boundaries at once
|
|
444
|
+
*/
|
|
445
|
+
declare function validateBoundaries(boundaries: BoundaryDefinition[], data: Map<string, unknown>): Result<BoundaryValidation, ConstraintError>;
|
|
446
|
+
|
|
447
|
+
interface InternalSymbol {
|
|
448
|
+
name: string;
|
|
449
|
+
type: 'function' | 'class' | 'variable' | 'type';
|
|
450
|
+
line: number;
|
|
451
|
+
references: number;
|
|
452
|
+
calledBy: string[];
|
|
453
|
+
}
|
|
454
|
+
interface JSDocComment {
|
|
455
|
+
content: string;
|
|
456
|
+
line: number;
|
|
177
457
|
associatedSymbol?: string;
|
|
178
458
|
}
|
|
179
459
|
interface CodeBlock {
|
|
@@ -249,6 +529,9 @@ interface EntropyConfig {
|
|
|
249
529
|
drift?: boolean | Partial<DriftConfig>;
|
|
250
530
|
deadCode?: boolean | Partial<DeadCodeConfig>;
|
|
251
531
|
patterns?: boolean | PatternConfig;
|
|
532
|
+
complexity?: boolean | Partial<ComplexityConfig>;
|
|
533
|
+
coupling?: boolean | Partial<CouplingConfig>;
|
|
534
|
+
sizeBudget?: boolean | Partial<SizeBudgetConfig>;
|
|
252
535
|
};
|
|
253
536
|
include?: string[];
|
|
254
537
|
exclude?: string[];
|
|
@@ -404,7 +687,116 @@ interface PatternReport {
|
|
|
404
687
|
};
|
|
405
688
|
passRate: number;
|
|
406
689
|
}
|
|
407
|
-
|
|
690
|
+
interface ComplexityThresholds {
|
|
691
|
+
cyclomaticComplexity?: {
|
|
692
|
+
error?: number;
|
|
693
|
+
warn?: number;
|
|
694
|
+
};
|
|
695
|
+
nestingDepth?: {
|
|
696
|
+
warn?: number;
|
|
697
|
+
};
|
|
698
|
+
functionLength?: {
|
|
699
|
+
warn?: number;
|
|
700
|
+
};
|
|
701
|
+
parameterCount?: {
|
|
702
|
+
warn?: number;
|
|
703
|
+
};
|
|
704
|
+
fileLength?: {
|
|
705
|
+
info?: number;
|
|
706
|
+
};
|
|
707
|
+
hotspotPercentile?: {
|
|
708
|
+
error?: number;
|
|
709
|
+
};
|
|
710
|
+
}
|
|
711
|
+
interface ComplexityConfig {
|
|
712
|
+
enabled?: boolean;
|
|
713
|
+
thresholds?: ComplexityThresholds;
|
|
714
|
+
}
|
|
715
|
+
interface ComplexityViolation {
|
|
716
|
+
file: string;
|
|
717
|
+
function: string;
|
|
718
|
+
line: number;
|
|
719
|
+
metric: 'cyclomaticComplexity' | 'nestingDepth' | 'functionLength' | 'parameterCount' | 'fileLength' | 'hotspotScore';
|
|
720
|
+
value: number;
|
|
721
|
+
threshold: number;
|
|
722
|
+
tier: 1 | 2 | 3;
|
|
723
|
+
severity: 'error' | 'warning' | 'info';
|
|
724
|
+
message?: string;
|
|
725
|
+
}
|
|
726
|
+
interface ComplexityReport {
|
|
727
|
+
violations: ComplexityViolation[];
|
|
728
|
+
stats: {
|
|
729
|
+
filesAnalyzed: number;
|
|
730
|
+
functionsAnalyzed: number;
|
|
731
|
+
violationCount: number;
|
|
732
|
+
errorCount: number;
|
|
733
|
+
warningCount: number;
|
|
734
|
+
infoCount: number;
|
|
735
|
+
};
|
|
736
|
+
}
|
|
737
|
+
interface CouplingThresholds {
|
|
738
|
+
fanOut?: {
|
|
739
|
+
warn?: number;
|
|
740
|
+
};
|
|
741
|
+
fanIn?: {
|
|
742
|
+
info?: number;
|
|
743
|
+
};
|
|
744
|
+
couplingRatio?: {
|
|
745
|
+
warn?: number;
|
|
746
|
+
};
|
|
747
|
+
transitiveDependencyDepth?: {
|
|
748
|
+
info?: number;
|
|
749
|
+
};
|
|
750
|
+
}
|
|
751
|
+
interface CouplingConfig {
|
|
752
|
+
enabled?: boolean;
|
|
753
|
+
thresholds?: CouplingThresholds;
|
|
754
|
+
}
|
|
755
|
+
interface CouplingViolation {
|
|
756
|
+
file: string;
|
|
757
|
+
metric: 'fanOut' | 'fanIn' | 'couplingRatio' | 'transitiveDependencyDepth';
|
|
758
|
+
value: number;
|
|
759
|
+
threshold: number;
|
|
760
|
+
tier: 1 | 2 | 3;
|
|
761
|
+
severity: 'error' | 'warning' | 'info';
|
|
762
|
+
message?: string;
|
|
763
|
+
}
|
|
764
|
+
interface CouplingReport {
|
|
765
|
+
violations: CouplingViolation[];
|
|
766
|
+
stats: {
|
|
767
|
+
filesAnalyzed: number;
|
|
768
|
+
violationCount: number;
|
|
769
|
+
warningCount: number;
|
|
770
|
+
infoCount: number;
|
|
771
|
+
};
|
|
772
|
+
}
|
|
773
|
+
interface SizeBudgetConfig {
|
|
774
|
+
enabled?: boolean;
|
|
775
|
+
budgets: Record<string, {
|
|
776
|
+
warn?: string;
|
|
777
|
+
}>;
|
|
778
|
+
dependencyWeight?: {
|
|
779
|
+
info?: string;
|
|
780
|
+
};
|
|
781
|
+
}
|
|
782
|
+
interface SizeBudgetViolation {
|
|
783
|
+
package: string;
|
|
784
|
+
currentSize: number;
|
|
785
|
+
budgetSize: number;
|
|
786
|
+
unit: 'bytes';
|
|
787
|
+
tier: 2 | 3;
|
|
788
|
+
severity: 'warning' | 'info';
|
|
789
|
+
}
|
|
790
|
+
interface SizeBudgetReport {
|
|
791
|
+
violations: SizeBudgetViolation[];
|
|
792
|
+
stats: {
|
|
793
|
+
packagesChecked: number;
|
|
794
|
+
violationCount: number;
|
|
795
|
+
warningCount: number;
|
|
796
|
+
infoCount: number;
|
|
797
|
+
};
|
|
798
|
+
}
|
|
799
|
+
type FixType = 'unused-imports' | 'dead-files' | 'dead-exports' | 'commented-code' | 'orphaned-deps' | 'forbidden-import-replacement' | 'import-ordering' | 'trailing-whitespace' | 'broken-links' | 'sort-imports';
|
|
408
800
|
interface FixConfig {
|
|
409
801
|
dryRun: boolean;
|
|
410
802
|
fixTypes: FixType[];
|
|
@@ -436,6 +828,24 @@ interface FixResult {
|
|
|
436
828
|
backupPath?: string;
|
|
437
829
|
};
|
|
438
830
|
}
|
|
831
|
+
type SafetyLevel = 'safe' | 'probably-safe' | 'unsafe';
|
|
832
|
+
interface CleanupFinding {
|
|
833
|
+
id: string;
|
|
834
|
+
concern: 'dead-code' | 'architecture';
|
|
835
|
+
file: string;
|
|
836
|
+
line?: number;
|
|
837
|
+
type: string;
|
|
838
|
+
description: string;
|
|
839
|
+
safety: SafetyLevel;
|
|
840
|
+
safetyReason: string;
|
|
841
|
+
hotspotDowngraded: boolean;
|
|
842
|
+
fixAction?: string;
|
|
843
|
+
suggestion: string;
|
|
844
|
+
}
|
|
845
|
+
interface HotspotContext {
|
|
846
|
+
churnMap: Map<string, number>;
|
|
847
|
+
topPercentileThreshold: number;
|
|
848
|
+
}
|
|
439
849
|
interface Suggestion {
|
|
440
850
|
type: 'rename' | 'move' | 'merge' | 'split' | 'delete' | 'update-docs' | 'add-export' | 'refactor';
|
|
441
851
|
priority: 'high' | 'medium' | 'low';
|
|
@@ -461,7 +871,7 @@ interface SuggestionReport {
|
|
|
461
871
|
estimatedEffort: 'trivial' | 'small' | 'medium' | 'large';
|
|
462
872
|
}
|
|
463
873
|
interface AnalysisError {
|
|
464
|
-
analyzer: 'drift' | 'deadCode' | 'patterns';
|
|
874
|
+
analyzer: 'drift' | 'deadCode' | 'patterns' | 'complexity' | 'coupling' | 'sizeBudget';
|
|
465
875
|
error: EntropyError;
|
|
466
876
|
}
|
|
467
877
|
interface EntropyReport {
|
|
@@ -469,6 +879,9 @@ interface EntropyReport {
|
|
|
469
879
|
drift?: DriftReport;
|
|
470
880
|
deadCode?: DeadCodeReport;
|
|
471
881
|
patterns?: PatternReport;
|
|
882
|
+
complexity?: ComplexityReport;
|
|
883
|
+
coupling?: CouplingReport;
|
|
884
|
+
sizeBudget?: SizeBudgetReport;
|
|
472
885
|
analysisErrors: AnalysisError[];
|
|
473
886
|
summary: {
|
|
474
887
|
totalIssues: number;
|
|
@@ -481,347 +894,245 @@ interface EntropyReport {
|
|
|
481
894
|
duration: number;
|
|
482
895
|
}
|
|
483
896
|
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
897
|
+
/**
|
|
898
|
+
* Main entropy analysis orchestrator
|
|
899
|
+
*/
|
|
900
|
+
declare class EntropyAnalyzer {
|
|
901
|
+
private config;
|
|
902
|
+
private snapshot?;
|
|
903
|
+
private report?;
|
|
904
|
+
constructor(config: EntropyConfig);
|
|
905
|
+
/**
|
|
906
|
+
* Run full entropy analysis.
|
|
907
|
+
* When graphOptions is provided, passes graph data to drift and dead code detectors
|
|
908
|
+
* for graph-enhanced analysis instead of snapshot-based analysis.
|
|
909
|
+
*/
|
|
910
|
+
analyze(graphOptions?: {
|
|
911
|
+
graphDriftData?: {
|
|
912
|
+
staleEdges: Array<{
|
|
913
|
+
docNodeId: string;
|
|
914
|
+
codeNodeId: string;
|
|
915
|
+
edgeType: string;
|
|
916
|
+
}>;
|
|
917
|
+
missingTargets: string[];
|
|
918
|
+
};
|
|
919
|
+
graphDeadCodeData?: {
|
|
920
|
+
reachableNodeIds: Set<string> | string[];
|
|
921
|
+
unreachableNodes: Array<{
|
|
922
|
+
id: string;
|
|
923
|
+
type: string;
|
|
924
|
+
name: string;
|
|
925
|
+
path?: string;
|
|
926
|
+
}>;
|
|
927
|
+
};
|
|
928
|
+
graphComplexityData?: {
|
|
929
|
+
hotspots: Array<{
|
|
930
|
+
file: string;
|
|
931
|
+
function: string;
|
|
932
|
+
hotspotScore: number;
|
|
933
|
+
}>;
|
|
934
|
+
percentile95Score: number;
|
|
935
|
+
};
|
|
936
|
+
graphCouplingData?: {
|
|
937
|
+
files: Array<{
|
|
938
|
+
file: string;
|
|
939
|
+
fanIn: number;
|
|
940
|
+
fanOut: number;
|
|
941
|
+
couplingRatio: number;
|
|
942
|
+
transitiveDepth: number;
|
|
943
|
+
}>;
|
|
944
|
+
};
|
|
945
|
+
}): Promise<Result<EntropyReport, EntropyError>>;
|
|
946
|
+
/**
|
|
947
|
+
* Get the built snapshot (must call analyze first)
|
|
948
|
+
*/
|
|
949
|
+
getSnapshot(): CodebaseSnapshot | undefined;
|
|
950
|
+
/**
|
|
951
|
+
* Get the last report (must call analyze first)
|
|
952
|
+
*/
|
|
953
|
+
getReport(): EntropyReport | undefined;
|
|
954
|
+
/**
|
|
955
|
+
* Generate suggestions from the last analysis
|
|
956
|
+
*/
|
|
957
|
+
getSuggestions(): SuggestionReport;
|
|
958
|
+
/**
|
|
959
|
+
* Build snapshot without running analysis
|
|
960
|
+
*/
|
|
961
|
+
buildSnapshot(): Promise<Result<CodebaseSnapshot, EntropyError>>;
|
|
962
|
+
/**
|
|
963
|
+
* Ensure snapshot is built, returning the snapshot or an error
|
|
964
|
+
*/
|
|
965
|
+
private ensureSnapshot;
|
|
966
|
+
/**
|
|
967
|
+
* Run drift detection only (snapshot must be built first)
|
|
968
|
+
*/
|
|
969
|
+
detectDrift(config?: Partial<DriftConfig>, graphDriftData?: {
|
|
970
|
+
staleEdges: Array<{
|
|
971
|
+
docNodeId: string;
|
|
972
|
+
codeNodeId: string;
|
|
973
|
+
edgeType: string;
|
|
974
|
+
}>;
|
|
975
|
+
missingTargets: string[];
|
|
976
|
+
}): Promise<Result<DriftReport, EntropyError>>;
|
|
977
|
+
/**
|
|
978
|
+
* Run dead code detection only (snapshot must be built first)
|
|
979
|
+
*/
|
|
980
|
+
detectDeadCode(graphDeadCodeData?: {
|
|
981
|
+
reachableNodeIds: Set<string> | string[];
|
|
982
|
+
unreachableNodes: Array<{
|
|
983
|
+
id: string;
|
|
984
|
+
type: string;
|
|
985
|
+
name: string;
|
|
986
|
+
path?: string;
|
|
987
|
+
}>;
|
|
988
|
+
}): Promise<Result<DeadCodeReport, EntropyError>>;
|
|
989
|
+
/**
|
|
990
|
+
* Run pattern detection only (snapshot must be built first)
|
|
991
|
+
*/
|
|
992
|
+
detectPatterns(config: PatternConfig): Promise<Result<PatternReport, EntropyError>>;
|
|
499
993
|
}
|
|
500
994
|
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
service?: string;
|
|
506
|
-
reason?: string;
|
|
507
|
-
originalError?: Error;
|
|
508
|
-
};
|
|
509
|
-
}
|
|
510
|
-
declare function createError<T extends BaseError>(code: T['code'], message: string, details?: Record<string, unknown>, suggestions?: string[]): T;
|
|
995
|
+
/**
|
|
996
|
+
* Build a complete CodebaseSnapshot
|
|
997
|
+
*/
|
|
998
|
+
declare function buildSnapshot(config: EntropyConfig): Promise<Result<CodebaseSnapshot, EntropyError>>;
|
|
511
999
|
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
}
|
|
524
|
-
interface ConfigError extends ValidationError {
|
|
525
|
-
code: 'INVALID_TYPE' | 'MISSING_FIELD' | 'VALIDATION_FAILED';
|
|
526
|
-
details: {
|
|
527
|
-
zodError?: unknown;
|
|
528
|
-
path?: string[];
|
|
529
|
-
};
|
|
530
|
-
}
|
|
531
|
-
type CommitFormat = 'conventional' | 'angular' | 'custom';
|
|
532
|
-
interface CommitValidation {
|
|
533
|
-
valid: boolean;
|
|
534
|
-
type?: string;
|
|
535
|
-
scope?: string;
|
|
536
|
-
breaking: boolean;
|
|
537
|
-
issues: string[];
|
|
538
|
-
}
|
|
1000
|
+
/**
|
|
1001
|
+
* Detect documentation drift in a codebase.
|
|
1002
|
+
* When graphDriftData is provided, uses graph-derived edges instead of snapshot-based analysis.
|
|
1003
|
+
*/
|
|
1004
|
+
declare function detectDocDrift(snapshot: CodebaseSnapshot, config?: Partial<DriftConfig>, graphDriftData?: {
|
|
1005
|
+
staleEdges: Array<{
|
|
1006
|
+
docNodeId: string;
|
|
1007
|
+
codeNodeId: string;
|
|
1008
|
+
edgeType: string;
|
|
1009
|
+
}>;
|
|
1010
|
+
missingTargets: string[];
|
|
1011
|
+
}): Promise<Result<DriftReport, EntropyError>>;
|
|
539
1012
|
|
|
540
|
-
|
|
1013
|
+
/**
|
|
1014
|
+
* Detect dead code in a codebase snapshot.
|
|
1015
|
+
* Analyzes exports, files, imports, and internal symbols to find unused code.
|
|
1016
|
+
* When graphDeadCodeData is provided, uses graph-derived reachability instead of snapshot-based BFS.
|
|
1017
|
+
*/
|
|
1018
|
+
declare function detectDeadCode(snapshot: CodebaseSnapshot, graphDeadCodeData?: {
|
|
1019
|
+
reachableNodeIds: Set<string> | string[];
|
|
1020
|
+
unreachableNodes: Array<{
|
|
1021
|
+
id: string;
|
|
1022
|
+
type: string;
|
|
1023
|
+
name: string;
|
|
1024
|
+
path?: string;
|
|
1025
|
+
}>;
|
|
1026
|
+
}): Promise<Result<DeadCodeReport, EntropyError>>;
|
|
541
1027
|
|
|
542
1028
|
/**
|
|
543
|
-
*
|
|
544
|
-
* Returns a Result type with validated data or ConfigError
|
|
545
|
-
*
|
|
546
|
-
* @template T - The type of data being validated
|
|
547
|
-
* @param data - The configuration data to validate
|
|
548
|
-
* @param schema - Zod schema to validate against
|
|
549
|
-
* @returns Result<T, ConfigError> - Success with validated data or error
|
|
1029
|
+
* Detect pattern violations across a codebase
|
|
550
1030
|
*/
|
|
551
|
-
declare function
|
|
1031
|
+
declare function detectPatternViolations(snapshot: CodebaseSnapshot, config?: PatternConfig): Promise<Result<PatternReport, EntropyError>>;
|
|
552
1032
|
|
|
1033
|
+
interface GraphComplexityData {
|
|
1034
|
+
hotspots: Array<{
|
|
1035
|
+
file: string;
|
|
1036
|
+
function: string;
|
|
1037
|
+
hotspotScore: number;
|
|
1038
|
+
}>;
|
|
1039
|
+
percentile95Score: number;
|
|
1040
|
+
}
|
|
553
1041
|
/**
|
|
554
|
-
*
|
|
555
|
-
* Returns a Result type with validation details
|
|
556
|
-
*
|
|
557
|
-
* @param message - The commit message to validate
|
|
558
|
-
* @param format - The commit format to validate against ('conventional', 'angular', 'custom')
|
|
559
|
-
* @returns Result<CommitValidation, ValidationError> - Success with validation details or error
|
|
1042
|
+
* Detect complexity violations across a codebase snapshot.
|
|
560
1043
|
*/
|
|
561
|
-
declare function
|
|
1044
|
+
declare function detectComplexityViolations(snapshot: CodebaseSnapshot, config?: ComplexityConfig, graphData?: GraphComplexityData): Promise<Result<ComplexityReport, EntropyError>>;
|
|
562
1045
|
|
|
563
|
-
interface
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
1046
|
+
interface GraphCouplingData {
|
|
1047
|
+
files: Array<{
|
|
1048
|
+
file: string;
|
|
1049
|
+
fanIn: number;
|
|
1050
|
+
fanOut: number;
|
|
1051
|
+
couplingRatio: number;
|
|
1052
|
+
transitiveDepth: number;
|
|
1053
|
+
}>;
|
|
569
1054
|
}
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
1055
|
+
declare function detectCouplingViolations(snapshot: CodebaseSnapshot, config?: Partial<CouplingConfig>, graphData?: GraphCouplingData): Promise<Result<CouplingReport, EntropyError>>;
|
|
1056
|
+
|
|
1057
|
+
/**
|
|
1058
|
+
* Parse a human-readable size string into bytes.
|
|
1059
|
+
* Supports: "100KB", "1MB", "500", "1GB".
|
|
1060
|
+
*/
|
|
1061
|
+
declare function parseSize(size: string): number;
|
|
1062
|
+
/**
|
|
1063
|
+
* Detect size budget violations for configured packages.
|
|
1064
|
+
*/
|
|
1065
|
+
declare function detectSizeBudgetViolations(rootDir: string, config?: Partial<SizeBudgetConfig>): Promise<Result<SizeBudgetReport, EntropyError>>;
|
|
1066
|
+
|
|
1067
|
+
interface CommentedCodeBlock {
|
|
1068
|
+
file: string;
|
|
1069
|
+
startLine: number;
|
|
1070
|
+
endLine: number;
|
|
1071
|
+
content: string;
|
|
576
1072
|
}
|
|
577
|
-
interface AgentMapValidation {
|
|
578
|
-
valid: boolean;
|
|
579
|
-
sections: AgentMapSection[];
|
|
580
|
-
totalLinks: number;
|
|
581
|
-
brokenLinks: AgentMapLink[];
|
|
582
|
-
missingSections: string[];
|
|
583
|
-
errors?: ContextError[];
|
|
584
|
-
}
|
|
585
|
-
interface DocumentationGap {
|
|
586
|
-
file: string;
|
|
587
|
-
suggestedSection: string;
|
|
588
|
-
importance: 'high' | 'medium' | 'low';
|
|
589
|
-
}
|
|
590
|
-
interface CoverageReport {
|
|
591
|
-
domain: string;
|
|
592
|
-
documented: string[];
|
|
593
|
-
undocumented: string[];
|
|
594
|
-
coveragePercentage: number;
|
|
595
|
-
gaps: DocumentationGap[];
|
|
596
|
-
}
|
|
597
|
-
interface CoverageOptions {
|
|
598
|
-
docsDir?: string;
|
|
599
|
-
sourceDir?: string;
|
|
600
|
-
excludePatterns?: string[];
|
|
601
|
-
}
|
|
602
|
-
interface BrokenLink {
|
|
603
|
-
text: string;
|
|
604
|
-
path: string;
|
|
605
|
-
line: number;
|
|
606
|
-
section: string;
|
|
607
|
-
reason: 'NOT_FOUND' | 'PERMISSION_DENIED' | 'INVALID_PATH';
|
|
608
|
-
suggestion: string;
|
|
609
|
-
}
|
|
610
|
-
interface IntegrityReport {
|
|
611
|
-
totalLinks: number;
|
|
612
|
-
brokenLinks: BrokenLink[];
|
|
613
|
-
validLinks: number;
|
|
614
|
-
integrity: number;
|
|
615
|
-
}
|
|
616
|
-
interface GenerationSection {
|
|
617
|
-
name: string;
|
|
618
|
-
pattern: string;
|
|
619
|
-
description: string;
|
|
620
|
-
}
|
|
621
|
-
interface AgentsMapConfig {
|
|
622
|
-
rootDir: string;
|
|
623
|
-
includePaths: string[];
|
|
624
|
-
excludePaths: string[];
|
|
625
|
-
template?: string;
|
|
626
|
-
sections?: GenerationSection[];
|
|
627
|
-
}
|
|
628
|
-
declare const REQUIRED_SECTIONS: readonly ["Project Overview", "Repository Structure", "Development Workflow"];
|
|
629
|
-
|
|
630
|
-
interface ExtractedLink {
|
|
631
|
-
text: string;
|
|
632
|
-
path: string;
|
|
633
|
-
line: number;
|
|
634
|
-
}
|
|
635
|
-
/**
|
|
636
|
-
* Extract markdown links from content
|
|
637
|
-
* Pattern: [text](path)
|
|
638
|
-
*/
|
|
639
|
-
declare function extractMarkdownLinks(content: string): ExtractedLink[];
|
|
640
|
-
/**
|
|
641
|
-
* Extract sections from markdown content
|
|
642
|
-
* Pattern: # Heading or ## Heading etc.
|
|
643
|
-
*/
|
|
644
|
-
declare function extractSections(content: string): AgentMapSection[];
|
|
645
|
-
/**
|
|
646
|
-
* Validate an AGENTS.md file
|
|
647
|
-
* - Parses sections and links
|
|
648
|
-
* - Checks for required sections
|
|
649
|
-
* - Verifies all links point to existing files
|
|
650
|
-
*/
|
|
651
|
-
declare function validateAgentsMap(path?: string): Promise<Result<AgentMapValidation, ContextError>>;
|
|
652
|
-
|
|
653
|
-
/**
|
|
654
|
-
* Check documentation coverage for a domain
|
|
655
|
-
*/
|
|
656
|
-
declare function checkDocCoverage(domain: string, options?: CoverageOptions): Promise<Result<CoverageReport, ContextError>>;
|
|
657
|
-
|
|
658
|
-
/**
|
|
659
|
-
* Validate knowledge map integrity (all links in AGENTS.md and docs)
|
|
660
|
-
*/
|
|
661
|
-
declare function validateKnowledgeMap(rootDir?: string): Promise<Result<IntegrityReport, ContextError>>;
|
|
662
|
-
|
|
663
1073
|
/**
|
|
664
|
-
*
|
|
1074
|
+
* Create fixes for commented-out code blocks
|
|
665
1075
|
*/
|
|
666
|
-
declare function
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
projectManifest: number;
|
|
672
|
-
taskSpec: number;
|
|
673
|
-
activeCode: number;
|
|
674
|
-
interfaces: number;
|
|
675
|
-
reserve: number;
|
|
676
|
-
}
|
|
677
|
-
interface TokenBudgetOverrides {
|
|
678
|
-
systemPrompt?: number;
|
|
679
|
-
projectManifest?: number;
|
|
680
|
-
taskSpec?: number;
|
|
681
|
-
activeCode?: number;
|
|
682
|
-
interfaces?: number;
|
|
683
|
-
reserve?: number;
|
|
684
|
-
}
|
|
685
|
-
|
|
686
|
-
declare function contextBudget(totalTokens: number, overrides?: TokenBudgetOverrides): TokenBudget;
|
|
687
|
-
|
|
688
|
-
type WorkflowPhase = 'implement' | 'review' | 'debug' | 'plan';
|
|
689
|
-
interface FileCategory {
|
|
690
|
-
category: string;
|
|
691
|
-
patterns: string[];
|
|
692
|
-
priority: number;
|
|
693
|
-
}
|
|
694
|
-
interface ContextFilterResult {
|
|
695
|
-
phase: WorkflowPhase;
|
|
696
|
-
includedCategories: string[];
|
|
697
|
-
excludedCategories: string[];
|
|
698
|
-
filePatterns: string[];
|
|
1076
|
+
declare function createCommentedCodeFixes(blocks: CommentedCodeBlock[]): Fix[];
|
|
1077
|
+
interface OrphanedDep {
|
|
1078
|
+
name: string;
|
|
1079
|
+
packageJsonPath: string;
|
|
1080
|
+
depType: 'dependencies' | 'devDependencies';
|
|
699
1081
|
}
|
|
700
|
-
|
|
701
|
-
declare function contextFilter(phase: WorkflowPhase, maxCategories?: number): ContextFilterResult;
|
|
702
|
-
declare function getPhaseCategories(phase: WorkflowPhase): FileCategory[];
|
|
703
|
-
|
|
704
|
-
/**
|
|
705
|
-
* Create a layer definition
|
|
706
|
-
*/
|
|
707
|
-
declare function defineLayer(name: string, patterns: string[], allowedDependencies: string[]): Layer;
|
|
708
|
-
/**
|
|
709
|
-
* Resolve a file path to its layer
|
|
710
|
-
*/
|
|
711
|
-
declare function resolveFileToLayer(file: string, layers: Layer[]): Layer | undefined;
|
|
712
|
-
|
|
713
1082
|
/**
|
|
714
|
-
*
|
|
715
|
-
* Note: buildDependencyGraph is exported as an addition beyond spec for advanced use cases
|
|
1083
|
+
* Create fixes for orphaned npm dependencies
|
|
716
1084
|
*/
|
|
717
|
-
declare function
|
|
1085
|
+
declare function createOrphanedDepFixes(deps: OrphanedDep[]): Fix[];
|
|
718
1086
|
/**
|
|
719
|
-
*
|
|
1087
|
+
* Create fixes from dead code report
|
|
720
1088
|
*/
|
|
721
|
-
declare function
|
|
722
|
-
|
|
1089
|
+
declare function createFixes(deadCodeReport: DeadCodeReport, config?: Partial<FixConfig>): Fix[];
|
|
723
1090
|
/**
|
|
724
|
-
*
|
|
1091
|
+
* Preview what a fix would do
|
|
725
1092
|
*/
|
|
726
|
-
declare function
|
|
1093
|
+
declare function previewFix(fix: Fix): string;
|
|
727
1094
|
/**
|
|
728
|
-
*
|
|
1095
|
+
* Apply fixes to codebase
|
|
729
1096
|
*/
|
|
730
|
-
declare function
|
|
1097
|
+
declare function applyFixes(fixes: Fix[], config?: Partial<FixConfig>): Promise<Result<FixResult, EntropyError>>;
|
|
731
1098
|
|
|
732
1099
|
/**
|
|
733
|
-
*
|
|
734
|
-
*/
|
|
735
|
-
declare function createBoundaryValidator<T>(schema: z.ZodSchema<T>, name: string): BoundaryValidator<T>;
|
|
736
|
-
/**
|
|
737
|
-
* Validate multiple boundaries at once
|
|
1100
|
+
* Generate all suggestions from analysis reports
|
|
738
1101
|
*/
|
|
739
|
-
declare function
|
|
1102
|
+
declare function generateSuggestions(deadCode?: DeadCodeReport, drift?: DriftReport, patterns?: PatternReport): SuggestionReport;
|
|
740
1103
|
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
private snapshot?;
|
|
747
|
-
private report?;
|
|
748
|
-
constructor(config: EntropyConfig);
|
|
749
|
-
/**
|
|
750
|
-
* Run full entropy analysis
|
|
751
|
-
*/
|
|
752
|
-
analyze(): Promise<Result<EntropyReport, EntropyError>>;
|
|
753
|
-
/**
|
|
754
|
-
* Get the built snapshot (must call analyze first)
|
|
755
|
-
*/
|
|
756
|
-
getSnapshot(): CodebaseSnapshot | undefined;
|
|
757
|
-
/**
|
|
758
|
-
* Get the last report (must call analyze first)
|
|
759
|
-
*/
|
|
760
|
-
getReport(): EntropyReport | undefined;
|
|
761
|
-
/**
|
|
762
|
-
* Generate suggestions from the last analysis
|
|
763
|
-
*/
|
|
764
|
-
getSuggestions(): SuggestionReport;
|
|
765
|
-
/**
|
|
766
|
-
* Build snapshot without running analysis
|
|
767
|
-
*/
|
|
768
|
-
buildSnapshot(): Promise<Result<CodebaseSnapshot, EntropyError>>;
|
|
769
|
-
/**
|
|
770
|
-
* Ensure snapshot is built, returning the snapshot or an error
|
|
771
|
-
*/
|
|
772
|
-
private ensureSnapshot;
|
|
773
|
-
/**
|
|
774
|
-
* Run drift detection only (snapshot must be built first)
|
|
775
|
-
*/
|
|
776
|
-
detectDrift(config?: Partial<DriftConfig>): Promise<Result<DriftReport, EntropyError>>;
|
|
777
|
-
/**
|
|
778
|
-
* Run dead code detection only (snapshot must be built first)
|
|
779
|
-
*/
|
|
780
|
-
detectDeadCode(): Promise<Result<DeadCodeReport, EntropyError>>;
|
|
781
|
-
/**
|
|
782
|
-
* Run pattern detection only (snapshot must be built first)
|
|
783
|
-
*/
|
|
784
|
-
detectPatterns(config: PatternConfig): Promise<Result<PatternReport, EntropyError>>;
|
|
1104
|
+
interface ForbiddenImportViolation {
|
|
1105
|
+
file: string;
|
|
1106
|
+
line: number;
|
|
1107
|
+
forbiddenImport: string;
|
|
1108
|
+
alternative?: string;
|
|
785
1109
|
}
|
|
786
|
-
|
|
787
|
-
/**
|
|
788
|
-
* Build a complete CodebaseSnapshot
|
|
789
|
-
*/
|
|
790
|
-
declare function buildSnapshot(config: EntropyConfig): Promise<Result<CodebaseSnapshot, EntropyError>>;
|
|
791
|
-
|
|
792
|
-
/**
|
|
793
|
-
* Detect documentation drift in a codebase
|
|
794
|
-
*/
|
|
795
|
-
declare function detectDocDrift(snapshot: CodebaseSnapshot, config?: Partial<DriftConfig>): Promise<Result<DriftReport, EntropyError>>;
|
|
796
|
-
|
|
797
|
-
/**
|
|
798
|
-
* Detect dead code in a codebase snapshot.
|
|
799
|
-
* Analyzes exports, files, imports, and internal symbols to find unused code.
|
|
800
|
-
*/
|
|
801
|
-
declare function detectDeadCode(snapshot: CodebaseSnapshot): Promise<Result<DeadCodeReport, EntropyError>>;
|
|
802
|
-
|
|
803
1110
|
/**
|
|
804
|
-
*
|
|
1111
|
+
* Create fixes for forbidden imports that have a configured alternative
|
|
805
1112
|
*/
|
|
806
|
-
declare function
|
|
1113
|
+
declare function createForbiddenImportFixes(violations: ForbiddenImportViolation[]): Fix[];
|
|
807
1114
|
|
|
1115
|
+
interface FindingInput {
|
|
1116
|
+
concern: 'dead-code' | 'architecture';
|
|
1117
|
+
file: string;
|
|
1118
|
+
line?: number;
|
|
1119
|
+
type: string;
|
|
1120
|
+
description: string;
|
|
1121
|
+
isPublicApi?: boolean;
|
|
1122
|
+
hasAlternative?: boolean;
|
|
1123
|
+
}
|
|
808
1124
|
/**
|
|
809
|
-
*
|
|
810
|
-
*/
|
|
811
|
-
declare function createFixes(deadCodeReport: DeadCodeReport, config?: Partial<FixConfig>): Fix[];
|
|
812
|
-
/**
|
|
813
|
-
* Preview what a fix would do
|
|
1125
|
+
* Classify a raw finding into a CleanupFinding with safety level
|
|
814
1126
|
*/
|
|
815
|
-
declare function
|
|
1127
|
+
declare function classifyFinding(input: FindingInput): CleanupFinding;
|
|
816
1128
|
/**
|
|
817
|
-
*
|
|
1129
|
+
* Downgrade safety for findings in high-churn files
|
|
818
1130
|
*/
|
|
819
|
-
declare function
|
|
820
|
-
|
|
1131
|
+
declare function applyHotspotDowngrade(finding: CleanupFinding, hotspot: HotspotContext): CleanupFinding;
|
|
821
1132
|
/**
|
|
822
|
-
*
|
|
1133
|
+
* Deduplicate cross-concern findings (e.g., dead import + forbidden import on same line)
|
|
823
1134
|
*/
|
|
824
|
-
declare function
|
|
1135
|
+
declare function deduplicateCleanupFindings(findings: CleanupFinding[]): CleanupFinding[];
|
|
825
1136
|
|
|
826
1137
|
declare const PatternConfigSchema: z.ZodObject<{
|
|
827
1138
|
patterns: z.ZodArray<z.ZodObject<{
|
|
@@ -1618,30 +1929,160 @@ declare const EntropyConfigSchema: z.ZodObject<{
|
|
|
1618
1929
|
*/
|
|
1619
1930
|
declare function validatePatternConfig(config: unknown): Result<PatternConfig, EntropyError>;
|
|
1620
1931
|
|
|
1621
|
-
interface
|
|
1622
|
-
|
|
1623
|
-
|
|
1624
|
-
|
|
1625
|
-
|
|
1626
|
-
|
|
1627
|
-
|
|
1932
|
+
interface BenchmarkResult {
|
|
1933
|
+
name: string;
|
|
1934
|
+
file: string;
|
|
1935
|
+
opsPerSec: number;
|
|
1936
|
+
meanMs: number;
|
|
1937
|
+
p99Ms: number;
|
|
1938
|
+
marginOfError: number;
|
|
1939
|
+
}
|
|
1940
|
+
interface Baseline {
|
|
1941
|
+
opsPerSec: number;
|
|
1942
|
+
meanMs: number;
|
|
1943
|
+
p99Ms: number;
|
|
1944
|
+
marginOfError: number;
|
|
1945
|
+
}
|
|
1946
|
+
interface BaselinesFile {
|
|
1947
|
+
version: 1;
|
|
1948
|
+
updatedAt: string;
|
|
1949
|
+
updatedFrom: string;
|
|
1950
|
+
benchmarks: Record<string, Baseline>;
|
|
1951
|
+
}
|
|
1952
|
+
interface RegressionResult {
|
|
1953
|
+
benchmark: string;
|
|
1954
|
+
current: BenchmarkResult;
|
|
1955
|
+
baseline: Baseline;
|
|
1956
|
+
regressionPct: number;
|
|
1957
|
+
isCriticalPath: boolean;
|
|
1958
|
+
tier: 1 | 2 | 3;
|
|
1959
|
+
severity: 'error' | 'warning' | 'info';
|
|
1960
|
+
withinNoise: boolean;
|
|
1961
|
+
}
|
|
1962
|
+
interface RegressionReport {
|
|
1963
|
+
regressions: RegressionResult[];
|
|
1964
|
+
improvements: Array<{
|
|
1965
|
+
benchmark: string;
|
|
1966
|
+
improvementPct: number;
|
|
1967
|
+
}>;
|
|
1968
|
+
stats: {
|
|
1969
|
+
benchmarksCompared: number;
|
|
1970
|
+
regressionCount: number;
|
|
1971
|
+
improvementCount: number;
|
|
1972
|
+
newBenchmarks: number;
|
|
1628
1973
|
};
|
|
1629
1974
|
}
|
|
1630
|
-
interface
|
|
1631
|
-
|
|
1632
|
-
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
severity: 'error' | 'warning' | 'info';
|
|
1636
|
-
details: string;
|
|
1637
|
-
suggestion?: string;
|
|
1638
|
-
file?: string;
|
|
1639
|
-
line?: number;
|
|
1975
|
+
interface CriticalPathEntry {
|
|
1976
|
+
file: string;
|
|
1977
|
+
function: string;
|
|
1978
|
+
source: 'annotation' | 'graph-inferred';
|
|
1979
|
+
fanIn?: number;
|
|
1640
1980
|
}
|
|
1641
|
-
interface
|
|
1642
|
-
|
|
1643
|
-
|
|
1644
|
-
|
|
1981
|
+
interface CriticalPathSet {
|
|
1982
|
+
entries: CriticalPathEntry[];
|
|
1983
|
+
stats: {
|
|
1984
|
+
annotated: number;
|
|
1985
|
+
graphInferred: number;
|
|
1986
|
+
total: number;
|
|
1987
|
+
};
|
|
1988
|
+
}
|
|
1989
|
+
|
|
1990
|
+
/**
|
|
1991
|
+
* Manages performance baselines stored on disk.
|
|
1992
|
+
*
|
|
1993
|
+
* Baselines are stored at `.harness/perf/baselines.json` relative to the project root.
|
|
1994
|
+
* Each benchmark is keyed by `${file}::${name}`.
|
|
1995
|
+
*/
|
|
1996
|
+
declare class BaselineManager {
|
|
1997
|
+
private readonly baselinesPath;
|
|
1998
|
+
constructor(projectRoot: string);
|
|
1999
|
+
/**
|
|
2000
|
+
* Load the baselines file from disk.
|
|
2001
|
+
* Returns null if the file does not exist or contains invalid JSON.
|
|
2002
|
+
*/
|
|
2003
|
+
load(): BaselinesFile | null;
|
|
2004
|
+
/**
|
|
2005
|
+
* Save benchmark results to disk, merging with any existing baselines.
|
|
2006
|
+
* Each result is keyed by `${file}::${name}`.
|
|
2007
|
+
*/
|
|
2008
|
+
save(results: BenchmarkResult[], commitHash: string): void;
|
|
2009
|
+
/**
|
|
2010
|
+
* Remove baselines whose file prefix does not match any of the given bench files.
|
|
2011
|
+
* This cleans up entries for deleted benchmark files.
|
|
2012
|
+
*/
|
|
2013
|
+
prune(existingBenchFiles: string[]): void;
|
|
2014
|
+
}
|
|
2015
|
+
|
|
2016
|
+
interface BenchmarkRunOptions {
|
|
2017
|
+
glob?: string;
|
|
2018
|
+
cwd?: string;
|
|
2019
|
+
timeout?: number;
|
|
2020
|
+
}
|
|
2021
|
+
declare class BenchmarkRunner {
|
|
2022
|
+
/**
|
|
2023
|
+
* Discover .bench.ts files matching the glob pattern.
|
|
2024
|
+
*/
|
|
2025
|
+
discover(cwd: string, glob?: string): string[];
|
|
2026
|
+
/**
|
|
2027
|
+
* Run benchmarks via vitest bench and capture results.
|
|
2028
|
+
* Returns parsed BenchmarkResult[] from vitest bench JSON output.
|
|
2029
|
+
*/
|
|
2030
|
+
run(options?: BenchmarkRunOptions): Promise<{
|
|
2031
|
+
results: BenchmarkResult[];
|
|
2032
|
+
rawOutput: string;
|
|
2033
|
+
success: boolean;
|
|
2034
|
+
}>;
|
|
2035
|
+
/**
|
|
2036
|
+
* Parse vitest bench JSON reporter output into BenchmarkResult[].
|
|
2037
|
+
* Vitest bench JSON output contains testResults with benchmark data.
|
|
2038
|
+
*/
|
|
2039
|
+
parseVitestBenchOutput(output: string): BenchmarkResult[];
|
|
2040
|
+
}
|
|
2041
|
+
|
|
2042
|
+
declare class RegressionDetector {
|
|
2043
|
+
detect(results: BenchmarkResult[], baselines: Record<string, Baseline>, criticalPaths: CriticalPathSet): RegressionReport;
|
|
2044
|
+
}
|
|
2045
|
+
|
|
2046
|
+
interface GraphCriticalPathData {
|
|
2047
|
+
highFanInFunctions: Array<{
|
|
2048
|
+
file: string;
|
|
2049
|
+
function: string;
|
|
2050
|
+
fanIn: number;
|
|
2051
|
+
}>;
|
|
2052
|
+
}
|
|
2053
|
+
declare class CriticalPathResolver {
|
|
2054
|
+
private readonly projectRoot;
|
|
2055
|
+
constructor(projectRoot: string);
|
|
2056
|
+
resolve(graphData?: GraphCriticalPathData): Promise<CriticalPathSet>;
|
|
2057
|
+
private scanAnnotations;
|
|
2058
|
+
private walkDir;
|
|
2059
|
+
private scanFile;
|
|
2060
|
+
}
|
|
2061
|
+
|
|
2062
|
+
interface FeedbackError extends BaseError {
|
|
2063
|
+
code: 'AGENT_SPAWN_ERROR' | 'AGENT_TIMEOUT' | 'TELEMETRY_ERROR' | 'TELEMETRY_UNAVAILABLE' | 'REVIEW_ERROR' | 'DIFF_PARSE_ERROR' | 'SINK_ERROR';
|
|
2064
|
+
details: {
|
|
2065
|
+
agentId?: string;
|
|
2066
|
+
service?: string;
|
|
2067
|
+
reason?: string;
|
|
2068
|
+
originalError?: Error;
|
|
2069
|
+
};
|
|
2070
|
+
}
|
|
2071
|
+
interface ReviewItem {
|
|
2072
|
+
id: string;
|
|
2073
|
+
category: 'harness' | 'custom' | 'diff';
|
|
2074
|
+
check: string;
|
|
2075
|
+
passed: boolean;
|
|
2076
|
+
severity: 'error' | 'warning' | 'info';
|
|
2077
|
+
details: string;
|
|
2078
|
+
suggestion?: string;
|
|
2079
|
+
file?: string;
|
|
2080
|
+
line?: number;
|
|
2081
|
+
}
|
|
2082
|
+
interface ReviewChecklist {
|
|
2083
|
+
items: ReviewItem[];
|
|
2084
|
+
passed: boolean;
|
|
2085
|
+
summary: {
|
|
1645
2086
|
total: number;
|
|
1646
2087
|
passed: number;
|
|
1647
2088
|
failed: number;
|
|
@@ -1859,6 +2300,31 @@ interface ActionSink {
|
|
|
1859
2300
|
flush?(): Promise<Result<void, FeedbackError>>;
|
|
1860
2301
|
close?(): Promise<void>;
|
|
1861
2302
|
}
|
|
2303
|
+
/**
|
|
2304
|
+
* Pre-computed impact data from graph — enriches diff analysis.
|
|
2305
|
+
*/
|
|
2306
|
+
interface GraphImpactData {
|
|
2307
|
+
affectedTests: Array<{
|
|
2308
|
+
testFile: string;
|
|
2309
|
+
coversFile: string;
|
|
2310
|
+
}>;
|
|
2311
|
+
affectedDocs: Array<{
|
|
2312
|
+
docFile: string;
|
|
2313
|
+
documentsFile: string;
|
|
2314
|
+
}>;
|
|
2315
|
+
impactScope: number;
|
|
2316
|
+
}
|
|
2317
|
+
/**
|
|
2318
|
+
* Pre-computed harness check data from graph — replaces placeholders.
|
|
2319
|
+
*/
|
|
2320
|
+
interface GraphHarnessCheckData {
|
|
2321
|
+
graphExists: boolean;
|
|
2322
|
+
nodeCount: number;
|
|
2323
|
+
edgeCount: number;
|
|
2324
|
+
constraintViolations: number;
|
|
2325
|
+
undocumentedFiles: number;
|
|
2326
|
+
unreachableNodes: number;
|
|
2327
|
+
}
|
|
1862
2328
|
interface ActionTracker {
|
|
1863
2329
|
readonly action: AgentAction;
|
|
1864
2330
|
complete(result: ActionResult): Promise<Result<AgentAction, FeedbackError>>;
|
|
@@ -1883,20 +2349,25 @@ declare function resetFeedbackConfig(): void;
|
|
|
1883
2349
|
declare class ChecklistBuilder {
|
|
1884
2350
|
private rootDir;
|
|
1885
2351
|
private harnessOptions?;
|
|
2352
|
+
private graphHarnessData?;
|
|
1886
2353
|
private customRules;
|
|
1887
2354
|
private diffOptions?;
|
|
2355
|
+
private graphImpactData?;
|
|
1888
2356
|
constructor(rootDir: string);
|
|
1889
|
-
withHarnessChecks(options?: SelfReviewConfig['harness']): this;
|
|
2357
|
+
withHarnessChecks(options?: SelfReviewConfig['harness'], graphData?: GraphHarnessCheckData): this;
|
|
1890
2358
|
addRule(rule: CustomRule): this;
|
|
1891
2359
|
addRules(rules: CustomRule[]): this;
|
|
1892
|
-
withDiffAnalysis(options: SelfReviewConfig['diffAnalysis']): this;
|
|
2360
|
+
withDiffAnalysis(options: SelfReviewConfig['diffAnalysis'], graphImpactData?: GraphImpactData): this;
|
|
1893
2361
|
run(changes: CodeChanges): Promise<Result<ReviewChecklist, FeedbackError>>;
|
|
1894
2362
|
}
|
|
1895
2363
|
|
|
1896
|
-
declare function createSelfReview(changes: CodeChanges, config: SelfReviewConfig
|
|
2364
|
+
declare function createSelfReview(changes: CodeChanges, config: SelfReviewConfig, graphData?: {
|
|
2365
|
+
impact?: GraphImpactData;
|
|
2366
|
+
harness?: GraphHarnessCheckData;
|
|
2367
|
+
}): Promise<Result<ReviewChecklist, FeedbackError>>;
|
|
1897
2368
|
|
|
1898
2369
|
declare function parseDiff(diff: string): Result<CodeChanges, FeedbackError>;
|
|
1899
|
-
declare function analyzeDiff(changes: CodeChanges, options: SelfReviewConfig['diffAnalysis']): Promise<Result<ReviewItem[], FeedbackError>>;
|
|
2370
|
+
declare function analyzeDiff(changes: CodeChanges, options: SelfReviewConfig['diffAnalysis'], graphImpactData?: GraphImpactData): Promise<Result<ReviewItem[], FeedbackError>>;
|
|
1900
2371
|
|
|
1901
2372
|
declare function requestPeerReview(agentType: AgentType, context: ReviewContext, options?: PeerReviewOptions): Promise<Result<PeerReview, FeedbackError>>;
|
|
1902
2373
|
declare function requestMultiplePeerReviews(requests: Array<{
|
|
@@ -2113,11 +2584,11 @@ declare const HarnessStateSchema: z.ZodObject<{
|
|
|
2113
2584
|
phase: z.ZodOptional<z.ZodString>;
|
|
2114
2585
|
task: z.ZodOptional<z.ZodString>;
|
|
2115
2586
|
}, "strip", z.ZodTypeAny, {
|
|
2116
|
-
phase?: string | undefined;
|
|
2117
2587
|
task?: string | undefined;
|
|
2118
|
-
}, {
|
|
2119
2588
|
phase?: string | undefined;
|
|
2589
|
+
}, {
|
|
2120
2590
|
task?: string | undefined;
|
|
2591
|
+
phase?: string | undefined;
|
|
2121
2592
|
}>>;
|
|
2122
2593
|
decisions: z.ZodDefault<z.ZodArray<z.ZodObject<{
|
|
2123
2594
|
date: z.ZodString;
|
|
@@ -2175,8 +2646,8 @@ declare const HarnessStateSchema: z.ZodObject<{
|
|
|
2175
2646
|
}[];
|
|
2176
2647
|
schemaVersion: 1;
|
|
2177
2648
|
position: {
|
|
2178
|
-
phase?: string | undefined;
|
|
2179
2649
|
task?: string | undefined;
|
|
2650
|
+
phase?: string | undefined;
|
|
2180
2651
|
};
|
|
2181
2652
|
progress: Record<string, "pending" | "in_progress" | "complete">;
|
|
2182
2653
|
lastSession?: {
|
|
@@ -2198,8 +2669,8 @@ declare const HarnessStateSchema: z.ZodObject<{
|
|
|
2198
2669
|
description: string;
|
|
2199
2670
|
}[] | undefined;
|
|
2200
2671
|
position?: {
|
|
2201
|
-
phase?: string | undefined;
|
|
2202
2672
|
task?: string | undefined;
|
|
2673
|
+
phase?: string | undefined;
|
|
2203
2674
|
} | undefined;
|
|
2204
2675
|
progress?: Record<string, "pending" | "in_progress" | "complete"> | undefined;
|
|
2205
2676
|
lastSession?: {
|
|
@@ -2212,22 +2683,107 @@ declare const HarnessStateSchema: z.ZodObject<{
|
|
|
2212
2683
|
type HarnessState = z.infer<typeof HarnessStateSchema>;
|
|
2213
2684
|
declare const DEFAULT_STATE: HarnessState;
|
|
2214
2685
|
|
|
2215
|
-
declare function loadState(projectPath: string): Promise<Result<HarnessState, Error>>;
|
|
2216
|
-
declare function saveState(projectPath: string, state: HarnessState): Promise<Result<void, Error>>;
|
|
2217
|
-
declare function appendLearning(projectPath: string, learning: string, skillName?: string, outcome?: string): Promise<Result<void, Error>>;
|
|
2218
|
-
declare function loadRelevantLearnings(projectPath: string, skillName?: string): Promise<Result<string[], Error>>;
|
|
2219
|
-
declare function appendFailure(projectPath: string, description: string, skillName: string, type: string): Promise<Result<void, Error>>;
|
|
2220
|
-
declare function loadFailures(projectPath: string): Promise<Result<Array<{
|
|
2686
|
+
declare function loadState(projectPath: string, stream?: string): Promise<Result<HarnessState, Error>>;
|
|
2687
|
+
declare function saveState(projectPath: string, state: HarnessState, stream?: string): Promise<Result<void, Error>>;
|
|
2688
|
+
declare function appendLearning(projectPath: string, learning: string, skillName?: string, outcome?: string, stream?: string): Promise<Result<void, Error>>;
|
|
2689
|
+
declare function loadRelevantLearnings(projectPath: string, skillName?: string, stream?: string): Promise<Result<string[], Error>>;
|
|
2690
|
+
declare function appendFailure(projectPath: string, description: string, skillName: string, type: string, stream?: string): Promise<Result<void, Error>>;
|
|
2691
|
+
declare function loadFailures(projectPath: string, stream?: string): Promise<Result<Array<{
|
|
2221
2692
|
date: string;
|
|
2222
2693
|
skill: string;
|
|
2223
2694
|
type: string;
|
|
2224
2695
|
description: string;
|
|
2225
2696
|
}>, Error>>;
|
|
2226
|
-
declare function archiveFailures(projectPath: string): Promise<Result<void, Error>>;
|
|
2227
|
-
declare function saveHandoff(projectPath: string, handoff: Handoff): Promise<Result<void, Error>>;
|
|
2228
|
-
declare function loadHandoff(projectPath: string): Promise<Result<Handoff | null, Error>>;
|
|
2697
|
+
declare function archiveFailures(projectPath: string, stream?: string): Promise<Result<void, Error>>;
|
|
2698
|
+
declare function saveHandoff(projectPath: string, handoff: Handoff, stream?: string): Promise<Result<void, Error>>;
|
|
2699
|
+
declare function loadHandoff(projectPath: string, stream?: string): Promise<Result<Handoff | null, Error>>;
|
|
2229
2700
|
declare function runMechanicalGate(projectPath: string): Promise<Result<GateResult, Error>>;
|
|
2230
2701
|
|
|
2702
|
+
declare const StreamInfoSchema: z.ZodObject<{
|
|
2703
|
+
name: z.ZodString;
|
|
2704
|
+
branch: z.ZodOptional<z.ZodString>;
|
|
2705
|
+
createdAt: z.ZodString;
|
|
2706
|
+
lastActiveAt: z.ZodString;
|
|
2707
|
+
}, "strip", z.ZodTypeAny, {
|
|
2708
|
+
name: string;
|
|
2709
|
+
createdAt: string;
|
|
2710
|
+
lastActiveAt: string;
|
|
2711
|
+
branch?: string | undefined;
|
|
2712
|
+
}, {
|
|
2713
|
+
name: string;
|
|
2714
|
+
createdAt: string;
|
|
2715
|
+
lastActiveAt: string;
|
|
2716
|
+
branch?: string | undefined;
|
|
2717
|
+
}>;
|
|
2718
|
+
type StreamInfo = z.infer<typeof StreamInfoSchema>;
|
|
2719
|
+
declare const StreamIndexSchema: z.ZodObject<{
|
|
2720
|
+
schemaVersion: z.ZodLiteral<1>;
|
|
2721
|
+
activeStream: z.ZodNullable<z.ZodString>;
|
|
2722
|
+
streams: z.ZodRecord<z.ZodString, z.ZodObject<{
|
|
2723
|
+
name: z.ZodString;
|
|
2724
|
+
branch: z.ZodOptional<z.ZodString>;
|
|
2725
|
+
createdAt: z.ZodString;
|
|
2726
|
+
lastActiveAt: z.ZodString;
|
|
2727
|
+
}, "strip", z.ZodTypeAny, {
|
|
2728
|
+
name: string;
|
|
2729
|
+
createdAt: string;
|
|
2730
|
+
lastActiveAt: string;
|
|
2731
|
+
branch?: string | undefined;
|
|
2732
|
+
}, {
|
|
2733
|
+
name: string;
|
|
2734
|
+
createdAt: string;
|
|
2735
|
+
lastActiveAt: string;
|
|
2736
|
+
branch?: string | undefined;
|
|
2737
|
+
}>>;
|
|
2738
|
+
}, "strip", z.ZodTypeAny, {
|
|
2739
|
+
schemaVersion: 1;
|
|
2740
|
+
activeStream: string | null;
|
|
2741
|
+
streams: Record<string, {
|
|
2742
|
+
name: string;
|
|
2743
|
+
createdAt: string;
|
|
2744
|
+
lastActiveAt: string;
|
|
2745
|
+
branch?: string | undefined;
|
|
2746
|
+
}>;
|
|
2747
|
+
}, {
|
|
2748
|
+
schemaVersion: 1;
|
|
2749
|
+
activeStream: string | null;
|
|
2750
|
+
streams: Record<string, {
|
|
2751
|
+
name: string;
|
|
2752
|
+
createdAt: string;
|
|
2753
|
+
lastActiveAt: string;
|
|
2754
|
+
branch?: string | undefined;
|
|
2755
|
+
}>;
|
|
2756
|
+
}>;
|
|
2757
|
+
type StreamIndex = z.infer<typeof StreamIndexSchema>;
|
|
2758
|
+
declare const DEFAULT_STREAM_INDEX: StreamIndex;
|
|
2759
|
+
|
|
2760
|
+
declare function loadStreamIndex(projectPath: string): Promise<Result<StreamIndex, Error>>;
|
|
2761
|
+
declare function saveStreamIndex(projectPath: string, index: StreamIndex): Promise<Result<void, Error>>;
|
|
2762
|
+
/**
|
|
2763
|
+
* Resolves a stream path without side effects.
|
|
2764
|
+
*
|
|
2765
|
+
* Does NOT update lastActiveAt or activeStream in the index.
|
|
2766
|
+
* Callers that need to mark a stream as active should call `touchStream()` separately.
|
|
2767
|
+
*/
|
|
2768
|
+
declare function resolveStreamPath(projectPath: string, options?: {
|
|
2769
|
+
stream?: string;
|
|
2770
|
+
}): Promise<Result<string, Error>>;
|
|
2771
|
+
/**
|
|
2772
|
+
* Updates lastActiveAt and activeStream for the given stream.
|
|
2773
|
+
* Call this once per session start, not on every state operation.
|
|
2774
|
+
*/
|
|
2775
|
+
declare function touchStream(projectPath: string, name: string): Promise<Result<void, Error>>;
|
|
2776
|
+
declare function createStream(projectPath: string, name: string, branch?: string): Promise<Result<string, Error>>;
|
|
2777
|
+
declare function listStreams(projectPath: string): Promise<Result<StreamInfo[], Error>>;
|
|
2778
|
+
declare function setActiveStream(projectPath: string, name: string): Promise<Result<void, Error>>;
|
|
2779
|
+
/**
|
|
2780
|
+
* Archives a stream by moving its entire directory (including any failure archives
|
|
2781
|
+
* within it) to `.harness/archive/streams/<name>-<date>`.
|
|
2782
|
+
*/
|
|
2783
|
+
declare function archiveStream(projectPath: string, name: string): Promise<Result<void, Error>>;
|
|
2784
|
+
declare function getStreamForBranch(index: StreamIndex, branch: string): string | null;
|
|
2785
|
+
declare function migrateToStreams(projectPath: string): Promise<Result<void, Error>>;
|
|
2786
|
+
|
|
2231
2787
|
type StepExecutor = (step: WorkflowStep, previousArtifact?: string) => Promise<WorkflowStepResult>;
|
|
2232
2788
|
declare function executeWorkflow(workflow: Workflow, executor: StepExecutor): Promise<WorkflowResult>;
|
|
2233
2789
|
|
|
@@ -2251,6 +2807,179 @@ type TurnExecutor = (context: TurnContext) => Promise<{
|
|
|
2251
2807
|
declare function runPipeline(initialContext: SkillContext, executor: SkillExecutor, options?: PipelineOptions): Promise<PipelineResult>;
|
|
2252
2808
|
declare function runMultiTurnPipeline(initialContext: SkillContext, turnExecutor: TurnExecutor, options?: PipelineOptions): Promise<PipelineResult>;
|
|
2253
2809
|
|
|
2810
|
+
type SecurityCategory = 'secrets' | 'injection' | 'xss' | 'crypto' | 'network' | 'deserialization' | 'path-traversal';
|
|
2811
|
+
type SecuritySeverity = 'error' | 'warning' | 'info';
|
|
2812
|
+
type SecurityConfidence = 'high' | 'medium' | 'low';
|
|
2813
|
+
interface SecurityRule {
|
|
2814
|
+
id: string;
|
|
2815
|
+
name: string;
|
|
2816
|
+
category: SecurityCategory;
|
|
2817
|
+
severity: SecuritySeverity;
|
|
2818
|
+
confidence: SecurityConfidence;
|
|
2819
|
+
patterns: RegExp[];
|
|
2820
|
+
fileGlob?: string;
|
|
2821
|
+
stack?: string[];
|
|
2822
|
+
message: string;
|
|
2823
|
+
remediation: string;
|
|
2824
|
+
references?: string[];
|
|
2825
|
+
}
|
|
2826
|
+
interface SecurityFinding {
|
|
2827
|
+
ruleId: string;
|
|
2828
|
+
ruleName: string;
|
|
2829
|
+
category: SecurityCategory;
|
|
2830
|
+
severity: SecuritySeverity;
|
|
2831
|
+
confidence: SecurityConfidence;
|
|
2832
|
+
file: string;
|
|
2833
|
+
line: number;
|
|
2834
|
+
column?: number;
|
|
2835
|
+
match: string;
|
|
2836
|
+
context: string;
|
|
2837
|
+
message: string;
|
|
2838
|
+
remediation: string;
|
|
2839
|
+
references?: string[];
|
|
2840
|
+
}
|
|
2841
|
+
interface ScanResult {
|
|
2842
|
+
findings: SecurityFinding[];
|
|
2843
|
+
scannedFiles: number;
|
|
2844
|
+
rulesApplied: number;
|
|
2845
|
+
externalToolsUsed: string[];
|
|
2846
|
+
coverage: 'baseline' | 'enhanced';
|
|
2847
|
+
}
|
|
2848
|
+
type RuleOverride = 'off' | SecuritySeverity;
|
|
2849
|
+
interface SecurityConfig {
|
|
2850
|
+
enabled: boolean;
|
|
2851
|
+
strict: boolean;
|
|
2852
|
+
rules?: Record<string, RuleOverride>;
|
|
2853
|
+
exclude?: string[];
|
|
2854
|
+
external?: {
|
|
2855
|
+
semgrep?: {
|
|
2856
|
+
enabled: 'auto' | boolean;
|
|
2857
|
+
rulesets?: string[];
|
|
2858
|
+
};
|
|
2859
|
+
gitleaks?: {
|
|
2860
|
+
enabled: 'auto' | boolean;
|
|
2861
|
+
};
|
|
2862
|
+
};
|
|
2863
|
+
}
|
|
2864
|
+
declare const DEFAULT_SECURITY_CONFIG: SecurityConfig;
|
|
2865
|
+
|
|
2866
|
+
declare class SecurityScanner {
|
|
2867
|
+
private registry;
|
|
2868
|
+
private config;
|
|
2869
|
+
private activeRules;
|
|
2870
|
+
constructor(config?: Partial<SecurityConfig>);
|
|
2871
|
+
configureForProject(projectRoot: string): void;
|
|
2872
|
+
scanContent(content: string, filePath: string, startLine?: number): SecurityFinding[];
|
|
2873
|
+
scanFile(filePath: string): Promise<SecurityFinding[]>;
|
|
2874
|
+
scanFiles(filePaths: string[]): Promise<ScanResult>;
|
|
2875
|
+
}
|
|
2876
|
+
|
|
2877
|
+
declare const SecurityConfigSchema: z.ZodObject<{
|
|
2878
|
+
enabled: z.ZodDefault<z.ZodBoolean>;
|
|
2879
|
+
strict: z.ZodDefault<z.ZodBoolean>;
|
|
2880
|
+
rules: z.ZodDefault<z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodEnum<["off", "error", "warning", "info"]>>>>;
|
|
2881
|
+
exclude: z.ZodDefault<z.ZodOptional<z.ZodArray<z.ZodString, "many">>>;
|
|
2882
|
+
external: z.ZodOptional<z.ZodObject<{
|
|
2883
|
+
semgrep: z.ZodOptional<z.ZodObject<{
|
|
2884
|
+
enabled: z.ZodDefault<z.ZodUnion<[z.ZodLiteral<"auto">, z.ZodBoolean]>>;
|
|
2885
|
+
rulesets: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
|
|
2886
|
+
}, "strip", z.ZodTypeAny, {
|
|
2887
|
+
enabled: boolean | "auto";
|
|
2888
|
+
rulesets?: string[] | undefined;
|
|
2889
|
+
}, {
|
|
2890
|
+
enabled?: boolean | "auto" | undefined;
|
|
2891
|
+
rulesets?: string[] | undefined;
|
|
2892
|
+
}>>;
|
|
2893
|
+
gitleaks: z.ZodOptional<z.ZodObject<{
|
|
2894
|
+
enabled: z.ZodDefault<z.ZodUnion<[z.ZodLiteral<"auto">, z.ZodBoolean]>>;
|
|
2895
|
+
}, "strip", z.ZodTypeAny, {
|
|
2896
|
+
enabled: boolean | "auto";
|
|
2897
|
+
}, {
|
|
2898
|
+
enabled?: boolean | "auto" | undefined;
|
|
2899
|
+
}>>;
|
|
2900
|
+
}, "strip", z.ZodTypeAny, {
|
|
2901
|
+
semgrep?: {
|
|
2902
|
+
enabled: boolean | "auto";
|
|
2903
|
+
rulesets?: string[] | undefined;
|
|
2904
|
+
} | undefined;
|
|
2905
|
+
gitleaks?: {
|
|
2906
|
+
enabled: boolean | "auto";
|
|
2907
|
+
} | undefined;
|
|
2908
|
+
}, {
|
|
2909
|
+
semgrep?: {
|
|
2910
|
+
enabled?: boolean | "auto" | undefined;
|
|
2911
|
+
rulesets?: string[] | undefined;
|
|
2912
|
+
} | undefined;
|
|
2913
|
+
gitleaks?: {
|
|
2914
|
+
enabled?: boolean | "auto" | undefined;
|
|
2915
|
+
} | undefined;
|
|
2916
|
+
}>>;
|
|
2917
|
+
}, "strip", z.ZodTypeAny, {
|
|
2918
|
+
enabled: boolean;
|
|
2919
|
+
strict: boolean;
|
|
2920
|
+
exclude: string[];
|
|
2921
|
+
rules: Record<string, "error" | "warning" | "info" | "off">;
|
|
2922
|
+
external?: {
|
|
2923
|
+
semgrep?: {
|
|
2924
|
+
enabled: boolean | "auto";
|
|
2925
|
+
rulesets?: string[] | undefined;
|
|
2926
|
+
} | undefined;
|
|
2927
|
+
gitleaks?: {
|
|
2928
|
+
enabled: boolean | "auto";
|
|
2929
|
+
} | undefined;
|
|
2930
|
+
} | undefined;
|
|
2931
|
+
}, {
|
|
2932
|
+
enabled?: boolean | undefined;
|
|
2933
|
+
strict?: boolean | undefined;
|
|
2934
|
+
exclude?: string[] | undefined;
|
|
2935
|
+
rules?: Record<string, "error" | "warning" | "info" | "off"> | undefined;
|
|
2936
|
+
external?: {
|
|
2937
|
+
semgrep?: {
|
|
2938
|
+
enabled?: boolean | "auto" | undefined;
|
|
2939
|
+
rulesets?: string[] | undefined;
|
|
2940
|
+
} | undefined;
|
|
2941
|
+
gitleaks?: {
|
|
2942
|
+
enabled?: boolean | "auto" | undefined;
|
|
2943
|
+
} | undefined;
|
|
2944
|
+
} | undefined;
|
|
2945
|
+
}>;
|
|
2946
|
+
/**
 * Parse and validate an unknown value into a SecurityConfig.
 * NOTE(review): presumably validated against the zod security-config schema
 * declared above — confirm whether invalid input throws or falls back to defaults.
 */
declare function parseSecurityConfig(input: unknown): SecurityConfig;
/**
 * Resolve the effective severity/override for a single security rule.
 *
 * @param ruleId - Rule identifier to look up in `overrides`
 * @param defaultSeverity - Severity used when no override applies
 * @param overrides - Per-rule overrides keyed by rule ID
 * @param strict - Strict-mode flag — NOTE(review): presumably escalates
 *   severities when true; confirm against the implementation
 */
declare function resolveRuleSeverity(ruleId: string, defaultSeverity: SecuritySeverity, overrides: Record<string, RuleOverride>, strict: boolean): RuleOverride;
/**
 * In-memory registry of security rules, queryable by ID, category, or stack.
 */
declare class RuleRegistry {
    /** Internal rule store (type erased in this declaration file) */
    private rules;
    /** Add a single rule to the registry. */
    register(rule: SecurityRule): void;
    /** Add multiple rules to the registry. */
    registerAll(rules: SecurityRule[]): void;
    /** Look up a rule by its ID; `undefined` when not registered. */
    getById(id: string): SecurityRule | undefined;
    /** All registered rules. */
    getAll(): SecurityRule[];
    /** Rules belonging to the given category. */
    getByCategory(category: SecurityCategory): SecurityRule[];
    /**
     * Rules applicable to any of the given stacks.
     * NOTE(review): stack identifiers presumably match those returned by
     * `detectStack` — confirm.
     */
    getForStacks(stacks: string[]): SecurityRule[];
}
|
|
2958
|
+
|
|
2959
|
+
/**
 * Detect which technology stacks a project uses.
 *
 * @param projectRoot - Project root directory to inspect
 * @returns Stack identifier strings — NOTE(review): the exact identifiers
 *   (e.g. 'node', 'react', 'go') are not visible here; confirm against the
 *   implementation before relying on specific values
 */
declare function detectStack(projectRoot: string): string[];
/** Built-in rules — hardcoded secret / credential detection (from the name; confirm). */
declare const secretRules: SecurityRule[];
/** Built-in rules — injection flaws (name suggests SQL/command injection; confirm). */
declare const injectionRules: SecurityRule[];
/** Built-in rules — cross-site scripting (XSS) patterns. */
declare const xssRules: SecurityRule[];
/** Built-in rules — cryptography misuse (from the name; confirm). */
declare const cryptoRules: SecurityRule[];
/** Built-in rules — path traversal patterns. */
declare const pathTraversalRules: SecurityRule[];
/** Built-in rules — network-related issues (from the name; confirm). */
declare const networkRules: SecurityRule[];
/** Built-in rules — unsafe deserialization patterns. */
declare const deserializationRules: SecurityRule[];
/** Stack-specific rules — Node.js. */
declare const nodeRules: SecurityRule[];
/** Stack-specific rules — Express. */
declare const expressRules: SecurityRule[];
/** Stack-specific rules — React. */
declare const reactRules: SecurityRule[];
/** Stack-specific rules — Go. */
declare const goRules: SecurityRule[];
|
|
2982
|
+
|
|
2254
2983
|
interface RunCIChecksInput {
|
|
2255
2984
|
projectRoot: string;
|
|
2256
2985
|
config: Record<string, unknown>;
|
|
@@ -2259,12 +2988,941 @@ interface RunCIChecksInput {
|
|
|
2259
2988
|
}
|
|
2260
2989
|
/** Run the configured CI checks for a project and produce a consolidated report. */
declare function runCIChecks(input: RunCIChecksInput): Promise<Result<CICheckReport, Error>>;
|
|
2261
2990
|
|
|
2991
|
+
/**
 * An index of mechanical findings, queryable by file + line range.
 * Used in Phase 5 (VALIDATE) to determine whether an AI-produced finding
 * overlaps with a mechanical finding and should be excluded.
 */
declare class ExclusionSet {
    /** Findings indexed by file path for O(1) file lookup */
    private byFile;
    /** Flat list of every finding in the set (backs `size` and `getFindings`) */
    private allFindings;
    constructor(findings: MechanicalFinding[]);
    /**
     * Returns true if any mechanical finding covers the given file + line range.
     *
     * A mechanical finding "covers" a range if:
     * - The file matches, AND
     * - The finding has no line (file-level finding — covers everything), OR
     * - The finding's line falls within [startLine, endLine] inclusive.
     */
    isExcluded(file: string, lineRange: [number, number]): boolean;
    /** Number of findings in the set */
    get size(): number;
    /** Returns a copy of all findings */
    getFindings(): MechanicalFinding[];
}
/**
 * Build an ExclusionSet from mechanical findings.
 */
declare function buildExclusionSet(findings: MechanicalFinding[]): ExclusionSet;
|
|
3019
|
+
|
|
3020
|
+
/**
 * A finding produced by a mechanical check (lint, typecheck, security scan, harness validate/deps/docs).
 * Used as input to the exclusion set and reported when the pipeline stops due to mechanical failures.
 */
interface MechanicalFinding {
    /** Which mechanical tool produced this finding */
    tool: 'validate' | 'check-deps' | 'check-docs' | 'security-scan';
    /** File path (absolute or project-relative) */
    file: string;
    /** Line number, if available */
    line?: number;
    /** Rule ID from the tool (e.g., security rule ID) */
    ruleId?: string;
    /** Human-readable message */
    message: string;
    /** Severity level */
    severity: 'error' | 'warning';
}
/**
 * Result of running all mechanical checks.
 */
interface MechanicalCheckResult {
    /** Overall pass/fail — false if any check produced errors */
    pass: boolean;
    /** True if the pipeline should stop (validate or check-deps failed) */
    stopPipeline: boolean;
    /** All findings from all mechanical checks */
    findings: MechanicalFinding[];
    /** Per-check status for reporting */
    checks: {
        validate: MechanicalCheckStatus;
        checkDeps: MechanicalCheckStatus;
        checkDocs: MechanicalCheckStatus;
        securityScan: MechanicalCheckStatus;
    };
}
/**
 * Status of one mechanical check: 'pass'/'fail' for checks that gate the
 * pipeline, 'warn' for non-blocking issues, 'skip' when the check was not run.
 */
type MechanicalCheckStatus = 'pass' | 'fail' | 'warn' | 'skip';
/**
 * Options for running mechanical checks.
 */
interface MechanicalCheckOptions {
    /** Project root directory */
    projectRoot: string;
    /** Config object (from resolveConfig or harness.config.json) */
    config: Record<string, unknown>;
    /** Skip specific checks */
    skip?: Array<'validate' | 'check-deps' | 'check-docs' | 'security-scan'>;
    /** Only scan these files for security (e.g., changed files from a PR) */
    changedFiles?: string[];
}
/**
 * Change type detected from commit message prefix or diff heuristic.
 */
type ChangeType = 'feature' | 'bugfix' | 'refactor' | 'docs';
|
|
3074
|
+
/**
 * Review domain — each gets its own scoped context bundle.
 */
type ReviewDomain = 'compliance' | 'bug' | 'security' | 'architecture';
/**
 * A file included in a context bundle with its content.
 */
interface ContextFile {
    /** File path (project-relative) */
    path: string;
    /** File content (full or truncated to budget) */
    content: string;
    /** Why this file was included */
    reason: 'changed' | 'import' | 'test' | 'spec' | 'type' | 'convention' | 'graph-dependency' | 'graph-impact';
    /** Line count of the content */
    lines: number;
}
/**
 * Commit history entry for a changed file.
 */
interface CommitHistoryEntry {
    /** Short SHA */
    sha: string;
    /** One-line commit message */
    message: string;
    /** File path this commit touched */
    file: string;
}
/**
 * Context bundle assembled for a single review domain.
 * Each Phase 4 subagent receives one of these.
 */
interface ContextBundle {
    /** Which review domain this bundle is for */
    domain: ReviewDomain;
    /** Detected change type */
    changeType: ChangeType;
    /** Files that were changed in the diff */
    changedFiles: ContextFile[];
    /** Additional context files (imports, tests, specs, types, conventions) */
    contextFiles: ContextFile[];
    /**
     * Recent commit history for changed files.
     * @remarks Empty by default from `scopeContext()`. Callers should populate this
     * via `git log` commands at the orchestration layer before passing bundles to
     * Phase 4 subagents. Example: `git log --oneline -5 -- <file>` per changed file.
     */
    commitHistory: CommitHistoryEntry[];
    /** Total lines of diff */
    diffLines: number;
    /** Total lines of context gathered */
    contextLines: number;
}
|
|
3127
|
+
/**
 * Information about a diff, used as input to context scoping.
 */
interface DiffInfo {
    /** Changed file paths (project-relative) */
    changedFiles: string[];
    /** New files (subset of changedFiles) */
    newFiles: string[];
    /** Deleted files (subset of changedFiles) */
    deletedFiles: string[];
    /** Total lines of diff across all files */
    totalDiffLines: number;
    /** Per-file diff content, keyed by file path */
    fileDiffs: Map<string, string>;
}
/**
 * Adapter interface for graph queries.
 * Callers implement this using @harness-engineering/graph when available.
 * The context scoper does NOT depend on the graph package directly.
 */
interface GraphAdapter {
    /**
     * Find direct dependencies of a file (imports, calls).
     * Returns file paths of dependencies.
     */
    getDependencies(filePath: string): Promise<string[]>;
    /**
     * Find files impacted by changes to a file (reverse dependencies, tests, docs).
     * Returns file paths of impacted nodes grouped by category.
     */
    getImpact(filePath: string): Promise<{
        tests: string[];
        docs: string[];
        code: string[];
    }>;
    /**
     * Check if a path exists in the dependency graph between two files.
     * Used for reachability validation in Phase 5 (exported here for shared use).
     */
    isReachable(fromFile: string, toFile: string, maxDepth?: number): Promise<boolean>;
}
/**
 * Options for context scoping.
 */
interface ContextScopeOptions {
    /** Project root directory */
    projectRoot: string;
    /** Diff information */
    diff: DiffInfo;
    /** Most recent commit message (for change-type detection) */
    commitMessage: string;
    /** Graph adapter (optional -- falls back to heuristics when absent) */
    graph?: GraphAdapter;
    /** Convention files to include for compliance domain */
    conventionFiles?: string[];
    /** Output from `harness check-deps` (for architecture fallback) */
    checkDepsOutput?: string;
    /** Pre-gathered commit history entries. If provided, included in all bundles. */
    commitHistory?: CommitHistoryEntry[];
}
|
|
3187
|
+
/**
 * Model tier — abstract label resolved at runtime from project config.
 * - fast: haiku-class (gate, context phases)
 * - standard: sonnet-class (compliance, architecture agents)
 * - strong: opus-class (bug detection, security agents)
 */
type ModelTier = 'fast' | 'standard' | 'strong';
/**
 * Severity level for AI-produced review findings.
 */
type FindingSeverity = 'critical' | 'important' | 'suggestion';
/**
 * A finding produced by a Phase 4 review subagent.
 * Common schema used across all four agents and in Phases 5-7.
 */
interface ReviewFinding {
    /** Unique identifier for dedup (format: domain-file-line, e.g. "bug-src/auth.ts-42") */
    id: string;
    /** File path (project-relative) */
    file: string;
    /** Start and end line numbers */
    lineRange: [number, number];
    /** Which review domain produced this finding */
    domain: ReviewDomain;
    /** Severity level */
    severity: FindingSeverity;
    /** One-line summary of the issue */
    title: string;
    /** Why this is an issue — the reasoning */
    rationale: string;
    /** Suggested fix, if available */
    suggestion?: string;
    /** Supporting context/evidence from the agent */
    evidence: string[];
    /** How this finding was validated (set in Phase 5; agents set 'heuristic' by default) */
    validatedBy: 'mechanical' | 'graph' | 'heuristic';
    /** CWE identifier, e.g. "CWE-89" (security domain only) */
    cweId?: string;
    /** OWASP Top 10 category, e.g. "A03:2021 Injection" (security domain only) */
    owaspCategory?: string;
    /** Confidence level of the finding (security domain only) */
    confidence?: 'high' | 'medium' | 'low';
    /** Specific remediation guidance (security domain only) */
    remediation?: string;
    /** Links to CWE/OWASP reference docs (security domain only) */
    references?: string[];
}
/**
 * Descriptor for a review subagent — metadata about its purpose and model tier.
 */
interface ReviewAgentDescriptor {
    /** Review domain this agent covers */
    domain: ReviewDomain;
    /** Model tier annotation (resolved to a concrete model at runtime) */
    tier: ModelTier;
    /** Human-readable name for output */
    displayName: string;
    /** Focus area descriptions for this agent */
    focusAreas: string[];
}
/**
 * Result from a single review agent.
 */
interface AgentReviewResult {
    /** Which domain produced these findings */
    domain: ReviewDomain;
    /** Findings produced by this agent */
    findings: ReviewFinding[];
    /** Time taken in milliseconds */
    durationMs: number;
}
/**
 * Options for the fan-out orchestrator.
 */
interface FanOutOptions {
    /** Context bundles from Phase 3 (one per domain) */
    bundles: ContextBundle[];
}
|
|
3265
|
+
/**
 * Assessment decision — determines exit code and PR review action.
 */
type ReviewAssessment = 'approve' | 'comment' | 'request-changes';
/**
 * A strength identified during review (positive feedback).
 */
interface ReviewStrength {
    /** File path (project-relative), or null for project-wide strengths */
    file: string | null;
    /** One-line description of what's done well */
    description: string;
}
/**
 * Options for formatting review output.
 */
interface ReviewOutputOptions {
    /** Deduplicated findings from Phase 6 */
    findings: ReviewFinding[];
    /** Strengths identified during review */
    strengths: ReviewStrength[];
    /** PR number (required for GitHub comments) */
    prNumber?: number;
    /** Repository in owner/repo format (required for GitHub comments) */
    repo?: string;
}
/**
 * A formatted GitHub inline comment ready for posting.
 */
interface GitHubInlineComment {
    /** File path (project-relative) */
    path: string;
    /** Line number for the comment */
    line: number;
    /** Side of the diff ('RIGHT' for additions) */
    side: 'RIGHT';
    /** Comment body (markdown) */
    body: string;
}
/**
 * Information about a prior review on this PR.
 */
interface PriorReview {
    /** The head commit SHA that was reviewed */
    headSha: string;
    /** ISO timestamp of when the review was submitted */
    reviewedAt: string;
}
/**
 * PR metadata used by the eligibility gate.
 * This is a pure data object — the caller is responsible for fetching
 * this data from GitHub (via `gh` CLI, GitHub MCP, or mock).
 */
interface PrMetadata {
    /** PR state: open, closed, or merged */
    state: 'open' | 'closed' | 'merged';
    /** Whether the PR is marked as draft */
    isDraft: boolean;
    /** List of changed file paths (project-relative) */
    changedFiles: string[];
    /** The HEAD commit SHA of the PR branch */
    headSha: string;
    /** Prior reviews submitted on this PR */
    priorReviews: PriorReview[];
}
/**
 * Result of the eligibility gate check.
 */
interface EligibilityResult {
    /** Whether the PR is eligible for review */
    eligible: boolean;
    /** Human-readable reason when not eligible */
    reason?: string;
}
|
|
3339
|
+
/**
 * Configuration mapping abstract model tiers to concrete model identifiers.
 * All tiers are optional — unmapped tiers resolve to undefined (use current model).
 *
 * Example config:
 * { fast: "haiku", standard: "sonnet", strong: "opus" }
 * { fast: "gpt-4o-mini", standard: "gpt-4o", strong: "o1" }
 */
interface ModelTierConfig {
    /** Concrete model for the 'fast' tier */
    fast?: string;
    /** Concrete model for the 'standard' tier */
    standard?: string;
    /** Concrete model for the 'strong' tier */
    strong?: string;
}
/**
 * Known provider identifiers for default tier resolution.
 */
type ModelProvider = 'claude' | 'openai' | 'gemini';
/**
 * Default model tier mappings per provider.
 * Used as fallback when config does not specify a tier.
 */
type ProviderDefaults = Record<ModelProvider, ModelTierConfig>;
/**
 * Flags controlling pipeline behavior, derived from CLI/MCP input.
 */
interface PipelineFlags {
    /** Post inline comments to GitHub PR */
    comment: boolean;
    /** Enable eligibility gate (CI mode) */
    ci: boolean;
    /** Add threat modeling pass to security agent */
    deep: boolean;
    /** Skip mechanical checks */
    noMechanical: boolean;
}
|
|
3374
|
+
/**
 * Mutable context object threaded through all 7 pipeline phases.
 * Each phase reads from upstream fields and writes to its own fields.
 */
interface PipelineContext {
    /** Project root directory */
    projectRoot: string;
    /** Diff information from git */
    diff: DiffInfo;
    /** Most recent commit message */
    commitMessage: string;
    /** Pipeline flags from CLI/MCP */
    flags: PipelineFlags;
    /** Model tier config (from harness.config.json review.model_tiers) */
    modelTierConfig?: ModelTierConfig;
    /** Graph adapter (optional — enhances context and validation) */
    graph?: GraphAdapter;
    /** PR metadata for gate phase and GitHub comments */
    prMetadata?: PrMetadata;
    /** Convention file paths for compliance context */
    conventionFiles?: string[];
    /** Output from `harness check-deps` for architecture fallback */
    checkDepsOutput?: string;
    /** Repository in owner/repo format (for --comment) */
    repo?: string;
    /** Whether the pipeline was skipped by the gate */
    skipped: boolean;
    /** Reason for skipping (when skipped is true) */
    skipReason?: string;
    /** Mechanical check results */
    mechanicalResult?: MechanicalCheckResult;
    /** Exclusion set built from mechanical findings */
    exclusionSet?: ExclusionSet;
    /** Context bundles per review domain */
    contextBundles?: ContextBundle[];
    /** Raw findings from all agents */
    rawFindings?: ReviewFinding[];
    /** Findings after mechanical exclusion and reachability validation */
    validatedFindings?: ReviewFinding[];
    /** Final deduplicated finding list */
    dedupedFindings?: ReviewFinding[];
    /** Strengths identified during review */
    strengths: ReviewStrength[];
    /** Final assessment */
    assessment?: ReviewAssessment;
    /** Formatted terminal output */
    terminalOutput?: string;
    /** GitHub inline comments (when --comment is set) */
    githubComments?: GitHubInlineComment[];
    /** Process exit code (0 = approve/comment, 1 = request-changes) */
    exitCode: number;
}
/**
 * Immutable result returned from `runPipeline()`.
 */
interface ReviewPipelineResult {
    /** Whether the pipeline was skipped by the eligibility gate */
    skipped: boolean;
    /** Reason for skipping */
    skipReason?: string;
    /** Whether the pipeline stopped due to mechanical failures */
    stoppedByMechanical: boolean;
    /** Final assessment (undefined if skipped or stopped) */
    assessment?: ReviewAssessment;
    /** Deduplicated findings */
    findings: ReviewFinding[];
    /** Strengths identified */
    strengths: ReviewStrength[];
    /** Formatted terminal output */
    terminalOutput: string;
    /** GitHub inline comments (empty if --comment not set) */
    githubComments: GitHubInlineComment[];
    /** Process exit code */
    exitCode: number;
    /** Mechanical check result (for reporting) */
    mechanicalResult?: MechanicalCheckResult;
}
|
|
3451
|
+
|
|
3452
|
+
/**
 * Run all mechanical checks and produce the exclusion set inputs.
 *
 * Mechanical checks that fail with errors (validate, check-deps) set `stopPipeline: true`.
 * Checks that produce warnings (check-docs, security-scan) record findings but do NOT stop the pipeline.
 */
declare function runMechanicalChecks(options: MechanicalCheckOptions): Promise<Result<MechanicalCheckResult, Error>>;
/**
 * Detect the change type from a commit message and diff information.
 *
 * Detection priority:
 * 1. Conventional commit prefix in commit message
 * 2. Diff pattern heuristics (new files, test files, docs-only)
 * 3. Default to 'feature' (most thorough review)
 */
declare function detectChangeType(commitMessage: string, diff: DiffInfo): ChangeType;
/**
 * Assemble scoped context bundles for each review domain.
 *
 * Returns one ContextBundle per domain. Each bundle contains:
 * - The changed files with their content
 * - Domain-specific context files (imports, tests, conventions, etc.)
 * - Recent commit history
 * - Change type and context ratio metadata
 */
declare function scopeContext(options: ContextScopeOptions): Promise<ContextBundle[]>;
/**
 * Descriptor for the compliance review agent.
 */
declare const COMPLIANCE_DESCRIPTOR: ReviewAgentDescriptor;
/**
 * Run the compliance review agent.
 *
 * Analyzes the context bundle for convention adherence, spec alignment,
 * and documentation completeness. Produces ReviewFinding[] with domain 'compliance'.
 *
 * This function performs static/heuristic analysis. The actual LLM invocation
 * for deeper compliance review happens at the orchestration layer (MCP/CLI).
 */
declare function runComplianceAgent(bundle: ContextBundle): ReviewFinding[];
/**
 * Descriptor for the bug detection review agent.
 */
declare const BUG_DETECTION_DESCRIPTOR: ReviewAgentDescriptor;
/**
 * Run the bug detection review agent.
 *
 * Analyzes the context bundle for logic errors, edge cases, error handling issues,
 * and test coverage gaps. Produces ReviewFinding[] with domain 'bug'.
 */
declare function runBugDetectionAgent(bundle: ContextBundle): ReviewFinding[];
/**
 * Descriptor for the security review agent.
 */
declare const SECURITY_DESCRIPTOR: ReviewAgentDescriptor;
/**
 * Run the security review agent.
 *
 * Analyzes the context bundle for security vulnerabilities using pattern-based
 * heuristics. Produces ReviewFinding[] with domain 'security'.
 */
declare function runSecurityAgent(bundle: ContextBundle): ReviewFinding[];
/**
 * Descriptor for the architecture review agent.
 */
declare const ARCHITECTURE_DESCRIPTOR: ReviewAgentDescriptor;
/**
 * Run the architecture review agent.
 *
 * Analyzes the context bundle for architectural violations, dependency direction,
 * and design pattern compliance. Produces ReviewFinding[] with domain 'architecture'.
 */
declare function runArchitectureAgent(bundle: ContextBundle): ReviewFinding[];
/**
 * All agent descriptors indexed by domain.
 * Used by the fan-out orchestrator to dispatch agents and by output formatting
 * to display agent metadata.
 */
declare const AGENT_DESCRIPTORS: Record<ReviewDomain, ReviewAgentDescriptor>;
/**
 * Fan out review to all agents in parallel.
 *
 * Dispatches one agent per context bundle (each bundle targets a specific domain).
 * All agents run concurrently via Promise.all.
 *
 * Currently dispatches synchronous heuristic agents. Parallelism becomes
 * meaningful when agents perform async LLM calls (Phase 8 model tiering).
 *
 * Returns an AgentReviewResult per domain, each containing the findings
 * and timing information.
 */
declare function fanOutReview(options: FanOutOptions): Promise<AgentReviewResult[]>;
|
|
3543
|
+
|
|
3544
|
+
/**
 * Options for the validation phase.
 */
interface ValidateFindingsOptions {
    /** All findings from Phase 4 fan-out */
    findings: ReviewFinding[];
    /** ExclusionSet built from mechanical findings in Phase 2 */
    exclusionSet: ExclusionSet;
    /** Graph adapter (optional — falls back to import-chain heuristic when absent) */
    graph?: GraphAdapter;
    /** Project root for path normalization */
    projectRoot: string;
    /** Changed file contents for import-chain heuristic (file path -> content) */
    fileContents?: Map<string, string>;
}
/**
 * Validate Phase 4 findings against mechanical exclusion, graph reachability,
 * and import-chain heuristic fallback.
 *
 * 1. Mechanical exclusion: discard findings that overlap with ExclusionSet
 * 2. Graph reachability (if graph provided): verify cross-file claims, discard unreachable
 * 3. Import-chain heuristic (no graph): downgrade findings with unvalidated cross-file claims
 */
declare function validateFindings(options: ValidateFindingsOptions): Promise<ReviewFinding[]>;
/**
 * Options for the deduplication phase.
 */
interface DeduplicateFindingsOptions {
    /** Validated findings from Phase 5 */
    findings: ReviewFinding[];
    /** Maximum line gap to consider findings as overlapping (default: 3) */
    lineGap?: number;
}
/**
 * Deduplicate and merge overlapping findings.
 *
 * Groups findings by file, then merges findings with overlapping line ranges
 * (within `lineGap` lines of each other). Merged findings keep the highest
 * severity, combine evidence, preserve the strongest rationale, and note
 * all contributing domains in the title.
 */
declare function deduplicateFindings(options: DeduplicateFindingsOptions): ReviewFinding[];
|
|
3587
|
+
|
|
3588
|
+
/**
 * Phase 1: Eligibility Gate
 *
 * Pure function that checks whether a PR should be reviewed.
 * In CI mode (`ciMode: true`), checks PR state, draft status,
 * trivial changes, and prior reviews. When `ciMode` is false
 * (manual invocation), always returns eligible.
 *
 * @param pr - PR metadata (state, draft status, files, commit range, prior reviews)
 * @param ciMode - Whether the review was invoked with --ci flag
 * @returns Eligibility result with optional skip reason
 */
declare function checkEligibility(pr: PrMetadata, ciMode: boolean): EligibilityResult;
/**
 * Sensible default model tier mappings per known provider.
 * Used as fallback when project config does not map a tier.
 */
declare const DEFAULT_PROVIDER_TIERS: ProviderDefaults;
/**
 * Resolve an abstract model tier to a concrete model identifier.
 *
 * Resolution order:
 * 1. If config has a mapping for the tier, return it.
 * 2. If a provider is specified and has a default for the tier, return the default.
 * 3. Return undefined (meaning "use whatever model the user is currently running").
 *
 * @param tier - Abstract model tier ('fast', 'standard', 'strong')
 * @param config - Optional model tier config from harness.config.json review.model_tiers
 * @param provider - Optional known provider for default fallback
 * @returns Concrete model identifier string, or undefined if no mapping found
 */
declare function resolveModelTier(tier: ModelTier, config?: ModelTierConfig, provider?: ModelProvider): string | undefined;
|
|
3621
|
+
|
|
3622
|
+
/**
 * Determine the overall assessment based on the highest severity finding.
 *
 * - No findings or all suggestions → approve
 * - Any important (but no critical) → comment
 * - Any critical → request-changes
 */
declare function determineAssessment(findings: ReviewFinding[]): ReviewAssessment;
/**
 * Map an assessment to a process exit code.
 * - approve / comment → 0
 * - request-changes → 1
 */
declare function getExitCode(assessment: ReviewAssessment): number;
/**
 * Format a single finding as a terminal text block.
 */
declare function formatFindingBlock(finding: ReviewFinding): string;
/**
 * Format the full terminal output in Strengths / Issues / Assessment format.
 */
declare function formatTerminalOutput(options: {
    findings: ReviewFinding[];
    strengths: ReviewStrength[];
}): string;
/**
 * Check if a suggestion is "small" (under 10 lines) and suitable
 * for a committable GitHub suggestion block.
 */
declare function isSmallSuggestion(suggestion: string | undefined): boolean;
/**
 * Format a single finding as a GitHub inline comment.
 *
 * - Small suggestions (< 10 lines): committable suggestion block
 * - Large suggestions or no suggestion: description + rationale
 */
declare function formatGitHubComment(finding: ReviewFinding): GitHubInlineComment;
/**
 * Format the review summary for a GitHub PR review body.
 * Uses markdown formatting (## headers, bullet lists).
 */
declare function formatGitHubSummary(options: {
    findings: ReviewFinding[];
    strengths: ReviewStrength[];
}): string;
|
|
3669
|
+
|
|
3670
|
+
/**
 * Options for invoking the pipeline.
 */
interface RunPipelineOptions {
    /** Project root directory */
    projectRoot: string;
    /** Diff information from git */
    diff: DiffInfo;
    /** Most recent commit message */
    commitMessage: string;
    /** Pipeline flags from CLI/MCP */
    flags: PipelineFlags;
    /** Model tier config (from harness.config.json review.model_tiers) */
    modelTierConfig?: ModelTierConfig;
    /** Graph adapter (optional — enhances context and validation) */
    graph?: GraphAdapter;
    /** PR metadata for gate phase and GitHub comments */
    prMetadata?: PrMetadata;
    /** Convention file paths for compliance context */
    conventionFiles?: string[];
    /** Output from `harness check-deps` for architecture fallback */
    checkDepsOutput?: string;
    /** Repository in owner/repo format (for --comment) */
    repo?: string;
    /** Harness config object for mechanical checks */
    config?: Record<string, unknown>;
    /** Pre-gathered commit history entries */
    commitHistory?: CommitHistoryEntry[];
}
/**
 * Run the full 7-phase code review pipeline.
 *
 * Phase 1: GATE (CI mode only)
 * Phase 2: MECHANICAL (skipped with --no-mechanical)
 * Phase 3: CONTEXT
 * Phase 4: FAN-OUT (parallel agents)
 * Phase 5: VALIDATE
 * Phase 6: DEDUP+MERGE
 * Phase 7: OUTPUT
 */
declare function runReviewPipeline(options: RunPipelineOptions): Promise<ReviewPipelineResult>;
/**
 * Parse a roadmap markdown string into a structured Roadmap object.
 * Returns Result<Roadmap> — Err on invalid input.
 */
declare function parseRoadmap(markdown: string): Result<Roadmap>;
/**
 * Serialize a Roadmap object to markdown string.
 * Produces output that round-trips with parseRoadmap.
 */
declare function serializeRoadmap(roadmap: Roadmap): string;
|
|
3713
|
+
|
|
3714
|
+
/**
|
|
3715
|
+
* A proposed status change from the sync engine.
|
|
3716
|
+
*/
|
|
3717
|
+
interface SyncChange {
|
|
3718
|
+
/** Feature name */
|
|
3719
|
+
feature: string;
|
|
3720
|
+
/** Current status in the roadmap */
|
|
3721
|
+
from: FeatureStatus;
|
|
3722
|
+
/** Proposed new status based on execution state */
|
|
3723
|
+
to: FeatureStatus;
|
|
3724
|
+
}
|
|
3725
|
+
interface SyncOptions {
|
|
3726
|
+
/** Path to project root */
|
|
3727
|
+
projectPath: string;
|
|
3728
|
+
/** Parsed roadmap object */
|
|
3729
|
+
roadmap: Roadmap;
|
|
3730
|
+
/** Override human-always-wins rule */
|
|
3731
|
+
forceSync?: boolean;
|
|
3732
|
+
}
|
|
3733
|
+
/**
|
|
3734
|
+
* Scan execution state files and infer status changes for roadmap features.
|
|
3735
|
+
* Returns proposed changes without modifying the roadmap.
|
|
3736
|
+
*/
|
|
3737
|
+
declare function syncRoadmap(options: SyncOptions): Result<SyncChange[]>;
|
|
3738
|
+
|
|
3739
|
+
declare const InteractionTypeSchema: z.ZodEnum<["question", "confirmation", "transition"]>;
|
|
3740
|
+
declare const QuestionSchema: z.ZodObject<{
|
|
3741
|
+
text: z.ZodString;
|
|
3742
|
+
options: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
|
|
3743
|
+
default: z.ZodOptional<z.ZodString>;
|
|
3744
|
+
}, "strip", z.ZodTypeAny, {
|
|
3745
|
+
text: string;
|
|
3746
|
+
default?: string | undefined;
|
|
3747
|
+
options?: string[] | undefined;
|
|
3748
|
+
}, {
|
|
3749
|
+
text: string;
|
|
3750
|
+
default?: string | undefined;
|
|
3751
|
+
options?: string[] | undefined;
|
|
3752
|
+
}>;
|
|
3753
|
+
declare const ConfirmationSchema: z.ZodObject<{
|
|
3754
|
+
text: z.ZodString;
|
|
3755
|
+
context: z.ZodString;
|
|
3756
|
+
}, "strip", z.ZodTypeAny, {
|
|
3757
|
+
text: string;
|
|
3758
|
+
context: string;
|
|
3759
|
+
}, {
|
|
3760
|
+
text: string;
|
|
3761
|
+
context: string;
|
|
3762
|
+
}>;
|
|
3763
|
+
declare const TransitionSchema: z.ZodObject<{
|
|
3764
|
+
completedPhase: z.ZodString;
|
|
3765
|
+
suggestedNext: z.ZodString;
|
|
3766
|
+
reason: z.ZodString;
|
|
3767
|
+
artifacts: z.ZodArray<z.ZodString, "many">;
|
|
3768
|
+
requiresConfirmation: z.ZodBoolean;
|
|
3769
|
+
summary: z.ZodString;
|
|
3770
|
+
}, "strip", z.ZodTypeAny, {
|
|
3771
|
+
reason: string;
|
|
3772
|
+
summary: string;
|
|
3773
|
+
completedPhase: string;
|
|
3774
|
+
suggestedNext: string;
|
|
3775
|
+
artifacts: string[];
|
|
3776
|
+
requiresConfirmation: boolean;
|
|
3777
|
+
}, {
|
|
3778
|
+
reason: string;
|
|
3779
|
+
summary: string;
|
|
3780
|
+
completedPhase: string;
|
|
3781
|
+
suggestedNext: string;
|
|
3782
|
+
artifacts: string[];
|
|
3783
|
+
requiresConfirmation: boolean;
|
|
3784
|
+
}>;
|
|
3785
|
+
declare const EmitInteractionInputSchema: z.ZodObject<{
|
|
3786
|
+
path: z.ZodString;
|
|
3787
|
+
type: z.ZodEnum<["question", "confirmation", "transition"]>;
|
|
3788
|
+
stream: z.ZodOptional<z.ZodString>;
|
|
3789
|
+
question: z.ZodOptional<z.ZodObject<{
|
|
3790
|
+
text: z.ZodString;
|
|
3791
|
+
options: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
|
|
3792
|
+
default: z.ZodOptional<z.ZodString>;
|
|
3793
|
+
}, "strip", z.ZodTypeAny, {
|
|
3794
|
+
text: string;
|
|
3795
|
+
default?: string | undefined;
|
|
3796
|
+
options?: string[] | undefined;
|
|
3797
|
+
}, {
|
|
3798
|
+
text: string;
|
|
3799
|
+
default?: string | undefined;
|
|
3800
|
+
options?: string[] | undefined;
|
|
3801
|
+
}>>;
|
|
3802
|
+
confirmation: z.ZodOptional<z.ZodObject<{
|
|
3803
|
+
text: z.ZodString;
|
|
3804
|
+
context: z.ZodString;
|
|
3805
|
+
}, "strip", z.ZodTypeAny, {
|
|
3806
|
+
text: string;
|
|
3807
|
+
context: string;
|
|
3808
|
+
}, {
|
|
3809
|
+
text: string;
|
|
3810
|
+
context: string;
|
|
3811
|
+
}>>;
|
|
3812
|
+
transition: z.ZodOptional<z.ZodObject<{
|
|
3813
|
+
completedPhase: z.ZodString;
|
|
3814
|
+
suggestedNext: z.ZodString;
|
|
3815
|
+
reason: z.ZodString;
|
|
3816
|
+
artifacts: z.ZodArray<z.ZodString, "many">;
|
|
3817
|
+
requiresConfirmation: z.ZodBoolean;
|
|
3818
|
+
summary: z.ZodString;
|
|
3819
|
+
}, "strip", z.ZodTypeAny, {
|
|
3820
|
+
reason: string;
|
|
3821
|
+
summary: string;
|
|
3822
|
+
completedPhase: string;
|
|
3823
|
+
suggestedNext: string;
|
|
3824
|
+
artifacts: string[];
|
|
3825
|
+
requiresConfirmation: boolean;
|
|
3826
|
+
}, {
|
|
3827
|
+
reason: string;
|
|
3828
|
+
summary: string;
|
|
3829
|
+
completedPhase: string;
|
|
3830
|
+
suggestedNext: string;
|
|
3831
|
+
artifacts: string[];
|
|
3832
|
+
requiresConfirmation: boolean;
|
|
3833
|
+
}>>;
|
|
3834
|
+
}, "strip", z.ZodTypeAny, {
|
|
3835
|
+
path: string;
|
|
3836
|
+
type: "question" | "confirmation" | "transition";
|
|
3837
|
+
stream?: string | undefined;
|
|
3838
|
+
question?: {
|
|
3839
|
+
text: string;
|
|
3840
|
+
default?: string | undefined;
|
|
3841
|
+
options?: string[] | undefined;
|
|
3842
|
+
} | undefined;
|
|
3843
|
+
confirmation?: {
|
|
3844
|
+
text: string;
|
|
3845
|
+
context: string;
|
|
3846
|
+
} | undefined;
|
|
3847
|
+
transition?: {
|
|
3848
|
+
reason: string;
|
|
3849
|
+
summary: string;
|
|
3850
|
+
completedPhase: string;
|
|
3851
|
+
suggestedNext: string;
|
|
3852
|
+
artifacts: string[];
|
|
3853
|
+
requiresConfirmation: boolean;
|
|
3854
|
+
} | undefined;
|
|
3855
|
+
}, {
|
|
3856
|
+
path: string;
|
|
3857
|
+
type: "question" | "confirmation" | "transition";
|
|
3858
|
+
stream?: string | undefined;
|
|
3859
|
+
question?: {
|
|
3860
|
+
text: string;
|
|
3861
|
+
default?: string | undefined;
|
|
3862
|
+
options?: string[] | undefined;
|
|
3863
|
+
} | undefined;
|
|
3864
|
+
confirmation?: {
|
|
3865
|
+
text: string;
|
|
3866
|
+
context: string;
|
|
3867
|
+
} | undefined;
|
|
3868
|
+
transition?: {
|
|
3869
|
+
reason: string;
|
|
3870
|
+
summary: string;
|
|
3871
|
+
completedPhase: string;
|
|
3872
|
+
suggestedNext: string;
|
|
3873
|
+
artifacts: string[];
|
|
3874
|
+
requiresConfirmation: boolean;
|
|
3875
|
+
} | undefined;
|
|
3876
|
+
}>;
|
|
3877
|
+
type InteractionType = z.infer<typeof InteractionTypeSchema>;
|
|
3878
|
+
type Question = z.infer<typeof QuestionSchema>;
|
|
3879
|
+
type Confirmation = z.infer<typeof ConfirmationSchema>;
|
|
3880
|
+
type Transition = z.infer<typeof TransitionSchema>;
|
|
3881
|
+
type EmitInteractionInput = z.infer<typeof EmitInteractionInputSchema>;
|
|
3882
|
+
|
|
3883
|
+
interface UpdateCheckState {
|
|
3884
|
+
lastCheckTime: number;
|
|
3885
|
+
latestVersion: string | null;
|
|
3886
|
+
currentVersion: string;
|
|
3887
|
+
}
|
|
3888
|
+
/**
|
|
3889
|
+
* Returns false if the HARNESS_NO_UPDATE_CHECK env var is set to "1"
|
|
3890
|
+
* or the configured interval is 0 (disabled).
|
|
3891
|
+
*/
|
|
3892
|
+
declare function isUpdateCheckEnabled(configInterval?: number): boolean;
|
|
3893
|
+
/**
|
|
3894
|
+
* Returns true when enough time has passed since the last check.
|
|
3895
|
+
* If state is null (never checked), returns true.
|
|
3896
|
+
*/
|
|
3897
|
+
declare function shouldRunCheck(state: UpdateCheckState | null, intervalMs: number): boolean;
|
|
3898
|
+
/**
|
|
3899
|
+
* Reads the update check state from ~/.harness/update-check.json.
|
|
3900
|
+
* Returns null if the file is missing, unreadable, or has invalid content.
|
|
3901
|
+
*/
|
|
3902
|
+
declare function readCheckState(): UpdateCheckState | null;
|
|
3903
|
+
/**
|
|
3904
|
+
* Spawns a detached background Node process that:
|
|
3905
|
+
* 1. Queries npm registry for the latest version of @harness-engineering/cli
|
|
3906
|
+
* 2. Writes the result to ~/.harness/update-check.json
|
|
3907
|
+
* 3. Exits silently on any failure
|
|
3908
|
+
*
|
|
3909
|
+
* The parent calls child.unref() so the child does not block process exit.
|
|
3910
|
+
*/
|
|
3911
|
+
declare function spawnBackgroundCheck(currentVersion: string): void;
|
|
3912
|
+
/**
|
|
3913
|
+
* Reads the cached update check state and returns a formatted notification
|
|
3914
|
+
* string if a newer version is available. Returns null otherwise.
|
|
3915
|
+
*
|
|
3916
|
+
* @param currentVersion - The currently running version (e.g. VERSION from index.ts)
|
|
3917
|
+
*/
|
|
3918
|
+
declare function getUpdateNotification(currentVersion: string): string | null;
|
|
3919
|
+
|
|
2262
3920
|
/**
|
|
2263
3921
|
* @harness-engineering/core
|
|
2264
3922
|
*
|
|
2265
3923
|
* Core library for Harness Engineering toolkit
|
|
2266
3924
|
*/
|
|
2267
3925
|
|
|
2268
|
-
declare const VERSION = "0.
|
|
3926
|
+
declare const VERSION = "0.8.0";
|
|
2269
3927
|
|
|
2270
|
-
export { type AST, type ActionContext, type ActionEvent, type ActionEventHandler, type ActionEventType, type ActionResult, type ActionSink, type ActionTracker, type ActionType, type AgentAction, AgentActionEmitter, type AgentConfig, type AgentExecutor, type AgentMapLink, type AgentMapSection, type AgentMapValidation, type AgentProcess, type AgentType, type AgentsMapConfig, type BaseError, type BoundaryDefinition, type BoundaryValidation, type BoundaryValidator, type BoundaryViolation, type BrokenLink, type ChangedFile, ChecklistBuilder, type CircularDependency, type CircularDepsResult, type CodeBlock, type CodeChanges, type CodePattern, type CodeReference, type CodebaseSnapshot, type CommitFormat, type CommitValidation, type ConfigError, type ConfigPattern, ConsoleSink, type ConstraintError, type ContextError, type ContextFilterResult, type Convention, type CoverageOptions, type CoverageReport, type CustomRule, type CustomRuleResult, DEFAULT_STATE, type DeadCodeConfig, type DeadCodeReport, type DeadExport, type DeadFile, type DeadInternal, type DependencyEdge, type DependencyGraph, type DependencyValidation, type DependencyViolation, type DocumentationDrift, type DocumentationFile, type DocumentationGap, type DriftConfig, type DriftReport, EntropyAnalyzer, type EntropyConfig, EntropyConfigSchema, type EntropyError, type EntropyReport, type ExecutorHealth, type Export, type ExportMap, type FailureEntry, FailureEntrySchema, type FeedbackConfig, type FeedbackError$1 as FeedbackError, type FileCategory, FileSink, type Fix, type FixConfig, type FixResult, type FixType, type ForbiddenPattern, type GateConfig, GateConfigSchema, type GateResult, GateResultSchema, type GenerationSection, type Handoff, HandoffSchema, type HarnessState, HarnessStateSchema, type HealthCheckResult, type Import, type InlineReference, type IntegrityReport, type InternalSymbol, type JSDocComment, type LanguageParser, type Layer, type LayerConfig, type LogEntry, type LogFilter, type Metric, 
NoOpExecutor, NoOpSink, NoOpTelemetryAdapter, type ParseError, type PatternConfig, PatternConfigSchema, type PatternMatch, type PatternReport, type PatternViolation, type PeerReview, type PeerReviewOptions, type PipelineOptions, type PipelineResult, REQUIRED_SECTIONS, type ReachabilityNode, type ReviewChecklist, type ReviewComment, type ReviewContext, type ReviewItem, type RunCIChecksInput, type SelfReviewConfig, type SkillExecutor, type SourceFile, type Span, type SpanEvent, type StepExecutor, type StructureValidation, type Suggestion, type SuggestionReport, type TelemetryAdapter, type TelemetryHealth, type TimeRange, type TokenBudget, type TokenBudgetOverrides, type Trace, type TurnExecutor, TypeScriptParser, type UnusedImport, VERSION, type ValidationError, type WorkflowPhase, analyzeDiff, appendFailure, appendLearning, applyFixes, archiveFailures, buildDependencyGraph, buildSnapshot, checkDocCoverage, configureFeedback, contextBudget, contextFilter, createBoundaryValidator, createError, createFixes, createParseError, createSelfReview, defineLayer, detectCircularDeps, detectCircularDepsInFiles, detectDeadCode, detectDocDrift, detectPatternViolations, executeWorkflow, extractMarkdownLinks, extractSections, generateAgentsMap, generateSuggestions, getActionEmitter, getFeedbackConfig, getPhaseCategories, loadFailures, loadHandoff, loadRelevantLearnings, loadState, logAgentAction, parseDiff, previewFix, requestMultiplePeerReviews, requestPeerReview, resetFeedbackConfig, resolveFileToLayer, runCIChecks, runMechanicalGate, runMultiTurnPipeline, runPipeline, saveHandoff, saveState, trackAction, validateAgentsMap, validateBoundaries, validateCommitMessage, validateConfig, validateDependencies, validateFileStructure, validateKnowledgeMap, validatePatternConfig };
|
|
3928
|
+
export { AGENT_DESCRIPTORS, ARCHITECTURE_DESCRIPTOR, type AST, type ActionContext, type ActionEvent, type ActionEventHandler, type ActionEventType, type ActionResult, type ActionSink, type ActionTracker, type ActionType, type AgentAction, AgentActionEmitter, type AgentConfig, type AgentExecutor, type AgentMapLink, type AgentMapSection, type AgentMapValidation, type AgentProcess, type AgentReviewResult, type AgentType, type AgentsMapConfig, BUG_DETECTION_DESCRIPTOR, type BaseError, type Baseline, BaselineManager, type BaselinesFile, type BenchmarkResult, type BenchmarkRunOptions, BenchmarkRunner, type BoundaryDefinition, type BoundaryValidation, type BoundaryValidator, type BoundaryViolation, type BrokenLink, COMPLIANCE_DESCRIPTOR, type ChangeType, type ChangedFile, ChecklistBuilder, type CircularDependency, type CircularDepsResult, type CleanupFinding, type CodeBlock, type CodeChanges, type CodePattern, type CodeReference, type CodebaseSnapshot, type CommentedCodeBlock, type CommitFormat, type CommitHistoryEntry, type CommitValidation, type ComplexityConfig, type ComplexityReport, type ComplexityThresholds, type ComplexityViolation, type ConfigError, type ConfigPattern, type Confirmation, ConfirmationSchema, ConsoleSink, type ConstraintError, type ContextBundle, type ContextError, type ContextFile, type ContextFilterResult, type ContextScopeOptions, type Convention, type CouplingConfig, type CouplingReport, type CouplingThresholds, type CouplingViolation, type CoverageOptions, type CoverageReport, type CriticalPathEntry, CriticalPathResolver, type CriticalPathSet, type CustomRule, type CustomRuleResult, DEFAULT_PROVIDER_TIERS, DEFAULT_SECURITY_CONFIG, DEFAULT_STATE, DEFAULT_STREAM_INDEX, type DeadCodeConfig, type DeadCodeReport, type DeadExport, type DeadFile, type DeadInternal, type DeduplicateFindingsOptions, type DependencyEdge, type DependencyGraph, type DependencyValidation, type DependencyViolation, type DiffInfo, type DocumentationDrift, type 
DocumentationFile, type DocumentationGap, type DriftConfig, type DriftReport, type EligibilityResult, type EmitInteractionInput, EmitInteractionInputSchema, EntropyAnalyzer, type EntropyConfig, EntropyConfigSchema, type EntropyError, type EntropyReport, ExclusionSet, type ExecutorHealth, type Export, type ExportMap, type FailureEntry, FailureEntrySchema, type FanOutOptions, type FeedbackConfig, type FeedbackError$1 as FeedbackError, type FileCategory, FileSink, type FindingSeverity, type Fix, type FixConfig, type FixResult, type FixType, type ForbiddenImportViolation, type ForbiddenPattern, type GateConfig, GateConfigSchema, type GateResult, GateResultSchema, type GenerationSection, type GitHubInlineComment, type GraphAdapter, type GraphComplexityData, type GraphCouplingData, type GraphCoverageData, type GraphCriticalPathData, type GraphDependencyData, type GraphHarnessCheckData, type GraphImpactData, type Handoff, HandoffSchema, type HarnessState, HarnessStateSchema, type HealthCheckResult, type HotspotContext, type Import, type InlineReference, type IntegrityReport, type InteractionType, InteractionTypeSchema, type InternalSymbol, type JSDocComment, type LanguageParser, type Layer, type LayerConfig, type LogEntry, type LogFilter, type MechanicalCheckOptions, type MechanicalCheckResult, type MechanicalCheckStatus, type MechanicalFinding, type Metric, type ModelProvider, type ModelTier, type ModelTierConfig, NoOpExecutor, NoOpSink, NoOpTelemetryAdapter, type OrphanedDep, type ParseError, type PatternConfig, PatternConfigSchema, type PatternMatch, type PatternReport, type PatternViolation, type PeerReview, type PeerReviewOptions, type PipelineContext, type PipelineFlags, type PipelineOptions, type PipelineResult, type PrMetadata, type PriorReview, type ProviderDefaults, type Question, QuestionSchema, REQUIRED_SECTIONS, type ReachabilityNode, RegressionDetector, type RegressionReport, type RegressionResult, type ReviewAgentDescriptor, type ReviewAssessment, type 
ReviewChecklist, type ReviewComment, type ReviewContext, type ReviewDomain, type ReviewFinding, type ReviewItem, type ReviewOutputOptions, type ReviewPipelineResult, type ReviewStrength, type RuleOverride, RuleRegistry, type RunCIChecksInput, type RunPipelineOptions, SECURITY_DESCRIPTOR, type SafetyLevel, type ScanResult, type SecurityCategory, type SecurityConfidence, type SecurityConfig, SecurityConfigSchema, type SecurityFinding, type SecurityRule, SecurityScanner, type SecuritySeverity, type SelfReviewConfig, type SizeBudgetConfig, type SizeBudgetReport, type SizeBudgetViolation, type SkillExecutor, type SourceFile, type Span, type SpanEvent, type StepExecutor, type StreamIndex, StreamIndexSchema, type StreamInfo, StreamInfoSchema, type StructureValidation, type Suggestion, type SuggestionReport, type SyncChange, type SyncOptions, type TelemetryAdapter, type TelemetryHealth, type TimeRange, type TokenBudget, type TokenBudgetOverrides, type Trace, type Transition, TransitionSchema, type TurnExecutor, TypeScriptParser, type UnusedImport, type UpdateCheckState, VERSION, type ValidateFindingsOptions, type ValidationError, type WorkflowPhase, analyzeDiff, appendFailure, appendLearning, applyFixes, applyHotspotDowngrade, archiveFailures, archiveStream, buildDependencyGraph, buildExclusionSet, buildSnapshot, checkDocCoverage, checkEligibility, classifyFinding, configureFeedback, contextBudget, contextFilter, createBoundaryValidator, createCommentedCodeFixes, createError, createFixes, createForbiddenImportFixes, createOrphanedDepFixes, createParseError, createSelfReview, createStream, cryptoRules, deduplicateCleanupFindings, deduplicateFindings, defineLayer, deserializationRules, detectChangeType, detectCircularDeps, detectCircularDepsInFiles, detectComplexityViolations, detectCouplingViolations, detectDeadCode, detectDocDrift, detectPatternViolations, detectSizeBudgetViolations, detectStack, determineAssessment, executeWorkflow, expressRules, extractMarkdownLinks, 
extractSections, fanOutReview, formatFindingBlock, formatGitHubComment, formatGitHubSummary, formatTerminalOutput, generateAgentsMap, generateSuggestions, getActionEmitter, getExitCode, getFeedbackConfig, getPhaseCategories, getStreamForBranch, getUpdateNotification, goRules, injectionRules, isSmallSuggestion, isUpdateCheckEnabled, listStreams, loadFailures, loadHandoff, loadRelevantLearnings, loadState, loadStreamIndex, logAgentAction, migrateToStreams, networkRules, nodeRules, parseDiff, parseRoadmap, parseSecurityConfig, parseSize, pathTraversalRules, previewFix, reactRules, readCheckState, requestMultiplePeerReviews, requestPeerReview, resetFeedbackConfig, resolveFileToLayer, resolveModelTier, resolveRuleSeverity, resolveStreamPath, runArchitectureAgent, runBugDetectionAgent, runCIChecks, runComplianceAgent, runMechanicalChecks, runMechanicalGate, runMultiTurnPipeline, runPipeline, runReviewPipeline, runSecurityAgent, saveHandoff, saveState, saveStreamIndex, scopeContext, secretRules, serializeRoadmap, setActiveStream, shouldRunCheck, spawnBackgroundCheck, syncRoadmap, touchStream, trackAction, validateAgentsMap, validateBoundaries, validateCommitMessage, validateConfig, validateDependencies, validateFileStructure, validateFindings, validateKnowledgeMap, validatePatternConfig, xssRules };
|