@aiready/core 0.19.3 → 0.19.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. This diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/dist/client.d.mts CHANGED
@@ -10,6 +10,30 @@ declare enum Severity {
10
10
  Info = "info"
11
11
  }
12
12
  declare const SeveritySchema: z.ZodEnum<typeof Severity>;
13
+ /**
14
+ * Canonical Tool Names (IDs)
15
+ * Used everywhere as the single source of truth for tool identification.
16
+ */
17
+ declare enum ToolName {
18
+ PatternDetect = "pattern-detect",
19
+ ContextAnalyzer = "context-analyzer",
20
+ NamingConsistency = "naming-consistency",
21
+ AiSignalClarity = "ai-signal-clarity",
22
+ AgentGrounding = "agent-grounding",
23
+ TestabilityIndex = "testability-index",
24
+ DocDrift = "doc-drift",
25
+ DependencyHealth = "dependency-health",
26
+ ChangeAmplification = "change-amplification",
27
+ CognitiveLoad = "cognitive-load",
28
+ PatternEntropy = "pattern-entropy",
29
+ ConceptCohesion = "concept-cohesion",
30
+ SemanticDistance = "semantic-distance"
31
+ }
32
+ declare const ToolNameSchema: z.ZodEnum<typeof ToolName>;
33
+ /**
34
+ * Friendly labels for UI display
35
+ */
36
+ declare const FRIENDLY_TOOL_NAMES: Record<ToolName, string>;
13
37
  /**
14
38
  * Standardized issue types across all AIReady tools.
15
39
  */
@@ -232,7 +256,7 @@ declare const UnifiedReportSchema: z.ZodObject<{
232
256
  rating: z.ZodString;
233
257
  timestamp: z.ZodString;
234
258
  breakdown: z.ZodArray<z.ZodObject<{
235
- toolName: z.ZodString;
259
+ toolName: z.ZodUnion<readonly [z.ZodEnum<typeof ToolName>, z.ZodString]>;
236
260
  score: z.ZodNumber;
237
261
  }, z.core.$catchall<z.ZodAny>>>;
238
262
  }, z.core.$strip>>;
@@ -753,53 +777,26 @@ interface ScoringConfig {
753
777
  * Default weights for known tools. Weights sum to 100 and read directly as
754
778
  * percentage contribution to the overall score.
755
779
  * New tools get weight of 5 if not specified.
756
- *
757
- * Weight philosophy:
758
- * - pattern-detect (22%): Semantic duplication directly wastes token budget and
759
- * confuses AI with contradictory in-context examples.
760
- * - context-analyzer (19%): Context limits are the primary hard constraint on
761
- * AI effectiveness regardless of model size.
762
- * - consistency (14%): Naming/pattern inconsistency degrades AI intent understanding
763
- * proportionally to codebase size.
764
- * - ai-signal-clarity (11%): Code patterns empirically causing AI to generate
765
- * confidently wrong outputs — critical for agentic use cases.
766
- * - agent-grounding (10%): How well an autonomous agent can navigate unaided —
767
- * increasingly important as agentic workflows grow.
768
- * - testability (10%): AI changes without verifiability create hidden risk.
769
- * - doc-drift (8%): Stale docs actively mislead AI; planned spoke.
770
- * - deps (6%): Dependency health affects AI suggestion accuracy; planned spoke.
771
780
  */
772
781
  declare const DEFAULT_TOOL_WEIGHTS: Record<string, number>;
773
782
  /**
774
- * Tool name normalization map (shorthand -> full name)
783
+ * Tool name normalization map (shorthand -> canonical name)
775
784
  */
776
785
  declare const TOOL_NAME_MAP: Record<string, string>;
777
786
  /**
778
787
  * Model context tiers for context-aware threshold calibration.
779
- *
780
- * As AI models evolve from 32k → 128k → 1M+ context windows, absolute token
781
- * thresholds become meaningless. Use these tiers to adjust context-analyzer
782
- * thresholds relative to the model your team uses.
783
788
  */
784
789
  type ModelContextTier = 'compact' | 'standard' | 'extended' | 'frontier';
785
790
  /**
786
791
  * Context budget thresholds per tier.
787
- * Scores are interpolated between these boundaries.
788
792
  */
789
793
  declare const CONTEXT_TIER_THRESHOLDS: Record<ModelContextTier, {
790
- /** Below this → full score for context budget */
791
794
  idealTokens: number;
792
- /** Above this → critical penalty for context budget */
793
795
  criticalTokens: number;
794
- /** Suggested max import depth before penalty */
795
796
  idealDepth: number;
796
797
  }>;
797
798
  /**
798
799
  * Project-size-adjusted minimum thresholds.
799
- *
800
- * Large codebases structurally accrue more issues. A score of 65 in an
801
- * enterprise codebase is roughly equivalent to 75 in a small project.
802
- * These are recommended minimum passing thresholds by project size.
803
800
  */
804
801
  declare const SIZE_ADJUSTED_THRESHOLDS: Record<string, number>;
805
802
  /**
@@ -811,30 +808,21 @@ declare function getProjectSizeTier(fileCount: number): keyof typeof SIZE_ADJUST
811
808
  */
812
809
  declare function getRecommendedThreshold(fileCount: number, modelTier?: ModelContextTier): number;
813
810
  /**
814
- * Normalize tool name from shorthand to full name
811
+ * Normalize tool name from shorthand to canonical name
815
812
  */
816
813
  declare function normalizeToolName(shortName: string): string;
817
814
  /**
818
- * Get tool weight with fallback priority:
819
- * 1. CLI override
820
- * 2. Tool config scoreWeight
821
- * 3. Default weight
822
- * 4. 10 (for unknown tools)
815
+ * Get tool weight
823
816
  */
824
817
  declare function getToolWeight(toolName: string, toolConfig?: {
825
818
  scoreWeight?: number;
826
819
  }, cliOverride?: number): number;
827
820
  /**
828
- * Parse weight string from CLI (e.g., "patterns:50,context:30")
821
+ * Parse weight string from CLI
829
822
  */
830
823
  declare function parseWeightString(weightStr?: string): Map<string, number>;
831
824
  /**
832
- * Calculate overall AI Readiness Score from multiple tool scores.
833
- *
834
- * Formula: Σ(tool_score × tool_weight) / Σ(active_tool_weights)
835
- *
836
- * This allows dynamic composition - score adjusts automatically
837
- * based on which tools actually ran.
825
+ * Calculate overall AI Readiness Score
838
826
  */
839
827
  declare function calculateOverallScore(toolOutputs: Map<string, ToolScoringOutput>, config?: any, cliWeights?: Map<string, number>): ScoringResult;
840
828
  /**
@@ -843,18 +831,17 @@ declare function calculateOverallScore(toolOutputs: Map<string, ToolScoringOutpu
843
831
  declare function getRating(score: number): ScoringResult['rating'];
844
832
  /**
845
833
  * Convert score to rating with project-size awareness.
846
- * Use this for display to give fairer assessment to large codebases.
847
834
  */
848
835
  declare function getRatingWithContext(score: number, fileCount: number, modelTier?: ModelContextTier): ScoringResult['rating'];
849
836
  /**
850
- * Get rating emoji and color for display
837
+ * Get rating display properties
851
838
  */
852
839
  declare function getRatingDisplay(rating: ScoringResult['rating']): {
853
840
  emoji: string;
854
841
  color: string;
855
842
  };
856
843
  /**
857
- * Format score for display with rating
844
+ * Format score for display
858
845
  */
859
846
  declare function formatScore(result: ScoringResult): string;
860
847
  /**
@@ -875,4 +862,4 @@ declare function formatToolScore(output: ToolScoringOutput): string;
875
862
  */
876
863
  declare function generateHTML(graph: GraphData): string;
877
864
 
878
- export { type AIReadyConfig, type AcceptancePrediction, type AnalysisResult, AnalysisResultSchema, AnalysisStatus, AnalysisStatusSchema, type BusinessReport, CONTEXT_TIER_THRESHOLDS, type CommonASTNode, type ComprehensionDifficulty, type CostConfig, DEFAULT_TOOL_WEIGHTS, type ExportInfo, type GraphData, type GraphEdge, type GraphIssueSeverity, type GraphMetadata, type GraphNode, type ImportInfo, type Issue, IssueSchema, IssueType, IssueTypeSchema, LANGUAGE_EXTENSIONS, Language, type LanguageConfig, type LanguageParser, type Location, LocationSchema, type Metrics, MetricsSchema, type ModelContextTier, ModelTier, ModelTierSchema, type NamingConvention, ParseError, type ParseResult, type ParseStatistics, type ProductivityImpact, type Report, SIZE_ADJUSTED_THRESHOLDS, type ScanOptions, type ScoringConfig, type ScoringResult, Severity, SeveritySchema, type SourceLocation, type SourceRange, type SpokeOutput, SpokeOutputSchema, TOOL_NAME_MAP, type TechnicalValueChain, type TechnicalValueChainSummary, type TokenBudget, type ToolScoringOutput, type UnifiedReport, UnifiedReportSchema, calculateOverallScore, formatScore, formatToolScore, generateHTML, getProjectSizeTier, getRating, getRatingDisplay, getRatingWithContext, getRecommendedThreshold, getToolWeight, normalizeToolName, parseWeightString };
865
+ export { type AIReadyConfig, type AcceptancePrediction, type AnalysisResult, AnalysisResultSchema, AnalysisStatus, AnalysisStatusSchema, type BusinessReport, CONTEXT_TIER_THRESHOLDS, type CommonASTNode, type ComprehensionDifficulty, type CostConfig, DEFAULT_TOOL_WEIGHTS, type ExportInfo, FRIENDLY_TOOL_NAMES, type GraphData, type GraphEdge, type GraphIssueSeverity, type GraphMetadata, type GraphNode, type ImportInfo, type Issue, IssueSchema, IssueType, IssueTypeSchema, LANGUAGE_EXTENSIONS, Language, type LanguageConfig, type LanguageParser, type Location, LocationSchema, type Metrics, MetricsSchema, type ModelContextTier, ModelTier, ModelTierSchema, type NamingConvention, ParseError, type ParseResult, type ParseStatistics, type ProductivityImpact, type Report, SIZE_ADJUSTED_THRESHOLDS, type ScanOptions, type ScoringConfig, type ScoringResult, Severity, SeveritySchema, type SourceLocation, type SourceRange, type SpokeOutput, SpokeOutputSchema, TOOL_NAME_MAP, type TechnicalValueChain, type TechnicalValueChainSummary, type TokenBudget, ToolName, ToolNameSchema, type ToolScoringOutput, type UnifiedReport, UnifiedReportSchema, calculateOverallScore, formatScore, formatToolScore, generateHTML, getProjectSizeTier, getRating, getRatingDisplay, getRatingWithContext, getRecommendedThreshold, getToolWeight, normalizeToolName, parseWeightString };
package/dist/client.d.ts CHANGED
@@ -10,6 +10,30 @@ declare enum Severity {
10
10
  Info = "info"
11
11
  }
12
12
  declare const SeveritySchema: z.ZodEnum<typeof Severity>;
13
+ /**
14
+ * Canonical Tool Names (IDs)
15
+ * Used everywhere as the single source of truth for tool identification.
16
+ */
17
+ declare enum ToolName {
18
+ PatternDetect = "pattern-detect",
19
+ ContextAnalyzer = "context-analyzer",
20
+ NamingConsistency = "naming-consistency",
21
+ AiSignalClarity = "ai-signal-clarity",
22
+ AgentGrounding = "agent-grounding",
23
+ TestabilityIndex = "testability-index",
24
+ DocDrift = "doc-drift",
25
+ DependencyHealth = "dependency-health",
26
+ ChangeAmplification = "change-amplification",
27
+ CognitiveLoad = "cognitive-load",
28
+ PatternEntropy = "pattern-entropy",
29
+ ConceptCohesion = "concept-cohesion",
30
+ SemanticDistance = "semantic-distance"
31
+ }
32
+ declare const ToolNameSchema: z.ZodEnum<typeof ToolName>;
33
+ /**
34
+ * Friendly labels for UI display
35
+ */
36
+ declare const FRIENDLY_TOOL_NAMES: Record<ToolName, string>;
13
37
  /**
14
38
  * Standardized issue types across all AIReady tools.
15
39
  */
@@ -232,7 +256,7 @@ declare const UnifiedReportSchema: z.ZodObject<{
232
256
  rating: z.ZodString;
233
257
  timestamp: z.ZodString;
234
258
  breakdown: z.ZodArray<z.ZodObject<{
235
- toolName: z.ZodString;
259
+ toolName: z.ZodUnion<readonly [z.ZodEnum<typeof ToolName>, z.ZodString]>;
236
260
  score: z.ZodNumber;
237
261
  }, z.core.$catchall<z.ZodAny>>>;
238
262
  }, z.core.$strip>>;
@@ -753,53 +777,26 @@ interface ScoringConfig {
753
777
  * Default weights for known tools. Weights sum to 100 and read directly as
754
778
  * percentage contribution to the overall score.
755
779
  * New tools get weight of 5 if not specified.
756
- *
757
- * Weight philosophy:
758
- * - pattern-detect (22%): Semantic duplication directly wastes token budget and
759
- * confuses AI with contradictory in-context examples.
760
- * - context-analyzer (19%): Context limits are the primary hard constraint on
761
- * AI effectiveness regardless of model size.
762
- * - consistency (14%): Naming/pattern inconsistency degrades AI intent understanding
763
- * proportionally to codebase size.
764
- * - ai-signal-clarity (11%): Code patterns empirically causing AI to generate
765
- * confidently wrong outputs — critical for agentic use cases.
766
- * - agent-grounding (10%): How well an autonomous agent can navigate unaided —
767
- * increasingly important as agentic workflows grow.
768
- * - testability (10%): AI changes without verifiability create hidden risk.
769
- * - doc-drift (8%): Stale docs actively mislead AI; planned spoke.
770
- * - deps (6%): Dependency health affects AI suggestion accuracy; planned spoke.
771
780
  */
772
781
  declare const DEFAULT_TOOL_WEIGHTS: Record<string, number>;
773
782
  /**
774
- * Tool name normalization map (shorthand -> full name)
783
+ * Tool name normalization map (shorthand -> canonical name)
775
784
  */
776
785
  declare const TOOL_NAME_MAP: Record<string, string>;
777
786
  /**
778
787
  * Model context tiers for context-aware threshold calibration.
779
- *
780
- * As AI models evolve from 32k → 128k → 1M+ context windows, absolute token
781
- * thresholds become meaningless. Use these tiers to adjust context-analyzer
782
- * thresholds relative to the model your team uses.
783
788
  */
784
789
  type ModelContextTier = 'compact' | 'standard' | 'extended' | 'frontier';
785
790
  /**
786
791
  * Context budget thresholds per tier.
787
- * Scores are interpolated between these boundaries.
788
792
  */
789
793
  declare const CONTEXT_TIER_THRESHOLDS: Record<ModelContextTier, {
790
- /** Below this → full score for context budget */
791
794
  idealTokens: number;
792
- /** Above this → critical penalty for context budget */
793
795
  criticalTokens: number;
794
- /** Suggested max import depth before penalty */
795
796
  idealDepth: number;
796
797
  }>;
797
798
  /**
798
799
  * Project-size-adjusted minimum thresholds.
799
- *
800
- * Large codebases structurally accrue more issues. A score of 65 in an
801
- * enterprise codebase is roughly equivalent to 75 in a small project.
802
- * These are recommended minimum passing thresholds by project size.
803
800
  */
804
801
  declare const SIZE_ADJUSTED_THRESHOLDS: Record<string, number>;
805
802
  /**
@@ -811,30 +808,21 @@ declare function getProjectSizeTier(fileCount: number): keyof typeof SIZE_ADJUST
811
808
  */
812
809
  declare function getRecommendedThreshold(fileCount: number, modelTier?: ModelContextTier): number;
813
810
  /**
814
- * Normalize tool name from shorthand to full name
811
+ * Normalize tool name from shorthand to canonical name
815
812
  */
816
813
  declare function normalizeToolName(shortName: string): string;
817
814
  /**
818
- * Get tool weight with fallback priority:
819
- * 1. CLI override
820
- * 2. Tool config scoreWeight
821
- * 3. Default weight
822
- * 4. 10 (for unknown tools)
815
+ * Get tool weight
823
816
  */
824
817
  declare function getToolWeight(toolName: string, toolConfig?: {
825
818
  scoreWeight?: number;
826
819
  }, cliOverride?: number): number;
827
820
  /**
828
- * Parse weight string from CLI (e.g., "patterns:50,context:30")
821
+ * Parse weight string from CLI
829
822
  */
830
823
  declare function parseWeightString(weightStr?: string): Map<string, number>;
831
824
  /**
832
- * Calculate overall AI Readiness Score from multiple tool scores.
833
- *
834
- * Formula: Σ(tool_score × tool_weight) / Σ(active_tool_weights)
835
- *
836
- * This allows dynamic composition - score adjusts automatically
837
- * based on which tools actually ran.
825
+ * Calculate overall AI Readiness Score
838
826
  */
839
827
  declare function calculateOverallScore(toolOutputs: Map<string, ToolScoringOutput>, config?: any, cliWeights?: Map<string, number>): ScoringResult;
840
828
  /**
@@ -843,18 +831,17 @@ declare function calculateOverallScore(toolOutputs: Map<string, ToolScoringOutpu
843
831
  declare function getRating(score: number): ScoringResult['rating'];
844
832
  /**
845
833
  * Convert score to rating with project-size awareness.
846
- * Use this for display to give fairer assessment to large codebases.
847
834
  */
848
835
  declare function getRatingWithContext(score: number, fileCount: number, modelTier?: ModelContextTier): ScoringResult['rating'];
849
836
  /**
850
- * Get rating emoji and color for display
837
+ * Get rating display properties
851
838
  */
852
839
  declare function getRatingDisplay(rating: ScoringResult['rating']): {
853
840
  emoji: string;
854
841
  color: string;
855
842
  };
856
843
  /**
857
- * Format score for display with rating
844
+ * Format score for display
858
845
  */
859
846
  declare function formatScore(result: ScoringResult): string;
860
847
  /**
@@ -875,4 +862,4 @@ declare function formatToolScore(output: ToolScoringOutput): string;
875
862
  */
876
863
  declare function generateHTML(graph: GraphData): string;
877
864
 
878
- export { type AIReadyConfig, type AcceptancePrediction, type AnalysisResult, AnalysisResultSchema, AnalysisStatus, AnalysisStatusSchema, type BusinessReport, CONTEXT_TIER_THRESHOLDS, type CommonASTNode, type ComprehensionDifficulty, type CostConfig, DEFAULT_TOOL_WEIGHTS, type ExportInfo, type GraphData, type GraphEdge, type GraphIssueSeverity, type GraphMetadata, type GraphNode, type ImportInfo, type Issue, IssueSchema, IssueType, IssueTypeSchema, LANGUAGE_EXTENSIONS, Language, type LanguageConfig, type LanguageParser, type Location, LocationSchema, type Metrics, MetricsSchema, type ModelContextTier, ModelTier, ModelTierSchema, type NamingConvention, ParseError, type ParseResult, type ParseStatistics, type ProductivityImpact, type Report, SIZE_ADJUSTED_THRESHOLDS, type ScanOptions, type ScoringConfig, type ScoringResult, Severity, SeveritySchema, type SourceLocation, type SourceRange, type SpokeOutput, SpokeOutputSchema, TOOL_NAME_MAP, type TechnicalValueChain, type TechnicalValueChainSummary, type TokenBudget, type ToolScoringOutput, type UnifiedReport, UnifiedReportSchema, calculateOverallScore, formatScore, formatToolScore, generateHTML, getProjectSizeTier, getRating, getRatingDisplay, getRatingWithContext, getRecommendedThreshold, getToolWeight, normalizeToolName, parseWeightString };
865
+ export { type AIReadyConfig, type AcceptancePrediction, type AnalysisResult, AnalysisResultSchema, AnalysisStatus, AnalysisStatusSchema, type BusinessReport, CONTEXT_TIER_THRESHOLDS, type CommonASTNode, type ComprehensionDifficulty, type CostConfig, DEFAULT_TOOL_WEIGHTS, type ExportInfo, FRIENDLY_TOOL_NAMES, type GraphData, type GraphEdge, type GraphIssueSeverity, type GraphMetadata, type GraphNode, type ImportInfo, type Issue, IssueSchema, IssueType, IssueTypeSchema, LANGUAGE_EXTENSIONS, Language, type LanguageConfig, type LanguageParser, type Location, LocationSchema, type Metrics, MetricsSchema, type ModelContextTier, ModelTier, ModelTierSchema, type NamingConvention, ParseError, type ParseResult, type ParseStatistics, type ProductivityImpact, type Report, SIZE_ADJUSTED_THRESHOLDS, type ScanOptions, type ScoringConfig, type ScoringResult, Severity, SeveritySchema, type SourceLocation, type SourceRange, type SpokeOutput, SpokeOutputSchema, TOOL_NAME_MAP, type TechnicalValueChain, type TechnicalValueChainSummary, type TokenBudget, ToolName, ToolNameSchema, type ToolScoringOutput, type UnifiedReport, UnifiedReportSchema, calculateOverallScore, formatScore, formatToolScore, generateHTML, getProjectSizeTier, getRating, getRatingDisplay, getRatingWithContext, getRecommendedThreshold, getToolWeight, normalizeToolName, parseWeightString };
package/dist/client.js CHANGED
@@ -25,6 +25,7 @@ __export(client_exports, {
25
25
  AnalysisStatusSchema: () => AnalysisStatusSchema,
26
26
  CONTEXT_TIER_THRESHOLDS: () => CONTEXT_TIER_THRESHOLDS,
27
27
  DEFAULT_TOOL_WEIGHTS: () => DEFAULT_TOOL_WEIGHTS,
28
+ FRIENDLY_TOOL_NAMES: () => FRIENDLY_TOOL_NAMES,
28
29
  IssueSchema: () => IssueSchema,
29
30
  IssueType: () => IssueType,
30
31
  IssueTypeSchema: () => IssueTypeSchema,
@@ -40,6 +41,8 @@ __export(client_exports, {
40
41
  SeveritySchema: () => SeveritySchema,
41
42
  SpokeOutputSchema: () => SpokeOutputSchema,
42
43
  TOOL_NAME_MAP: () => TOOL_NAME_MAP,
44
+ ToolName: () => ToolName,
45
+ ToolNameSchema: () => ToolNameSchema,
43
46
  UnifiedReportSchema: () => UnifiedReportSchema,
44
47
  calculateOverallScore: () => calculateOverallScore,
45
48
  formatScore: () => formatScore,
@@ -66,6 +69,38 @@ var Severity = /* @__PURE__ */ ((Severity2) => {
66
69
  return Severity2;
67
70
  })(Severity || {});
68
71
  var SeveritySchema = import_zod.z.nativeEnum(Severity);
72
+ var ToolName = /* @__PURE__ */ ((ToolName2) => {
73
+ ToolName2["PatternDetect"] = "pattern-detect";
74
+ ToolName2["ContextAnalyzer"] = "context-analyzer";
75
+ ToolName2["NamingConsistency"] = "naming-consistency";
76
+ ToolName2["AiSignalClarity"] = "ai-signal-clarity";
77
+ ToolName2["AgentGrounding"] = "agent-grounding";
78
+ ToolName2["TestabilityIndex"] = "testability-index";
79
+ ToolName2["DocDrift"] = "doc-drift";
80
+ ToolName2["DependencyHealth"] = "dependency-health";
81
+ ToolName2["ChangeAmplification"] = "change-amplification";
82
+ ToolName2["CognitiveLoad"] = "cognitive-load";
83
+ ToolName2["PatternEntropy"] = "pattern-entropy";
84
+ ToolName2["ConceptCohesion"] = "concept-cohesion";
85
+ ToolName2["SemanticDistance"] = "semantic-distance";
86
+ return ToolName2;
87
+ })(ToolName || {});
88
+ var ToolNameSchema = import_zod.z.nativeEnum(ToolName);
89
+ var FRIENDLY_TOOL_NAMES = {
90
+ ["pattern-detect" /* PatternDetect */]: "Semantic Duplicates",
91
+ ["context-analyzer" /* ContextAnalyzer */]: "Context Fragmentation",
92
+ ["naming-consistency" /* NamingConsistency */]: "Naming Consistency",
93
+ ["ai-signal-clarity" /* AiSignalClarity */]: "AI Signal Clarity",
94
+ ["agent-grounding" /* AgentGrounding */]: "Agent Grounding",
95
+ ["testability-index" /* TestabilityIndex */]: "Testability Index",
96
+ ["doc-drift" /* DocDrift */]: "Documentation Health",
97
+ ["dependency-health" /* DependencyHealth */]: "Dependency Health",
98
+ ["change-amplification" /* ChangeAmplification */]: "Change Amplification",
99
+ ["cognitive-load" /* CognitiveLoad */]: "Cognitive Load",
100
+ ["pattern-entropy" /* PatternEntropy */]: "Pattern Entropy",
101
+ ["concept-cohesion" /* ConceptCohesion */]: "Concept Cohesion",
102
+ ["semantic-distance" /* SemanticDistance */]: "Semantic Distance"
103
+ };
69
104
  var IssueType = /* @__PURE__ */ ((IssueType2) => {
70
105
  IssueType2["DuplicatePattern"] = "duplicate-pattern";
71
106
  IssueType2["PatternInconsistency"] = "pattern-inconsistency";
@@ -163,10 +198,12 @@ var UnifiedReportSchema = import_zod.z.object({
163
198
  overall: import_zod.z.number(),
164
199
  rating: import_zod.z.string(),
165
200
  timestamp: import_zod.z.string(),
166
- breakdown: import_zod.z.array(import_zod.z.object({
167
- toolName: import_zod.z.string(),
168
- score: import_zod.z.number()
169
- }).catchall(import_zod.z.any()))
201
+ breakdown: import_zod.z.array(
202
+ import_zod.z.object({
203
+ toolName: import_zod.z.union([ToolNameSchema, import_zod.z.string()]),
204
+ score: import_zod.z.number()
205
+ }).catchall(import_zod.z.any())
206
+ )
170
207
  }).optional()
171
208
  }).catchall(import_zod.z.any());
172
209
 
@@ -203,28 +240,34 @@ var ParseError = class extends Error {
203
240
 
204
241
  // src/scoring.ts
205
242
  var DEFAULT_TOOL_WEIGHTS = {
206
- "pattern-detect": 22,
207
- "context-analyzer": 19,
208
- consistency: 14,
209
- "ai-signal-clarity": 11,
210
- "agent-grounding": 10,
211
- testability: 10,
212
- "doc-drift": 8,
213
- deps: 6
243
+ ["pattern-detect" /* PatternDetect */]: 22,
244
+ ["context-analyzer" /* ContextAnalyzer */]: 19,
245
+ ["naming-consistency" /* NamingConsistency */]: 14,
246
+ ["ai-signal-clarity" /* AiSignalClarity */]: 11,
247
+ ["agent-grounding" /* AgentGrounding */]: 10,
248
+ ["testability-index" /* TestabilityIndex */]: 10,
249
+ ["doc-drift" /* DocDrift */]: 8,
250
+ ["dependency-health" /* DependencyHealth */]: 6,
251
+ ["change-amplification" /* ChangeAmplification */]: 8
214
252
  };
215
253
  var TOOL_NAME_MAP = {
216
- patterns: "pattern-detect",
217
- context: "context-analyzer",
218
- consistency: "consistency",
219
- "AI signal clarity": "ai-signal-clarity",
220
- "ai-signal-clarity": "ai-signal-clarity",
221
- grounding: "agent-grounding",
222
- "agent-grounding": "agent-grounding",
223
- testability: "testability",
224
- tests: "testability",
225
- "doc-drift": "doc-drift",
226
- docs: "doc-drift",
227
- deps: "deps"
254
+ patterns: "pattern-detect" /* PatternDetect */,
255
+ "pattern-detect": "pattern-detect" /* PatternDetect */,
256
+ context: "context-analyzer" /* ContextAnalyzer */,
257
+ "context-analyzer": "context-analyzer" /* ContextAnalyzer */,
258
+ consistency: "naming-consistency" /* NamingConsistency */,
259
+ "naming-consistency": "naming-consistency" /* NamingConsistency */,
260
+ "ai-signal": "ai-signal-clarity" /* AiSignalClarity */,
261
+ "ai-signal-clarity": "ai-signal-clarity" /* AiSignalClarity */,
262
+ grounding: "agent-grounding" /* AgentGrounding */,
263
+ "agent-grounding": "agent-grounding" /* AgentGrounding */,
264
+ testability: "testability-index" /* TestabilityIndex */,
265
+ "testability-index": "testability-index" /* TestabilityIndex */,
266
+ "doc-drift": "doc-drift" /* DocDrift */,
267
+ "deps-health": "dependency-health" /* DependencyHealth */,
268
+ "dependency-health": "dependency-health" /* DependencyHealth */,
269
+ "change-amp": "change-amplification" /* ChangeAmplification */,
270
+ "change-amplification": "change-amplification" /* ChangeAmplification */
228
271
  };
229
272
  var CONTEXT_TIER_THRESHOLDS = {
230
273
  compact: { idealTokens: 3e3, criticalTokens: 1e4, idealDepth: 4 },
@@ -258,22 +301,16 @@ function getRecommendedThreshold(fileCount, modelTier = "standard") {
258
301
  return base + modelBonus;
259
302
  }
260
303
  function normalizeToolName(shortName) {
261
- return TOOL_NAME_MAP[shortName] || shortName;
304
+ return TOOL_NAME_MAP[shortName.toLowerCase()] || shortName;
262
305
  }
263
306
  function getToolWeight(toolName, toolConfig, cliOverride) {
264
- if (cliOverride !== void 0) {
265
- return cliOverride;
266
- }
267
- if (toolConfig?.scoreWeight !== void 0) {
268
- return toolConfig.scoreWeight;
269
- }
307
+ if (cliOverride !== void 0) return cliOverride;
308
+ if (toolConfig?.scoreWeight !== void 0) return toolConfig.scoreWeight;
270
309
  return DEFAULT_TOOL_WEIGHTS[toolName] || 5;
271
310
  }
272
311
  function parseWeightString(weightStr) {
273
312
  const weights = /* @__PURE__ */ new Map();
274
- if (!weightStr) {
275
- return weights;
276
- }
313
+ if (!weightStr) return weights;
277
314
  const pairs = weightStr.split(",");
278
315
  for (const pair of pairs) {
279
316
  const [toolShortName, weightStr2] = pair.split(":");
@@ -305,8 +342,7 @@ function calculateOverallScore(toolOutputs, config, cliWeights) {
305
342
  const calculationWeights = {};
306
343
  for (const [toolName, output] of toolOutputs.entries()) {
307
344
  const weight = weights.get(toolName) || 5;
308
- const weightedScore = output.score * weight;
309
- weightedSum += weightedScore;
345
+ weightedSum += output.score * weight;
310
346
  totalWeight += weight;
311
347
  toolsUsed.push(toolName);
312
348
  calculationWeights[toolName] = weight;
@@ -565,6 +601,7 @@ function generateHTML(graph) {
565
601
  AnalysisStatusSchema,
566
602
  CONTEXT_TIER_THRESHOLDS,
567
603
  DEFAULT_TOOL_WEIGHTS,
604
+ FRIENDLY_TOOL_NAMES,
568
605
  IssueSchema,
569
606
  IssueType,
570
607
  IssueTypeSchema,
@@ -580,6 +617,8 @@ function generateHTML(graph) {
580
617
  SeveritySchema,
581
618
  SpokeOutputSchema,
582
619
  TOOL_NAME_MAP,
620
+ ToolName,
621
+ ToolNameSchema,
583
622
  UnifiedReportSchema,
584
623
  calculateOverallScore,
585
624
  formatScore,
package/dist/client.mjs CHANGED
@@ -4,6 +4,7 @@ import {
4
4
  AnalysisStatusSchema,
5
5
  CONTEXT_TIER_THRESHOLDS,
6
6
  DEFAULT_TOOL_WEIGHTS,
7
+ FRIENDLY_TOOL_NAMES,
7
8
  IssueSchema,
8
9
  IssueType,
9
10
  IssueTypeSchema,
@@ -19,6 +20,8 @@ import {
19
20
  SeveritySchema,
20
21
  SpokeOutputSchema,
21
22
  TOOL_NAME_MAP,
23
+ ToolName,
24
+ ToolNameSchema,
22
25
  UnifiedReportSchema,
23
26
  calculateOverallScore,
24
27
  formatScore,
@@ -32,13 +35,14 @@ import {
32
35
  getToolWeight,
33
36
  normalizeToolName,
34
37
  parseWeightString
35
- } from "./chunk-D3D3NCRR.mjs";
38
+ } from "./chunk-QAFB3HXQ.mjs";
36
39
  export {
37
40
  AnalysisResultSchema,
38
41
  AnalysisStatus,
39
42
  AnalysisStatusSchema,
40
43
  CONTEXT_TIER_THRESHOLDS,
41
44
  DEFAULT_TOOL_WEIGHTS,
45
+ FRIENDLY_TOOL_NAMES,
42
46
  IssueSchema,
43
47
  IssueType,
44
48
  IssueTypeSchema,
@@ -54,6 +58,8 @@ export {
54
58
  SeveritySchema,
55
59
  SpokeOutputSchema,
56
60
  TOOL_NAME_MAP,
61
+ ToolName,
62
+ ToolNameSchema,
57
63
  UnifiedReportSchema,
58
64
  calculateOverallScore,
59
65
  formatScore,
package/dist/index.d.mts CHANGED
@@ -1,5 +1,5 @@
1
1
  import { ScanOptions, AIReadyConfig, ModelContextTier, CostConfig, TokenBudget, ProductivityImpact, ToolScoringOutput, AcceptancePrediction, ComprehensionDifficulty, TechnicalValueChainSummary, TechnicalValueChain, LanguageParser, Language, ParseResult, NamingConvention } from './client.mjs';
2
- export { AnalysisResult, AnalysisResultSchema, AnalysisStatus, AnalysisStatusSchema, BusinessReport, CONTEXT_TIER_THRESHOLDS, CommonASTNode, DEFAULT_TOOL_WEIGHTS, ExportInfo, GraphData, GraphEdge, GraphIssueSeverity, GraphMetadata, GraphNode, ImportInfo, Issue, IssueSchema, IssueType, IssueTypeSchema, LANGUAGE_EXTENSIONS, LanguageConfig, Location, LocationSchema, Metrics, MetricsSchema, ModelTier, ModelTierSchema, ParseError, ParseStatistics, Report, SIZE_ADJUSTED_THRESHOLDS, ScoringConfig, ScoringResult, Severity, SeveritySchema, SourceLocation, SourceRange, SpokeOutput, SpokeOutputSchema, TOOL_NAME_MAP, UnifiedReport, UnifiedReportSchema, calculateOverallScore, formatScore, formatToolScore, generateHTML, getProjectSizeTier, getRating, getRatingDisplay, getRatingWithContext, getRecommendedThreshold, getToolWeight, normalizeToolName, parseWeightString } from './client.mjs';
2
+ export { AnalysisResult, AnalysisResultSchema, AnalysisStatus, AnalysisStatusSchema, BusinessReport, CONTEXT_TIER_THRESHOLDS, CommonASTNode, DEFAULT_TOOL_WEIGHTS, ExportInfo, FRIENDLY_TOOL_NAMES, GraphData, GraphEdge, GraphIssueSeverity, GraphMetadata, GraphNode, ImportInfo, Issue, IssueSchema, IssueType, IssueTypeSchema, LANGUAGE_EXTENSIONS, LanguageConfig, Location, LocationSchema, Metrics, MetricsSchema, ModelTier, ModelTierSchema, ParseError, ParseStatistics, Report, SIZE_ADJUSTED_THRESHOLDS, ScoringConfig, ScoringResult, Severity, SeveritySchema, SourceLocation, SourceRange, SpokeOutput, SpokeOutputSchema, TOOL_NAME_MAP, ToolName, ToolNameSchema, UnifiedReport, UnifiedReportSchema, calculateOverallScore, formatScore, formatToolScore, generateHTML, getProjectSizeTier, getRating, getRatingDisplay, getRatingWithContext, getRecommendedThreshold, getToolWeight, normalizeToolName, parseWeightString } from './client.mjs';
3
3
  import { z } from 'zod';
4
4
 
5
5
  /**
package/dist/index.d.ts CHANGED
@@ -1,5 +1,5 @@
1
1
  import { ScanOptions, AIReadyConfig, ModelContextTier, CostConfig, TokenBudget, ProductivityImpact, ToolScoringOutput, AcceptancePrediction, ComprehensionDifficulty, TechnicalValueChainSummary, TechnicalValueChain, LanguageParser, Language, ParseResult, NamingConvention } from './client.js';
2
- export { AnalysisResult, AnalysisResultSchema, AnalysisStatus, AnalysisStatusSchema, BusinessReport, CONTEXT_TIER_THRESHOLDS, CommonASTNode, DEFAULT_TOOL_WEIGHTS, ExportInfo, GraphData, GraphEdge, GraphIssueSeverity, GraphMetadata, GraphNode, ImportInfo, Issue, IssueSchema, IssueType, IssueTypeSchema, LANGUAGE_EXTENSIONS, LanguageConfig, Location, LocationSchema, Metrics, MetricsSchema, ModelTier, ModelTierSchema, ParseError, ParseStatistics, Report, SIZE_ADJUSTED_THRESHOLDS, ScoringConfig, ScoringResult, Severity, SeveritySchema, SourceLocation, SourceRange, SpokeOutput, SpokeOutputSchema, TOOL_NAME_MAP, UnifiedReport, UnifiedReportSchema, calculateOverallScore, formatScore, formatToolScore, generateHTML, getProjectSizeTier, getRating, getRatingDisplay, getRatingWithContext, getRecommendedThreshold, getToolWeight, normalizeToolName, parseWeightString } from './client.js';
2
+ export { AnalysisResult, AnalysisResultSchema, AnalysisStatus, AnalysisStatusSchema, BusinessReport, CONTEXT_TIER_THRESHOLDS, CommonASTNode, DEFAULT_TOOL_WEIGHTS, ExportInfo, FRIENDLY_TOOL_NAMES, GraphData, GraphEdge, GraphIssueSeverity, GraphMetadata, GraphNode, ImportInfo, Issue, IssueSchema, IssueType, IssueTypeSchema, LANGUAGE_EXTENSIONS, LanguageConfig, Location, LocationSchema, Metrics, MetricsSchema, ModelTier, ModelTierSchema, ParseError, ParseStatistics, Report, SIZE_ADJUSTED_THRESHOLDS, ScoringConfig, ScoringResult, Severity, SeveritySchema, SourceLocation, SourceRange, SpokeOutput, SpokeOutputSchema, TOOL_NAME_MAP, ToolName, ToolNameSchema, UnifiedReport, UnifiedReportSchema, calculateOverallScore, formatScore, formatToolScore, generateHTML, getProjectSizeTier, getRating, getRatingDisplay, getRatingWithContext, getRecommendedThreshold, getToolWeight, normalizeToolName, parseWeightString } from './client.js';
3
3
  import { z } from 'zod';
4
4
 
5
5
  /**