@aiready/core 0.9.25 → 0.9.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1,19 +1,24 @@
1
1
  import {
2
+ CONTEXT_TIER_THRESHOLDS,
2
3
  DEFAULT_TOOL_WEIGHTS,
3
4
  LANGUAGE_EXTENSIONS,
4
5
  Language,
5
6
  ParseError,
7
+ SIZE_ADJUSTED_THRESHOLDS,
6
8
  TOOL_NAME_MAP,
7
9
  calculateOverallScore,
8
10
  formatScore,
9
11
  formatToolScore,
10
12
  generateHTML,
13
+ getProjectSizeTier,
11
14
  getRating,
12
15
  getRatingDisplay,
16
+ getRatingWithContext,
17
+ getRecommendedThreshold,
13
18
  getToolWeight,
14
19
  normalizeToolName,
15
20
  parseWeightString
16
- } from "./chunk-HKSARRCD.mjs";
21
+ } from "./chunk-JJ5JL5FX.mjs";
17
22
 
18
23
  // src/utils/file-scanner.ts
19
24
  import { glob } from "glob";
@@ -414,11 +419,87 @@ function getElapsedTime(startTime) {
414
419
  }
415
420
 
416
421
  // src/business-metrics.ts
422
// Per-model pricing presets. Prices are USD per 1K tokens; the
// typicalQueriesPerDevPerDay figures are rough usage assumptions —
// TODO(review): confirm against current vendor pricing pages.
var MODEL_PRICING_PRESETS = {
  "gpt-4": {
    name: "GPT-4",
    pricePer1KInputTokens: 0.03,
    pricePer1KOutputTokens: 0.06,
    contextTier: "standard",
    typicalQueriesPerDevPerDay: 40
  },
  "gpt-4o": {
    name: "GPT-4o",
    pricePer1KInputTokens: 0.005,
    pricePer1KOutputTokens: 0.015,
    contextTier: "extended",
    typicalQueriesPerDevPerDay: 60
  },
  "gpt-4o-mini": {
    name: "GPT-4o mini",
    pricePer1KInputTokens: 0.00015,
    pricePer1KOutputTokens: 0.0006,
    contextTier: "extended",
    typicalQueriesPerDevPerDay: 120
  },
  "claude-3-5-sonnet": {
    name: "Claude 3.5 Sonnet",
    pricePer1KInputTokens: 0.003,
    pricePer1KOutputTokens: 0.015,
    contextTier: "extended",
    typicalQueriesPerDevPerDay: 80
  },
  "claude-3-7-sonnet": {
    name: "Claude 3.7 Sonnet",
    pricePer1KInputTokens: 0.003,
    pricePer1KOutputTokens: 0.015,
    contextTier: "frontier",
    typicalQueriesPerDevPerDay: 80
  },
  "claude-sonnet-4": {
    name: "Claude Sonnet 4",
    pricePer1KInputTokens: 0.003,
    pricePer1KOutputTokens: 0.015,
    contextTier: "frontier",
    typicalQueriesPerDevPerDay: 80
  },
  "gemini-1-5-pro": {
    name: "Gemini 1.5 Pro",
    pricePer1KInputTokens: 0.00125,
    pricePer1KOutputTokens: 0.005,
    contextTier: "frontier",
    typicalQueriesPerDevPerDay: 80
  },
  "gemini-2-0-flash": {
    name: "Gemini 2.0 Flash",
    pricePer1KInputTokens: 0.0001,
    pricePer1KOutputTokens: 0.0004,
    contextTier: "frontier",
    typicalQueriesPerDevPerDay: 150
  },
  "copilot": {
    name: "GitHub Copilot (subscription)",
    // Amortized per-request cost for a $19/month plan at 80 queries/day
    pricePer1KInputTokens: 0.0001,
    pricePer1KOutputTokens: 0.0001,
    contextTier: "extended",
    typicalQueriesPerDevPerDay: 80
  },
  "cursor-pro": {
    name: "Cursor Pro (subscription)",
    pricePer1KInputTokens: 0.0001,
    pricePer1KOutputTokens: 0.0001,
    contextTier: "frontier",
    typicalQueriesPerDevPerDay: 100
  }
};
/**
 * Look up a pricing preset by model id, falling back to "gpt-4o" for
 * unknown ids.
 *
 * FIX: the previous `MODEL_PRICING_PRESETS[modelId] ?? fallback` lookup
 * leaked inherited Object.prototype members — e.g. modelId "constructor"
 * or "toString" returned a function instead of the fallback preset.
 * Guard with Object.hasOwn so only own keys resolve to presets.
 */
function getModelPreset(modelId) {
  return Object.hasOwn(MODEL_PRICING_PRESETS, modelId) ? MODEL_PRICING_PRESETS[modelId] : MODEL_PRICING_PRESETS["gpt-4o"];
}
417
498
  var DEFAULT_COST_CONFIG = {
418
- pricePer1KTokens: 0.01,
419
- // $0.01 per 1K tokens (GPT-4)
420
- queriesPerDevPerDay: 50,
421
- // Average AI queries per developer
499
+ pricePer1KTokens: 5e-3,
500
+ // GPT-4o input price (updated from GPT-4 era 0.01)
501
+ queriesPerDevPerDay: 60,
502
+ // Average AI queries per developer (updated: 40→60 as of 2026)
422
503
  developerCount: 5,
423
504
  // Default team size
424
505
  daysPerMonth: 30
@@ -469,43 +550,63 @@ function calculateProductivityImpact(issues, hourlyRate = DEFAULT_HOURLY_RATE) {
469
550
  }
470
551
/**
 * Predict the AI-suggestion acceptance rate from tool scores.
 *
 * Each available tool nudges a base rate of 0.3; scores above 50 help,
 * below 50 hurt (hallucination-risk is inverted: a HIGH risk score hurts).
 * The result is clamped to [0.05, 0.8]; confidence grows with the number
 * of tools that contributed data.
 */
function predictAcceptanceRate(toolOutputs) {
  const baseRate = 0.3;
  // [tool key, display label, per-point coefficient, invert score?]
  const signalSpecs = [
    ["pattern-detect", "Semantic Duplication", 3e-3, false],
    ["context-analyzer", "Context Efficiency", 4e-3, false],
    ["consistency", "Code Consistency", 2e-3, false],
    ["hallucination-risk", "Hallucination Risk", 2e-3, true]
  ];
  const factors = [];
  for (const [key, name, coefficient, inverted] of signalSpecs) {
    const output = toolOutputs.get(key);
    if (!output) continue;
    const delta = inverted ? 50 - output.score : output.score - 50;
    // impact is expressed in percentage points.
    factors.push({ name, impact: Math.round(delta * coefficient * 100) });
  }
  const totalImpact = factors.reduce((acc, f) => acc + f.impact / 100, 0);
  const rate = Math.max(0.05, Math.min(0.8, baseRate + totalImpact));
  const toolCount = toolOutputs.size;
  const confidence = toolCount >= 4 ? 0.75 : toolCount >= 3 ? 0.65 : toolCount >= 2 ? 0.5 : 0.35;
  return {
    rate: Math.round(rate * 100) / 100,
    confidence,
    factors
  };
}
506
- function calculateComprehensionDifficulty(contextBudget, importDepth, fragmentation, consistencyScore, totalFiles) {
507
- const budgetFactor = Math.min(100, Math.max(0, (contextBudget - 5e3) / 250));
508
- const depthFactor = Math.min(100, Math.max(0, (importDepth - 5) * 10));
599
+ function calculateComprehensionDifficulty(contextBudget, importDepth, fragmentation, consistencyScore, totalFiles, modelTier = "standard") {
600
+ const tierThresholds = CONTEXT_TIER_THRESHOLDS[modelTier];
601
+ const idealBudget = tierThresholds.idealTokens;
602
+ const criticalBudget = tierThresholds.criticalTokens;
603
+ const idealDepth = tierThresholds.idealDepth;
604
+ const budgetRange = criticalBudget - idealBudget;
605
+ const budgetFactor = Math.min(100, Math.max(
606
+ 0,
607
+ (contextBudget - idealBudget) / budgetRange * 100
608
+ ));
609
+ const depthFactor = Math.min(100, Math.max(0, (importDepth - idealDepth) * 10));
509
610
  const fragmentationFactor = Math.min(100, Math.max(0, (fragmentation - 0.3) * 250));
510
611
  const consistencyFactor = Math.min(100, Math.max(0, 100 - consistencyScore));
511
612
  const fileFactor = Math.min(100, Math.max(0, (totalFiles - 50) / 5));
@@ -525,12 +626,12 @@ function calculateComprehensionDifficulty(contextBudget, importDepth, fragmentat
525
626
  {
526
627
  name: "Context Budget",
527
628
  contribution: Math.round(budgetFactor * 0.35),
528
- description: `${Math.round(contextBudget)} tokens required`
629
+ description: `${Math.round(contextBudget)} tokens required (${modelTier} model tier: ideal <${idealBudget.toLocaleString()})`
529
630
  },
530
631
  {
531
632
  name: "Import Depth",
532
633
  contribution: Math.round(depthFactor * 0.2),
533
- description: `${importDepth.toFixed(1)} average levels`
634
+ description: `${importDepth.toFixed(1)} average levels (ideal <${idealDepth} for ${modelTier})`
534
635
  },
535
636
  {
536
637
  name: "Code Fragmentation",
@@ -573,6 +674,200 @@ function formatHours(hours) {
573
674
/** Render a 0–1 acceptance fraction as a whole-number percentage string. */
function formatAcceptanceRate(rate) {
  const percent = Math.round(rate * 100);
  return `${percent}%`;
}
677
/**
 * Derive a score trend (direction, 30/90-day change, weekly velocity and a
 * 4-week projection) from a chronologically ordered score history.
 *
 * NOTE(review): weeksOfData = history.length / 7 assumes roughly one entry
 * per day — TODO confirm the snapshot cadence.
 */
function calculateScoreTrend(history) {
  // Not enough data for a trend: report a flat, zero-velocity result.
  if (history.length < 2) {
    return {
      direction: "stable",
      change30Days: 0,
      change90Days: 0,
      velocity: 0,
      // FIX: use ?? — with ||, a legitimate overallScore of 0 was replaced by 100.
      projectedScore: history[0]?.overallScore ?? 100
    };
  }
  const now = /* @__PURE__ */ new Date();
  const thirtyDaysAgo = new Date(now.getTime() - 30 * 24 * 60 * 60 * 1e3);
  const ninetyDaysAgo = new Date(now.getTime() - 90 * 24 * 60 * 60 * 1e3);
  const last30Days = history.filter((e) => new Date(e.timestamp) >= thirtyDaysAgo);
  const last90Days = history.filter((e) => new Date(e.timestamp) >= ninetyDaysAgo);
  const currentScore = history[history.length - 1].overallScore;
  // FIX: ?? instead of || — a recorded score of 0 is valid data, not "missing".
  const thirtyDaysAgoScore = last30Days[0]?.overallScore ?? currentScore;
  const ninetyDaysAgoScore = last90Days[0]?.overallScore ?? thirtyDaysAgoScore;
  const change30Days = currentScore - thirtyDaysAgoScore;
  const change90Days = currentScore - ninetyDaysAgoScore;
  const weeksOfData = Math.max(1, history.length / 7);
  const totalChange = currentScore - history[0].overallScore;
  const velocity = totalChange / weeksOfData;
  let direction;
  if (change30Days > 3) direction = "improving";
  else if (change30Days < -3) direction = "degrading";
  else direction = "stable";
  // Linear projection 4 weeks out, clamped to the valid 0–100 score range.
  const projectedScore = Math.max(0, Math.min(100, currentScore + velocity * 4));
  return {
    direction,
    change30Days,
    change90Days,
    velocity: Math.round(velocity * 10) / 10,
    projectedScore: Math.round(projectedScore)
  };
}
713
/**
 * Measure how quickly issues are being fixed: issues closed this week,
 * weekly average over the whole history, whether the pace is accelerating,
 * and a completion estimate for the currently open issues.
 * Entries are assumed chronological (oldest first).
 */
function calculateRemediationVelocity(history, currentIssues) {
  if (history.length < 2) {
    return {
      issuesFixedThisWeek: 0,
      avgIssuesPerWeek: 0,
      trend: "stable",
      estimatedCompletionWeeks: currentIssues > 0 ? Infinity : 0
    };
  }
  const DAY_MS = 24 * 60 * 60 * 1e3;
  const nowMs = Date.now();
  const oneWeekAgo = new Date(nowMs - 7 * DAY_MS);
  const twoWeeksAgo = new Date(nowMs - 14 * DAY_MS);
  const thisWeek = history.filter((e) => new Date(e.timestamp) >= oneWeekAgo);
  const lastWeek = history.filter((e) => {
    const ts = new Date(e.timestamp);
    return ts >= twoWeeksAgo && ts < oneWeekAgo;
  });
  // Issues fixed across a window = first snapshot's count minus the last's.
  const fixedIn = (entries) => entries.length > 1 ? entries[0].totalIssues - entries[entries.length - 1].totalIssues : 0;
  const issuesFixedThisWeek = fixedIn(thisWeek);
  const totalIssuesFixed = history[0].totalIssues - history[history.length - 1].totalIssues;
  // Assumes roughly one entry per day — TODO confirm snapshot cadence.
  const weeksOfData = Math.max(1, history.length / 7);
  const avgIssuesPerWeek = totalIssuesFixed / weeksOfData;
  let trend = "stable";
  if (lastWeek.length > 1) {
    const lastWeekFixed = fixedIn(lastWeek);
    if (issuesFixedThisWeek > lastWeekFixed * 1.2) trend = "accelerating";
    else if (issuesFixedThisWeek < lastWeekFixed * 0.8) trend = "decelerating";
  }
  const estimatedCompletionWeeks = avgIssuesPerWeek > 0 ? Math.ceil(currentIssues / avgIssuesPerWeek) : Infinity;
  return {
    issuesFixedThisWeek: Math.max(0, issuesFixedThisWeek),
    avgIssuesPerWeek: Math.round(avgIssuesPerWeek * 10) / 10,
    trend,
    estimatedCompletionWeeks
  };
}
750
/**
 * Score knowledge-concentration risk (0 = distributed, 100 = concentrated)
 * from structural signals: orphan files (few imports AND few exports),
 * files exporting far above average, and — when authorData is supplied —
 * entries whose value list has length 1.
 *
 * NOTE(review): authorData's shape is not visible here; the length-1 check
 * presumably means "single author per file" — confirm against callers.
 */
function calculateKnowledgeConcentration(files, authorData) {
  if (files.length === 0) {
    return {
      score: 0,
      rating: "low",
      analysis: {
        uniqueConceptFiles: 0,
        totalFiles: 0,
        concentrationRatio: 0,
        singleAuthorFiles: 0,
        orphanFiles: 0
      },
      recommendations: ["No files to analyze"]
    };
  }
  const fileCount = files.length;
  const orphanFiles = files.filter((f) => f.exports < 2 && f.imports < 2).length;
  const totalExports = files.reduce((sum, f) => sum + f.exports, 0);
  const avgExports = totalExports / fileCount;
  // "Unique concept" files export more than twice the average.
  const uniqueConceptFiles = files.filter((f) => f.exports > avgExports * 2).length;
  const concentrationRatio = totalExports > 0 ? uniqueConceptFiles / fileCount : 0;
  let singleAuthorFiles = 0;
  if (authorData) {
    for (const owned of authorData.values()) {
      if (owned.length === 1) singleAuthorFiles += 1;
    }
  }
  // Weighted risk: orphans 30%, concentration 40%, single-author 30%.
  const orphanRisk = orphanFiles / fileCount * 30;
  const uniqueRisk = concentrationRatio * 40;
  const singleAuthorRisk = authorData ? singleAuthorFiles / fileCount * 30 : 0;
  const score = Math.min(100, Math.round(orphanRisk + uniqueRisk + singleAuthorRisk));
  const rating = score < 20 ? "low" : score < 40 ? "moderate" : score < 70 ? "high" : "critical";
  const recommendations = [];
  if (orphanFiles > fileCount * 0.2) {
    recommendations.push(`Reduce ${orphanFiles} orphan files by connecting them to main modules`);
  }
  if (uniqueConceptFiles > fileCount * 0.1) {
    recommendations.push("Distribute high-export files into more focused modules");
  }
  if (authorData && singleAuthorFiles > fileCount * 0.3) {
    recommendations.push("Increase knowledge sharing to reduce single-author dependencies");
  }
  return {
    score,
    rating,
    analysis: {
      uniqueConceptFiles,
      totalFiles: fileCount,
      concentrationRatio: Math.round(concentrationRatio * 100) / 100,
      singleAuthorFiles,
      orphanFiles
    },
    recommendations
  };
}
808
/**
 * Model technical debt as compound interest: a base 2%/month rate grows
 * with issue severity mix and with how long issues have stayed open, then
 * the annualized cost ("principal") is compounded out 6/12/24 months.
 * Rates in the result are percentages rounded to two decimals.
 */
function calculateTechnicalDebtInterest(params) {
  const { currentMonthlyCost, issues, monthsOpen } = params;
  // Tally issues by severity (unknown severities are ignored, as before).
  const tally = { critical: 0, major: 0, minor: 0 };
  for (const issue of issues) {
    if (Object.hasOwn(tally, issue.severity)) tally[issue.severity] += 1;
  }
  const severityWeight = (tally.critical * 3 + tally.major * 2 + tally.minor) / Math.max(1, issues.length);
  const baseRate = 0.02 + severityWeight * 0.01;
  // Debt left open gets more expensive: +10% multiplier per month open.
  const timeMultiplier = Math.max(1, 1 + monthsOpen * 0.1);
  const monthlyRate = baseRate * timeMultiplier;
  const principal = currentMonthlyCost * 12;
  // Compound the principal monthly, rounding only the final figure.
  const compound = (amount, months) => {
    let value = amount;
    for (let m = 0; m < months; m++) {
      value = value * (1 + monthlyRate);
    }
    return Math.round(value);
  };
  return {
    monthlyRate: Math.round(monthlyRate * 1e4) / 100,
    annualRate: Math.round((Math.pow(1 + monthlyRate, 12) - 1) * 1e4) / 100,
    principal,
    projections: {
      months6: compound(principal, 6),
      months12: compound(principal, 12),
      months24: compound(principal, 24)
    },
    monthlyCost: Math.round(currentMonthlyCost * (1 + monthlyRate) * 100) / 100
  };
}
838
/**
 * Break total debt into the three tracked categories and return them
 * ordered by priority (high → medium → low). Growth rates are %/month;
 * fixCost estimates one-time remediation as a multiple of current waste.
 */
function getDebtBreakdown(patternCost, contextCost, consistencyCost) {
  const breakdowns = [
    {
      category: "Semantic Duplication",
      currentCost: patternCost,
      monthlyGrowthRate: 5,
      // Grows as devs copy-paste
      priority: patternCost > 1e3 ? "high" : "medium",
      fixCost: patternCost * 3
      // Fixing costs 3x current waste
    },
    {
      category: "Context Fragmentation",
      currentCost: contextCost,
      monthlyGrowthRate: 3,
      // Grows with new features
      priority: contextCost > 500 ? "high" : "medium",
      fixCost: contextCost * 2.5
    },
    {
      category: "Consistency Issues",
      currentCost: consistencyCost,
      monthlyGrowthRate: 2,
      // Grows with new devs
      priority: consistencyCost > 200 ? "medium" : "low",
      fixCost: consistencyCost * 1.5
    }
  ];
  const rank = { high: 0, medium: 1, low: 2 };
  return breakdowns.sort((left, right) => rank[left.priority] - rank[right.priority]);
}
576
871
 
577
872
  // src/parsers/typescript-parser.ts
578
873
  import { parse as parse2 } from "@typescript-eslint/typescript-estree";
@@ -1053,23 +1348,671 @@ function isFileSupported(filePath) {
1053
1348
  function getSupportedLanguages() {
1054
1349
  return ParserFactory.getInstance().getSupportedLanguages();
1055
1350
  }
1351
+
1352
+ // src/future-proof-metrics.ts
1353
/**
 * Score how hard a file is to understand (0–100) from four weighted
 * factors: size (30%), exported interface (25%), dependencies (25%) and
 * conceptual density (20%). cyclomaticComplexity is recorded in rawValues
 * but does not feed the score.
 */
function calculateCognitiveLoad(params) {
  const { linesOfCode, exportCount, importCount, uniqueConcepts, cyclomaticComplexity = 1 } = params;
  const conceptDensity = linesOfCode > 0 ? uniqueConcepts / linesOfCode : 0;
  const factors = [
    {
      name: "Size Complexity",
      // Files up to 50 lines are free; each further 10 lines adds a point.
      score: Math.min(100, Math.max(0, (linesOfCode - 50) / 10)),
      weight: 0.3,
      description: `${linesOfCode} lines of code`
    },
    {
      name: "Interface Complexity",
      score: Math.min(100, exportCount * 5),
      weight: 0.25,
      description: `${exportCount} exported concepts`
    },
    {
      name: "Dependency Complexity",
      score: Math.min(100, importCount * 8),
      weight: 0.25,
      description: `${importCount} dependencies`
    },
    {
      name: "Conceptual Density",
      score: Math.min(100, conceptDensity * 500),
      weight: 0.2,
      description: `${uniqueConcepts} unique concepts`
    }
  ];
  const score = factors.reduce((sum, f) => sum + f.score * f.weight, 0);
  const rating = score < 20 ? "trivial" : score < 40 ? "easy" : score < 60 ? "moderate" : score < 80 ? "difficult" : "expert";
  return {
    score: Math.round(score),
    rating,
    factors,
    rawValues: {
      size: linesOfCode,
      complexity: cyclomaticComplexity,
      dependencyCount: importCount,
      conceptCount: uniqueConcepts
    }
  };
}
1400
/**
 * Estimate the semantic distance (0 = tightly related, ~1 = unrelated)
 * between two files from their domains and shared dependencies.
 * Components: domain mismatch 40%, import-overlap 30%, "no shared deps"
 * penalty 30%.
 */
function calculateSemanticDistance(params) {
  const { file1, file2, file1Domain, file2Domain, sharedDependencies, file1Imports, file2Imports } = params;
  const sharedCount = sharedDependencies.length;
  // Same domain = 0; both known but different = 0.5; any unknown = 0.8.
  let domainDistance;
  if (file1Domain === file2Domain) domainDistance = 0;
  else if (file1Domain && file2Domain) domainDistance = 0.5;
  else domainDistance = 0.8;
  const smallerImportList = Math.max(1, Math.min(file1Imports.length, file2Imports.length));
  const importDistance = 1 - sharedCount / smallerImportList;
  const distance = domainDistance * 0.4 + importDistance * 0.3 + (sharedCount > 0 ? 0 : 0.3);
  let relationship;
  if (file1 === file2) relationship = "same-file";
  else if (file1Domain === file2Domain) relationship = "same-domain";
  else if (sharedCount > 0) relationship = "cross-domain";
  else relationship = "unrelated";
  // Keep only real, non-empty string segments in the semantic path.
  const path = [file1Domain, ...sharedDependencies, file2Domain].filter(
    (s) => typeof s === "string" && s.length > 0
  );
  let reason;
  if (relationship === "same-domain") reason = `Both in "${file1Domain}" domain`;
  else if (relationship === "cross-domain") reason = `Share ${sharedCount} dependency(ies)`;
  else reason = "No strong semantic relationship detected";
  return {
    between: [file1, file2],
    distance: Math.round(distance * 100) / 100,
    relationship,
    path,
    reason
  };
}
1420
/**
 * Measure how scattered a set of files is across the directory tree.
 * Files are bucketed by their first four path segments, then rated by
 * normalized Shannon entropy (0 = one location, 1 = evenly scattered)
 * plus a Gini coefficient describing how unevenly buckets are filled.
 */
function calculatePatternEntropy(files) {
  if (files.length === 0) {
    return {
      domain: "unknown",
      entropy: 0,
      rating: "crystalline",
      distribution: { locationCount: 0, dominantLocation: "", giniCoefficient: 0 },
      recommendations: ["No files to analyze"]
    };
  }
  // Bucket files by the first four path segments.
  const buckets = new Map();
  for (const file of files) {
    const key = file.path.split("/").slice(0, 4).join("/") || "root";
    buckets.set(key, (buckets.get(key) || 0) + 1);
  }
  const counts = [...buckets.values()];
  const total = counts.reduce((a, b) => a + b, 0);
  // Shannon entropy over the bucket distribution.
  let entropy = 0;
  for (const count of counts) {
    const p = count / total;
    if (p > 0) entropy -= p * Math.log2(p);
  }
  const maxEntropy = Math.log2(buckets.size || 1);
  const normalizedEntropy = maxEntropy > 0 ? entropy / maxEntropy : 0;
  // Gini coefficient over ascending bucket counts.
  const ascending = counts.sort((a, b) => a - b);
  let gini = 0;
  ascending.forEach((count, i) => {
    gini += (2 * (i + 1) - ascending.length - 1) * count;
  });
  gini /= total * ascending.length;
  // Find the most-populated bucket.
  let dominantLocation = "";
  let best = 0;
  for (const [location, count] of buckets) {
    if (count > best) {
      best = count;
      dominantLocation = location;
    }
  }
  const rating = normalizedEntropy < 0.2 ? "crystalline" : normalizedEntropy < 0.4 ? "well-structured" : normalizedEntropy < 0.6 ? "moderate" : normalizedEntropy < 0.8 ? "fragmented" : "chaotic";
  const recommendations = [];
  if (normalizedEntropy > 0.5) {
    recommendations.push(`Consolidate ${files.length} files into fewer directories by domain`);
  }
  if (buckets.size > 5) {
    recommendations.push("Consider barrel exports to reduce directory navigation");
  }
  if (gini > 0.5) {
    recommendations.push("Redistribute files more evenly across directories");
  }
  return {
    domain: files[0] ? files[0].domain : "mixed",
    entropy: Math.round(normalizedEntropy * 100) / 100,
    rating,
    distribution: {
      locationCount: buckets.size,
      dominantLocation,
      giniCoefficient: Math.round(gini * 100) / 100
    },
    recommendations
  };
}
1488
/**
 * Score how cohesive a file's exports are (0–1): half from how dominated
 * the exports are by a single domain, half from how few distinct domains
 * appear relative to the export count.
 *
 * FIX: when exports exist but none carry domain info, the old code divided
 * by allDomains.length === 0, producing Infinity for the concentration and
 * score; and with zero unique domains the clarity formula exceeded 1.
 * Absent domain evidence now counts as fully concentrated, and clarity is
 * clamped to [0, 1].
 */
function calculateConceptCohesion(params) {
  const { exports } = params;
  if (exports.length === 0) {
    return {
      score: 1,
      rating: "excellent",
      analysis: { uniqueDomains: 0, domainConcentration: 0, exportPurposeClarity: 1 }
    };
  }
  const allDomains = [];
  for (const exp of exports) {
    if (exp.inferredDomain) allDomains.push(exp.inferredDomain);
    if (exp.domains) allDomains.push(...exp.domains);
  }
  const uniqueDomains = new Set(allDomains);
  const domainCounts = /* @__PURE__ */ new Map();
  for (const d of allDomains) {
    domainCounts.set(d, (domainCounts.get(d) || 0) + 1);
  }
  const maxCount = Math.max(...Array.from(domainCounts.values()), 1);
  // FIX: no domain data means no evidence of mixing — treat as concentration 1
  // instead of dividing by zero.
  const domainConcentration = allDomains.length > 0 ? maxCount / allDomains.length : 1;
  // FIX: clamp to [0, 1] — with uniqueDomains.size === 0 the raw value is > 1.
  const exportPurposeClarity = Math.max(
    0,
    Math.min(1, 1 - (uniqueDomains.size - 1) / Math.max(1, exports.length))
  );
  const score = domainConcentration * 0.5 + exportPurposeClarity * 0.5;
  let rating;
  if (score > 0.8) rating = "excellent";
  else if (score > 0.6) rating = "good";
  else if (score > 0.4) rating = "moderate";
  else rating = "poor";
  return {
    score: Math.round(score * 100) / 100,
    rating,
    analysis: {
      uniqueDomains: uniqueDomains.size,
      domainConcentration: Math.round(domainConcentration * 100) / 100,
      exportPurposeClarity: Math.round(exportPurposeClarity * 100) / 100
    }
  };
}
1526
/**
 * Combine cognitive load (40%), pattern entropy (30%) and concept cohesion
 * (30%) into a single 0–100 "future-proof" tool score, with per-factor
 * impacts (centered on 50) and actionable recommendations.
 */
function calculateFutureProofScore(params) {
  const { cognitiveLoad, patternEntropy, conceptCohesion, semanticDistances } = params;
  // Convert each metric into a "higher is better" 0–100 score.
  const loadScore = 100 - cognitiveLoad.score;
  const entropyScore = 100 - patternEntropy.entropy * 100;
  const cohesionScore = conceptCohesion.score * 100;
  const overall = Math.round(loadScore * 0.4 + entropyScore * 0.3 + cohesionScore * 0.3);
  const asFactor = (name, value, description) => ({
    name,
    impact: Math.round(value - 50),
    description
  });
  const factors = [
    asFactor("Cognitive Load", loadScore, cognitiveLoad.rating),
    asFactor("Pattern Entropy", entropyScore, patternEntropy.rating),
    asFactor("Concept Cohesion", cohesionScore, conceptCohesion.rating)
  ];
  // Entropy recommendations carry over at medium priority.
  const recommendations = patternEntropy.recommendations.map((action) => ({
    action,
    estimatedImpact: 5,
    priority: "medium"
  }));
  if (conceptCohesion.rating === "poor") {
    recommendations.push({
      action: "Improve concept cohesion by grouping related exports",
      estimatedImpact: 8,
      priority: "high"
    });
  }
  let semanticDistanceAvg = 0;
  if (semanticDistances && semanticDistances.length > 0) {
    semanticDistanceAvg = semanticDistances.reduce((sum, d) => sum + d.distance, 0) / semanticDistances.length;
  }
  return {
    toolName: "future-proof",
    score: overall,
    rawMetrics: {
      cognitiveLoadScore: cognitiveLoad.score,
      entropyScore: patternEntropy.entropy,
      cohesionScore: conceptCohesion.score,
      semanticDistanceAvg
    },
    factors,
    recommendations
  };
}
1579
/**
 * Score the risk (0–100) that an AI assistant will hallucinate against
 * this code, from seven weighted signals (overloading 25%, magic literals
 * 20%, boolean traps 20%, implicit side effects 15%, callback nesting 10%,
 * ambiguous names 10%, undocumented exports 10%). Each signal is the
 * count/denominator ratio capped at 1, scaled by its weight.
 */
function calculateHallucinationRisk(params) {
  const {
    overloadedSymbols,
    magicLiterals,
    booleanTraps,
    implicitSideEffects,
    deepCallbacks,
    ambiguousNames,
    undocumentedExports,
    totalSymbols,
    totalExports
  } = params;
  if (totalSymbols === 0) {
    return {
      score: 0,
      rating: "minimal",
      signals: [],
      topRisk: "No symbols to analyze",
      recommendations: []
    };
  }
  // Build one signal: ratio = min(1, count / max(1, denominator)),
  // contribution = round(ratio * 100 * weight).
  const makeSignal = (name, count, denominator, weight, description) => ({
    name,
    count,
    riskContribution: Math.round(Math.min(1, count / Math.max(1, denominator)) * 100 * weight),
    description
  });
  const signals = [
    makeSignal("Symbol Overloading", overloadedSymbols, totalSymbols, 0.25, `${overloadedSymbols} overloaded symbols \u2014 AI picks wrong signature`),
    makeSignal("Magic Literals", magicLiterals, totalSymbols * 2, 0.2, `${magicLiterals} unnamed constants \u2014 AI invents wrong values`),
    makeSignal("Boolean Traps", booleanTraps, totalSymbols, 0.2, `${booleanTraps} boolean trap parameters \u2014 AI inverts intent`),
    makeSignal("Implicit Side Effects", implicitSideEffects, totalExports, 0.15, `${implicitSideEffects} functions with implicit side effects \u2014 AI misses contracts`),
    makeSignal("Callback Nesting", deepCallbacks, totalSymbols * 0.1, 0.1, `${deepCallbacks} deep callback chains \u2014 AI loses control flow context`),
    makeSignal("Ambiguous Names", ambiguousNames, totalSymbols, 0.1, `${ambiguousNames} non-descriptive identifiers \u2014 AI guesses wrong intent`),
    makeSignal("Undocumented Exports", undocumentedExports, totalExports, 0.1, `${undocumentedExports} public functions without docs \u2014 AI fabricates behavior`)
  ];
  const score = Math.min(100, signals.reduce((sum, s) => sum + s.riskContribution, 0));
  const rating = score < 10 ? "minimal" : score < 25 ? "low" : score < 50 ? "moderate" : score < 75 ? "high" : "severe";
  const topSignal = signals.reduce((a, b) => a.riskContribution > b.riskContribution ? a : b);
  const topRisk = topSignal.riskContribution > 0 ? topSignal.description : "No significant hallucination risks detected";
  const [overload, magic, trap, sideEffect, , , undoc] = signals;
  const recommendations = [];
  if (overload.riskContribution > 5) {
    recommendations.push(`Rename ${overloadedSymbols} overloaded symbols to unique, intent-revealing names`);
  }
  if (magic.riskContribution > 5) {
    recommendations.push(`Extract ${magicLiterals} magic literals into named constants`);
  }
  if (trap.riskContribution > 5) {
    recommendations.push(`Replace ${booleanTraps} boolean traps with named options objects`);
  }
  if (undoc.riskContribution > 5) {
    recommendations.push(`Add JSDoc/docstrings to ${undocumentedExports} undocumented public functions`);
  }
  if (sideEffect.riskContribution > 5) {
    recommendations.push("Mark functions with side effects explicitly in their names or docs");
  }
  return {
    score: Math.round(score),
    rating,
    // Only surface signals that actually fired.
    signals: signals.filter((s) => s.count > 0),
    topRisk,
    recommendations
  };
}
1698
/**
 * Score how easily an AI agent can orient itself in the repository (0–100),
 * weighting five dimensions: directory structure 20%, self-documenting
 * file names 25%, entry points (README/barrels) 20%, typed API surface 15%
 * and consistent domain vocabulary 20%.
 */
function calculateAgentGrounding(params) {
  const {
    deepDirectories,
    totalDirectories,
    vagueFileNames,
    totalFiles,
    hasRootReadme,
    readmeIsFresh,
    barrelExports,
    untypedExports,
    totalExports,
    inconsistentDomainTerms,
    domainVocabularySize
  } = params;
  // Safe ratio: empty denominators count as "no problem observed".
  const ratio = (numerator, denominator) => denominator > 0 ? numerator / denominator : 0;
  const structureClarityScore = Math.max(0, Math.round(100 - ratio(deepDirectories, totalDirectories) * 80));
  const selfDocumentationScore = Math.max(0, Math.round(100 - ratio(vagueFileNames, totalFiles) * 90));
  // Entry points: base 60, +25 for a README, +10 if fresh, up to +5 for barrels.
  let entryPointScore = 60;
  if (hasRootReadme) entryPointScore += 25;
  if (readmeIsFresh) entryPointScore += 10;
  const barrelRatio = totalFiles > 0 ? barrelExports / (totalFiles * 0.1) : 0;
  entryPointScore = Math.min(100, entryPointScore + Math.round(Math.min(5, barrelRatio * 5)));
  const apiClarityScore = Math.max(0, Math.round(100 - ratio(untypedExports, totalExports) * 70));
  const domainConsistencyScore = Math.max(0, Math.round(100 - ratio(inconsistentDomainTerms, domainVocabularySize) * 80));
  const score = Math.round(
    structureClarityScore * 0.2 + selfDocumentationScore * 0.25 + entryPointScore * 0.2 + apiClarityScore * 0.15 + domainConsistencyScore * 0.2
  );
  const rating = score >= 85 ? "excellent" : score >= 70 ? "good" : score >= 50 ? "moderate" : score >= 30 ? "poor" : "disorienting";
  const recommendations = [];
  if (structureClarityScore < 70) {
    recommendations.push(`Flatten ${deepDirectories} overly-deep directories to improve agent navigation`);
  }
  if (selfDocumentationScore < 70) {
    recommendations.push(`Rename ${vagueFileNames} vague files (utils, helpers, misc) to domain-specific names`);
  }
  if (!hasRootReadme) {
    recommendations.push("Add a root README.md so agents understand the project context immediately");
  } else if (!readmeIsFresh) {
    recommendations.push("Update README.md \u2014 stale entry-point documentation disorients agents");
  }
  if (apiClarityScore < 70) {
    recommendations.push(`Add TypeScript types to ${untypedExports} untyped exports to improve API discoverability`);
  }
  if (domainConsistencyScore < 70) {
    recommendations.push(`Unify ${inconsistentDomainTerms} inconsistent domain terms \u2014 agents need one word per concept`);
  }
  return {
    score,
    rating,
    dimensions: {
      structureClarityScore,
      selfDocumentationScore,
      entryPointScore,
      apiClarityScore,
      domainConsistencyScore
    },
    recommendations
  };
}
1766
function calculateTestabilityIndex(params) {
  // Score (0-100) how safely AI-generated changes can be verified in this codebase.
  // Blends test coverage, function purity, dependency injection, interface focus,
  // and observability, discounted by 20% when no test framework is present.
  const {
    testFiles,
    sourceFiles,
    pureFunctions,
    totalFunctions,
    injectionPatterns,
    totalClasses,
    bloatedInterfaces,
    totalInterfaces,
    externalStateMutations,
    hasTestFramework
  } = params;
  // Test-file-to-source-file ratio; the displayed percentage is capped at 100.
  const rawCoverageRatio = sourceFiles > 0 ? testFiles / sourceFiles : 0;
  const testCoverageRatio = Math.min(100, Math.round(rawCoverageRatio * 100));
  // When no functions/classes were observed, assume a neutral 50% rather than 0.
  const purityRatio = totalFunctions > 0 ? pureFunctions / totalFunctions : 0.5;
  const purityScore = Math.round(purityRatio * 100);
  const injectionRatio = totalClasses > 0 ? injectionPatterns / totalClasses : 0.5;
  const dependencyInjectionScore = Math.round(Math.min(100, injectionRatio * 100));
  const bloatedRatio = totalInterfaces > 0 ? bloatedInterfaces / totalInterfaces : 0;
  const interfaceFocusScore = Math.max(0, Math.round(100 - bloatedRatio * 80));
  const mutationRatio = totalFunctions > 0 ? externalStateMutations / totalFunctions : 0;
  const observabilityScore = Math.max(0, Math.round(100 - mutationRatio * 100));
  // Without any test framework, every other signal is less trustworthy.
  const frameworkWeight = hasTestFramework ? 1 : 0.8;
  const rawScore = (testCoverageRatio * 0.3 + purityScore * 0.25 + dependencyInjectionScore * 0.2 + interfaceFocusScore * 0.1 + observabilityScore * 0.15) * frameworkWeight;
  const score = Math.max(0, Math.min(100, Math.round(rawScore)));
  let rating;
  if (score >= 85) rating = "excellent";
  else if (score >= 70) rating = "good";
  else if (score >= 50) rating = "moderate";
  else if (score >= 30) rating = "poor";
  else rating = "unverifiable";
  // Coarse risk label for letting AI modify this code, driven by coverage + score.
  let aiChangeSafetyRating;
  if (rawCoverageRatio >= 0.5 && score >= 70) aiChangeSafetyRating = "safe";
  else if (rawCoverageRatio >= 0.2 && score >= 50) aiChangeSafetyRating = "moderate-risk";
  else if (rawCoverageRatio > 0) aiChangeSafetyRating = "high-risk";
  else aiChangeSafetyRating = "blind-risk";
  const recommendations = [];
  if (!hasTestFramework) {
    recommendations.push("Add a testing framework (Jest, Vitest, pytest) \u2014 AI changes cannot be verified without tests");
  }
  if (rawCoverageRatio < 0.3) {
    // Fix: clamp to at least 1 so tiny/empty projects never get "Add ~0 test files"
    // (e.g. sourceFiles=1, testFiles=0 rounds 0.3 down to 0).
    const neededTests = Math.max(1, Math.round(sourceFiles * 0.3 - testFiles));
    recommendations.push(`Add ~${neededTests} test files to reach 30% coverage ratio \u2014 minimum for safe AI assistance`);
  }
  if (purityScore < 50) {
    recommendations.push("Extract pure functions from side-effectful code \u2014 pure functions are trivially AI-testable");
  }
  if (dependencyInjectionScore < 50 && totalClasses > 0) {
    recommendations.push("Adopt dependency injection \u2014 makes classes mockable and AI-generated code verifiable");
  }
  if (externalStateMutations > totalFunctions * 0.3) {
    recommendations.push("Reduce direct state mutations \u2014 return values instead to improve observability");
  }
  return {
    score,
    rating,
    dimensions: {
      testCoverageRatio,
      purityScore,
      dependencyInjectionScore,
      interfaceFocusScore,
      observabilityScore
    },
    aiChangeSafetyRating,
    recommendations
  };
}
1834
function calculateExtendedFutureProofScore(params) {
  // Combine six AI-readiness sub-analyses into a single "future-proof" tool result.
  // Risk-style inputs (cognitive load, pattern entropy, hallucination risk) are
  // inverted so every component is a 0-100 "higher is better" score before weighting.
  const {
    cognitiveLoad,
    patternEntropy,
    conceptCohesion,
    hallucinationRisk,
    agentGrounding,
    testability,
    semanticDistances
  } = params;
  const components = {
    load: 100 - cognitiveLoad.score,
    entropy: 100 - patternEntropy.entropy * 100,
    cohesion: conceptCohesion.score * 100,
    hallucination: 100 - hallucinationRisk.score,
    grounding: agentGrounding.score,
    testability: testability.score
  };
  // Weighted blend (weights sum to 1).
  const overall = Math.round(
    components.load * 0.2 +
      components.entropy * 0.15 +
      components.cohesion * 0.15 +
      components.hallucination * 0.2 +
      components.grounding * 0.15 +
      components.testability * 0.15
  );
  // Each factor's impact is its distance from the neutral midpoint of 50.
  const factors = [
    ["Cognitive Load", components.load, cognitiveLoad.rating],
    ["Pattern Entropy", components.entropy, patternEntropy.rating],
    ["Concept Cohesion", components.cohesion, conceptCohesion.rating],
    ["Hallucination Risk", components.hallucination, `${hallucinationRisk.rating} risk (${hallucinationRisk.score}/100 raw)`],
    ["Agent Grounding", components.grounding, agentGrounding.rating],
    ["Testability", components.testability, `${testability.rating} \u2014 AI changes are ${testability.aiChangeSafetyRating}`]
  ].map(([name, value, description]) => ({
    name,
    impact: Math.round(value - 50),
    description
  }));
  // Merge the sub-analyses' recommendations, each tagged with a fixed impact
  // estimate and priority; testability escalates to "high" when changes are blind.
  const testabilityPriority = testability.aiChangeSafetyRating === "blind-risk" ? "high" : "medium";
  const recommendations = [
    ...hallucinationRisk.recommendations.map((action) => ({ action, estimatedImpact: 8, priority: "high" })),
    ...agentGrounding.recommendations.map((action) => ({ action, estimatedImpact: 6, priority: "medium" })),
    ...testability.recommendations.map((action) => ({ action, estimatedImpact: 10, priority: testabilityPriority })),
    ...patternEntropy.recommendations.map((action) => ({ action, estimatedImpact: 5, priority: "low" }))
  ];
  const semanticDistanceAvg = semanticDistances && semanticDistances.length > 0
    ? semanticDistances.reduce((sum, d) => sum + d.distance, 0) / semanticDistances.length
    : 0;
  return {
    toolName: "future-proof",
    score: overall,
    rawMetrics: {
      cognitiveLoadScore: cognitiveLoad.score,
      entropyScore: patternEntropy.entropy,
      cohesionScore: conceptCohesion.score,
      hallucinationRiskScore: hallucinationRisk.score,
      agentGroundingScore: agentGrounding.score,
      testabilityScore: testability.score,
      semanticDistanceAvg
    },
    factors,
    recommendations
  };
}
1907
+
1908
+ // src/utils/history.ts
1909
+ import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, existsSync as existsSync4, mkdirSync as mkdirSync2 } from "fs";
1910
+ import { join as join4, dirname as dirname3 } from "path";
1911
function getHistoryPath(rootDir) {
  // Resolve the on-disk location of the score-history file for a project root.
  const historyFile = join4(rootDir, ".aiready", "history.json");
  return historyFile;
}
1914
function loadScoreHistory(rootDir) {
  // Load the persisted score history for a project; returns [] when the file is
  // missing, unreadable, or does not contain a JSON array.
  const historyPath = getHistoryPath(rootDir);
  if (!existsSync4(historyPath)) {
    return [];
  }
  try {
    const data = readFileSync2(historyPath, "utf-8");
    const parsed = JSON.parse(data);
    // Fix: guard against a hand-edited/corrupted file whose top level is not an
    // array — callers (summary/export/save) iterate the result as an array.
    return Array.isArray(parsed) ? parsed : [];
  } catch (error) {
    // Best-effort: a corrupt history file should not break a scan.
    console.warn("Failed to load score history:", error);
    return [];
  }
}
1927
function saveScoreEntry(rootDir, entry) {
  // Append a score entry (stamped with the current time) to the on-disk history,
  // pruning any entries older than one year, and write the file back.
  const historyPath = getHistoryPath(rootDir);
  const historyDir = dirname3(historyPath);
  if (!existsSync4(historyDir)) {
    mkdirSync2(historyDir, { recursive: true });
  }
  // Retention window: entries older than 365 days are dropped on every save.
  const cutoff = Date.now() - 365 * 24 * 60 * 60 * 1e3;
  const recentEntries = loadScoreHistory(rootDir).filter(
    (existing) => new Date(existing.timestamp).getTime() > cutoff
  );
  recentEntries.push({
    timestamp: new Date().toISOString(),
    ...entry
  });
  writeFileSync2(historyPath, JSON.stringify(recentEntries, null, 2));
}
1945
function getHistorySummary(rootDir) {
  // Summarize the stored score history: scan count, first/last scan timestamps,
  // and the rounded average overall score. Empty history yields a zeroed summary.
  const history = loadScoreHistory(rootDir);
  if (history.length === 0) {
    return { totalScans: 0, firstScan: null, lastScan: null, avgScore: 0 };
  }
  const scoreSum = history.reduce((sum, entry) => sum + entry.overallScore, 0);
  return {
    totalScans: history.length,
    firstScan: history[0].timestamp,
    lastScan: history[history.length - 1].timestamp,
    avgScore: Math.round(scoreSum / history.length)
  };
}
1964
function exportHistory(rootDir, format = "json") {
  // Serialize the stored score history as pretty-printed JSON (default) or CSV.
  const history = loadScoreHistory(rootDir);
  if (format === "csv") {
    const headers = "timestamp,overallScore,totalIssues,totalTokens,patternScore,contextScore,consistencyScore\n";
    const rows = history.map(
      // Fix: use ?? instead of || so a legitimate per-tool score of 0 is
      // exported as "0" rather than blanked to an empty cell.
      (e) => `${e.timestamp},${e.overallScore},${e.totalIssues},${e.totalTokens},${e.breakdown?.["pattern-detect"] ?? ""},${e.breakdown?.["context-analyzer"] ?? ""},${e.breakdown?.["consistency"] ?? ""}`
    ).join("\n");
    return headers + rows;
  }
  return JSON.stringify(history, null, 2);
}
1975
function clearHistory(rootDir) {
  // Reset the stored score history to an empty list.
  // No-op when no history file exists yet (nothing to clear, nothing created).
  const historyPath = getHistoryPath(rootDir);
  if (!existsSync4(historyPath)) return;
  writeFileSync2(historyPath, JSON.stringify([]));
}
1056
1981
  export {
1982
+ CONTEXT_TIER_THRESHOLDS,
1057
1983
  DEFAULT_COST_CONFIG,
1058
1984
  DEFAULT_EXCLUDE,
1059
1985
  DEFAULT_TOOL_WEIGHTS,
1060
1986
  LANGUAGE_EXTENSIONS,
1061
1987
  Language,
1988
+ MODEL_PRICING_PRESETS,
1062
1989
  ParseError,
1063
1990
  ParserFactory,
1064
1991
  PythonParser,
1992
+ SIZE_ADJUSTED_THRESHOLDS,
1065
1993
  TOOL_NAME_MAP,
1066
1994
  TypeScriptParser,
1995
+ calculateAgentGrounding,
1996
+ calculateCognitiveLoad,
1067
1997
  calculateComprehensionDifficulty,
1998
+ calculateConceptCohesion,
1999
+ calculateExtendedFutureProofScore,
2000
+ calculateFutureProofScore,
2001
+ calculateHallucinationRisk,
1068
2002
  calculateImportSimilarity,
2003
+ calculateKnowledgeConcentration,
1069
2004
  calculateMonthlyCost,
1070
2005
  calculateOverallScore,
2006
+ calculatePatternEntropy,
1071
2007
  calculateProductivityImpact,
2008
+ calculateRemediationVelocity,
2009
+ calculateScoreTrend,
2010
+ calculateSemanticDistance,
2011
+ calculateTechnicalDebtInterest,
2012
+ calculateTestabilityIndex,
2013
+ clearHistory,
1072
2014
  estimateTokens,
2015
+ exportHistory,
1073
2016
  extractFunctions,
1074
2017
  extractImports,
1075
2018
  formatAcceptanceRate,
@@ -1078,11 +2021,17 @@ export {
1078
2021
  formatScore,
1079
2022
  formatToolScore,
1080
2023
  generateHTML,
2024
+ getDebtBreakdown,
1081
2025
  getElapsedTime,
1082
2026
  getFileExtension,
2027
+ getHistorySummary,
2028
+ getModelPreset,
1083
2029
  getParser,
2030
+ getProjectSizeTier,
1084
2031
  getRating,
1085
2032
  getRatingDisplay,
2033
+ getRatingWithContext,
2034
+ getRecommendedThreshold,
1086
2035
  getSupportedLanguages,
1087
2036
  getToolWeight,
1088
2037
  handleCLIError,
@@ -1091,6 +2040,7 @@ export {
1091
2040
  isSourceFile,
1092
2041
  loadConfig,
1093
2042
  loadMergedConfig,
2043
+ loadScoreHistory,
1094
2044
  mergeConfigWithDefaults,
1095
2045
  normalizeToolName,
1096
2046
  parseCode,
@@ -1099,5 +2049,6 @@ export {
1099
2049
  predictAcceptanceRate,
1100
2050
  readFileContent,
1101
2051
  resolveOutputPath,
2052
+ saveScoreEntry,
1102
2053
  scanFiles
1103
2054
  };