cto-ai-cli 6.1.0 → 7.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -247,17 +247,6 @@ interface SelectionInput {
247
247
  }
248
248
  declare function selectContext(input: SelectionInput): Promise<ContextSelection>;
249
249
 
250
- declare function scoreAllFiles(files: AnalyzedFile[], graph: ProjectGraph, weights?: RiskWeights): void;
251
- declare function scoreFile(file: AnalyzedFile, graph: ProjectGraph, weights?: RiskWeights): number;
252
-
253
- declare function calculateCoverage(targetPaths: string[], includedPaths: string[], allFiles: AnalyzedFile[], graph: ProjectGraph, depth?: number): CoverageResult;
254
-
255
- declare function getPruneLevelForRisk(riskScore: number): PruneLevel;
256
- declare function optimizeBudget(files: AnalyzedFile[], budget: number): Promise<BudgetPlan>;
257
-
258
- declare function pruneFile(file: AnalyzedFile, level: PruneLevel): Promise<PrunedContent>;
259
- declare function pruneFiles(files: AnalyzedFile[], levelFn: (file: AnalyzedFile) => PruneLevel): Promise<PrunedContent[]>;
260
-
261
250
  /**
262
251
  * TF-IDF Semantic Matching Engine
263
252
  *
@@ -326,6 +315,82 @@ declare function tokenize(text: string): string[];
326
315
  */
327
316
  declare function boostByPath(matches: SemanticMatch[], allFiles: string[], taskDescription: string): SemanticMatch[];
328
317
 
318
+ /**
319
+ * Persistent TF-IDF Index Cache
320
+ *
321
+ * Problem: Building a TF-IDF index reads every source file and tokenizes it.
322
+ * For a 50K-file repo, that's 5-10 seconds per query. With 20K devs running
323
+ * queries concurrently, re-indexing on every call is unacceptable.
324
+ *
325
+ * Solution: Persist the index to disk with per-file mtime tracking.
326
+ * On subsequent queries, only re-index files that changed since last build.
327
+ *
328
+ * Storage: .cto/index-cache.json
329
+ * {
330
+ * version: 2,
331
+ * builtAt: ISO timestamp,
332
+ * files: { [relativePath]: { mtime: number, terms: { [term]: count }, length: number } },
333
+ * idf: { [term]: number },
334
+ * avgDocLength: number,
335
+ * totalDocs: number,
336
+ * }
337
+ *
338
+ * Invalidation:
339
+ * - Per-file: mtime changed → re-tokenize that file
340
+ * - New files: not in cache → tokenize and add
341
+ * - Deleted files: in cache but not on disk → remove
342
+ * - Version bump: cache format changed → full rebuild
343
+ *
344
+ * The IDF values are recomputed after any incremental update because
345
+ * document frequency changes affect all terms globally.
346
+ */
347
+
348
+ interface IndexCacheStats {
349
+ /** Total files in the index */
350
+ totalFiles: number;
351
+ /** Files that were re-indexed (changed or new) */
352
+ updatedFiles: number;
353
+ /** Files removed from cache (deleted from disk) */
354
+ removedFiles: number;
355
+ /** Files reused from cache (unchanged) */
356
+ cachedFiles: number;
357
+ /** Whether the cache existed before this build */
358
+ cacheHit: boolean;
359
+ /** Time to build/update the index (ms) */
360
+ buildTimeMs: number;
361
+ }
362
+ /**
363
+ * Build or update a TF-IDF index with disk caching.
364
+ *
365
+ * First call: builds full index and writes cache to .cto/index-cache.json
366
+ * Subsequent calls: reads cache, updates only changed files, rewrites cache
367
+ *
368
+ * @param projectPath - Root of the project (for .cto/ directory)
369
+ * @param files - All files to index: { relativePath, absolutePath, content? }
370
+ * If content is provided, it's used directly. Otherwise, the file is read from disk.
371
+ * @returns The TF-IDF index + stats about cache hits/misses
372
+ */
373
+ declare function buildIndexCached(projectPath: string, files: {
374
+ relativePath: string;
375
+ absolutePath: string;
376
+ content?: string;
377
+ }[]): {
378
+ index: TfIdfIndex;
379
+ stats: IndexCacheStats;
380
+ };
381
+ /**
382
+ * Invalidate the entire cache (force full rebuild on next call).
383
+ */
384
+ declare function invalidateCache(projectPath: string): void;
385
+ /**
386
+ * Get cache stats without rebuilding.
387
+ */
388
+ declare function getCacheInfo(projectPath: string): {
389
+ exists: boolean;
390
+ fileCount: number;
391
+ builtAt: string | null;
392
+ };
393
+
329
394
  /**
330
395
  * Usage Learner — Gets smarter with every use.
331
396
  *
@@ -403,6 +468,477 @@ declare function getLearnerStats(model: LearnerModel): {
403
468
  */
404
469
  declare function extractPattern(filePath: string): string;
405
470
 
471
+ /**
472
+ * Multi-Repo Context Selection
473
+ *
474
+ * Discovers sibling repositories in a workspace and queries them
475
+ * for relevant files when selecting context for a task.
476
+ *
477
+ * How it works:
478
+ * 1. Discover sibling repos (scan parent dir or use explicit paths)
479
+ * 2. For each sibling: list source files, read contents, build TF-IDF index
480
+ * 3. Query each sibling's index with the task description
481
+ * 4. Return ranked matches with repo attribution
482
+ *
483
+ * This is NOT the cross-repo learning system (cross-repo.ts).
484
+ * This is actual multi-repo file discovery and querying.
485
+ */
486
+ interface SiblingRepo {
487
+ /** Absolute path to the repo root */
488
+ path: string;
489
+ /** Short name (directory name) */
490
+ name: string;
491
+ /** Detected stack (from package.json, tsconfig, etc.) */
492
+ stack: string[];
493
+ /** Number of source files found */
494
+ fileCount: number;
495
+ }
496
+ interface SiblingMatch {
497
+ /** Which sibling repo this file belongs to */
498
+ repoName: string;
499
+ /** Absolute path to the repo */
500
+ repoPath: string;
501
+ /** Relative path within the sibling repo */
502
+ relativePath: string;
503
+ /** Absolute path to the file */
504
+ absolutePath: string;
505
+ /** Semantic relevance score (0-1) */
506
+ score: number;
507
+ /** File content */
508
+ content: string;
509
+ /** Estimated token count */
510
+ tokens: number;
511
+ }
512
+ interface MultiRepoResult {
513
+ /** Sibling repos that were discovered/used */
514
+ siblings: SiblingRepo[];
515
+ /** Top matches from sibling repos, ranked by score */
516
+ matches: SiblingMatch[];
517
+ /** Total time spent indexing + querying (ms) */
518
+ timeMs: number;
519
+ }
520
+ /**
521
+ * Discover sibling repositories by scanning the parent directory.
522
+ * A directory is a "repo" if it contains a known project marker file.
523
+ */
524
+ declare function discoverSiblingRepos(projectPath: string): SiblingRepo[];
525
+ /**
526
+ * Query sibling repos for files relevant to a task.
527
+ *
528
+ * For each sibling:
529
+ * 1. List source files
530
+ * 2. Build TF-IDF index from file contents
531
+ * 3. Query with task description
532
+ * 4. Return top matches with content
533
+ *
534
+ * @param siblings - Sibling repos to query (from discoverSiblingRepos or explicit paths)
535
+ * @param task - Task description to match against
536
+ * @param maxPerRepo - Max matches per repo (default 5)
537
+ * @param minScore - Minimum semantic score to include (default 0.3)
538
+ */
539
+ declare function querySiblingRepos(siblings: SiblingRepo[], task: string, maxPerRepo?: number, minScore?: number): MultiRepoResult;
540
+ /**
541
+ * Parse explicit repo paths from a comma-separated string.
542
+ * Resolves relative paths against the current project's parent directory.
543
+ */
544
+ declare function parseSiblingPaths(pathsStr: string, projectPath: string): SiblingRepo[];
545
+ /**
546
+ * Render multi-repo results for CLI output.
547
+ */
548
+ declare function renderMultiRepoSummary(result: MultiRepoResult): string;
549
+
550
+ /**
551
+ * Shared Context Pipeline
552
+ *
553
+ * Single function that runs the full context selection pipeline:
554
+ * read files → build TF-IDF index → query → boost → load learner → selectContext
555
+ *
556
+ * Used by both CLI and MCP server. No duplication.
557
+ */
558
+
559
+ interface ContextPipelineInput {
560
+ projectPath: string;
561
+ task: string;
562
+ analysis: ProjectAnalysis;
563
+ budget?: number;
564
+ /** Optional sibling repos for cross-repo context */
565
+ siblingRepos?: SiblingRepo[];
566
+ }
567
+ interface ContextPipelineResult {
568
+ selection: ContextSelection;
569
+ taskType: string;
570
+ fileContentMap: Map<string, string>;
571
+ semanticMap: Map<string, SemanticMatch>;
572
+ learnerMap: Map<string, LearnerBoost>;
573
+ /** Cross-repo results (only present if siblingRepos were provided) */
574
+ multiRepo?: MultiRepoResult;
575
+ /** Index cache stats (how many files were cached vs rebuilt) */
576
+ indexCacheStats?: IndexCacheStats;
577
+ }
578
+ /**
579
+ * Run the full context selection pipeline.
580
+ * One function, used everywhere. No copy-paste.
581
+ */
582
+ declare function runContextPipeline(input: ContextPipelineInput): Promise<ContextPipelineResult>;
583
+
584
+ declare function scoreAllFiles(files: AnalyzedFile[], graph: ProjectGraph, weights?: RiskWeights): void;
585
+ declare function scoreFile(file: AnalyzedFile, graph: ProjectGraph, weights?: RiskWeights): number;
586
+
587
+ declare function calculateCoverage(targetPaths: string[], includedPaths: string[], allFiles: AnalyzedFile[], graph: ProjectGraph, depth?: number): CoverageResult;
588
+
589
+ declare function getPruneLevelForRisk(riskScore: number): PruneLevel;
590
+ declare function optimizeBudget(files: AnalyzedFile[], budget: number): Promise<BudgetPlan>;
591
+
592
+ declare function pruneFile(file: AnalyzedFile, level: PruneLevel): Promise<PrunedContent>;
593
+ declare function pruneFiles(files: AnalyzedFile[], levelFn: (file: AnalyzedFile) => PruneLevel): Promise<PrunedContent[]>;
594
+
595
+ /**
596
+ * Closed-Loop A/B Testing Engine
597
+ *
598
+ * The missing piece: the feedback system records data but never closes the loop.
599
+ * This module adds real experimentation:
600
+ *
601
+ * 1. Define experiments with control + variant strategies
602
+ * 2. Assign requests to groups (deterministic hashing for consistency)
603
+ * 3. Collect outcomes per group
604
+ * 4. Compute statistical significance (z-test for proportions)
605
+ * 5. Auto-promote winning variants when significance threshold met
606
+ *
607
+ * Example experiment:
608
+ * - Control: default composite scoring (semantic 0.55, risk 0.25, learner 0.20)
609
+ * - Variant: reranker-heavy scoring (reranker 0.70, risk 0.15, learner 0.15)
610
+ * - Metric: acceptance rate
611
+ * - Significance: p < 0.05
612
+ *
613
+ * Storage: .cto/experiments.json
614
+ * Design: Pure functions. No external deps. Deterministic assignment.
615
+ */
616
+ interface Experiment {
617
+ /** Unique experiment ID */
618
+ id: string;
619
+ /** Human-readable name */
620
+ name: string;
621
+ /** What we're testing */
622
+ description: string;
623
+ /** Current status */
624
+ status: 'running' | 'concluded' | 'paused';
625
+ /** When the experiment started */
626
+ startedAt: string;
627
+ /** When it concluded (if applicable) */
628
+ concludedAt?: string;
629
+ /** Traffic split: 0.5 = 50/50 */
630
+ trafficSplit: number;
631
+ /** Minimum observations per group before significance test */
632
+ minObservations: number;
633
+ /** P-value threshold for significance */
634
+ significanceThreshold: number;
635
+ /** Control group config */
636
+ control: ExperimentGroup;
637
+ /** Variant group config */
638
+ variant: ExperimentGroup;
639
+ /** Conclusion (when experiment ends) */
640
+ conclusion?: ExperimentConclusion;
641
+ }
642
+ interface ExperimentGroup {
643
+ /** Group name */
644
+ name: string;
645
+ /** Strategy parameters (passed to the engine) */
646
+ params: Record<string, unknown>;
647
+ /** Collected metrics */
648
+ metrics: GroupMetrics;
649
+ }
650
+ interface GroupMetrics {
651
+ /** Total observations */
652
+ total: number;
653
+ /** Successful outcomes (accepted) */
654
+ successes: number;
655
+ /** Accept rate = successes / total */
656
+ acceptRate: number;
657
+ /** Average time to accept (ms) */
658
+ avgTimeToAccept: number;
659
+ /** Compilable rate */
660
+ compilableRate: number;
661
+ /** Sum of time values (for running average) */
662
+ timeSum: number;
663
+ /** Count of compilable results */
664
+ compilableCount: number;
665
+ }
666
+ interface ExperimentConclusion {
667
+ /** Which group won */
668
+ winner: 'control' | 'variant' | 'no_difference';
669
+ /** Observed p-value */
670
+ pValue: number;
671
+ /** Effect size (difference in accept rates) */
672
+ effectSize: number;
673
+ /** Confidence interval for effect size */
674
+ confidenceInterval: [number, number];
675
+ /** Human-readable summary */
676
+ summary: string;
677
+ }
678
+ interface AssignmentResult {
679
+ /** Which group the request was assigned to */
680
+ group: 'control' | 'variant';
681
+ /** The strategy params for this group */
682
+ params: Record<string, unknown>;
683
+ /** Experiment ID for tracking */
684
+ experimentId: string;
685
+ }
686
+ declare function loadExperiments(projectPath: string): Experiment[];
687
+ declare function saveExperiments(projectPath: string, experiments: Experiment[]): void;
688
+ declare function createExperiment(id: string, name: string, description: string, controlParams: Record<string, unknown>, variantParams: Record<string, unknown>, options?: {
689
+ trafficSplit?: number;
690
+ minObservations?: number;
691
+ significanceThreshold?: number;
692
+ }): Experiment;
693
+ /**
694
+ * Assign a request to control or variant group.
695
+ * Uses deterministic hashing: same (experiment_id, task) → same group.
696
+ * This ensures consistency (retries get the same group).
697
+ */
698
+ declare function assignGroup(experiment: Experiment, task: string): AssignmentResult | null;
699
+ /**
700
+ * Record an outcome for an experiment group.
701
+ * Updates running statistics and checks for significance.
702
+ */
703
+ declare function recordOutcome(experiment: Experiment, group: 'control' | 'variant', outcome: {
704
+ accepted: boolean;
705
+ compilable?: boolean;
706
+ timeToAcceptMs?: number;
707
+ }): Experiment;
708
+ interface SignificanceResult {
709
+ /** Two-sided p-value */
710
+ pValue: number;
711
+ /** Z-score */
712
+ zScore: number;
713
+ /** Effect size: variant rate - control rate */
714
+ effectSize: number;
715
+ /** 95% confidence interval for effect size */
716
+ confidenceInterval: [number, number];
717
+ /** Whether the result is significant at the experiment's threshold */
718
+ significant: boolean;
719
+ }
720
+ /**
721
+ * Two-proportion z-test for A/B testing.
722
+ *
723
+ * H0: p_control = p_variant
724
+ * H1: p_control ≠ p_variant (two-sided)
725
+ *
726
+ * This is the standard test for comparing conversion rates.
727
+ */
728
+ declare function testSignificance(experiment: Experiment): SignificanceResult;
729
+ /**
730
+ * Get the active experiment for this project (if any).
731
+ */
732
+ declare function getActiveExperiment(experiments: Experiment[]): Experiment | null;
733
+ /**
734
+ * Get all concluded experiments with their results.
735
+ */
736
+ declare function getConcludedExperiments(experiments: Experiment[]): Experiment[];
737
+ /**
738
+ * Render experiment summary for CLI/dashboard.
739
+ */
740
+ declare function renderExperimentSummary(experiment: Experiment): string;
741
+
742
+ /**
743
+ * Polyglot Dependency Graph — Import Parsing for Python, Go, Java, Rust
744
+ *
745
+ * Problem: The existing graph.ts uses ts-morph (AST), which only handles TS/JS.
746
+ * For a 20K-dev org with Java, Python, Go, Rust — the dependency graph is empty.
747
+ * No graph → no hub detection → no risk scoring → useless context selection.
748
+ *
749
+ * Solution: Regex-based import parsers for each language. Not AST-accurate, but
750
+ * good enough for dependency graph construction. We don't need perfect resolution;
751
+ * we need to know "file A probably depends on file B" for hub/risk scoring.
752
+ *
753
+ * Each parser:
754
+ * 1. Extracts import specifiers from file content using regex
755
+ * 2. Resolves specifiers to relative file paths within the project
756
+ * 3. Returns edges: { from: relativePath, to: relativePath }
757
+ *
758
+ * Supported languages:
759
+ * - Python: import x, from x import y, relative imports
760
+ * - Go: import "pkg", import ( "pkg" ... )
761
+ * - Java: import com.example.Foo, package declaration
762
+ * - Rust: use crate::x, mod x, use super::x
763
+ *
764
+ * Design: Pure functions. No external deps. Deterministic.
765
+ */
766
+
767
+ type SupportedLanguage = 'python' | 'go' | 'java' | 'rust' | 'typescript';
768
+ interface ImportSpec {
769
+ /** The raw import specifier as written in the source */
770
+ raw: string;
771
+ /** Whether this is a relative import */
772
+ isRelative: boolean;
773
+ }
774
+ declare function detectLanguage(filePath: string): SupportedLanguage | null;
775
+ /**
776
+ * Parse imports from a non-TS file and resolve to project-relative paths.
777
+ * Returns dependency edges for the project graph.
778
+ *
779
+ * @param filePath - Absolute path to the source file
780
+ * @param relativePath - Project-relative path (e.g., "src/auth/login.py")
781
+ * @param projectPath - Absolute path to the project root
782
+ * @param allRelativePaths - Set of all file paths in the project (for resolution)
783
+ * @param content - Optional file content (read from disk if not provided)
784
+ */
785
+ declare function parseImports(filePath: string, relativePath: string, projectPath: string, allRelativePaths: Set<string>, content?: string): GraphEdge[];
786
+ /**
787
+ * Parse imports for ALL non-TS files in a project.
788
+ * Call this alongside ts-morph's buildProjectGraph for TS files.
789
+ */
790
+ declare function parseAllPolyglotImports(files: {
791
+ relativePath: string;
792
+ absolutePath: string;
793
+ content?: string;
794
+ }[], projectPath: string): GraphEdge[];
795
+ /**
796
+ * Estimate cyclomatic complexity from source code using regex.
797
+ * Not AST-accurate but good enough for risk scoring.
798
+ */
799
+ declare function estimateComplexity(content: string, lang: SupportedLanguage): number;
800
+
801
+ /**
802
+ * Multi-Stage Reranker
803
+ *
804
+ * The problem: BM25 retrieval gets 54% precision. Adding risk scoring drops it
805
+ * to 33% because high-risk irrelevant files fill the budget.
806
+ *
807
+ * The solution: a 3-stage pipeline that turns BM25 candidates into a precision-
808
+ * optimized selection:
809
+ *
810
+ * Stage 1: RETRIEVE (BM25 top-K) — already done by tfidf.ts
811
+ * Stage 2: RERANK (multi-signal rescoring)
812
+ * - Term coverage: what fraction of UNIQUE query terms does the file match?
813
+ * - Term specificity: are the matched terms rare (high IDF) or generic?
814
+ * - Bigram proximity: do query terms appear near each other in the file?
815
+ * - Dependency signal: is this file in the dependency cone of a top match?
816
+ * - Path relevance: does the file path match query terms?
817
+ * Stage 3: QUALITY GATE (adaptive cutoff)
818
+ * - Hard floor: files below absolute threshold are excluded
819
+ * - Elbow detection: find the natural drop-off point in scores
820
+ * - Don't fill budget with noise — stop when quality degrades
821
+ *
822
+ * This is a cross-encoder-like approach using hand-crafted features instead
823
+ * of a neural model. No ML dependencies. Deterministic.
824
+ */
825
+
826
+ interface RerankInput {
827
+ /** Task description */
828
+ task: string;
829
+ /** BM25 candidates from tfidf.query() */
830
+ candidates: SemanticMatch[];
831
+ /** The TF-IDF index (for IDF weights) */
832
+ index: TfIdfIndex;
833
+ /** File contents for bigram proximity analysis */
834
+ fileContents: Map<string, string>;
835
+ /** Dependency edges: from → to[] */
836
+ dependencies: Map<string, string[]>;
837
+ /** All file paths in the project */
838
+ allFilePaths: string[];
839
+ }
840
+ interface RerankResult {
841
+ /** Reranked and filtered files — only high-quality matches */
842
+ files: RerankedFile[];
843
+ /** Files that were cut by the quality gate */
844
+ filtered: FilteredFile[];
845
+ /** The quality threshold used */
846
+ qualityThreshold: number;
847
+ /** Telemetry data for observability and debugging */
848
+ telemetry: RerankTelemetry;
849
+ }
850
+ interface RerankTelemetry {
851
+ /** Total candidates received from BM25 */
852
+ candidatesIn: number;
853
+ /** Files that passed the quality gate */
854
+ candidatesOut: number;
855
+ /** Files filtered out */
856
+ candidatesFiltered: number;
857
+ /** Timing in milliseconds */
858
+ durationMs: number;
859
+ /** Signal weight configuration used */
860
+ weights: typeof WEIGHTS;
861
+ /** Quality gate thresholds used */
862
+ gateConfig: {
863
+ absoluteFloor: number;
864
+ elbowDropRatio: number;
865
+ minTermCoverage: number;
866
+ };
867
+ /** Aggregate signal statistics across all candidates (before gate) */
868
+ signalStats: {
869
+ termCoverage: {
870
+ min: number;
871
+ max: number;
872
+ mean: number;
873
+ median: number;
874
+ };
875
+ termSpecificity: {
876
+ min: number;
877
+ max: number;
878
+ mean: number;
879
+ median: number;
880
+ };
881
+ bigramProximity: {
882
+ min: number;
883
+ max: number;
884
+ mean: number;
885
+ median: number;
886
+ };
887
+ dependencySignal: {
888
+ min: number;
889
+ max: number;
890
+ mean: number;
891
+ median: number;
892
+ };
893
+ pathRelevance: {
894
+ min: number;
895
+ max: number;
896
+ mean: number;
897
+ median: number;
898
+ };
899
+ };
900
+ /** Filter reason breakdown: reason → count */
901
+ filterReasons: Record<string, number>;
902
+ /** Score distribution: [min, p25, p50, p75, max] across all scored candidates */
903
+ scoreDistribution: [number, number, number, number, number];
904
+ /** Number of unique query terms */
905
+ queryTermCount: number;
906
+ /** Size of the dependency relevance cone */
907
+ relevanceConeSize: number;
908
+ }
909
+ interface RerankedFile {
910
+ filePath: string;
911
+ /** Final reranked score (0-1) */
912
+ score: number;
913
+ /** Original BM25 score */
914
+ bm25Score: number;
915
+ /** Individual signal scores */
916
+ signals: {
917
+ termCoverage: number;
918
+ termSpecificity: number;
919
+ bigramProximity: number;
920
+ dependencySignal: number;
921
+ pathRelevance: number;
922
+ };
923
+ }
924
+ interface FilteredFile {
925
+ filePath: string;
926
+ score: number;
927
+ reason: string;
928
+ }
929
+ declare const WEIGHTS: {
930
+ termCoverage: number;
931
+ termSpecificity: number;
932
+ bigramProximity: number;
933
+ dependencySignal: number;
934
+ pathRelevance: number;
935
+ };
936
+ /**
937
+ * Rerank BM25 candidates using multi-signal scoring + quality gate.
938
+ * Returns only files that pass the quality threshold.
939
+ */
940
+ declare function rerank(input: RerankInput): RerankResult;
941
+
406
942
  declare function countTokensTiktoken(text: string): number;
407
943
  declare function countTokensChars4(sizeInBytes: number): number;
408
944
  declare function estimateTokens(content: string, sizeInBytes: number, method?: 'chars4' | 'tiktoken'): number;
@@ -470,4 +1006,4 @@ interface AuditOptions {
470
1006
  }
471
1007
  declare function auditProject(projectPath: string, filePaths: string[], options?: AuditOptions): Promise<AuditResult>;
472
1008
 
473
- export { CtoError, type CtoErrorCode, type DocumentVector, type LearnerBoost, type LearnerBoostInput, type LearnerModel, type LogEntry, type LogLevel, type Logger, type PatternStats, type SecretFinding, type SecretType, type SelectionInput, type SemanticMatch, type SemanticScore, type TfIdfIndex, analyzeProject, auditProject, bfsBidirectional, boostByPath, buildAdjacencyList, buildIndex, buildProjectGraph, calculateCoverage, classifyFileKind, countTokensChars4, countTokensTiktoken, createLogger, createProject, detectStack, estimateFileTokens, estimateTokens, extractPattern, freeEncoder, getLearnerBoosts, getLearnerStats, getPruneLevelForRisk, isCtoError, loadLearner, optimizeBudget, pruneFile, pruneFiles, query, recordSelection, sanitizeContent, saveLearner, scanContentForSecrets, scanFileForSecrets, scanProjectForSecrets, scoreAllFiles, scoreFile, selectContext, setJsonLogging, setLogLevel, similarity, tokenize, walkProject, wrapError };
1009
+ export { type AssignmentResult, type ContextPipelineInput, type ContextPipelineResult, CtoError, type CtoErrorCode, type DocumentVector, type Experiment, type ExperimentConclusion, type ExperimentGroup, type FilteredFile, type GroupMetrics, type ImportSpec, type IndexCacheStats, type LearnerBoost, type LearnerBoostInput, type LearnerModel, type LogEntry, type LogLevel, type Logger, type MultiRepoResult, type PatternStats, type RerankInput, type RerankResult, type RerankedFile, type SecretFinding, type SecretType, type SelectionInput, type SemanticMatch, type SemanticScore, type SiblingMatch, type SiblingRepo, type SignificanceResult, type SupportedLanguage, type TfIdfIndex, analyzeProject, assignGroup, auditProject, bfsBidirectional, boostByPath, buildAdjacencyList, buildIndex, buildIndexCached, buildProjectGraph, calculateCoverage, classifyFileKind, countTokensChars4, countTokensTiktoken, createExperiment, createLogger, createProject, detectLanguage, detectStack, discoverSiblingRepos, estimateComplexity, estimateFileTokens, estimateTokens, extractPattern, freeEncoder, getActiveExperiment, getCacheInfo, getConcludedExperiments, getLearnerBoosts, getLearnerStats, getPruneLevelForRisk, invalidateCache, isCtoError, loadExperiments, loadLearner, optimizeBudget, parseAllPolyglotImports, parseImports, parseSiblingPaths, pruneFile, pruneFiles, query, querySiblingRepos, recordOutcome, recordSelection, renderExperimentSummary, renderMultiRepoSummary, rerank, runContextPipeline, sanitizeContent, saveExperiments, saveLearner, scanContentForSecrets, scanFileForSecrets, scanProjectForSecrets, scoreAllFiles, scoreFile, selectContext, setJsonLogging, setLogLevel, similarity, testSignificance, tokenize, walkProject, wrapError };