@suco/su-auggie-mcp 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,47 @@
+ # @suco/su-auggie-mcp
+
+ An MCP (Model Context Protocol) server that exposes [Augment Code](https://augmentcode.com) context engine capabilities.
+
+ > ⚠️ **Experimental** — This package is a test implementation using the Auggie SDK. Use at your own risk.
+
+ ## Installation
+
+ ```bash
+ npm install -g @suco/su-auggie-mcp
+ ```
+
+ Or use with npx:
+
+ ```bash
+ npx @suco/su-auggie-mcp
+ ```
+
+ ## Usage
+
+ ### As an MCP Server
+
+ Add to your MCP client configuration:
+
+ ```json
+ {
+   "mcpServers": {
+     "su-auggie-mcp": {
+       "command": "npx",
+       "args": ["@suco/su-auggie-mcp"]
+     }
+   }
+ }
+ ```
+
+ ### Environment Variables
+
+ - `AUGMENT_API_KEY` (or `AUGMENT_API_TOKEN`) — Your Augment Code API key. If neither is set, credentials are read from the Augment session file (`~/.augment/session.json`).
+
+ ## Requirements
+
+ - Node.js >= 20
+
+ ## License
+
+ UNLICENSED
+
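For reference, a minimal launch command wiring up the documented variable might look like this; the key value is a placeholder, and `AUGMENT_API_TOKEN` is accepted as an alternative (see `dist/auth.js` below):

```bash
# Placeholder key value; AUGMENT_API_TOKEN works as well (checked in dist/auth.js)
AUGMENT_API_KEY="your-api-key" npx @suco/su-auggie-mcp
```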
package/dist/adaptive-batcher.d.ts ADDED
@@ -0,0 +1,70 @@
+ /**
+  * Adaptive batching with self-tuning concurrency
+  * Batches by content size, adjusts parallelism based on throughput
+  */
+ /** File entry for batching */
+ export interface FileEntry {
+     path: string;
+     contents: string;
+ }
+ /** Batch with metadata */
+ export interface Batch {
+     files: FileEntry[];
+     totalBytes: number;
+ }
+ /** Batching configuration */
+ export interface AdaptiveBatcherConfig {
+     /** Target bytes per batch (default: 4MB, from INDEXING_CONFIG.BATCH_MAX_BYTES) */
+     targetBatchBytes?: number;
+     /** Max files per batch (default: 1000, SDK limit) */
+     maxFilesPerBatch?: number;
+     /** Initial concurrency (default: 4, from INDEXING_CONFIG.INITIAL_CONCURRENCY) */
+     initialConcurrency?: number;
+     /** Minimum concurrency floor (default: 1) */
+     minConcurrency?: number;
+     /** Time threshold to increase concurrency in ms (default: 1500) */
+     fastThresholdMs?: number;
+     /** Time threshold to decrease concurrency in ms (default: 3000) */
+     slowThresholdMs?: number;
+ }
+ /** Metrics for monitoring */
+ export interface BatcherMetrics {
+     totalBatches: number;
+     totalFiles: number;
+     totalBytes: number;
+     currentConcurrency: number;
+     avgBatchTimeMs: number;
+     peakConcurrency: number;
+     concurrencyBounds: {
+         min: number;
+         max: number;
+     };
+     batchLimits: {
+         maxBytes: number;
+         maxFiles: number;
+     };
+ }
+ /** Adaptive batcher with self-tuning concurrency */
+ export declare class AdaptiveBatcher {
+     private config;
+     private concurrency;
+     private peakConcurrency;
+     private batchTimes;
+     private totalBatches;
+     private totalFiles;
+     private totalBytes;
+     constructor(config?: AdaptiveBatcherConfig);
+     /** Get current concurrency level */
+     getConcurrency(): number;
+     /** Get batch size limits for streaming batching */
+     getBatchLimits(): {
+         maxBytes: number;
+         maxFiles: number;
+     };
+     /** Get metrics for monitoring */
+     getMetrics(): BatcherMetrics;
+     /** Record batch completion and adjust concurrency */
+     recordBatchComplete(timeMs: number, fileCount: number, bytes: number, error?: boolean, rateLimited?: boolean): void;
+     /** Create batches from files based on target byte size and max file count */
+     createBatches(files: FileEntry[]): Generator<Batch>;
+ }
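To make the declared surface concrete, here is a hedged sketch that constructs an `AdaptiveBatcher` with overrides and reads its limits and metrics; the deep-import path is an assumption about the package layout, since the export map is not shown in this diff:

```ts
// Sketch only: the import path below assumes direct access to the compiled dist files.
import { AdaptiveBatcher } from '@suco/su-auggie-mcp/dist/adaptive-batcher.js';

const batcher = new AdaptiveBatcher({
    targetBatchBytes: 2 * 1024 * 1024, // override the 4MB default
    maxFilesPerBatch: 500,
    initialConcurrency: 2,
});

console.log(batcher.getBatchLimits());                 // { maxBytes: 2097152, maxFiles: 500 }
console.log(batcher.getMetrics().currentConcurrency);  // 2
```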
package/dist/adaptive-batcher.js ADDED
@@ -0,0 +1,119 @@
+ /**
+  * Adaptive batching with self-tuning concurrency
+  * Batches by content size, adjusts parallelism based on throughput
+  */
+ import { INDEXING_CONFIG } from './config.js';
+ import { createLogger } from './logger.js';
+ const logger = createLogger('AdaptiveBatcher');
+ /** Default configuration - uses INDEXING_CONFIG for tunables */
+ const DEFAULT_CONFIG = {
+     targetBatchBytes: INDEXING_CONFIG.BATCH_MAX_BYTES, // Default: 4MB (configurable)
+     maxFilesPerBatch: INDEXING_CONFIG.BATCH_MAX_FILES, // Default: 1000 (configurable)
+     initialConcurrency: INDEXING_CONFIG.INITIAL_CONCURRENCY, // Default: 4 (configurable)
+     minConcurrency: 1,
+     fastThresholdMs: 1500, // <1.5s = increase concurrency
+     slowThresholdMs: 3000, // >3s = decrease concurrency
+ };
+ /** Adaptive batcher with self-tuning concurrency */
+ export class AdaptiveBatcher {
+     config;
+     concurrency;
+     peakConcurrency;
+     batchTimes = [];
+     totalBatches = 0;
+     totalFiles = 0;
+     totalBytes = 0;
+     constructor(config = {}) {
+         this.config = { ...DEFAULT_CONFIG, ...config };
+         this.concurrency = this.config.initialConcurrency;
+         this.peakConcurrency = this.concurrency;
+     }
+     /** Get current concurrency level */
+     getConcurrency() {
+         return this.concurrency;
+     }
+     /** Get batch size limits for streaming batching */
+     getBatchLimits() {
+         return {
+             maxBytes: this.config.targetBatchBytes,
+             maxFiles: this.config.maxFilesPerBatch,
+         };
+     }
+     /** Get metrics for monitoring */
+     getMetrics() {
+         const avgBatchTimeMs = this.batchTimes.length > 0
+             ? this.batchTimes.reduce((a, b) => a + b, 0) / this.batchTimes.length
+             : 0;
+         return {
+             totalBatches: this.totalBatches,
+             totalFiles: this.totalFiles,
+             totalBytes: this.totalBytes,
+             currentConcurrency: this.concurrency,
+             avgBatchTimeMs: Math.round(avgBatchTimeMs),
+             peakConcurrency: this.peakConcurrency,
+             concurrencyBounds: {
+                 min: this.config.minConcurrency,
+                 max: 20, // Hard ceiling enforced in recordBatchComplete
+             },
+             batchLimits: {
+                 maxBytes: this.config.targetBatchBytes,
+                 maxFiles: this.config.maxFilesPerBatch,
+             },
+         };
+     }
+     /** Record batch completion and adjust concurrency */
+     recordBatchComplete(timeMs, fileCount, bytes, error, rateLimited) {
+         this.totalBatches++;
+         this.totalFiles += fileCount;
+         this.totalBytes += bytes;
+         this.batchTimes.push(timeMs);
+         // Keep last 20 batch times for averaging
+         if (this.batchTimes.length > 20) {
+             this.batchTimes.shift();
+         }
+         const oldConcurrency = this.concurrency;
+         if (rateLimited) {
+             // Rate limited - back off 50%
+             this.concurrency = Math.max(this.config.minConcurrency, Math.floor(this.concurrency * 0.5));
+             logger.warn(`Rate limited, reducing concurrency: ${oldConcurrency} -> ${this.concurrency}`);
+         }
+         else if (error) {
+             // Error - back off 30%
+             this.concurrency = Math.max(this.config.minConcurrency, Math.floor(this.concurrency * 0.7));
+             logger.debug(`Batch error, reducing concurrency: ${oldConcurrency} -> ${this.concurrency}`);
+         }
+         else if (timeMs < this.config.fastThresholdMs) {
+             // Fast - increase concurrency, capped at the hard ceiling of 20
+             this.concurrency = Math.min(20, this.concurrency + 1);
+             this.peakConcurrency = Math.max(this.peakConcurrency, this.concurrency);
+             logger.debug(`Fast batch (${timeMs}ms), increasing concurrency: ${oldConcurrency} -> ${this.concurrency}`);
+         }
+         else if (timeMs > this.config.slowThresholdMs) {
+             // Slow - decrease concurrency
+             this.concurrency = Math.max(this.config.minConcurrency, this.concurrency - 1);
+             logger.debug(`Slow batch (${timeMs}ms), reducing concurrency: ${oldConcurrency} -> ${this.concurrency}`);
+         }
+     }
+     /** Create batches from files based on target byte size and max file count */
+     *createBatches(files) {
+         let currentBatch = [];
+         let currentBytes = 0;
+         for (const file of files) {
+             const fileBytes = Buffer.byteLength(file.contents, 'utf-8');
+             // If adding this file exceeds byte target OR file count limit, yield current batch first
+             const exceedsByteLimit = currentBytes > 0 && currentBytes + fileBytes > this.config.targetBatchBytes;
+             const exceedsFileLimit = currentBatch.length >= this.config.maxFilesPerBatch;
+             if (exceedsByteLimit || exceedsFileLimit) {
+                 yield { files: currentBatch, totalBytes: currentBytes };
+                 currentBatch = [];
+                 currentBytes = 0;
+             }
+             currentBatch.push(file);
+             currentBytes += fileBytes;
+         }
+         // Yield remaining files
+         if (currentBatch.length > 0) {
+             yield { files: currentBatch, totalBytes: currentBytes };
+         }
+     }
+ }
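As a usage illustration, the following hedged sketch drives `createBatches` and feeds completion timings back through `recordBatchComplete`; `uploadBatch` is a hypothetical stand-in for the actual SDK upload call, which this diff does not show:

```ts
import { AdaptiveBatcher, type Batch, type FileEntry } from './adaptive-batcher.js';

// Hypothetical uploader; in this package the real call goes through the Auggie SDK.
declare function uploadBatch(batch: Batch): Promise<void>;

async function indexAll(batcher: AdaptiveBatcher, files: FileEntry[]): Promise<void> {
    // In real use, getConcurrency() would bound a pool of parallel workers;
    // this sequential loop just shows the feedback contract.
    for (const batch of batcher.createBatches(files)) {
        const start = Date.now();
        try {
            await uploadBatch(batch);
            // Batches under 1.5s raise concurrency (up to 20); over 3s lower it.
            batcher.recordBatchComplete(Date.now() - start, batch.files.length, batch.totalBytes);
        }
        catch (err) {
            const rateLimited = /429|rate limit/i.test(String(err)); // crude check, illustration only
            batcher.recordBatchComplete(Date.now() - start, batch.files.length, batch.totalBytes, true, rateLimited);
        }
    }
}
```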
package/dist/auth.d.ts ADDED
@@ -0,0 +1,21 @@
+ /**
+  * Authentication utilities for Augment API
+  * Checks credentials from environment variables and session file
+  */
+ /** Callback for auth state changes */
+ type AuthChangeCallback = (hasAuth: boolean) => void;
+ /** Check if credentials exist in environment variables */
+ export declare function hasEnvCredentials(): boolean;
+ /** Check if session file exists and contains valid credentials */
+ export declare function hasSessionCredentials(): boolean;
+ /** Check if any credentials are available (env or session file) */
+ export declare function hasCredentials(): boolean;
+ /** Get session file path (for watching) */
+ export declare function getSessionFilePath(): string;
+ /** Register a callback for auth state changes */
+ export declare function onAuthChange(callback: AuthChangeCallback): void;
+ /** Start watching session file for changes */
+ export declare function startSessionWatcher(): void;
+ /** Stop watching session file */
+ export declare function stopSessionWatcher(): void;
+ export {};
package/dist/auth.js ADDED
@@ -0,0 +1,111 @@
+ /**
+  * Authentication utilities for Augment API
+  * Checks credentials from environment variables and session file
+  */
+ import * as fs from 'node:fs';
+ import * as path from 'node:path';
+ import * as os from 'node:os';
+ import { createLogger } from './logger.js';
+ import { sendLogToClient } from './mcp-notifications.js';
+ const logger = createLogger('Auth');
+ /** Path to Augment session file */
+ const SESSION_FILE_PATH = path.join(os.homedir(), '.augment', 'session.json');
+ /** Session file watcher state */
+ let sessionWatcher = null;
+ let authChangeCallbacks = [];
+ let lastKnownAuthState = null;
+ /** Check if credentials exist in environment variables */
+ export function hasEnvCredentials() {
+     return !!(process.env.AUGMENT_API_TOKEN || process.env.AUGMENT_API_KEY);
+ }
+ /** Check if session file exists and contains valid credentials */
+ export function hasSessionCredentials() {
+     try {
+         if (!fs.existsSync(SESSION_FILE_PATH)) {
+             return false;
+         }
+         const content = fs.readFileSync(SESSION_FILE_PATH, 'utf-8');
+         const session = JSON.parse(content);
+         // Check for token in session (accessToken is the actual field name)
+         return !!(session.accessToken || session.token || session.access_token || session.api_token);
+     }
+     catch (err) {
+         logger.debug('Failed to read session file', err);
+         return false;
+     }
+ }
+ /** Check if any credentials are available (env or session file) */
+ export function hasCredentials() {
+     return hasEnvCredentials() || hasSessionCredentials();
+ }
+ /** Get session file path (for watching) */
+ export function getSessionFilePath() {
+     return SESSION_FILE_PATH;
+ }
+ /** Register a callback for auth state changes */
+ export function onAuthChange(callback) {
+     authChangeCallbacks.push(callback);
+ }
+ /** Start watching session file for changes */
+ export function startSessionWatcher() {
+     if (sessionWatcher) {
+         logger.debug('Session watcher already running');
+         return;
+     }
+     const sessionDir = path.dirname(SESSION_FILE_PATH);
+     // Ensure directory exists before watching
+     if (!fs.existsSync(sessionDir)) {
+         try {
+             fs.mkdirSync(sessionDir, { recursive: true });
+         }
+         catch (err) {
+             logger.warn('Failed to create .augment directory for watching', err);
+             return;
+         }
+     }
+     lastKnownAuthState = hasCredentials();
+     logger.info(`Starting session file watcher (current auth: ${lastKnownAuthState})`);
+     try {
+         // Watch the directory to catch file creation
+         sessionWatcher = fs.watch(sessionDir, (eventType, filename) => {
+             if (filename === 'session.json') {
+                 handleSessionFileChange();
+             }
+         });
+         sessionWatcher.on('error', (err) => {
+             logger.warn('Session watcher error', err);
+         });
+     }
+     catch (err) {
+         logger.warn('Failed to start session watcher', err);
+     }
+ }
+ /** Handle session file change */
+ function handleSessionFileChange() {
+     const currentAuthState = hasCredentials();
+     if (currentAuthState !== lastKnownAuthState) {
+         logger.info(`Auth state changed: ${lastKnownAuthState} -> ${currentAuthState}`);
+         lastKnownAuthState = currentAuthState;
+         if (currentAuthState) {
+             sendLogToClient('info', 'Augment credentials detected. Starting indexing...');
+         }
+         // Notify all callbacks
+         for (const callback of authChangeCallbacks) {
+             try {
+                 callback(currentAuthState);
+             }
+             catch (err) {
+                 logger.error('Auth change callback error', err);
+             }
+         }
+     }
+ }
+ /** Stop watching session file */
+ export function stopSessionWatcher() {
+     if (sessionWatcher) {
+         sessionWatcher.close();
+         sessionWatcher = null;
+         authChangeCallbacks = [];
+         logger.debug('Session watcher stopped');
+     }
+ }
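A short consumer-side sketch using only the functions exported above; the log line inside the callback is a placeholder for whatever the server does when credentials appear:

```ts
import { hasCredentials, onAuthChange, startSessionWatcher, stopSessionWatcher } from './auth.js';

// Fires only when the combined auth state (env vars or session file) actually flips.
onAuthChange((hasAuth) => {
    if (hasAuth) {
        console.log('Credentials appeared; indexing could start here.'); // placeholder action
    }
});
startSessionWatcher(); // watches ~/.augment/ for session.json changes

console.log('Authenticated now?', hasCredentials());
// Later, e.g. during shutdown:
stopSessionWatcher();
```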
package/dist/config.d.ts ADDED
@@ -0,0 +1,56 @@
+ /**
+  * Configuration constants for suco-auggie-mcp
+  */
+ /** Indexing batch configuration - tune based on experimentation */
+ export declare const INDEXING_CONFIG: {
+     /** Batch size for streaming discovery (yield batch when this many files found) */
+     readonly DISCOVERY_BATCH_SIZE: 100;
+     /** Number of parallel batch indexing operations */
+     readonly PARALLEL_BATCHES: 3;
+     /** Maximum file size in bytes (SDK limit is 1MB) */
+     readonly MAX_FILE_SIZE: number;
+     /** Maximum bytes per batch (SDK default: 2MB, optimal: 4MB) */
+     readonly BATCH_MAX_BYTES: number;
+     /** Maximum files per batch (SDK default: 1000) */
+     readonly BATCH_MAX_FILES: number;
+     /** Initial concurrency for batch uploads */
+     readonly INITIAL_CONCURRENCY: number;
+     /** Maximum retry attempts for failed batches (0 = no retries) - Tier 1 immediate retries */
+     readonly MAX_RETRIES: number;
+     /** Base delay for exponential backoff in ms */
+     readonly RETRY_BASE_DELAY_MS: number;
+     /** Maximum delay between retries in ms */
+     readonly RETRY_MAX_DELAY_MS: number;
+     /** Tier 2: Cooldown period in ms before retrying failed files (default: 5 minutes) */
+     readonly COOLDOWN_PERIOD_MS: number;
+     /** Tier 2: Maximum cooldown cycles before giving up (default: 3) */
+     readonly MAX_COOLDOWN_CYCLES: number;
+ };
+ /** File watcher configuration */
+ export declare const WATCHER_CONFIG: {
+     /** Debounce delay in ms before processing file changes */
+     readonly DEBOUNCE_MS: number;
+     /** Delay before initial scan starts after watcher ready */
+     readonly INITIAL_SCAN_DELAY_MS: 100;
+ };
+ /** Persistence configuration */
+ export declare const PERSISTENCE_CONFIG: {
+     /** Directory name for state files within workspace */
+     readonly STATE_DIR: ".suco-auggie";
+     /** Filename for index state */
+     readonly STATE_FILE: "index-state.json";
+     /** Filename for file metadata (mtimes, sizes) */
+     readonly METADATA_FILE: "file-metadata.json";
+     /** Filename for cooldown queue (failed files awaiting retry) */
+     readonly COOLDOWN_FILE: "cooldown-queue.json";
+ };
+ /** Get debug mode */
+ export declare function isDebugMode(): boolean;
+ /** Set debug mode (called from CLI when --debug is passed) */
+ export declare function setDebugMode(enabled: boolean): void;
+ /** Check if persistence is enabled */
+ export declare function isPersistenceEnabled(): boolean;
+ /** Set persistence mode (called from CLI when --no-persistent is passed) */
+ export declare function setPersistenceEnabled(enabled: boolean): void;
+ /** @deprecated Use isDebugMode() instead */
+ export declare const DEBUG: boolean;
package/dist/config.js ADDED
@@ -0,0 +1,68 @@
+ /**
+  * Configuration constants for suco-auggie-mcp
+  */
+ /** Indexing batch configuration - tune based on experimentation */
+ export const INDEXING_CONFIG = {
+     /** Batch size for streaming discovery (yield batch when this many files found) */
+     DISCOVERY_BATCH_SIZE: 100,
+     /** Number of parallel batch indexing operations */
+     PARALLEL_BATCHES: 3,
+     /** Maximum file size in bytes (SDK limit is 1MB) */
+     MAX_FILE_SIZE: 1024 * 1024, // 1MB
+     /** Maximum bytes per batch (SDK default: 2MB, optimal: 4MB) */
+     BATCH_MAX_BYTES: parseInt(process.env.SUCO_AUGGIE_BATCH_MAX_BYTES || String(4 * 1024 * 1024), 10),
+     /** Maximum files per batch (SDK default: 1000) */
+     BATCH_MAX_FILES: parseInt(process.env.SUCO_AUGGIE_BATCH_MAX_FILES || '1000', 10),
+     /** Initial concurrency for batch uploads */
+     INITIAL_CONCURRENCY: parseInt(process.env.SUCO_AUGGIE_INITIAL_CONCURRENCY || '4', 10),
+     /** Maximum retry attempts for failed batches (0 = no retries) - Tier 1 immediate retries */
+     MAX_RETRIES: parseInt(process.env.SUCO_AUGGIE_MAX_RETRIES || '3', 10),
+     /** Base delay for exponential backoff in ms */
+     RETRY_BASE_DELAY_MS: parseInt(process.env.SUCO_AUGGIE_RETRY_BASE_DELAY_MS || '1000', 10),
+     /** Maximum delay between retries in ms */
+     RETRY_MAX_DELAY_MS: parseInt(process.env.SUCO_AUGGIE_RETRY_MAX_DELAY_MS || '30000', 10),
+     /** Tier 2: Cooldown period in ms before retrying failed files (default: 5 minutes) */
+     COOLDOWN_PERIOD_MS: parseInt(process.env.SUCO_AUGGIE_COOLDOWN_PERIOD_MS || String(5 * 60 * 1000), 10),
+     /** Tier 2: Maximum cooldown cycles before giving up (default: 3) */
+     MAX_COOLDOWN_CYCLES: parseInt(process.env.SUCO_AUGGIE_MAX_COOLDOWN_CYCLES || '3', 10),
+ };
+ /** File watcher configuration */
+ export const WATCHER_CONFIG = {
+     /** Debounce delay in ms before processing file changes */
+     DEBOUNCE_MS: parseInt(process.env.SUCO_AUGGIE_DEBOUNCE_MS || '500', 10),
+     /** Delay before initial scan starts after watcher ready */
+     INITIAL_SCAN_DELAY_MS: 100,
+ };
+ /** Persistence configuration */
+ export const PERSISTENCE_CONFIG = {
+     /** Directory name for state files within workspace */
+     STATE_DIR: '.suco-auggie',
+     /** Filename for index state */
+     STATE_FILE: 'index-state.json',
+     /** Filename for file metadata (mtimes, sizes) */
+     METADATA_FILE: 'file-metadata.json',
+     /** Filename for cooldown queue (failed files awaiting retry) */
+     COOLDOWN_FILE: 'cooldown-queue.json',
+ };
+ /** Debug mode - can be set via env var or CLI flag */
+ let debugMode = process.env.SUCO_AUGGIE_DEBUG === '1';
+ /** Get debug mode */
+ export function isDebugMode() {
+     return debugMode;
+ }
+ /** Set debug mode (called from CLI when --debug is passed) */
+ export function setDebugMode(enabled) {
+     debugMode = enabled;
+ }
+ /** Persistence mode - enabled by default, can be disabled via CLI */
+ let persistenceEnabled = true;
+ /** Check if persistence is enabled */
+ export function isPersistenceEnabled() {
+     return persistenceEnabled;
+ }
+ /** Set persistence mode (called from CLI when --no-persistent is passed) */
+ export function setPersistenceEnabled(enabled) {
+     persistenceEnabled = enabled;
+ }
+ /** @deprecated Use isDebugMode() instead */
+ export const DEBUG = process.env.SUCO_AUGGIE_DEBUG === '1';
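Since all of these values are read once at module load, environment overrides must be in place before the server starts. A sketch of a tuned launch:

```bash
# All SUCO_AUGGIE_* variables are parsed at import time (see the parseInt calls above),
# so set them before launching the server.
SUCO_AUGGIE_BATCH_MAX_BYTES=$((8 * 1024 * 1024)) \
SUCO_AUGGIE_INITIAL_CONCURRENCY=8 \
SUCO_AUGGIE_DEBUG=1 \
npx @suco/su-auggie-mcp
```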
package/dist/file-indexer.d.ts ADDED
@@ -0,0 +1,154 @@
+ /**
+  * FileIndexer - Centralized file indexing with file-level error handling.
+  *
+  * Single entry point for ALL file indexing operations:
+  * - Initial discovery indexing
+  * - File watcher changes
+  * - Retry (tier 1 & tier 2)
+  *
+  * Errors are handled at FILE level, not BATCH level.
+  */
+ import { DirectContext } from '@augmentcode/auggie-sdk';
+ import { AdaptiveBatcher } from './adaptive-batcher.js';
+ import type { FileMetadataMap } from './types.js';
+ /** File-level error classification */
+ export type FileErrorType = 'skip' | 'retryable' | 'permanent' | 'fatal';
+ /** Result of attempting to index a file */
+ export interface FileIndexResult {
+     path: string;
+     success: boolean;
+     error?: FileErrorType;
+     message?: string;
+ }
+ /** Statistics for FileIndexer */
+ export interface FileIndexerStats {
+     /** Files successfully indexed */
+     indexed: number;
+     /** Files skipped (too large, binary, etc.) */
+     skipped: number;
+     /** Files in tier 1 retry queue */
+     pendingRetry: number;
+     /** Files in tier 2 cooldown queue */
+     pendingCooldown: number;
+     /** Files recovered via retry */
+     recovered: number;
+     /** Files permanently failed */
+     failed: number;
+     /** Current cooldown cycle */
+     cooldownCycle: number;
+     /** Next cooldown retry time */
+     cooldownNextRetryAt: string | null;
+ }
+ /** Callback for progress updates */
+ export type ProgressCallback = (indexed: number, total: number, message: string) => void;
+ /**
+  * Classify an error at the FILE level.
+  * Returns: skip | retryable | permanent | fatal
+  * @internal Exported for testing
+  */
+ export declare function classifyFileError(err: unknown): FileErrorType;
+ /**
+  * Check if a file is binary (non-text).
+  * Uses null byte detection in first 8KB.
+  * @internal Exported for testing
+  */
+ export declare function isBinaryFile(buffer: Buffer): boolean;
+ /**
+  * Calculate exponential backoff delay with jitter.
+  * @internal Exported for testing
+  */
+ export declare function calculateBackoff(attempt: number): number;
+ /**
+  * FileIndexer - centralized indexing with file-level error handling.
+  */
+ export declare class FileIndexer {
+     private context;
+     private root;
+     private batcher;
+     private indexQueue;
+     private retryQueue;
+     private cooldownQueue;
+     private cooldownTimer;
+     private cooldownNextRetryAt;
+     private stats;
+     constructor(context: DirectContext, root: string);
+     /** Get current statistics */
+     getStats(): FileIndexerStats;
+     /** Get batcher metrics */
+     getBatcherMetrics(): import("./adaptive-batcher.js").BatcherMetrics;
+     /** Get batcher for external access */
+     getBatcher(): AdaptiveBatcher;
+     /**
+      * Pre-filter a file before reading contents.
+      * Returns null if file should be skipped, or file entry if valid.
+      */
+     private preFilterFile;
+     /**
+      * Index a batch of files with error isolation.
+      * On batch failure, isolates problematic files and retries good ones.
+      */
+     private indexBatchWithIsolation;
+     /**
+      * Route failed files to appropriate tier based on error type.
+      */
+     private routeFailedFiles;
+     /**
+      * Add a file to cooldown queue (tier 2).
+      */
+     private addToCooldown;
+     /**
+      * Process tier 1 retry queue.
+      */
+     processRetryQueue(): Promise<void>;
+     /**
+      * Schedule cooldown retry if not already scheduled.
+      */
+     private scheduleCooldownRetry;
+     /**
+      * Process cooldown queue (tier 2).
+      */
+     processCooldownQueue(): Promise<void>;
+     /**
+      * Main entry point: Index a list of file paths.
+      * Handles batching, pre-filtering, error isolation, and retry routing.
+      */
+     indexFiles(absolutePaths: string[], onProgress?: ProgressCallback): Promise<{
+         metadata: FileMetadataMap;
+         indexed: number;
+         skipped: number;
+     }>;
+     /**
+      * Index a single file (for watcher events).
+      */
+     indexSingleFile(absolutePath: string): Promise<boolean>;
+     /**
+      * Remove files from index.
+      */
+     removeFiles(relativePaths: string[]): Promise<void>;
+     /**
+      * Load cooldown queue from disk.
+      */
+     private loadCooldownQueue;
+     /**
+      * Save cooldown queue to disk.
+      */
+     private saveCooldownQueue;
+     /**
+      * Stop the indexer and clean up.
+      */
+     stop(): void;
+     /**
+      * Reset statistics (for reindex).
+      */
+     resetStats(): void;
+     /**
+      * Close the indexer (alias for stop).
+      */
+     close(): void;
+     /**
+      * Trigger retry if conditions are met (no pending work, has failed files).
+      * This is a fire-and-forget call: it triggers an async retry and returns immediately.
+      * Used by tool calls to opportunistically retry failed files.
+      */
+     triggerRetryIfNeeded(): void;
+ }
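Finally, a hedged end-to-end sketch against the declaration above. How a `DirectContext` is obtained is SDK-specific and not shown in this diff, so it is declared rather than constructed, and the `./file-indexer.js` import path follows the inferred file name:

```ts
import { DirectContext } from '@augmentcode/auggie-sdk';
import { FileIndexer } from './file-indexer.js';

// Assumption: a DirectContext instance already exists (SDK setup not shown in this diff).
declare const ctx: DirectContext;

async function main(): Promise<void> {
    const indexer = new FileIndexer(ctx, '/path/to/workspace');
    // Batching, pre-filtering, error isolation, and retry routing happen inside indexFiles.
    const { indexed, skipped } = await indexer.indexFiles(
        ['/path/to/workspace/src/index.ts'],
        (done, total, msg) => console.log(`${done}/${total}: ${msg}`),
    );
    console.log({ indexed, skipped }, indexer.getStats());
    indexer.stop(); // clears the cooldown timer and cleans up
}
```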