rlm-analyzer 1.1.0

package/dist/prompts.d.ts ADDED
@@ -0,0 +1,48 @@
+ /**
+ * RLM System Prompts
+ * Based on MIT CSAIL Recursive Language Models research
+ */
+ import type { RLMConfig } from './types.js';
+ /**
+ * Code Analysis System Prompt
+ * Enables deep codebase analysis with recursive sub-LLM calls
+ */
+ export declare const CODE_ANALYSIS_PROMPT = "You are an expert code analyst using Recursive Language Models (RLMs).\nYour task is to analyze codebases by writing and executing Python code, delegating complex analysis to sub-LLMs.\n\n## Environment Variables\n- `file_index`: dict mapping file paths to their contents\n- `files`: list of all file paths\n\n## Available Functions\n- `print(x)`: Output text or data\n- `llm_query(prompt)`: **KEY FEATURE** - Delegate analysis to a sub-LLM. Use this to analyze individual files or answer specific questions about code.\n- `FINAL(\"answer\")`: **REQUIRED** - Call this with your complete answer when done\n\n## CRITICAL: You MUST call FINAL() within 5 turns with your complete answer!\n\n## The Power of Sub-LLM Calls\nThe `llm_query()` function is your most powerful tool. Use it to:\n- Analyze individual files: `analysis = llm_query(f\"Analyze this TypeScript file: {code}\")`\n- Answer specific questions: `answer = llm_query(f\"What design patterns are used here? {code}\")`\n- Summarize complex code: `summary = llm_query(f\"Summarize the main functionality: {code}\")`\n\n## Recommended Workflow\n1. **Explore**: `print(files)` to see available files\n2. **Identify key files**: Look for entry points (index, main, app), configs (package.json, tsconfig), and core modules\n3. **Delegate analysis**: Use `llm_query()` to analyze 3-5 key files - this gives much better results than just reading!\n4. **Synthesize**: Combine the sub-LLM analyses into your final answer\n5. **FINAL()**: Call with your comprehensive answer\n\n## Example - Good Use of Sub-LLMs\n```python\n# Explore the codebase\nprint(files[:20])\n\n# Analyze key files using sub-LLMs for deeper insight\nindex_analysis = llm_query(f\"Analyze this entry point file and identify main exports and purpose:\\n{file_index['src/index.ts'][:3000]}\")\nprint(\"Index analysis:\", index_analysis[:500])\n\nconfig_analysis = llm_query(f\"Analyze this config/package file for tech stack and dependencies:\\n{file_index['package.json']}\")\nprint(\"Config analysis:\", config_analysis[:500])\n\ncore_analysis = llm_query(f\"Analyze this core module for architecture patterns:\\n{file_index['src/core.ts'][:3000]}\")\nprint(\"Core analysis:\", core_analysis[:500])\n\n# Synthesize all analyses into final answer\nFINAL(f\"\"\"\n## Summary\nBased on analyzing key files in this codebase:\n\n{index_analysis}\n\n## Tech Stack & Dependencies\n{config_analysis}\n\n## Architecture\n{core_analysis}\n\n## Conclusion\n...\n\"\"\")\n```\n\n## MANDATORY Rules\n- **YOU MUST USE llm_query()** before calling FINAL() - this is enforced!\n- Minimum llm_query() calls required:\n - 200+ files: 5 calls\n - 100+ files: 4 calls \n - 50+ files: 3 calls\n - 20+ files: 2 calls\n- FINAL() will be REJECTED if you don't make enough llm_query() calls\n- Use slicing for large content: `content[:3000]`\n- Combine sub-LLM results for comprehensive analysis\n\n## Scaling for Large Codebases\nFor codebases with many files:\n1. Analyze entry points (index, main, App)\n2. Analyze package.json/config for tech stack\n3. Analyze 2-3 core modules/services\n4. Analyze types/interfaces for data models\n5. Analyze at least one page/component for UI patterns\n\nMake MULTIPLE llm_query() calls - this is how you get quality analysis!";
+ /**
+ * Architecture Analysis Prompt
+ */
+ export declare const ARCHITECTURE_PROMPT = "Analyze the architecture of this codebase. Focus on:\n1. Directory structure and organization\n2. Key modules and their responsibilities\n3. Dependencies and data flow between components\n4. Design patterns used\n5. Entry points and main application flow\n\nProvide a structured analysis with clear sections.";
+ /**
+ * Dependency Analysis Prompt
+ */
+ export declare const DEPENDENCY_PROMPT = "Analyze the dependencies in this codebase:\n1. External packages/libraries used\n2. Internal module dependencies\n3. Circular dependency risks\n4. Tightly coupled components\n5. Suggestions for decoupling\n\nCreate a dependency map and highlight any concerns.";
+ /**
+ * Security Analysis Prompt
+ */
+ export declare const SECURITY_PROMPT = "Perform a security analysis of this codebase:\n1. Input validation patterns\n2. Authentication/authorization flows\n3. Data sanitization\n4. Sensitive data handling\n5. Common vulnerabilities (OWASP Top 10)\n6. API security patterns\n\nList findings by severity (Critical, High, Medium, Low).";
+ /**
+ * Performance Analysis Prompt
+ */
+ export declare const PERFORMANCE_PROMPT = "Analyze performance characteristics:\n1. Potential bottlenecks\n2. Memory usage patterns\n3. Async/await usage\n4. Caching strategies\n5. Database query patterns\n6. Bundle size considerations\n\nProvide specific recommendations for optimization.";
+ /**
+ * Refactoring Analysis Prompt
+ */
+ export declare const REFACTOR_PROMPT = "Identify refactoring opportunities:\n1. Code duplication\n2. Long methods/functions\n3. Complex conditionals\n4. God classes/modules\n5. Dead code\n6. Inconsistent patterns\n\nPrioritize suggestions by impact and effort.";
+ /**
+ * Summary Prompt
+ */
+ export declare const SUMMARY_PROMPT = "Provide a comprehensive summary of this codebase:\n1. Purpose and main functionality\n2. Tech stack and frameworks\n3. Key features\n4. Code organization\n5. Notable patterns or approaches\n6. Potential improvements\n\nKeep it concise but informative.";
+ /**
+ * Get the appropriate system prompt for a query type
+ * @param _mode - Analysis mode (reserved for future mode-specific prompts)
+ */
+ export declare function getSystemPrompt(_mode: RLMConfig['mode']): string;
+ /**
+ * Get analysis-specific prompt
+ */
+ export declare function getAnalysisPrompt(analysisType: string): string;
+ /**
+ * Build initial context message
+ */
+ export declare function buildContextMessage(fileCount: number, fileList: string[], query: string): string;
+ //# sourceMappingURL=prompts.d.ts.map
package/dist/prompts.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"prompts.d.ts","sourceRoot":"","sources":["../src/prompts.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,YAAY,CAAC;AAE5C;;;GAGG;AACH,eAAO,MAAM,oBAAoB,2sGA+EuC,CAAC;AAEzE;;GAEG;AACH,eAAO,MAAM,mBAAmB,yTAOmB,CAAC;AAEpD;;GAEG;AACH,eAAO,MAAM,iBAAiB,wQAOsB,CAAC;AAErD;;GAEG;AACH,eAAO,MAAM,eAAe,ySAQ6B,CAAC;AAE1D;;GAEG;AACH,eAAO,MAAM,kBAAkB,2PAQoB,CAAC;AAEpD;;GAEG;AACH,eAAO,MAAM,eAAe,iOAQiB,CAAC;AAE9C;;GAEG;AACH,eAAO,MAAM,cAAc,gQAQM,CAAC;AAElC;;;GAGG;AACH,wBAAgB,eAAe,CAAC,KAAK,EAAE,SAAS,CAAC,MAAM,CAAC,GAAG,MAAM,CAIhE;AAED;;GAEG;AACH,wBAAgB,iBAAiB,CAAC,YAAY,EAAE,MAAM,GAAG,MAAM,CAiB9D;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,SAAS,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,EAAE,EAAE,KAAK,EAAE,MAAM,GAAG,MAAM,CA6BhG"}
package/dist/prompts.js ADDED
@@ -0,0 +1,220 @@
+ /**
+ * RLM System Prompts
+ * Based on MIT CSAIL Recursive Language Models research
+ */
+ /**
+ * Code Analysis System Prompt
+ * Enables deep codebase analysis with recursive sub-LLM calls
+ */
+ export const CODE_ANALYSIS_PROMPT = `You are an expert code analyst using Recursive Language Models (RLMs).
+ Your task is to analyze codebases by writing and executing Python code, delegating complex analysis to sub-LLMs.
+
+ ## Environment Variables
+ - \`file_index\`: dict mapping file paths to their contents
+ - \`files\`: list of all file paths
+
+ ## Available Functions
+ - \`print(x)\`: Output text or data
+ - \`llm_query(prompt)\`: **KEY FEATURE** - Delegate analysis to a sub-LLM. Use this to analyze individual files or answer specific questions about code.
+ - \`FINAL("answer")\`: **REQUIRED** - Call this with your complete answer when done
+
+ ## CRITICAL: You MUST call FINAL() within 5 turns with your complete answer!
+
+ ## The Power of Sub-LLM Calls
+ The \`llm_query()\` function is your most powerful tool. Use it to:
+ - Analyze individual files: \`analysis = llm_query(f"Analyze this TypeScript file: {code}")\`
+ - Answer specific questions: \`answer = llm_query(f"What design patterns are used here? {code}")\`
+ - Summarize complex code: \`summary = llm_query(f"Summarize the main functionality: {code}")\`
+
+ ## Recommended Workflow
+ 1. **Explore**: \`print(files)\` to see available files
+ 2. **Identify key files**: Look for entry points (index, main, app), configs (package.json, tsconfig), and core modules
+ 3. **Delegate analysis**: Use \`llm_query()\` to analyze 3-5 key files - this gives much better results than just reading!
+ 4. **Synthesize**: Combine the sub-LLM analyses into your final answer
+ 5. **FINAL()**: Call with your comprehensive answer
+
+ ## Example - Good Use of Sub-LLMs
+ \`\`\`python
+ # Explore the codebase
+ print(files[:20])
+
+ # Analyze key files using sub-LLMs for deeper insight
+ index_analysis = llm_query(f"Analyze this entry point file and identify main exports and purpose:\\n{file_index['src/index.ts'][:3000]}")
+ print("Index analysis:", index_analysis[:500])
+
+ config_analysis = llm_query(f"Analyze this config/package file for tech stack and dependencies:\\n{file_index['package.json']}")
+ print("Config analysis:", config_analysis[:500])
+
+ core_analysis = llm_query(f"Analyze this core module for architecture patterns:\\n{file_index['src/core.ts'][:3000]}")
+ print("Core analysis:", core_analysis[:500])
+
+ # Synthesize all analyses into final answer
+ FINAL(f"""
+ ## Summary
+ Based on analyzing key files in this codebase:
+
+ {index_analysis}
+
+ ## Tech Stack & Dependencies
+ {config_analysis}
+
+ ## Architecture
+ {core_analysis}
+
+ ## Conclusion
+ ...
+ """)
+ \`\`\`
+
+ ## MANDATORY Rules
+ - **YOU MUST USE llm_query()** before calling FINAL() - this is enforced!
+ - Minimum llm_query() calls required:
+ - 200+ files: 5 calls
+ - 100+ files: 4 calls
+ - 50+ files: 3 calls
+ - 20+ files: 2 calls
+ - FINAL() will be REJECTED if you don't make enough llm_query() calls
+ - Use slicing for large content: \`content[:3000]\`
+ - Combine sub-LLM results for comprehensive analysis
+
+ ## Scaling for Large Codebases
+ For codebases with many files:
+ 1. Analyze entry points (index, main, App)
+ 2. Analyze package.json/config for tech stack
+ 3. Analyze 2-3 core modules/services
+ 4. Analyze types/interfaces for data models
+ 5. Analyze at least one page/component for UI patterns
+
+ Make MULTIPLE llm_query() calls - this is how you get quality analysis!`;
+ /**
+ * Architecture Analysis Prompt
+ */
+ export const ARCHITECTURE_PROMPT = `Analyze the architecture of this codebase. Focus on:
+ 1. Directory structure and organization
+ 2. Key modules and their responsibilities
+ 3. Dependencies and data flow between components
+ 4. Design patterns used
+ 5. Entry points and main application flow
+
+ Provide a structured analysis with clear sections.`;
+ /**
+ * Dependency Analysis Prompt
+ */
+ export const DEPENDENCY_PROMPT = `Analyze the dependencies in this codebase:
+ 1. External packages/libraries used
+ 2. Internal module dependencies
+ 3. Circular dependency risks
+ 4. Tightly coupled components
+ 5. Suggestions for decoupling
+
+ Create a dependency map and highlight any concerns.`;
+ /**
+ * Security Analysis Prompt
+ */
+ export const SECURITY_PROMPT = `Perform a security analysis of this codebase:
+ 1. Input validation patterns
+ 2. Authentication/authorization flows
+ 3. Data sanitization
+ 4. Sensitive data handling
+ 5. Common vulnerabilities (OWASP Top 10)
+ 6. API security patterns
+
+ List findings by severity (Critical, High, Medium, Low).`;
+ /**
+ * Performance Analysis Prompt
+ */
+ export const PERFORMANCE_PROMPT = `Analyze performance characteristics:
+ 1. Potential bottlenecks
+ 2. Memory usage patterns
+ 3. Async/await usage
+ 4. Caching strategies
+ 5. Database query patterns
+ 6. Bundle size considerations
+
+ Provide specific recommendations for optimization.`;
+ /**
+ * Refactoring Analysis Prompt
+ */
+ export const REFACTOR_PROMPT = `Identify refactoring opportunities:
+ 1. Code duplication
+ 2. Long methods/functions
+ 3. Complex conditionals
+ 4. God classes/modules
+ 5. Dead code
+ 6. Inconsistent patterns
+
+ Prioritize suggestions by impact and effort.`;
+ /**
+ * Summary Prompt
+ */
+ export const SUMMARY_PROMPT = `Provide a comprehensive summary of this codebase:
+ 1. Purpose and main functionality
+ 2. Tech stack and frameworks
+ 3. Key features
+ 4. Code organization
+ 5. Notable patterns or approaches
+ 6. Potential improvements
+
+ Keep it concise but informative.`;
+ /**
+ * Get the appropriate system prompt for a query type
+ * @param _mode - Analysis mode (reserved for future mode-specific prompts)
+ */
+ export function getSystemPrompt(_mode) {
+ // Currently returns the same prompt for all modes
+ // Future: return different prompts based on mode (document-qa, education, etc.)
+ return CODE_ANALYSIS_PROMPT;
+ }
+ /**
+ * Get analysis-specific prompt
+ */
+ export function getAnalysisPrompt(analysisType) {
+ switch (analysisType) {
+ case 'architecture':
+ return ARCHITECTURE_PROMPT;
+ case 'dependencies':
+ return DEPENDENCY_PROMPT;
+ case 'security':
+ return SECURITY_PROMPT;
+ case 'performance':
+ return PERFORMANCE_PROMPT;
+ case 'refactor':
+ return REFACTOR_PROMPT;
+ case 'summary':
+ return SUMMARY_PROMPT;
+ default:
+ return '';
+ }
+ }
+ /**
+ * Build initial context message
+ */
+ export function buildContextMessage(fileCount, fileList, query) {
+ const preview = fileList.slice(0, 30).join('\n ');
+ const truncated = fileList.length > 30 ? `\n ... and ${fileList.length - 30} more` : '';
+ // Determine recommended sub-LLM calls based on codebase size
+ const recommendedCalls = fileCount > 100 ? '5-7' : fileCount > 50 ? '4-5' : '3-4';
+ return `## Codebase Context
+ Files loaded: ${fileCount} ${fileCount > 100 ? '(LARGE CODEBASE - use many sub-LLM calls!)' : ''}
+
+ File list:
+ ${preview}${truncated}
+
+ ## Your Task
+ ${query}
+
+ ## Instructions
+ 1. First, explore the file list to identify key files (entry points, configs, core modules)
+ 2. **Make ${recommendedCalls} llm_query() calls** to analyze different aspects:
+ - Entry point / main app file
+ - package.json / config files
+ - Core services or modules
+ - Types / interfaces
+ - Sample pages / components
+ 3. Synthesize the sub-LLM analyses into a comprehensive final answer
+ 4. Call FINAL("your answer") with your complete analysis
+
+ 🚫 WARNING: FINAL() will be REJECTED if you don't make at least ${recommendedCalls} llm_query() calls!
+ This is enforced - you cannot skip sub-LLM analysis.`;
+ }
+ //# sourceMappingURL=prompts.js.map
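
For orientation, here is a minimal sketch of how the prompt helpers above could be combined by a caller. The orchestration itself (the analyzer and CLI modules) is not part of this diff, and whether the root `rlm-analyzer` entry re-exports these functions is an assumption; the `rlm-analyzer/types` subpath is declared in package.json.

```ts
// Sketch only: builds the two strings a root-model call would need.
// Root-entry re-exports are an assumption; dist/index.js is not shown in this diff.
import { getSystemPrompt, getAnalysisPrompt, buildContextMessage } from 'rlm-analyzer';
import type { RLMConfig } from 'rlm-analyzer/types';

const mode: RLMConfig['mode'] = 'code-analysis';
const fileIndex: Record<string, string> = {
  'src/index.ts': '// ...contents...',
  'package.json': '{ "name": "demo" }',
};
const fileList = Object.keys(fileIndex);

// System prompt for the root model (currently identical for every mode).
const systemPrompt = getSystemPrompt(mode);

// Optional analysis-specific framing folded into the task text.
const task = `${getAnalysisPrompt('security')}\n\nFocus on the HTTP handlers.`;

// First user message: file inventory, the task, and the enforced llm_query() quota.
const contextMessage = buildContextMessage(fileList.length, fileList, task);

console.log(systemPrompt.slice(0, 120));
console.log(contextMessage);
```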
package/dist/prompts.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"prompts.js","sourceRoot":"","sources":["../src/prompts.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAIH;;;GAGG;AACH,MAAM,CAAC,MAAM,oBAAoB,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;wEA+EoC,CAAC;AAEzE;;GAEG;AACH,MAAM,CAAC,MAAM,mBAAmB,GAAG;;;;;;;mDAOgB,CAAC;AAEpD;;GAEG;AACH,MAAM,CAAC,MAAM,iBAAiB,GAAG;;;;;;;oDAOmB,CAAC;AAErD;;GAEG;AACH,MAAM,CAAC,MAAM,eAAe,GAAG;;;;;;;;yDAQ0B,CAAC;AAE1D;;GAEG;AACH,MAAM,CAAC,MAAM,kBAAkB,GAAG;;;;;;;;mDAQiB,CAAC;AAEpD;;GAEG;AACH,MAAM,CAAC,MAAM,eAAe,GAAG;;;;;;;;6CAQc,CAAC;AAE9C;;GAEG;AACH,MAAM,CAAC,MAAM,cAAc,GAAG;;;;;;;;iCAQG,CAAC;AAElC;;;GAGG;AACH,MAAM,UAAU,eAAe,CAAC,KAAwB;IACtD,kDAAkD;IAClD,gFAAgF;IAChF,OAAO,oBAAoB,CAAC;AAC9B,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,iBAAiB,CAAC,YAAoB;IACpD,QAAQ,YAAY,EAAE,CAAC;QACrB,KAAK,cAAc;YACjB,OAAO,mBAAmB,CAAC;QAC7B,KAAK,cAAc;YACjB,OAAO,iBAAiB,CAAC;QAC3B,KAAK,UAAU;YACb,OAAO,eAAe,CAAC;QACzB,KAAK,aAAa;YAChB,OAAO,kBAAkB,CAAC;QAC5B,KAAK,UAAU;YACb,OAAO,eAAe,CAAC;QACzB,KAAK,SAAS;YACZ,OAAO,cAAc,CAAC;QACxB;YACE,OAAO,EAAE,CAAC;IACd,CAAC;AACH,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,mBAAmB,CAAC,SAAiB,EAAE,QAAkB,EAAE,KAAa;IACtF,MAAM,OAAO,GAAG,QAAQ,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;IACnD,MAAM,SAAS,GAAG,QAAQ,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,eAAe,QAAQ,CAAC,MAAM,GAAG,EAAE,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC;IAEzF,6DAA6D;IAC7D,MAAM,gBAAgB,GAAG,SAAS,GAAG,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,GAAG,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;IAElF,OAAO;gBACO,SAAS,IAAI,SAAS,GAAG,GAAG,CAAC,CAAC,CAAC,4CAA4C,CAAC,CAAC,CAAC,EAAE;;;IAG5F,OAAO,GAAG,SAAS;;;EAGrB,KAAK;;;;YAIK,gBAAgB;;;;;;;;;kEASsC,gBAAgB;qDAC7B,CAAC;AACtD,CAAC"}
package/dist/types.d.ts ADDED
@@ -0,0 +1,125 @@
+ /**
+ * RLM Analyzer Type Definitions
+ * Based on MIT CSAIL Recursive Language Models research (arXiv:2512.24601v1)
+ */
+ /** Analysis types supported by the analyzer */
+ export type AnalysisType = 'architecture' | 'dependencies' | 'security' | 'performance' | 'refactor' | 'summary' | 'custom';
+ export interface RLMConfig {
+ /** Root model for orchestration (default: gemini-3-flash-preview) */
+ rootModel: string;
+ /** Sub-model for recursive calls (default: gemini-3-flash-preview) */
+ subModel: string;
+ /** Maximum recursion depth for sub-LLM calls */
+ maxRecursionDepth: number;
+ /** Maximum conversation turns before forcing completion */
+ maxTurns: number;
+ /** Timeout in milliseconds for entire analysis */
+ timeoutMs: number;
+ /** Maximum sub-LLM calls per session */
+ maxSubCalls: number;
+ /** Analysis mode */
+ mode: 'code-analysis' | 'document-qa' | 'education';
+ }
+ export interface RLMContext {
+ /** Files loaded as environment variables */
+ files: Record<string, string>;
+ /** Additional context variables */
+ variables: Record<string, unknown>;
+ /** Analysis mode */
+ mode: RLMConfig['mode'];
+ }
+ export interface RLMTurn {
+ /** Turn number */
+ turn: number;
+ /** Model's response text */
+ response: string;
+ /** Code extracted from response */
+ code: string | null;
+ /** Execution result */
+ executionResult: string | null;
+ /** Error if any */
+ error: string | null;
+ /** Timestamp */
+ timestamp: number;
+ }
+ export interface RLMResult {
+ /** Whether analysis completed successfully */
+ success: boolean;
+ /** Final answer extracted via FINAL() marker */
+ answer: string | null;
+ /** All conversation turns */
+ turns: RLMTurn[];
+ /** Total execution time in ms */
+ executionTimeMs: number;
+ /** Number of sub-LLM calls made */
+ subCallCount: number;
+ /** Error message if failed */
+ error?: string;
+ }
+ export interface CodeAnalysisOptions {
+ /** Directory to analyze */
+ directory: string;
+ /** File patterns to include (glob) */
+ include?: string[];
+ /** File patterns to exclude (glob) */
+ exclude?: string[];
+ /** Custom query for analysis */
+ query?: string;
+ /** Analysis type */
+ analysisType?: AnalysisType;
+ /** Callback for turn updates */
+ onTurnComplete?: (turn: RLMTurn) => void;
+ /** Verbose output */
+ verbose?: boolean;
+ /** Gemini model to use */
+ model?: string;
+ }
+ export interface CodeAnalysisResult extends RLMResult {
+ /** Files that were analyzed */
+ filesAnalyzed: string[];
+ /** Analysis type performed */
+ analysisType: AnalysisType;
+ }
+ export interface ExecutorResult {
+ /** Whether execution succeeded */
+ success: boolean;
+ /** Output from execution */
+ output: string;
+ /** Error message if failed */
+ error?: string;
+ }
+ export interface SubLLMOptions {
+ /** Model to use */
+ model?: string;
+ /** Temperature for generation */
+ temperature?: number;
+ /** Maximum tokens in response */
+ maxTokens?: number;
+ }
+ /** Markers for extracting final answers */
+ export declare const FINAL_MARKERS: {
+ readonly FINAL: "FINAL(";
+ readonly FINAL_VAR: "FINAL_VAR(";
+ };
+ /**
+ * Get default RLM configuration with dynamically resolved models
+ * This function respects the model priority chain:
+ * 1. Environment variables (RLM_DEFAULT_MODEL)
+ * 2. Config file (~/.rlm-analyzer/config.json)
+ * 3. Built-in defaults
+ *
+ * @param modelOverride - Optional model to use instead of resolved default
+ * @returns RLMConfig with resolved model settings
+ */
+ export declare function getDefaultRLMConfig(modelOverride?: string): RLMConfig;
+ /**
+ * @deprecated Use `getDefaultRLMConfig()` instead for dynamic model resolution.
+ * This static constant uses hardcoded model IDs and won't respect
+ * environment variables or config file settings.
+ */
+ export declare const DEFAULT_CONFIG: RLMConfig;
+ /** File extensions to analyze by default */
+ export declare const CODE_EXTENSIONS: string[];
+ /** Directories to ignore by default */
+ export declare const IGNORE_DIRS: string[];
+ //# sourceMappingURL=types.d.ts.map
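
To make the shape of this API concrete, the following sketch builds a `CodeAnalysisOptions` value and a turn callback strictly against the interfaces declared above; `analyzeCodebase` is a hypothetical entry point (the actual analyzer function lives in a file not included in this diff).

```ts
// Sketch against the declared types only; `analyzeCodebase` is hypothetical.
import type { CodeAnalysisOptions, CodeAnalysisResult, RLMTurn } from 'rlm-analyzer/types';

const options: CodeAnalysisOptions = {
  directory: './my-project',
  include: ['src/**/*.ts'],
  exclude: ['**/*.test.ts'],
  analysisType: 'architecture',
  verbose: true,
  // Called once per root-model turn with the extracted code and execution result.
  onTurnComplete: (turn: RLMTurn) => {
    console.log(`turn ${turn.turn}: ${turn.code ? 'code executed' : 'no code emitted'}`);
  },
};

// Hypothetical runner; only the result handling below is grounded in RLMResult.
declare function analyzeCodebase(opts: CodeAnalysisOptions): Promise<CodeAnalysisResult>;

const result = await analyzeCodebase(options);
if (result.success && result.answer) {
  console.log(`answer after ${result.subCallCount} sub-LLM calls:\n${result.answer}`);
} else {
  console.error(result.error ?? 'analysis did not produce a FINAL() answer');
}
```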
package/dist/types.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAIH,+CAA+C;AAC/C,MAAM,MAAM,YAAY,GAAG,cAAc,GAAG,cAAc,GAAG,UAAU,GAAG,aAAa,GAAG,UAAU,GAAG,SAAS,GAAG,QAAQ,CAAC;AAE5H,MAAM,WAAW,SAAS;IACxB,qEAAqE;IACrE,SAAS,EAAE,MAAM,CAAC;IAClB,sEAAsE;IACtE,QAAQ,EAAE,MAAM,CAAC;IACjB,gDAAgD;IAChD,iBAAiB,EAAE,MAAM,CAAC;IAC1B,2DAA2D;IAC3D,QAAQ,EAAE,MAAM,CAAC;IACjB,kDAAkD;IAClD,SAAS,EAAE,MAAM,CAAC;IAClB,wCAAwC;IACxC,WAAW,EAAE,MAAM,CAAC;IACpB,oBAAoB;IACpB,IAAI,EAAE,eAAe,GAAG,aAAa,GAAG,WAAW,CAAC;CACrD;AAED,MAAM,WAAW,UAAU;IACzB,4CAA4C;IAC5C,KAAK,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9B,mCAAmC;IACnC,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACnC,oBAAoB;IACpB,IAAI,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC;CACzB;AAED,MAAM,WAAW,OAAO;IACtB,kBAAkB;IAClB,IAAI,EAAE,MAAM,CAAC;IACb,4BAA4B;IAC5B,QAAQ,EAAE,MAAM,CAAC;IACjB,mCAAmC;IACnC,IAAI,EAAE,MAAM,GAAG,IAAI,CAAC;IACpB,uBAAuB;IACvB,eAAe,EAAE,MAAM,GAAG,IAAI,CAAC;IAC/B,mBAAmB;IACnB,KAAK,EAAE,MAAM,GAAG,IAAI,CAAC;IACrB,gBAAgB;IAChB,SAAS,EAAE,MAAM,CAAC;CACnB;AAED,MAAM,WAAW,SAAS;IACxB,8CAA8C;IAC9C,OAAO,EAAE,OAAO,CAAC;IACjB,gDAAgD;IAChD,MAAM,EAAE,MAAM,GAAG,IAAI,CAAC;IACtB,6BAA6B;IAC7B,KAAK,EAAE,OAAO,EAAE,CAAC;IACjB,iCAAiC;IACjC,eAAe,EAAE,MAAM,CAAC;IACxB,mCAAmC;IACnC,YAAY,EAAE,MAAM,CAAC;IACrB,8BAA8B;IAC9B,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,WAAW,mBAAmB;IAClC,2BAA2B;IAC3B,SAAS,EAAE,MAAM,CAAC;IAClB,sCAAsC;IACtC,OAAO,CAAC,EAAE,MAAM,EAAE,CAAC;IACnB,sCAAsC;IACtC,OAAO,CAAC,EAAE,MAAM,EAAE,CAAC;IACnB,gCAAgC;IAChC,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,oBAAoB;IACpB,YAAY,CAAC,EAAE,YAAY,CAAC;IAC5B,gCAAgC;IAChC,cAAc,CAAC,EAAE,CAAC,IAAI,EAAE,OAAO,KAAK,IAAI,CAAC;IACzC,qBAAqB;IACrB,OAAO,CAAC,EAAE,OAAO,CAAC;IAClB,0BAA0B;IAC1B,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,WAAW,kBAAmB,SAAQ,SAAS;IACnD,+BAA+B;IAC/B,aAAa,EAAE,MAAM,EAAE,CAAC;IACxB,8BAA8B;IAC9B,YAAY,EAAE,YAAY,CAAC;CAC5B;AAED,MAAM,WAAW,cAAc;IAC7B,kCAAkC;IAClC,OAAO,EAAE,OAAO,CAAC;IACjB,4BAA4B;IAC5B,MAAM,EAAE,MAAM,CAAC;IACf,8BAA8B;IAC9B,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,WAAW,aAAa;IAC5B,mBAAmB;IACnB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,iCAAiC;IACjC,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,iCAAiC;IACjC,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED,2CAA2C;AAC3C,eAAO,MAAM,aAAa;;;CAGhB,CAAC;AAEX;;;;;;;;;GASG;AACH,wBAAgB,mBAAmB,CAAC,aAAa,CAAC,EAAE,MAAM,GAAG,SAAS,CAWrE;AAED;;;;GAIG;AACH,eAAO,MAAM,cAAc,EAAE,SAQ5B,CAAC;AAEF,4CAA4C;AAC5C,eAAO,MAAM,eAAe,UAiB3B,CAAC;AAEF,uCAAuC;AACvC,eAAO,MAAM,WAAW,UAmBvB,CAAC"}
package/dist/types.js ADDED
@@ -0,0 +1,87 @@
+ /**
+ * RLM Analyzer Type Definitions
+ * Based on MIT CSAIL Recursive Language Models research (arXiv:2512.24601v1)
+ */
+ import { getDefaultModel } from './models.js';
+ /** Markers for extracting final answers */
+ export const FINAL_MARKERS = {
+ FINAL: 'FINAL(',
+ FINAL_VAR: 'FINAL_VAR(',
+ };
+ /**
+ * Get default RLM configuration with dynamically resolved models
+ * This function respects the model priority chain:
+ * 1. Environment variables (RLM_DEFAULT_MODEL)
+ * 2. Config file (~/.rlm-analyzer/config.json)
+ * 3. Built-in defaults
+ *
+ * @param modelOverride - Optional model to use instead of resolved default
+ * @returns RLMConfig with resolved model settings
+ */
+ export function getDefaultRLMConfig(modelOverride) {
+ const model = modelOverride || getDefaultModel();
+ return {
+ rootModel: model,
+ subModel: model,
+ maxRecursionDepth: 3,
+ maxTurns: 10,
+ timeoutMs: 300000, // 5 minutes
+ maxSubCalls: 15,
+ mode: 'code-analysis',
+ };
+ }
+ /**
+ * @deprecated Use `getDefaultRLMConfig()` instead for dynamic model resolution.
+ * This static constant uses hardcoded model IDs and won't respect
+ * environment variables or config file settings.
+ */
+ export const DEFAULT_CONFIG = {
+ rootModel: 'gemini-3-flash-preview',
+ subModel: 'gemini-3-flash-preview',
+ maxRecursionDepth: 3,
+ maxTurns: 10,
+ timeoutMs: 300000, // 5 minutes
+ maxSubCalls: 15,
+ mode: 'code-analysis',
+ };
+ /** File extensions to analyze by default */
+ export const CODE_EXTENSIONS = [
+ '.ts', '.tsx', '.js', '.jsx', '.mjs', '.cjs',
+ '.py', '.pyw',
+ '.java', '.kt', '.scala',
+ '.go',
+ '.rs',
+ '.c', '.cpp', '.cc', '.h', '.hpp',
+ '.cs',
+ '.rb',
+ '.php',
+ '.swift',
+ '.vue', '.svelte',
+ '.json', '.yaml', '.yml', '.toml',
+ '.md', '.mdx',
+ '.sql',
+ '.sh', '.bash', '.zsh',
+ '.dockerfile', '.docker-compose.yml',
+ ];
+ /** Directories to ignore by default */
+ export const IGNORE_DIRS = [
+ 'node_modules',
+ 'dist',
+ 'build',
+ '.git',
+ '.svn',
+ 'coverage',
+ '__pycache__',
+ '.pytest_cache',
+ 'venv',
+ '.venv',
+ 'env',
+ '.env',
+ 'vendor',
+ 'target',
+ '.next',
+ '.nuxt',
+ '.output',
+ '.cache',
+ ];
+ //# sourceMappingURL=types.js.map
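
The `FINAL_MARKERS` constant suggests that completion is detected by scanning generated code for `FINAL(` / `FINAL_VAR(`. The extractor itself is not part of this diff, so the sketch below is only an assumption about how these markers and `getDefaultRLMConfig()` might be consumed.

```ts
// Assumption: the orchestrator scans generated code for these markers to detect
// completion; the real extraction logic is not included in this diff.
import { FINAL_MARKERS, getDefaultRLMConfig } from 'rlm-analyzer/types';

function containsFinalCall(generatedCode: string): boolean {
  return (
    generatedCode.includes(FINAL_MARKERS.FINAL) ||
    generatedCode.includes(FINAL_MARKERS.FINAL_VAR)
  );
}

// Resolved via RLM_DEFAULT_MODEL / ~/.rlm-analyzer/config.json / built-in default.
const config = getDefaultRLMConfig();
// Explicit override; 'my-model-id' is a placeholder, not a recommended model.
const pinned = getDefaultRLMConfig('my-model-id');

console.log(containsFinalCall('FINAL("done")')); // true
console.log(config.maxTurns, pinned.rootModel);
```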
package/dist/types.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAyG9C,2CAA2C;AAC3C,MAAM,CAAC,MAAM,aAAa,GAAG;IAC3B,KAAK,EAAE,QAAQ;IACf,SAAS,EAAE,YAAY;CACf,CAAC;AAEX;;;;;;;;;GASG;AACH,MAAM,UAAU,mBAAmB,CAAC,aAAsB;IACxD,MAAM,KAAK,GAAG,aAAa,IAAI,eAAe,EAAE,CAAC;IACjD,OAAO;QACL,SAAS,EAAE,KAAK;QAChB,QAAQ,EAAE,KAAK;QACf,iBAAiB,EAAE,CAAC;QACpB,QAAQ,EAAE,EAAE;QACZ,SAAS,EAAE,MAAM,EAAE,YAAY;QAC/B,WAAW,EAAE,EAAE;QACf,IAAI,EAAE,eAAe;KACtB,CAAC;AACJ,CAAC;AAED;;;;GAIG;AACH,MAAM,CAAC,MAAM,cAAc,GAAc;IACvC,SAAS,EAAE,wBAAwB;IACnC,QAAQ,EAAE,wBAAwB;IAClC,iBAAiB,EAAE,CAAC;IACpB,QAAQ,EAAE,EAAE;IACZ,SAAS,EAAE,MAAM,EAAE,YAAY;IAC/B,WAAW,EAAE,EAAE;IACf,IAAI,EAAE,eAAe;CACtB,CAAC;AAEF,4CAA4C;AAC5C,MAAM,CAAC,MAAM,eAAe,GAAG;IAC7B,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM;IAC5C,KAAK,EAAE,MAAM;IACb,OAAO,EAAE,KAAK,EAAE,QAAQ;IACxB,KAAK;IACL,KAAK;IACL,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,IAAI,EAAE,MAAM;IACjC,KAAK;IACL,KAAK;IACL,MAAM;IACN,QAAQ;IACR,MAAM,EAAE,SAAS;IACjB,OAAO,EAAE,OAAO,EAAE,MAAM,EAAE,OAAO;IACjC,KAAK,EAAE,MAAM;IACb,MAAM;IACN,KAAK,EAAE,OAAO,EAAE,MAAM;IACtB,aAAa,EAAE,qBAAqB;CACrC,CAAC;AAEF,uCAAuC;AACvC,MAAM,CAAC,MAAM,WAAW,GAAG;IACzB,cAAc;IACd,MAAM;IACN,OAAO;IACP,MAAM;IACN,MAAM;IACN,UAAU;IACV,aAAa;IACb,eAAe;IACf,MAAM;IACN,OAAO;IACP,KAAK;IACL,MAAM;IACN,QAAQ;IACR,QAAQ;IACR,OAAO;IACP,OAAO;IACP,SAAS;IACT,QAAQ;CACT,CAAC"}
package/package.json ADDED
@@ -0,0 +1,77 @@
+ {
+ "name": "rlm-analyzer",
+ "version": "1.1.0",
+ "description": "Recursive Language Model (RLM) code analyzer - Analyze any codebase with AI that can process 100x beyond context limits",
+ "type": "module",
+ "main": "dist/index.js",
+ "types": "dist/index.d.ts",
+ "exports": {
+ ".": {
+ "types": "./dist/index.d.ts",
+ "import": "./dist/index.js"
+ },
+ "./models": {
+ "types": "./dist/models.d.ts",
+ "import": "./dist/models.js"
+ },
+ "./config": {
+ "types": "./dist/config.d.ts",
+ "import": "./dist/config.js"
+ },
+ "./types": {
+ "types": "./dist/types.d.ts",
+ "import": "./dist/types.js"
+ }
+ },
+ "bin": {
+ "rlm": "dist/cli.js",
+ "rlm-analyzer": "dist/cli.js",
+ "rlm-mcp": "dist/mcp-server.js",
+ "rlm-analyzer-mcp": "dist/mcp-server.js"
+ },
+ "scripts": {
+ "clean": "rm -rf dist",
+ "build": "npm run clean && tsc && chmod +x dist/cli.js dist/mcp-server.js",
+ "dev": "tsc --watch",
+ "start": "node dist/cli.js",
+ "prepublishOnly": "npm run build"
+ },
+ "keywords": [
40
+ "ai",
41
+ "code-analysis",
42
+ "gemini",
43
+ "llm",
44
+ "recursive",
45
+ "static-analysis",
46
+ "architecture",
47
+ "refactoring",
48
+ "security",
49
+ "cli",
50
+ "mcp",
51
+ "model-context-protocol",
52
+ "claude-code",
53
+ "cursor"
54
+ ],
55
+ "author": "BzLabs",
56
+ "license": "MIT",
57
+ "repository": {
58
+ "type": "git",
59
+ "url": "https://github.com/bzlabs/rlm-analyzer"
60
+ },
61
+ "engines": {
62
+ "node": ">=18.0.0"
63
+ },
64
+ "dependencies": {
65
+ "@google/genai": "^1.0.0",
66
+ "@modelcontextprotocol/sdk": "^1.0.0",
67
+ "dotenv": "^16.5.0"
68
+ },
69
+ "devDependencies": {
70
+ "@types/node": "^20.10.0",
71
+ "typescript": "^5.3.0"
72
+ },
73
+ "files": [
74
+ "dist",
75
+ "README.md"
76
+ ]
77
+ }
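
Given the `exports` and `bin` fields above, consumers can run the published CLI entry points (`rlm`, `rlm-analyzer`, `rlm-mcp`, `rlm-analyzer-mcp`) or import the typed subpaths directly. The snippet below uses only export paths declared in this manifest; which symbols the root entry re-exports is not shown in this diff.

```ts
// ESM-only package ("type": "module", Node >= 18). Subpath import matches "exports"."./types".
import { getDefaultRLMConfig, CODE_EXTENSIONS, IGNORE_DIRS } from 'rlm-analyzer/types';
// The root entry ('rlm-analyzer') also exists per "exports", but its re-exports are not part of this diff.

const config = getDefaultRLMConfig();
console.log(`root model: ${config.rootModel}, timeout: ${config.timeoutMs / 1000}s`);
console.log(`${CODE_EXTENSIONS.length} extensions scanned, ${IGNORE_DIRS.length} dirs ignored by default`);
```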