@superadnim/rlm-pro 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,139 @@
+ # rlm-codebase
+
+ Analyze codebases using Recursive Language Models (RLMs), which let LLMs handle near-infinite context through recursive decomposition.
+
+ Based on the [RLM research](https://arxiv.org/abs/2512.24601) from the MIT OASYS lab.
+
+ ## Installation
+
+ ```bash
+ # Using npx (recommended - no installation needed)
+ npx rlm-codebase ./my-project -q "Explain the architecture"
+
+ # Or install globally
+ npm install -g rlm-codebase
+ ```
+
+ ### Prerequisites
+
+ - **Node.js 18+**
+ - **uv** (Python package manager) - installation instructions are printed if it is missing
+ - **OpenAI API key** (set as an environment variable)
+
+ ```bash
+ # Install uv if not already installed
+ curl -LsSf https://astral.sh/uv/install.sh | sh
+
+ # Set your API key
+ export OPENAI_API_KEY="your-key"
+ ```
+
+ ## Usage
+
+ ### Command Line
+
+ ```bash
+ # Basic usage
+ npx rlm-codebase ./my-project -q "Explain the architecture"
+
+ # Get JSON output (for programmatic use)
+ npx rlm-codebase ./my-project -q "List all API endpoints" --json
+
+ # Use a specific model
+ npx rlm-codebase ./my-project -q "Find potential bugs" -m gpt-4o
+
+ # Use the Anthropic backend
+ npx rlm-codebase ./my-project -q "Review this code" -b anthropic
+
+ # Verbose output for debugging
+ npx rlm-codebase ./my-project -q "How does authentication work?" -v
+
+ # Only build the context (no LLM call)
+ npx rlm-codebase ./my-project -q "" --context-only
+ ```
+
+ ### Programmatic Usage (Node.js)
+
+ ```javascript
+ const { analyzeCodebase } = require('rlm-codebase');
+
+ async function main() {
+   const result = await analyzeCodebase('./my-project', {
+     query: 'Summarize the codebase structure',
+     backend: 'openai',
+     model: 'gpt-4o',
+   });
+
+   console.log(result.response);
+   console.log('Execution time:', result.execution_time, 'seconds');
+ }
+
+ main().catch(console.error);
+ ```
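+
+ In the non-JSON fallback path the wrapper (see `src/executor.js`) returns `{ response, stderr }` built from the Python CLI's raw text output; otherwise the fields come from the CLI's JSON output. A defensive sketch:
+
+ ```javascript
+ const { analyzeCodebase } = require('rlm-codebase');
+
+ async function safeAnalyze(path, query) {
+   const result = await analyzeCodebase(path, { query });
+   // stderr is only present on the non-JSON fallback path.
+   if (result.stderr) console.warn(result.stderr);
+   return result.response;
+ }
+ ```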
+
+ ## Options
+
+ | Option | Description | Default |
+ |--------|-------------|---------|
+ | `-q, --query <query>` | Question or task to perform (required) | - |
+ | `-b, --backend <backend>` | LLM backend (openai, anthropic, etc.) | `openai` |
+ | `-m, --model <model>` | Model name | `gpt-4o` |
+ | `-e, --env <env>` | Execution environment (local, docker, modal, prime) | `local` |
+ | `--max-depth <n>` | Maximum recursion depth | `1` |
+ | `--max-iterations <n>` | Maximum iterations | `30` |
+ | `--max-file-size <bytes>` | Max size per file (bytes) | `100000` |
+ | `--max-total-size <bytes>` | Max total context size (bytes) | `500000` |
+ | `--no-tree` | Exclude directory tree from context | - |
+ | `--json` | Output as JSON | - |
+ | `-v, --verbose` | Enable verbose output | - |
+ | `--context-only` | Only output the built context | - |
+
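+ When calling the library from Node.js, each flag maps to a camelCase option (see `src/index.js`), e.g. `--max-depth` becomes `maxDepth` and `--no-tree` becomes `tree: false`. Numeric limits are forwarded to the CLI unchanged, so pass them as strings. A minimal sketch (the option values shown are illustrative):
+
+ ```javascript
+ const { analyzeCodebase } = require('rlm-codebase');
+
+ async function run() {
+   const result = await analyzeCodebase('./my-project', {
+     query: 'Explain the architecture', // required
+     backend: 'openai',                 // -b
+     model: 'gpt-4o',                   // -m
+     maxDepth: '2',                     // --max-depth
+     maxIterations: '30',               // --max-iterations
+     tree: false,                       // --no-tree
+     verbose: true,                     // -v
+   });
+   console.log(result.response);
+ }
+
+ run().catch(console.error);
+ ```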
+
+ ## Environment Variables
+
+ | Variable | Description | Required |
+ |----------|-------------|----------|
+ | `OPENAI_API_KEY` | OpenAI API key | Yes, for the OpenAI backend |
+ | `ANTHROPIC_API_KEY` | Anthropic API key | Yes, for the Anthropic backend |
+
+ ## How It Works
+
+ Recursive Language Models (RLMs) let LLMs handle near-infinite context by:
+
+ 1. **Context Building**: Intelligently reads and formats your codebase
+ 2. **Recursive Decomposition**: Breaks complex queries into manageable sub-tasks
+ 3. **Code Execution**: Runs Python code in a sandboxed environment to explore and analyze
+ 4. **Iterative Refinement**: Continues until a complete answer is found
+
+ This allows answering complex questions about large codebases that would exceed normal context limits, as the sketch below illustrates.
+
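+ As a minimal illustration of the wrapper side of this loop (a sketch only; the recursion itself happens inside the Python `rlm` package), you can decompose a broad question into sub-queries yourself and feed each one to `analyzeCodebase`:
+
+ ```javascript
+ const { analyzeCodebase } = require('rlm-codebase');
+
+ // Hypothetical sub-queries; in the real RLM loop the model derives
+ // these itself during recursive decomposition.
+ const subQueries = [
+   'List the top-level modules and their responsibilities',
+   'How do the modules communicate with each other?',
+ ];
+
+ async function main() {
+   for (const query of subQueries) {
+     const result = await analyzeCodebase('./my-project', { query });
+     console.log(`# ${query}\n${result.response}\n`);
+   }
+ }
+
+ main().catch(console.error);
+ ```
+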
+ ## Examples
+
+ ### Architecture Analysis
+ ```bash
+ npx rlm-codebase ./backend -q "Describe the system architecture and key design patterns"
+ ```
+
+ ### Bug Finding
+ ```bash
+ npx rlm-codebase ./src -q "Find potential security vulnerabilities" --json
+ ```
+
+ ### Documentation Generation
+ ```bash
+ npx rlm-codebase ./api -q "Generate API documentation for all endpoints"
+ ```
+
+ ### Code Review
+ ```bash
+ npx rlm-codebase ./feature-branch -q "Review this code for best practices"
+ ```
+
+ ## License
+
+ MIT
+
+ ## Links
+
+ - [RLM Research Paper](https://arxiv.org/abs/2512.24601)
+ - [Python Package (PyPI)](https://pypi.org/project/rlm/)
+ - [GitHub Repository](https://github.com/mit-oasys/rlm)
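package/bin/rlm-codebase.js ADDED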
@@ -0,0 +1,58 @@
+ #!/usr/bin/env node
+
+ /**
+  * RLM Codebase CLI - Node.js wrapper for the Python RLM library
+  *
+  * Analyzes codebases using Recursive Language Models, enabling LLMs
+  * to handle near-infinite context through recursive decomposition.
+  *
+  * Usage:
+  *   npx rlm-codebase ./my-project -q "Explain the architecture"
+  *   npx rlm-codebase ./my-project -q "List all API endpoints" --json
+  */
+
+ const { program } = require('commander');
+ const { analyzeCodebase, getVersion } = require('../src/index.js');
+
+ program
+   .name('rlm-codebase')
+   .description('Analyze codebases using Recursive Language Models')
+   .version(getVersion())
+   .argument('<path>', 'Path to codebase directory or file')
+   .requiredOption('-q, --query <query>', 'Question or task to perform on the codebase')
+   .option('-b, --backend <backend>', 'LLM backend (openai, anthropic, etc.)', 'openai')
+   .option('-m, --model <model>', 'Model name to use')
+   .option('-e, --env <env>', 'Execution environment (local, docker, modal, prime)', 'local')
+   .option('--max-depth <n>', 'Maximum recursion depth', '1')
+   .option('--max-iterations <n>', 'Maximum iterations', '30')
+   .option('--max-file-size <bytes>', 'Maximum size per file', '100000')
+   .option('--max-total-size <bytes>', 'Maximum total context size', '500000')
+   .option('--no-tree', 'Exclude directory tree from context')
+   .option('--json', 'Output as JSON')
+   .option('-v, --verbose', 'Enable verbose output')
+   .option('--context-only', 'Only output the built context')
+   .action(async (path, options) => {
+     try {
+       const result = await analyzeCodebase(path, options);
+
+       if (options.json) {
+         console.log(JSON.stringify(result, null, 2));
+       } else if (options.contextOnly) {
+         console.log(result.context || result.response);
+       } else {
+         console.log(result.response);
+       }
+
+       process.exit(0);
+     } catch (error) {
+       // Always print the message; include the stack only in verbose mode.
+       console.error('Error:', error.message);
+       if (options.verbose) {
+         console.error(error.stack);
+       }
+       process.exit(1);
+     }
+   });
+
+ program.parse();
package/package.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "name": "@superadnim/rlm-pro",
+   "version": "1.0.0",
+   "description": "RLM PRO - Enterprise-grade Recursive Language Models for infinite context code analysis. Analyze entire codebases with AI.",
+   "main": "src/index.js",
+   "bin": {
+     "rlm-pro": "./bin/rlm-codebase.js",
+     "rlm-codebase": "./bin/rlm-codebase.js"
+   },
+   "scripts": {
+     "postinstall": "node src/python-manager.js check",
+     "test": "node bin/rlm-codebase.js --version"
+   },
+   "keywords": [
+     "llm",
+     "recursive",
+     "language-model",
+     "codebase",
+     "analysis",
+     "context",
+     "ai",
+     "openai",
+     "anthropic",
+     "claude",
+     "gemini",
+     "enterprise",
+     "code-review",
+     "infinite-context"
+   ],
+   "author": "Columbia Group Labs <labs@columbiagroup.com>",
+   "license": "MIT",
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/cg-labs/RLM-PRO.git"
+   },
+   "bugs": {
+     "url": "https://github.com/cg-labs/RLM-PRO/issues"
+   },
+   "homepage": "https://github.com/cg-labs/RLM-PRO#readme",
+   "engines": {
+     "node": ">=18"
+   },
+   "dependencies": {
+     "commander": "^12.0.0"
+   },
+   "files": [
+     "bin/",
+     "src/",
+     "README.md"
+   ],
+   "publishConfig": {
+     "access": "public"
+   }
+ }
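package/src/executor.js ADDED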
@@ -0,0 +1,120 @@
+ /**
+  * Python process executor for RLM
+  *
+  * Spawns Python processes to run the RLM CLI.
+  * Tries multiple methods: uvx (for published packages), uv run (for local dev),
+  * and direct python (fallback).
+  * Uses execFile for security (avoids shell injection).
+  */
+
+ const { execFile } = require('child_process');
+ const { promisify } = require('util');
+
+ const execFileAsync = promisify(execFile);
+
+ /**
+  * Try to run a command; return the result, or null if it failed.
+  */
+ async function tryCommand(cmd, args, options) {
+   try {
+     return await execFileAsync(cmd, args, options);
+   } catch {
+     return null;
+   }
+ }
+
+ /**
+  * Run the RLM CLI with the given arguments.
+  * Tries multiple execution methods in order of preference.
+  *
+  * @param {string[]} args - Arguments to pass to the rlm-codebase CLI
+  * @returns {Promise<Object>} Parsed JSON result from the CLI
+  */
+ async function runRlm(args) {
+   const execOptions = {
+     maxBuffer: 50 * 1024 * 1024, // 50 MB buffer for large outputs
+     timeout: 600000, // 10-minute timeout
+     env: {
+       ...process.env,
+       PYTHONIOENCODING: 'utf-8',
+     },
+   };
+
+   let result = null;
+
+   // Method 1: uvx (for published packages on PyPI)
+   result = await tryCommand('uvx', ['rlm', ...args], execOptions);
+
+   // Method 2: uv run rlm-codebase (for local development with pyproject.toml)
+   if (!result) {
+     result = await tryCommand('uv', ['run', 'rlm-codebase', ...args], execOptions);
+   }
+
+   // Method 3: direct rlm-codebase command (if installed on PATH)
+   if (!result) {
+     result = await tryCommand('rlm-codebase', args, execOptions);
+   }
+
+   // Method 4: python3 -m rlm.cli (fallback)
+   if (!result) {
+     result = await tryCommand('python3', ['-m', 'rlm.cli', ...args], execOptions);
+   }
+
+   // Method 5: python -m rlm.cli (Windows fallback)
+   if (!result) {
+     result = await tryCommand('python', ['-m', 'rlm.cli', ...args], execOptions);
+   }
+
+   if (!result) {
+     throw new Error(
+       'Could not run RLM CLI. Please ensure:\n' +
+       '1. uv is installed: curl -LsSf https://astral.sh/uv/install.sh | sh\n' +
+       '2. The rlm package is installed: uv pip install rlm\n' +
+       '   Or for local development: cd <rlm-repo> && uv pip install -e .\n' +
+       '3. Restart your terminal after installation'
+     );
+   }
+
+   const { stdout, stderr } = result;
+
+   // Check for API key errors in stderr
+   if (stderr && stderr.includes('API_KEY')) {
+     throw new Error(
+       'API key not configured. Set the appropriate environment variable:\n' +
+       '  export OPENAI_API_KEY="your-key"     # For OpenAI\n' +
+       '  export ANTHROPIC_API_KEY="your-key"  # For Anthropic'
+     );
+   }
+
+   // Try to parse stdout as JSON
+   try {
+     return JSON.parse(stdout);
+   } catch {
+     // Not JSON; wrap the raw text as a response object
+     return {
+       response: stdout.trim(),
+       stderr: stderr || undefined,
+     };
+   }
+ }
+
+ /**
+  * Check whether the RLM setup is working.
+  * @returns {Promise<boolean>}
+  */
+ async function checkSetup() {
+   try {
+     await execFileAsync('uvx', ['rlm-codebase', '--version']);
+     return true;
+   } catch {
+     return false;
+   }
+ }
+
+ module.exports = {
+   runRlm,
+   checkSetup,
+ };
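+
+ // Example (a sketch): calling the executor directly. runRlm takes the raw
+ // CLI argument array and resolves with the parsed result object. This
+ // assumes the Python `rlm` CLI is reachable via one of the methods above.
+ //
+ //   const { runRlm } = require('./executor.js');
+ //   runRlm(['./my-project', '-q', 'Explain the architecture', '--json'])
+ //     .then(result => console.log(result.response))
+ //     .catch(err => console.error(err.message));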
package/src/index.js ADDED
@@ -0,0 +1,97 @@
+ /**
+  * RLM Codebase - Main module
+  *
+  * Provides programmatic access to the RLM codebase analyzer.
+  */
+
+ const { runRlm, checkSetup } = require('./executor.js');
+ const { checkPythonSetup } = require('./python-manager.js');
+
+ /**
+  * Analyze a codebase using Recursive Language Models.
+  *
+  * @param {string} path - Path to codebase directory or file
+  * @param {Object} options - Analysis options
+  * @param {string} options.query - Question or task to perform (required)
+  * @param {string} [options.backend='openai'] - LLM backend
+  * @param {string} [options.model] - Model name
+  * @param {string} [options.env='local'] - Execution environment
+  * @param {string} [options.maxDepth='1'] - Maximum recursion depth
+  * @param {string} [options.maxIterations='30'] - Maximum iterations
+  * @param {string} [options.maxFileSize='100000'] - Max file size in bytes
+  * @param {string} [options.maxTotalSize='500000'] - Max total context size in bytes
+  * @param {boolean} [options.tree=true] - Include directory tree
+  * @param {boolean} [options.json=false] - Request JSON output
+  * @param {boolean} [options.verbose=false] - Enable verbose output
+  * @param {boolean} [options.contextOnly=false] - Only build context
+  * @returns {Promise<Object>} Analysis result
+  */
+ async function analyzeCodebase(path, options = {}) {
+   if (options.query == null) {
+     throw new Error('options.query is required');
+   }
+
+   // Ensure Python/uv is set up
+   await checkPythonSetup();
+
+   // Build arguments for the Python CLI
+   const args = [path, '-q', options.query];
+
+   if (options.backend) {
+     args.push('-b', options.backend);
+   }
+
+   if (options.model) {
+     args.push('-m', options.model);
+   }
+
+   if (options.env) {
+     args.push('-e', options.env);
+   }
+
+   if (options.maxDepth) {
+     args.push('--max-depth', options.maxDepth);
+   }
+
+   if (options.maxIterations) {
+     args.push('--max-iterations', options.maxIterations);
+   }
+
+   if (options.maxFileSize) {
+     args.push('--max-file-size', options.maxFileSize);
+   }
+
+   if (options.maxTotalSize) {
+     args.push('--max-total-size', options.maxTotalSize);
+   }
+
+   if (options.tree === false || options.noTree) {
+     args.push('--no-tree');
+   }
+
+   if (options.verbose) {
+     args.push('-v');
+   }
+
+   if (options.contextOnly) {
+     args.push('--context-only');
+   }
+
+   // Always request JSON from the Python CLI so the output can be parsed
+   args.push('--json');
+
+   // Run the Python CLI
+   return runRlm(args);
+ }
+
+ /**
+  * Get the version of this package.
+  * @returns {string} Version string
+  */
+ function getVersion() {
+   return require('../package.json').version;
+ }
+
+ module.exports = {
+   analyzeCodebase,
+   getVersion,
+   checkSetup,
+ };
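package/src/python-manager.js ADDED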
@@ -0,0 +1,128 @@
+ /**
+  * Python environment manager for RLM
+  *
+  * Handles checking and setting up the Python/uv dependencies.
+  */
+
+ const { execFile } = require('child_process');
+ const { promisify } = require('util');
+ const os = require('os');
+
+ const execFileAsync = promisify(execFile);
+
+ /**
+  * Check if uv is installed and available.
+  * @returns {Promise<boolean>}
+  */
+ async function checkUv() {
+   try {
+     await execFileAsync('uv', ['--version']);
+     return true;
+   } catch {
+     return false;
+   }
+ }
+
+ /**
+  * Check if uvx can run rlm-codebase.
+  * @returns {Promise<boolean>}
+  */
+ async function checkRlmInstalled() {
+   try {
+     await execFileAsync('uvx', ['rlm-codebase', '--version']);
+     return true;
+   } catch {
+     // uvx might not have rlm-codebase cached yet, which is fine
+     return false;
+   }
+ }
+
+ /**
+  * Get instructions for installing uv.
+  * @returns {string}
+  */
+ function getUvInstallInstructions() {
+   const platform = os.platform();
+
+   if (platform === 'win32') {
+     return `
+ To install uv (Python package manager):
+   powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
+
+ Or with pip:
+   pip install uv
+
+ Then restart your terminal and try again.
+ `;
+   }
+
+   return `
+ To install uv (Python package manager):
+   curl -LsSf https://astral.sh/uv/install.sh | sh
+
+ Or with pip:
+   pip install uv
+
+ Or with Homebrew (macOS):
+   brew install uv
+
+ Then restart your terminal and try again.
+ `;
+ }
+
+ /**
+  * Check the Python setup and print helpful messages.
+  * @returns {Promise<void>}
+  */
+ async function checkPythonSetup() {
+   const hasUv = await checkUv();
+
+   if (!hasUv) {
+     console.error('Error: uv (Python package manager) is not installed.');
+     console.error(getUvInstallInstructions());
+     throw new Error('uv is required but not installed');
+   }
+
+   // Warn (but do not fail) if no API key is configured
+   if (!process.env.OPENAI_API_KEY && !process.env.ANTHROPIC_API_KEY) {
+     console.warn('Warning: No API key found. Set the OPENAI_API_KEY or ANTHROPIC_API_KEY environment variable.');
+   }
+ }
+
+ /**
+  * Check command (run from the postinstall script).
+  */
+ function runCheck() {
+   checkUv()
+     .then(hasUv => {
+       if (hasUv) {
+         console.log('RLM Codebase: Python environment ready (uv detected)');
+       } else {
+         console.log('RLM Codebase: uv not detected. Install it to use this tool:');
+         console.log('  curl -LsSf https://astral.sh/uv/install.sh | sh');
+       }
+     })
+     .catch(err => {
+       console.error('RLM Codebase: Setup check failed:', err.message);
+     });
+ }
+
+ // Handle CLI invocation
+ if (require.main === module) {
+   const command = process.argv[2];
+
+   if (command === 'check') {
+     runCheck();
+   } else {
+     console.log('Usage: node python-manager.js check');
+   }
+ }
+
+ module.exports = {
+   checkUv,
+   checkRlmInstalled,
+   checkPythonSetup,
+   getUvInstallInstructions,
+ };
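+
+ // Example (a sketch): gating a run on the environment check. checkPythonSetup
+ // resolves when uv is available and throws when it is not.
+ //
+ //   const { checkPythonSetup } = require('./python-manager.js');
+ //   checkPythonSetup()
+ //     .then(() => console.log('Environment ready'))
+ //     .catch(err => console.error(err.message));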