coderlm 0.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,61 @@
1
+ # CLAUDE.md
2
+
3
+ This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
4
+
5
+ ## What This Is
6
+
7
+ A CLI that implements the RLM (Recursive Language Model) pattern: instead of feeding all files into an LLM's context, give it a file listing and let it use tools to peek, decompose, and recursively call itself on subsets. Ships as both an npm package (`bunx coderlm`) and a PyPI package (`uvx coderlm`).
8
+
9
+ ## Commands
10
+
11
+ ```bash
12
+ bun test # run tests
13
+ bun test --watch # run tests in watch mode
14
+ bun test -t "pattern"       # run a single test by name
15
+ just bump                   # bump patch version (also: just bump minor|major)
16
+ just publish                # publish current version to npm and pypi
17
+ just publish npm            # publish to npm only (also: just publish pypi)
18
+ bun x ultracite fix # format and lint
19
+ bun x ultracite check # check for issues
20
+ ```
21
+
22
+ ## Architecture
23
+
24
+ The entire CLI is a single bash script at `src/coderlm`. It:
25
+ 1. Parses args (command, globs, --prompt, --max-depth, --dry-run)
26
+ 2. Expands globs via `fd` (falls back to `find`)
27
+ 3. Builds a system prompt with the file list and RLM instructions
28
+ 4. Dispatches to the appropriate agent with agent-specific flags:
29
+ - **claude**: `-p --append-system-prompt <sys> --allowedTools Bash <prompt>` (system prompt separate)
30
+ - **codex**: `exec --full-auto <combined>` (system + task combined)
31
+ - **gemini**: `-p <combined> --yolo` (command is word-split, e.g. `bunx --bun @google/gemini-cli`)
32
+ - **generic**: `<combined>` as single arg
33
+
34
+ The Python package (`src/__init__.py`) is a thin wrapper that `os.execvp`s the bash script.
35
+
36
+ **Context guards** (`src/bashrlm.sh`): Wraps common high-output commands (`cat`, `rg`, `grep`, `jq`, `find`, etc.) with truncation to prevent agents from blowing up their context windows. Injected into every non-interactive bash subshell the agent spawns via `BASH_ENV`. The file `src/bashrlm.md` contains the corresponding instructions that get appended to the system prompt, describing the truncation behavior and rules to the agent.
37
+
38
+ **Model passthrough**: The agent command is word-split (`read -ra _agent_cmd <<< "$agent"`), so model flags can be passed inline:
39
+ ```bash
40
+ coderlm "claude --model claude-haiku-4-5" "**/*.ts" --prompt "..."
41
+ coderlm "codex -m o4-mini" "**/*.py" --prompt "..."
42
+ ```
43
+
44
+ ## Testing
45
+
46
+ Unit tests are in `src/unit.test.ts` and integration tests in `src/integration.test.ts`, using `bun:test`. Unit tests use `--dry-run` which prints the constructed command as null-delimited args to stdout instead of exec-ing. This lets tests verify argument construction for each agent without actually running them.
47
+
48
+ ## Known Issues
49
+
50
+ See `.claude/PROBLEMS.md` for a running log of problems and solutions (e.g., Claude Code nesting limitations, MCP server conflicts).
51
+
52
+ ## Dual Publishing
53
+
54
+ Versions are kept in sync across `package.json` and `pyproject.toml`. The justfile `_bump` recipe uses `npm version` then syncs to pyproject.toml via perl.
55
+
56
+
57
+ # Code Standards
58
+
59
+ This project uses **Ultracite** (Biome-backed). Run `bun x ultracite fix` before committing.
60
+
61
+ TypeScript (test files only): use `const` by default, `async/await` over promise chains, explicit types where clarity is improved, `unknown` over `any`. Don't use `.only` or `.skip` in committed tests.
@@ -0,0 +1,31 @@
1
+ # Problems & Solutions
2
+
3
+ Running log of known issues and workarounds.
4
+
5
+ ## Claude Code cannot be spawned from within a Claude Code session
6
+
7
+ **Problem**: Running `coderlm claude ...` from inside a Claude Code session (or any child process of Claude Code) fails silently or with "cannot be launched inside another Claude Code session". This happens even after unsetting `CLAUDECODE` — the nesting guard goes deeper than just the env var (likely shared runtime resources or lock files).
8
+
9
+ **Impact**: Cannot integration-test the `claude` agent path from within Claude Code. The codex and gemini paths work fine.
10
+
11
+ **Workaround**: Test the claude path from a standalone terminal:
12
+ ```bash
13
+ ./src/coderlm claude "**/*.ts" --prompt "Summarize" --max-depth 1
14
+ ```
15
+
16
+ The integration test suite uses `--dry-run` for claude to verify command construction without actually spawning a session.
17
+
18
+ ## CLAUDECODE env var leaks into macOS session
19
+
20
+ **Problem**: Claude Code sets `CLAUDECODE` in child processes. If something triggers `launchctl setenv CLAUDECODE ...`, it persists across all new terminals system-wide — even standalone ones outside the IDE.
21
+
22
+ **Fix**:
23
+ ```bash
24
+ launchctl unsetenv CLAUDECODE
25
+ ```
26
+
27
+ ## Gemini CLI hangs when MCP servers conflict
28
+
29
+ **Problem**: When running `coderlm` with Gemini from inside another agent session (e.g., Claude Code), the shared `nia` MCP server can't handle concurrent connections. Gemini hangs during startup trying to connect.
30
+
31
+ **Workaround**: Pass `--allowed-mcp-server-names none` to disable MCP servers, or run from a standalone terminal. The integration test sets `GEMINI_ALLOWED_MCP_SERVERS=none` in the env.
@@ -0,0 +1,15 @@
1
+ {
2
+ "hooks": {
3
+ "PostToolUse": [
4
+ {
5
+ "matcher": "Write|Edit",
6
+ "hooks": [
7
+ {
8
+ "type": "command",
9
+ "command": "just fmt"
10
+ }
11
+ ]
12
+ }
13
+ ]
14
+ }
15
+ }
@@ -0,0 +1,16 @@
1
+ name: Test
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ pull_request:
7
+ branches: [main]
8
+
9
+ jobs:
10
+ test:
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - uses: actions/checkout@v4
14
+ - uses: oven-sh/setup-bun@v2
15
+ - run: bun install --frozen-lockfile
16
+ - run: bun test src/unit.test.ts
@@ -0,0 +1,38 @@
1
+ # dependencies (bun install)
2
+ node_modules
3
+
4
+ # output
5
+ out
6
+ dist
7
+ *.tgz
8
+
9
+ # python
10
+ __pycache__
11
+ *.egg-info
12
+
13
+ # code coverage
14
+ coverage
15
+ *.lcov
16
+
17
+ # logs
18
+ logs
19
+ *.log
20
+ report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
21
+
22
+ # dotenv environment variable files
23
+ .env
24
+ .env.development.local
25
+ .env.test.local
26
+ .env.production.local
27
+ .env.local
28
+
29
+ # caches
30
+ .eslintcache
31
+ .cache
32
+ *.tsbuildinfo
33
+
34
+ # IntelliJ based IDEs
35
+ .idea
36
+
37
+ # Finder (MacOS) folder config
38
+ .DS_Store
@@ -0,0 +1,4 @@
1
+ [tools]
2
+ bun = "1"
3
+ just = "1.40"
4
+ python = "3.13"
@@ -0,0 +1,62 @@
1
+ {
2
+ "file_scan_exclusions": [
3
+ "**/.DS_Store",
4
+ "**/.git",
5
+ "**/.openai_test_cache/",
6
+ "**/.pytest_cache/",
7
+ "**/.ruff_cache",
8
+ "**/.tsbuildinfo",
9
+ "**/.venv/",
10
+ "**/__pycache__",
11
+ "**/node_modules/",
12
+ "dist/**"
13
+ ],
14
+ "formatter": "language_server",
15
+ "format_on_save": "on",
16
+ "lsp": {
17
+ "typescript-language-server": {
18
+ "settings": {
19
+ "typescript": {
20
+ "preferences": {
21
+ "includePackageJsonAutoImports": "on"
22
+ }
23
+ }
24
+ }
25
+ }
26
+ },
27
+ "languages": {
28
+ "JavaScript": {
29
+ "formatter": {
30
+ "language_server": {
31
+ "name": "biome"
32
+ }
33
+ },
34
+ "code_actions_on_format": {
35
+ "source.fixAll.biome": true,
36
+ "source.organizeImports.biome": true
37
+ }
38
+ },
39
+ "TypeScript": {
40
+ "formatter": {
41
+ "language_server": {
42
+ "name": "biome"
43
+ }
44
+ },
45
+ "code_actions_on_format": {
46
+ "source.fixAll.biome": true,
47
+ "source.organizeImports.biome": true
48
+ }
49
+ },
50
+ "TSX": {
51
+ "formatter": {
52
+ "language_server": {
53
+ "name": "biome"
54
+ }
55
+ },
56
+ "code_actions_on_format": {
57
+ "source.fixAll.biome": true,
58
+ "source.organizeImports.biome": true
59
+ }
60
+ }
61
+ }
62
+ }
coderlm-0.1.1/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 zenbase-ai
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
coderlm-0.1.1/PKG-INFO ADDED
@@ -0,0 +1,92 @@
1
+ Metadata-Version: 2.4
2
+ Name: coderlm
3
+ Version: 0.1.1
4
+ Summary: Process large codebases using the RLM (Recursive Language Model) pattern
5
+ License-Expression: MIT
6
+ License-File: LICENSE
7
+ Requires-Python: >=3.9
8
+ Description-Content-Type: text/markdown
9
+
10
+ # coderlm
11
+
12
+ Process large codebases with Coding Agents using the **RLM (Recursive Language Model)** pattern — with bundled context guards to keep the agent's output from flooding its own context window.
13
+
14
+ Instead of stuffing all files into an LLM's context window, give it a file listing and let it use tools to peek, decompose, and recursively call itself on subsets. This keeps each agent focused on a manageable scope while covering arbitrarily large codebases.
15
+
16
+ ## Install
17
+
18
+ ```bash
19
+ # npm / bun
20
+ bunx coderlm@latest
21
+ npx coderlm@latest
22
+
23
+ # pypi
24
+ uvx coderlm
25
+ pipx run coderlm
26
+ ```
27
+
28
+ Or install globally:
29
+
30
+ ```bash
31
+ npm i -g coderlm # npm
32
+ uv tool install coderlm # pypi
33
+ ```
34
+
35
+ ## Usage
36
+
37
+ ```
38
+ coderlm <agent> <globs...> --prompt "<task>" [--max-depth N] [--allowedTools TOOLS]
39
+ ```
40
+
41
+ ### Examples
42
+
43
+ ```bash
44
+ # Codex
45
+ coderlm codex "src/**/*.ts" --prompt "Find all TODO comments"
46
+ coderlm codex "src/**" "lib/**" "test/**" --prompt "Find dead code"
47
+ coderlm codex "**/*.ts" --prompt "Summarize the codebase" --max-depth 2
48
+
49
+ # Gemini
50
+ coderlm "bunx --bun @google/gemini-cli" "**/*.py" --prompt "Review for security issues"
51
+ coderlm "bunx --bun @google/gemini-cli" "src/**" --prompt "Architecture overview"
52
+
53
+ # Claude (non-recursive only — Claude cannot spawn nested Claude sessions)
54
+ coderlm claude "src/**" --prompt "Fix type errors" --allowedTools "Bash,Edit"
55
+ ```
56
+
57
+ ## How It Works
58
+
59
+ 1. **Expand globs** into a file listing using `fd` (or `find` as fallback)
60
+ 2. **Build a system prompt** containing the file list and RLM instructions (explore, decompose, aggregate)
61
+ 3. **Inject context guards** via `BASH_ENV` so every bash subshell the agent spawns has output truncation active
62
+ 4. **Launch the agent** with agent-specific flags for non-interactive execution
63
+
64
+ The agent receives a file listing — not file contents. It uses shell tools (`rg`, `cat`, `head`, `jq`, etc.) to inspect files as needed. For large file sets (>20 files), it spawns recursive sub-agents on subsets.
65
+
66
+ ### Context Guards (bundled)
67
+
68
+ `bashrlm.sh` is bundled and auto-activates for every agent run. It wraps high-output commands (`cat`, `grep`, `rg`, `jq`, `find`, `ls`, `curl`, etc.) with automatic truncation, preventing the agent from flooding its own context window with oversized output.
69
+
70
+ Truncation uses head+tail mode — the agent sees the start and end of any large output, with the middle omitted:
71
+
72
+ ```
73
+ [TRUNCATED — showing 2000 of 15000 chars, first and last 1000]
74
+ ```
75
+
76
+ Guards are redirect-aware: piping to a file (`> /tmp/out.txt`) bypasses truncation, so multi-step processing works naturally. The agent's system prompt includes instructions for this pattern.
77
+
78
+ ### Supported Agents
79
+
80
+ | Agent | Command | Notes |
81
+ | ------------- | ------------------------------- | ------------------------------------------------------------------ |
82
+ | OpenAI Codex | `codex` | Recommended — supports recursive sub-agents |
83
+ | Google Gemini | `bunx --bun @google/gemini-cli` | Supports recursive sub-agents |
84
+ | Claude Code | `claude` | Non-recursive only — cannot spawn nested Claude sessions |
85
+ | Any CLI | `my-agent` | Combined prompt passed as single argument |
86
+
87
+ ## When to Use
88
+
89
+ - Many files to analyze (>10)
90
+ - Task benefits from divide-and-conquer
91
+ - Total content exceeds a comfortable context window
92
+ - You want the agent to explore strategically rather than read everything upfront
@@ -0,0 +1,83 @@
1
+ # coderlm
2
+
3
+ Process large codebases with Coding Agents using the **RLM (Recursive Language Model)** pattern — with bundled context guards to keep the agent's output from flooding its own context window.
4
+
5
+ Instead of stuffing all files into an LLM's context window, give it a file listing and let it use tools to peek, decompose, and recursively call itself on subsets. This keeps each agent focused on a manageable scope while covering arbitrarily large codebases.
6
+
7
+ ## Install
8
+
9
+ ```bash
10
+ # npm / bun
11
+ bunx coderlm@latest
12
+ npx coderlm@latest
13
+
14
+ # pypi
15
+ uvx coderlm
16
+ pipx run coderlm
17
+ ```
18
+
19
+ Or install globally:
20
+
21
+ ```bash
22
+ npm i -g coderlm # npm
23
+ uv tool install coderlm # pypi
24
+ ```
25
+
26
+ ## Usage
27
+
28
+ ```
29
+ coderlm <agent> <globs...> --prompt "<task>" [--max-depth N] [--allowedTools TOOLS]
30
+ ```
31
+
32
+ ### Examples
33
+
34
+ ```bash
35
+ # Codex
36
+ coderlm codex "src/**/*.ts" --prompt "Find all TODO comments"
37
+ coderlm codex "src/**" "lib/**" "test/**" --prompt "Find dead code"
38
+ coderlm codex "**/*.ts" --prompt "Summarize the codebase" --max-depth 2
39
+
40
+ # Gemini
41
+ coderlm "bunx --bun @google/gemini-cli" "**/*.py" --prompt "Review for security issues"
42
+ coderlm "bunx --bun @google/gemini-cli" "src/**" --prompt "Architecture overview"
43
+
44
+ # Claude (non-recursive only — Claude cannot spawn nested Claude sessions)
45
+ coderlm claude "src/**" --prompt "Fix type errors" --allowedTools "Bash,Edit"
46
+ ```
47
+
48
+ ## How It Works
49
+
50
+ 1. **Expand globs** into a file listing using `fd` (or `find` as fallback)
51
+ 2. **Build a system prompt** containing the file list and RLM instructions (explore, decompose, aggregate)
52
+ 3. **Inject context guards** via `BASH_ENV` so every bash subshell the agent spawns has output truncation active
53
+ 4. **Launch the agent** with agent-specific flags for non-interactive execution
54
+
55
+ The agent receives a file listing — not file contents. It uses shell tools (`rg`, `cat`, `head`, `jq`, etc.) to inspect files as needed. For large file sets (>20 files), it spawns recursive sub-agents on subsets.
56
+
57
+ ### Context Guards (bundled)
58
+
59
+ `bashrlm.sh` is bundled and auto-activates for every agent run. It wraps high-output commands (`cat`, `grep`, `rg`, `jq`, `find`, `ls`, `curl`, etc.) with automatic truncation, preventing the agent from flooding its own context window with oversized output.
60
+
61
+ Truncation uses head+tail mode — the agent sees the start and end of any large output, with the middle omitted:
62
+
63
+ ```
64
+ [TRUNCATED — showing 2000 of 15000 chars, first and last 1000]
65
+ ```
66
+
67
+ Guards are redirect-aware: piping to a file (`> /tmp/out.txt`) bypasses truncation, so multi-step processing works naturally. The agent's system prompt includes instructions for this pattern.
68
+
69
+ ### Supported Agents
70
+
71
+ | Agent | Command | Notes |
72
+ | ------------- | ------------------------------- | ------------------------------------------------------------------ |
73
+ | OpenAI Codex | `codex` | Recommended — supports recursive sub-agents |
74
+ | Google Gemini | `bunx --bun @google/gemini-cli` | Supports recursive sub-agents |
75
+ | Claude Code | `claude` | Non-recursive only — cannot spawn nested Claude sessions |
76
+ | Any CLI | `my-agent` | Combined prompt passed as single argument |
77
+
78
+ ## When to Use
79
+
80
+ - Many files to analyze (>10)
81
+ - Task benefits from divide-and-conquer
82
+ - Total content exceeds a comfortable context window
83
+ - You want the agent to explore strategically rather than read everything upfront
coderlm-0.1.1/SKILL.md ADDED
@@ -0,0 +1,20 @@
1
+ ---
2
+ name: coderlm
3
+ description: Use when a task involves many files (>10), the total content exceeds comfortable context size, or the task benefits from divide-and-conquer. Runs an agent with a file listing instead of file contents, letting it peek strategically and recursively decompose into sub-agents. Bundles bashrlm context guards — output from high-output commands (cat, grep, rg, jq, find, ls, curl, etc.) is automatically truncated so the agent never floods its own context window.
4
+ ---
5
+
6
+ ## Usage
7
+
8
+ ```
9
+ coderlm <agent> <globs...> --prompt "<task>" [--max-depth N] [--allowedTools TOOLS]
10
+ ```
11
+
12
+ ## Examples
13
+
14
+ ```bash
15
+ coderlm codex "src/**/*.ts" --prompt "Find all TODO comments"
16
+ coderlm codex "src/**" "lib/**" --prompt "Architecture overview"
17
+ coderlm "bunx --bun @google/gemini-cli" "**/*.py" --prompt "Review for security issues"
18
+ coderlm "bunx --bun @google/gemini-cli" "src/**" --prompt "Find dead code"
19
+ coderlm claude "src/**" --prompt "Fix type errors" --allowedTools "Bash,Edit"
20
+ ```
@@ -0,0 +1,4 @@
1
+ {
2
+ "$schema": "./node_modules/@biomejs/biome/configuration_schema.json",
3
+ "extends": ["ultracite/biome/core"]
4
+ }
coderlm-0.1.1/bun.lock ADDED
@@ -0,0 +1,70 @@
1
+ {
2
+ "lockfileVersion": 1,
3
+ "configVersion": 1,
4
+ "workspaces": {
5
+ "": {
6
+ "name": "coding-agent-rlm",
7
+ "devDependencies": {
8
+ "@biomejs/biome": "2.4.0",
9
+ "ultracite": "7.2.3",
10
+ },
11
+ },
12
+ },
13
+ "packages": {
14
+ "@biomejs/biome": ["@biomejs/biome@2.4.0", "", { "optionalDependencies": { "@biomejs/cli-darwin-arm64": "2.4.0", "@biomejs/cli-darwin-x64": "2.4.0", "@biomejs/cli-linux-arm64": "2.4.0", "@biomejs/cli-linux-arm64-musl": "2.4.0", "@biomejs/cli-linux-x64": "2.4.0", "@biomejs/cli-linux-x64-musl": "2.4.0", "@biomejs/cli-win32-arm64": "2.4.0", "@biomejs/cli-win32-x64": "2.4.0" }, "bin": { "biome": "bin/biome" } }, "sha512-iluT61cORUDIC5i/y42ljyQraCemmmcgbMLLCnYO+yh+2hjTmcMFcwY8G0zTzWCsPb3t3AyKc+0t/VuhPZULUg=="],
15
+
16
+ "@biomejs/cli-darwin-arm64": ["@biomejs/cli-darwin-arm64@2.4.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-L+YpOtPSuU0etomfvFTPWRsa7+8ejaJL3yaROEoT/96HDJbR6OsvZQk0C8JUYou+XFdP+JcGxqZknkp4n934RA=="],
17
+
18
+ "@biomejs/cli-darwin-x64": ["@biomejs/cli-darwin-x64@2.4.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-Aq+S7ffpb5ynTyLgtnEjG+W6xuTd2F7FdC7J6ShpvRhZwJhjzwITGF9vrqoOnw0sv1XWkt2Q1Rpg+hleg/Xg7Q=="],
19
+
20
+ "@biomejs/cli-linux-arm64": ["@biomejs/cli-linux-arm64@2.4.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-u2p54IhvNAWB+h7+rxCZe3reNfQYFK+ppDw+q0yegrGclFYnDPZAntv/PqgUacpC3uxTeuWFgWW7RFe3lHuxOA=="],
21
+
22
+ "@biomejs/cli-linux-arm64-musl": ["@biomejs/cli-linux-arm64-musl@2.4.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-1rhDUq8sf7xX3tg7vbnU3WVfanKCKi40OXc4VleBMzRStmQHdeBY46aFP6VdwEomcVjyNiu+Zcr3LZtAdrZrjQ=="],
23
+
24
+ "@biomejs/cli-linux-x64": ["@biomejs/cli-linux-x64@2.4.0", "", { "os": "linux", "cpu": "x64" }, "sha512-WVFOhsnzhrbMGOSIcB9yFdRV2oG2KkRRhIZiunI9gJqSU3ax9ErdnTxRfJUxZUI9NbzVxC60OCXNcu+mXfF/Tw=="],
25
+
26
+ "@biomejs/cli-linux-x64-musl": ["@biomejs/cli-linux-x64-musl@2.4.0", "", { "os": "linux", "cpu": "x64" }, "sha512-Omo0xhl63z47X+CrE5viEWKJhejJyndl577VoXg763U/aoATrK3r5+8DPh02GokWPeODX1Hek00OtjjooGan9w=="],
27
+
28
+ "@biomejs/cli-win32-arm64": ["@biomejs/cli-win32-arm64@2.4.0", "", { "os": "win32", "cpu": "arm64" }, "sha512-aqRwW0LJLV1v1NzyLvLWQhdLmDSAV1vUh+OBdYJaa8f28XBn5BZavo+WTfqgEzALZxlNfBmu6NGO6Al3MbCULw=="],
29
+
30
+ "@biomejs/cli-win32-x64": ["@biomejs/cli-win32-x64@2.4.0", "", { "os": "win32", "cpu": "x64" }, "sha512-g47s+V+OqsGxbSZN3lpav6WYOk0PIc3aCBAq+p6dwSynL3K5MA6Cg6nkzDOlu28GEHwbakW+BllzHCJCxnfK5Q=="],
31
+
32
+ "@clack/core": ["@clack/core@1.0.1", "", { "dependencies": { "picocolors": "^1.0.0", "sisteransi": "^1.0.5" } }, "sha512-WKeyK3NOBwDOzagPR5H08rFk9D/WuN705yEbuZvKqlkmoLM2woKtXb10OO2k1NoSU4SFG947i2/SCYh+2u5e4g=="],
33
+
34
+ "@clack/prompts": ["@clack/prompts@1.0.1", "", { "dependencies": { "@clack/core": "1.0.1", "picocolors": "^1.0.0", "sisteransi": "^1.0.5" } }, "sha512-/42G73JkuYdyWZ6m8d/CJtBrGl1Hegyc7Fy78m5Ob+jF85TOUmLR5XLce/U3LxYAw0kJ8CT5aI99RIvPHcGp/Q=="],
35
+
36
+ "balanced-match": ["balanced-match@4.0.3", "", {}, "sha512-1pHv8LX9CpKut1Zp4EXey7Z8OfH11ONNH6Dhi2WDUt31VVZFXZzKwXcysBgqSumFCmR+0dqjMK5v5JiFHzi0+g=="],
37
+
38
+ "brace-expansion": ["brace-expansion@5.0.2", "", { "dependencies": { "balanced-match": "^4.0.2" } }, "sha512-Pdk8c9poy+YhOgVWw1JNN22/HcivgKWwpxKq04M/jTmHyCZn12WPJebZxdjSa5TmBqISrUSgNYU3eRORljfCCw=="],
39
+
40
+ "citty": ["citty@0.2.1", "", {}, "sha512-kEV95lFBhQgtogAPlQfJJ0WGVSokvLr/UEoFPiKKOXF7pl98HfUVUD0ejsuTCld/9xH9vogSywZ5KqHzXrZpqg=="],
41
+
42
+ "commander": ["commander@14.0.3", "", {}, "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw=="],
43
+
44
+ "deepmerge": ["deepmerge@4.3.1", "", {}, "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A=="],
45
+
46
+ "glob": ["glob@13.0.5", "", { "dependencies": { "minimatch": "^10.2.1", "minipass": "^7.1.2", "path-scurry": "^2.0.0" } }, "sha512-BzXxZg24Ibra1pbQ/zE7Kys4Ua1ks7Bn6pKLkVPZ9FZe4JQS6/Q7ef3LG1H+k7lUf5l4T3PLSyYyYJVYUvfgTw=="],
47
+
48
+ "jsonc-parser": ["jsonc-parser@3.3.1", "", {}, "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ=="],
49
+
50
+ "lru-cache": ["lru-cache@11.2.6", "", {}, "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ=="],
51
+
52
+ "minimatch": ["minimatch@10.2.1", "", { "dependencies": { "brace-expansion": "^5.0.2" } }, "sha512-MClCe8IL5nRRmawL6ib/eT4oLyeKMGCghibcDWK+J0hh0Q8kqSdia6BvbRMVk6mPa6WqUa5uR2oxt6C5jd533A=="],
53
+
54
+ "minipass": ["minipass@7.1.2", "", {}, "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw=="],
55
+
56
+ "nypm": ["nypm@0.6.5", "", { "dependencies": { "citty": "^0.2.0", "pathe": "^2.0.3", "tinyexec": "^1.0.2" }, "bin": { "nypm": "dist/cli.mjs" } }, "sha512-K6AJy1GMVyfyMXRVB88700BJqNUkByijGJM8kEHpLdcAt+vSQAVfkWWHYzuRXHSY6xA2sNc5RjTj0p9rE2izVQ=="],
57
+
58
+ "path-scurry": ["path-scurry@2.0.1", "", { "dependencies": { "lru-cache": "^11.0.0", "minipass": "^7.1.2" } }, "sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA=="],
59
+
60
+ "pathe": ["pathe@2.0.3", "", {}, "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w=="],
61
+
62
+ "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="],
63
+
64
+ "sisteransi": ["sisteransi@1.0.5", "", {}, "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="],
65
+
66
+ "tinyexec": ["tinyexec@1.0.2", "", {}, "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg=="],
67
+
68
+ "ultracite": ["ultracite@7.2.3", "", { "dependencies": { "@clack/prompts": "^1.0.1", "commander": "^14.0.3", "deepmerge": "^4.3.1", "glob": "^13.0.3", "jsonc-parser": "^3.3.1", "nypm": "^0.6.5" }, "peerDependencies": { "oxlint": "^1.0.0" }, "optionalPeers": ["oxlint"], "bin": { "ultracite": "dist/index.js" } }, "sha512-WKNS2sKAZe4BHu+JGbZebXvy/A1QagDaBnndrK/zwOJAze/mQ8jeHfdG2bPlv3qcJ5fdS3w2Kd7c/eIcH78HvA=="],
69
+ }
70
+ }
coderlm-0.1.1/justfile ADDED
@@ -0,0 +1,67 @@
1
+ set shell := ["bash", "-euo", "pipefail", "-c"]
2
+
3
+ # Show available recipes
4
+ default:
5
+ @just --list
6
+
7
+ # Run tests
8
+ test:
9
+ bun test
10
+
11
+ # Format code (py, ts, or all)
12
+ fmt target="all":
13
+ just _fmt-{{target}}
14
+
15
+ _fmt-all:
16
+ just _fmt-ts
17
+ just _fmt-py
18
+
19
+ _fmt-ts:
20
+ bunx --bun ultracite fix
21
+
22
+ _fmt-py:
23
+ ruff format
24
+ ruff check --fix
25
+
26
+ # Lint code (py, ts, or all)
27
+ lint target="all":
28
+ just _lint-{{target}}
29
+
30
+ _lint-all:
31
+ just _lint-ts
32
+ just _lint-py
33
+
34
+ _lint-ts:
35
+ bunx --bun ultracite check
36
+
37
+ _lint-py:
38
+ ruff format --check
39
+ ruff check
40
+
41
+ # Publish current version to npm, pypi, or both (default: all)
42
+ publish target="all":
43
+ just _publish-{{target}}
44
+
45
+ # Bump version in package.json and pyproject.toml (default: patch)
46
+ bump level="patch":
47
+ just _bump {{level}}
48
+
49
+ # Internal bump implementation
50
+ _bump level:
51
+ #!/usr/bin/env bash
52
+ set -euo pipefail
53
+ npm version {{level}} --no-git-tag-version
54
+ version=$(jq -r .version package.json)
55
+ perl -pi -e "s/^version = .*/version = \"$version\"/" pyproject.toml
56
+ echo "Bumped to $version"
57
+
58
+ _publish-all:
59
+ just _publish-npm
60
+ just _publish-pypi
61
+
62
+ _publish-npm:
63
+ npm publish
64
+
65
+ _publish-pypi:
66
+ uv build
67
+ uv publish
@@ -0,0 +1,37 @@
1
+ {
2
+ "name": "coderlm",
3
+ "version": "0.1.1",
4
+ "description": "Process large codebases using the RLM (Recursive Language Model) pattern",
5
+ "bin": {
6
+ "coderlm": "src/coderlm"
7
+ },
8
+ "files": [
9
+ "src/coderlm",
10
+ "src/bashrlm.sh",
11
+ "src/bashrlm.md",
12
+ "SKILL.md",
13
+ "README.md"
14
+ ],
15
+ "keywords": [
16
+ "cli",
17
+ "rlm",
18
+ "claude",
19
+ "codex",
20
+ "agent",
21
+ "llm"
22
+ ],
23
+ "license": "MIT",
24
+ "repository": {
25
+ "type": "git",
26
+ "url": "git+https://github.com/zenbase-ai/coderlm.git"
27
+ },
28
+ "devDependencies": {
29
+ "@biomejs/biome": "2.4.0",
30
+ "ultracite": "7.2.3"
31
+ },
32
+ "scripts": {
33
+ "test": "bun test",
34
+ "check": "ultracite check",
35
+ "fix": "ultracite fix"
36
+ }
37
+ }
@@ -0,0 +1,22 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "coderlm"
7
+ version = "0.1.1"
8
+ description = "Process large codebases using the RLM (Recursive Language Model) pattern"
9
+ readme = "README.md"
10
+ license = "MIT"
11
+ requires-python = ">=3.9"
12
+
13
+ [project.scripts]
14
+ coderlm = "src:main"
15
+
16
+ [tool.hatch.build.targets.wheel]
17
+ packages = ["src"]
18
+
19
+ [dependency-groups]
20
+ dev = [
21
+ "ruff>=0.15.1",
22
+ ]