recursive-llm-ts 2.0.11 → 3.0.1

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -0,0 +1,78 @@
+ #!/bin/bash
+ # Simple integration tests
+
+ set -e
+
+ echo "🧪 Simple RLM Integration Tests"
+ echo "================================"
+ echo ""
+
+ if [ -z "$OPENAI_API_KEY" ]; then
+   echo "❌ Set OPENAI_API_KEY first"
+   exit 1
+ fi
+
+ # Test 1
+ echo "Test 1: Count word occurrences"
+ cat > /tmp/test1.json <<'JSON'
+ {
+   "model": "gpt-4o-mini",
+   "query": "How many times does the word 'test' appear?",
+   "context": "This is a test. Another test here. Final test.",
+   "config": {
+     "max_iterations": 10
+   }
+ }
+ JSON
+
+ echo "Running..."
+ RESULT=$(cat /tmp/test1.json | OPENAI_API_KEY="$OPENAI_API_KEY" ./rlm)
+ echo "✅ Test 1 Result:"
+ echo "$RESULT" | jq -r '.result'
+ echo "Stats:" $(echo "$RESULT" | jq '.stats')
+ echo ""
+
+ # Test 2
+ echo "Test 2: Simple counting"
+ cat > /tmp/test2.json <<'JSON'
+ {
+   "model": "gpt-4o-mini",
+   "query": "How many words are in this text?",
+   "context": "One two three four five",
+   "config": {
+     "max_iterations": 10,
+     "temperature": 0.1
+   }
+ }
+ JSON
+
+ echo "Running..."
+ RESULT=$(cat /tmp/test2.json | OPENAI_API_KEY="$OPENAI_API_KEY" ./rlm)
+ echo "✅ Test 2 Result:"
+ echo "$RESULT" | jq -r '.result'
+ echo "Stats:" $(echo "$RESULT" | jq '.stats')
+ echo ""
+
+ # Test 3
+ echo "Test 3: Extract information"
+ cat > /tmp/test3.json <<'JSON'
+ {
+   "model": "gpt-4o-mini",
+   "query": "List all the numbers mentioned",
+   "context": "I have 5 apples, 10 oranges, and 3 bananas.",
+   "config": {
+     "max_iterations": 10
+   }
+ }
+ JSON
+
+ echo "Running..."
+ RESULT=$(cat /tmp/test3.json | OPENAI_API_KEY="$OPENAI_API_KEY" ./rlm)
+ echo "✅ Test 3 Result:"
+ echo "$RESULT" | jq -r '.result'
+ echo "Stats:" $(echo "$RESULT" | jq '.stats')
+ echo ""
+
+ echo "================================"
+ echo "✅ All tests passed!"
+ rm -f /tmp/test*.json
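
Taken together, the three tests above also pin down the binary's I/O contract: a single JSON request ({model, query, context, config}) on stdin, and a JSON response with `result` and `stats` fields on stdout. A minimal TypeScript driver for that contract follows; the field names and the `./rlm` path come straight from the script, while `runRlm` itself is an illustrative sketch, not an export of this package.

import { execFileSync } from 'child_process';

interface RlmRequest {
  model: string;
  query: string;
  context: string;
  config?: { max_iterations?: number; temperature?: number };
}

interface RlmResponse {
  result: string;
  stats: unknown;  // shape not shown in the diff, so left opaque
}

// Feed one JSON request to the rlm binary on stdin, parse JSON from stdout.
function runRlm(request: RlmRequest): RlmResponse {
  const stdout = execFileSync('./rlm', [], {
    input: JSON.stringify(request),
    env: process.env,  // OPENAI_API_KEY must be set, as the script checks
    encoding: 'utf8',
  });
  return JSON.parse(stdout) as RlmResponse;
}

// Mirrors Test 1:
// runRlm({
//   model: 'gpt-4o-mini',
//   query: "How many times does the word 'test' appear?",
//   context: 'This is a test. Another test here. Final test.',
//   config: { max_iterations: 10 },
// });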
package/package.json CHANGED
@@ -1,18 +1,18 @@
  {
    "name": "recursive-llm-ts",
-   "version": "2.0.11",
+   "version": "3.0.1",
    "description": "TypeScript bridge for recursive-llm: Recursive Language Models for unbounded context processing",
    "main": "dist/index.js",
    "types": "dist/index.d.ts",
    "files": [
      "dist",
-     "recursive-llm/src",
-     "recursive-llm/pyproject.toml",
-     "scripts/install-python-deps.js"
+     "bin",
+     "go",
+     "scripts/build-go-binary.js"
    ],
    "scripts": {
      "build": "tsc",
-     "postinstall": "node scripts/install-python-deps.js",
+     "postinstall": "node scripts/build-go-binary.js",
      "prepublishOnly": "npm run build",
      "release": "./scripts/release.sh",
      "start": "ts-node test/test.ts",
@@ -36,10 +36,7 @@
      "url": "https://github.com/jbeck018/recursive-llm-ts/issues"
    },
    "homepage": "https://github.com/jbeck018/recursive-llm-ts#readme",
-   "dependencies": {
-     "bunpy": "^0.1.0",
-     "pythonia": "^1.2.6"
-   },
+   "dependencies": {},
    "devDependencies": {
      "@types/node": "^20.11.19",
      "dotenv": "^16.4.5",
@@ -0,0 +1,41 @@
+ #!/usr/bin/env node
+ const { execFileSync } = require('child_process');
+ const fs = require('fs');
+ const path = require('path');
+
+ const repoRoot = path.join(__dirname, '..');
+ const goRoot = path.join(repoRoot, 'go');
+ const binDir = path.join(repoRoot, 'bin');
+ const binaryName = process.platform === 'win32' ? 'rlm-go.exe' : 'rlm-go';
+ const binaryPath = path.join(binDir, binaryName);
+
+ function goAvailable() {
+   try {
+     execFileSync('go', ['version'], { stdio: 'ignore' });
+     return true;
+   } catch {
+     return false;
+   }
+ }
+
+ if (!fs.existsSync(goRoot)) {
+   console.warn('[recursive-llm-ts] Go source directory not found; skipping Go build');
+   process.exit(0);
+ }
+
+ if (!goAvailable()) {
+   console.warn('[recursive-llm-ts] Go is not installed; skipping Go binary build');
+   console.warn('[recursive-llm-ts] Install Go 1.21+ and rerun: node scripts/build-go-binary.js');
+   process.exit(0);
+ }
+
+ try {
+   fs.mkdirSync(binDir, { recursive: true });
+   // Build with optimization: -s removes symbol table, -w removes DWARF debug info
+   execFileSync('go', ['build', '-ldflags=-s -w', '-o', binaryPath, './cmd/rlm'], { stdio: 'inherit', cwd: goRoot });
+   console.log(`[recursive-llm-ts] ✓ Go binary built at ${binaryPath}`);
+ } catch (error) {
+   console.warn('[recursive-llm-ts] Warning: Failed to build Go binary');
+   console.warn(error.message || error);
+   process.exit(0);
+ }
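
Note that the script soft-fails on purpose: every path, including a failed build, exits 0 so that npm install never breaks on machines without a Go toolchain. The flip side is that the runtime wrapper cannot assume bin/rlm-go exists. A hypothetical resolver mirroring the platform naming above (resolveRlmBinary is illustrative, not part of the package's API):

import * as fs from 'fs';
import * as path from 'path';

// Locate the binary the postinstall step would have produced, and fail
// with an actionable message if the build was skipped.
function resolveRlmBinary(packageRoot: string): string {
  const name = process.platform === 'win32' ? 'rlm-go.exe' : 'rlm-go';
  const binaryPath = path.join(packageRoot, 'bin', name);
  if (!fs.existsSync(binaryPath)) {
    throw new Error(
      `rlm-go not found at ${binaryPath}; install Go 1.21+ and run: node scripts/build-go-binary.js`
    );
  }
  return binaryPath;
}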
@@ -1,70 +0,0 @@
- [build-system]
- requires = ["setuptools>=61.0", "wheel"]
- build-backend = "setuptools.build_meta"
-
- [project]
- name = "recursive-llm"
- version = "0.1.0"
- description = "Recursive Language Models for unbounded context processing"
- authors = [{name = "Grigori Gvadzabia"}]
- readme = "README.md"
- requires-python = ">=3.9"
- license = {text = "MIT"}
- keywords = ["llm", "ai", "nlp", "recursive", "language-models"]
- classifiers = [
-   "Development Status :: 3 - Alpha",
-   "Intended Audience :: Developers",
-   "License :: OSI Approved :: MIT License",
-   "Programming Language :: Python :: 3",
-   "Programming Language :: Python :: 3.9",
-   "Programming Language :: Python :: 3.10",
-   "Programming Language :: Python :: 3.11",
-   "Programming Language :: Python :: 3.12",
- ]
-
- dependencies = [
-   "litellm>=1.0.0",
-   "RestrictedPython>=6.0",
-   "python-dotenv>=1.0.0",
- ]
-
- [project.optional-dependencies]
- dev = [
-   "pytest>=7.0.0",
-   "pytest-asyncio>=0.21.0",
-   "pytest-cov>=4.0.0",
-   "black>=24.0.0",
-   "ruff>=0.1.0",
-   "mypy>=1.0.0",
- ]
-
- [project.urls]
- Homepage = "https://github.com/yourusername/recursive-llm"
- Documentation = "https://github.com/yourusername/recursive-llm"
- Repository = "https://github.com/yourusername/recursive-llm"
- Issues = "https://github.com/yourusername/recursive-llm/issues"
-
- [tool.setuptools.packages.find]
- where = ["src"]
-
- [tool.pytest.ini_options]
- asyncio_mode = "auto"
- testpaths = ["tests"]
- python_files = ["test_*.py"]
- python_classes = ["Test*"]
- python_functions = ["test_*"]
- addopts = "-v"
-
- [tool.black]
- line-length = 100
- target-version = ['py39']
-
- [tool.ruff]
- line-length = 100
- target-version = "py39"
-
- [tool.mypy]
- python_version = "3.9"
- warn_return_any = true
- warn_unused_configs = true
- disallow_untyped_defs = true
@@ -1,14 +0,0 @@
- """Recursive Language Models for unbounded context processing."""
-
- from .core import RLM, RLMError, MaxIterationsError, MaxDepthError
- from .repl import REPLError
-
- __version__ = "0.1.0"
-
- __all__ = [
-     "RLM",
-     "RLMError",
-     "MaxIterationsError",
-     "MaxDepthError",
-     "REPLError",
- ]
@@ -1,322 +0,0 @@
- """Core RLM implementation."""
-
- import asyncio
- import re
- from typing import Optional, Dict, Any, List
-
- import litellm
-
- from .types import Message
- from .repl import REPLExecutor, REPLError
- from .prompts import build_system_prompt
- from .parser import parse_response, is_final
-
-
- class RLMError(Exception):
-     """Base error for RLM."""
-     pass
-
-
- class MaxIterationsError(RLMError):
-     """Max iterations exceeded."""
-     pass
-
-
- class MaxDepthError(RLMError):
-     """Max recursion depth exceeded."""
-     pass
-
-
- class RLM:
-     """Recursive Language Model."""
-
-     def __init__(
-         self,
-         model: str,
-         recursive_model: Optional[str] = None,
-         api_base: Optional[str] = None,
-         api_key: Optional[str] = None,
-         max_depth: int = 5,
-         max_iterations: int = 30,
-         _current_depth: int = 0,
-         **llm_kwargs: Any
-     ):
-         """
-         Initialize RLM.
-
-         Args:
-             model: Model name (e.g., "gpt-4o", "claude-sonnet-4", "ollama/llama3.2")
-             recursive_model: Optional cheaper model for recursive calls
-             api_base: Optional API base URL
-             api_key: Optional API key
-             max_depth: Maximum recursion depth
-             max_iterations: Maximum REPL iterations per call
-             _current_depth: Internal current depth tracker
-             **llm_kwargs: Additional LiteLLM parameters
-         """
-         # Patch for recursive-llm-ts bug where config is passed as 2nd positional arg
-         if isinstance(recursive_model, dict):
-             config = recursive_model
-             # Reset recursive_model default
-             self.recursive_model = config.get('recursive_model', model)
-             self.api_base = config.get('api_base', api_base)
-             self.api_key = config.get('api_key', api_key)
-             self.max_depth = int(config.get('max_depth', max_depth))
-             self.max_iterations = int(config.get('max_iterations', max_iterations))
-
-             # Extract other llm kwargs
-             excluded = {'recursive_model', 'api_base', 'api_key', 'max_depth', 'max_iterations'}
-             self.llm_kwargs = {k: v for k, v in config.items() if k not in excluded}
-             # Merge with any actual kwargs passed
-             self.llm_kwargs.update(llm_kwargs)
-         else:
-             self.recursive_model = recursive_model or model
-             self.api_base = api_base
-             self.api_key = api_key
-             self.max_depth = max_depth
-             self.max_iterations = max_iterations
-             self.llm_kwargs = llm_kwargs
-
-         self._current_depth = _current_depth
-         self.model = model
-
-         self.repl = REPLExecutor()
-
-         # Stats
-         self._llm_calls = 0
-         self._iterations = 0
-
-     def completion(
-         self,
-         query: str = "",
-         context: str = "",
-         **kwargs: Any
-     ) -> str:
-         """
-         Sync wrapper for acompletion.
-
-         Args:
-             query: User query (optional if query is in context)
-             context: Context to process (optional, can pass query here)
-             **kwargs: Additional LiteLLM parameters
-
-         Returns:
-             Final answer string
-
-         Examples:
-             # Standard usage
-             rlm.completion(query="Summarize this", context=document)
-
-             # Query in context (RLM will extract task)
-             rlm.completion(context="Summarize this document: ...")
-
-             # Single string (treat as context)
-             rlm.completion("Process this text and extract dates")
-         """
-         # If only one argument provided, treat it as context
-         if query and not context:
-             context = query
-             query = ""
-
-         return asyncio.run(self.acompletion(query, context, **kwargs))
-
-     async def acompletion(
-         self,
-         query: str = "",
-         context: str = "",
-         **kwargs: Any
-     ) -> str:
-         """
-         Main async completion method.
-
-         Args:
-             query: User query (optional if query is in context)
-             context: Context to process (optional, can pass query here)
-             **kwargs: Additional LiteLLM parameters
-
-         Returns:
-             Final answer string
-
-         Raises:
-             MaxIterationsError: If max iterations exceeded
-             MaxDepthError: If max recursion depth exceeded
-
-         Examples:
-             # Explicit query and context
-             await rlm.acompletion(query="What is this?", context=doc)
-
-             # Query embedded in context
-             await rlm.acompletion(context="Extract all dates from: ...")
-
-             # LLM will figure out the task
-             await rlm.acompletion(context=document_with_instructions)
-         """
-         # If only query provided, treat it as context
-         if query and not context:
-             context = query
-             query = ""
-         if self._current_depth >= self.max_depth:
-             raise MaxDepthError(f"Max recursion depth ({self.max_depth}) exceeded")
-
-         # Initialize REPL environment
-         repl_env = self._build_repl_env(query, context)
-
-         # Build initial messages
-         system_prompt = build_system_prompt(len(context), self._current_depth)
-         messages: List[Message] = [
-             {"role": "system", "content": system_prompt},
-             {"role": "user", "content": query}
-         ]
-
-         # Main loop
-         for iteration in range(self.max_iterations):
-             self._iterations = iteration + 1
-
-             # Call LLM
-             response = await self._call_llm(messages, **kwargs)
-
-             # Check for FINAL
-             if is_final(response):
-                 answer = parse_response(response, repl_env)
-                 if answer is not None:
-                     return answer
-
-             # Execute code in REPL
-             try:
-                 exec_result = self.repl.execute(response, repl_env)
-             except REPLError as e:
-                 exec_result = f"Error: {str(e)}"
-             except Exception as e:
-                 exec_result = f"Unexpected error: {str(e)}"
-
-             # Add to conversation
-             messages.append({"role": "assistant", "content": response})
-             messages.append({"role": "user", "content": exec_result})
-
-         raise MaxIterationsError(
-             f"Max iterations ({self.max_iterations}) exceeded without FINAL()"
-         )
-
-     async def _call_llm(
-         self,
-         messages: List[Message],
-         **kwargs: Any
-     ) -> str:
-         """
-         Call LLM API.
-
-         Args:
-             messages: Conversation messages
-             **kwargs: Additional parameters (can override model here)
-
-         Returns:
-             LLM response text
-         """
-         self._llm_calls += 1
-
-         # Choose model based on depth
-         default_model = self.model if self._current_depth == 0 else self.recursive_model
-
-         # Allow override via kwargs
-         model = kwargs.pop('model', default_model)
-
-         # Merge kwargs
-         call_kwargs = {**self.llm_kwargs, **kwargs}
-         if self.api_base:
-             call_kwargs['api_base'] = self.api_base
-         if self.api_key:
-             call_kwargs['api_key'] = self.api_key
-
-         # Call LiteLLM
-         response = await litellm.acompletion(
-             model=model,
-             messages=messages,
-             **call_kwargs
-         )
-
-         # Extract text
-         return response.choices[0].message.content
-
-     def _build_repl_env(self, query: str, context: str) -> Dict[str, Any]:
-         """
-         Build REPL environment.
-
-         Args:
-             query: User query
-             context: Context string
-
-         Returns:
-             Environment dict
-         """
-         env: Dict[str, Any] = {
-             'context': context,
-             'query': query,
-             'recursive_llm': self._make_recursive_fn(),
-             're': re,  # Whitelist re module
-         }
-         return env
-
-     def _make_recursive_fn(self) -> Any:
-         """
-         Create recursive LLM function for REPL.
-
-         Returns:
-             Async function that can be called from REPL
-         """
-         async def recursive_llm(sub_query: str, sub_context: str) -> str:
-             """
-             Recursively process sub-context.
-
-             Args:
-                 sub_query: Query for sub-context
-                 sub_context: Sub-context to process
-
-             Returns:
-                 Answer from recursive call
-             """
-             if self._current_depth + 1 >= self.max_depth:
-                 return f"Max recursion depth ({self.max_depth}) reached"
-
-             # Create sub-RLM with increased depth
-             sub_rlm = RLM(
-                 model=self.recursive_model,
-                 recursive_model=self.recursive_model,
-                 api_base=self.api_base,
-                 api_key=self.api_key,
-                 max_depth=self.max_depth,
-                 max_iterations=self.max_iterations,
-                 _current_depth=self._current_depth + 1,
-                 **self.llm_kwargs
-             )
-
-             return await sub_rlm.acompletion(sub_query, sub_context)
-
-         # Wrap in sync function for REPL compatibility
-         def sync_recursive_llm(sub_query: str, sub_context: str) -> str:
-             """Sync wrapper for recursive_llm."""
-             # Check if we're in an async context
-             try:
-                 loop = asyncio.get_running_loop()
-                 # We're in async context, but REPL is sync
-                 # Create a new thread to run async code
-                 import concurrent.futures
-                 with concurrent.futures.ThreadPoolExecutor() as executor:
-                     future = executor.submit(
-                         asyncio.run,
-                         recursive_llm(sub_query, sub_context)
-                     )
-                     return future.result()
-             except RuntimeError:
-                 # No running loop, safe to use asyncio.run
-                 return asyncio.run(recursive_llm(sub_query, sub_context))
-
-         return sync_recursive_llm
-
-     @property
-     def stats(self) -> Dict[str, int]:
-         """Get execution statistics."""
-         return {
-             'llm_calls': self._llm_calls,
-             'iterations': self._iterations,
-             'depth': self._current_depth,
-         }
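
Stripped of docstrings, the control flow that the removed core.py implemented (and that the Go binary presumably reproduces) is a short loop: call the model, return if it emitted FINAL()/FINAL_VAR(), otherwise execute its output as code and feed the result back. A TypeScript sketch of just that loop, with callLlm, parseFinal, and executeInRepl passed in as hypothetical stand-ins for the surrounding machinery:

type Message = { role: 'system' | 'user' | 'assistant'; content: string };

// The iterate-until-FINAL loop from acompletion(), reduced to its essentials.
async function runLoop(
  messages: Message[],
  maxIterations: number,
  callLlm: (messages: Message[]) => Promise<string>,
  parseFinal: (response: string) => string | null,
  executeInRepl: (code: string) => Promise<string>,
): Promise<string> {
  for (let i = 0; i < maxIterations; i++) {
    const response = await callLlm(messages);
    const answer = parseFinal(response);  // FINAL("...") or FINAL_VAR(x)?
    if (answer !== null) return answer;
    const result = await executeInRepl(response);  // otherwise run it as code
    messages.push({ role: 'assistant', content: response });
    messages.push({ role: 'user', content: result });  // feed output back
  }
  throw new Error(`Max iterations (${maxIterations}) exceeded without FINAL()`);
}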
@@ -1,93 +0,0 @@
- """Parse FINAL() and FINAL_VAR() statements from LLM responses."""
-
- import re
- from typing import Optional, Dict, Any
-
-
- def extract_final(response: str) -> Optional[str]:
-     """
-     Extract answer from FINAL() statement.
-
-     Args:
-         response: LLM response text
-
-     Returns:
-         Extracted answer or None if not found
-     """
-     # Look for FINAL("answer") or FINAL('answer')
-     patterns = [
-         r'FINAL\s*\(\s*"""(.*)"""',  # FINAL("""answer""") - triple double quotes
-         r"FINAL\s*\(\s*'''(.*)'''",  # FINAL('''answer''') - triple single quotes
-         r'FINAL\s*\(\s*"([^"]*)"',  # FINAL("answer") - double quotes
-         r"FINAL\s*\(\s*'([^']*)'",  # FINAL('answer') - single quotes
-     ]
-
-     for pattern in patterns:
-         match = re.search(pattern, response, re.DOTALL)
-         if match:
-             return match.group(1).strip()
-
-     return None
-
-
- def extract_final_var(response: str, env: Dict[str, Any]) -> Optional[str]:
-     """
-     Extract answer from FINAL_VAR() statement.
-
-     Args:
-         response: LLM response text
-         env: REPL environment with variables
-
-     Returns:
-         Variable value as string or None if not found
-     """
-     # Look for FINAL_VAR(var_name)
-     match = re.search(r'FINAL_VAR\s*\(\s*(\w+)\s*\)', response)
-     if not match:
-         return None
-
-     var_name = match.group(1)
-
-     # Get variable from environment
-     if var_name in env:
-         value = env[var_name]
-         return str(value)
-
-     return None
-
-
- def is_final(response: str) -> bool:
-     """
-     Check if response contains FINAL() or FINAL_VAR().
-
-     Args:
-         response: LLM response text
-
-     Returns:
-         True if response contains final statement
-     """
-     return 'FINAL(' in response or 'FINAL_VAR(' in response
-
-
- def parse_response(response: str, env: Dict[str, Any]) -> Optional[str]:
-     """
-     Parse response for any final statement.
-
-     Args:
-         response: LLM response text
-         env: REPL environment
-
-     Returns:
-         Final answer or None
-     """
-     # Try FINAL() first
-     answer = extract_final(response)
-     if answer is not None:
-         return answer
-
-     # Try FINAL_VAR()
-     answer = extract_final_var(response, env)
-     if answer is not None:
-         return answer
-
-     return None
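
One subtlety in extract_final is that pattern order matters: the triple-quoted patterns must be tried before the plain-quoted ones, because FINAL("""answer""") also matches the plain double-quote pattern, which would capture the empty string between the first two quotes. A TypeScript transcription of the same precedence, for illustration only ([\s\S] stands in for Python's re.DOTALL):

// Transcription of extract_final from the removed parser.py.
function extractFinal(response: string): string | null {
  const patterns = [
    /FINAL\s*\(\s*"""([\s\S]*)"""/,  // FINAL("""answer""") - triple double quotes
    /FINAL\s*\(\s*'''([\s\S]*)'''/,  // FINAL('''answer''') - triple single quotes
    /FINAL\s*\(\s*"([^"]*)"/,        // FINAL("answer") - double quotes
    /FINAL\s*\(\s*'([^']*)'/,        // FINAL('answer') - single quotes
  ];
  for (const pattern of patterns) {
    const match = response.match(pattern);
    if (match) return match[1].trim();
  }
  return null;
}

// extractFinal('FINAL("three occurrences")')  -> "three occurrences"
// extractFinal('x = context.split()')         -> null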