recursive-llm-ts 1.0.3 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +19 -0
- package/dist/rlm-bridge.d.ts +1 -0
- package/dist/rlm-bridge.js +19 -4
- package/package.json +1 -1
- package/recursive-llm/src/rlm/core.py +7 -24
package/README.md
CHANGED
|
@@ -87,6 +87,7 @@ interface RLMConfig {
|
|
|
87
87
|
// Execution limits
|
|
88
88
|
max_depth?: number; // Maximum recursion depth (default: 5)
|
|
89
89
|
max_iterations?: number; // Maximum REPL iterations per call (default: 30)
|
|
90
|
+
pythonia_timeout?: number; // Python bridge timeout in ms (default: 100000ms = 100s)
|
|
90
91
|
|
|
91
92
|
// LiteLLM parameters - pass any additional parameters supported by LiteLLM
|
|
92
93
|
api_version?: string; // API version (e.g., for Azure)
|
|
@@ -174,6 +175,24 @@ const rlm = new RLM('openai/your-model', {
|
|
|
174
175
|
});
|
|
175
176
|
```
|
|
176
177
|
|
|
178
|
+
### Long-Running Processes
|
|
179
|
+
|
|
180
|
+
For large documents or queue-based processing that may take longer than the default 100s timeout:
|
|
181
|
+
|
|
182
|
+
```typescript
|
|
183
|
+
const rlm = new RLM('gpt-4o-mini', {
|
|
184
|
+
max_iterations: 50, // Allow more iterations for complex processing
|
|
185
|
+
pythonia_timeout: 600000, // 10 minutes timeout for Python bridge
|
|
186
|
+
timeout: 300 // 5 minutes timeout for LLM API calls
|
|
187
|
+
});
|
|
188
|
+
|
|
189
|
+
// Process very large document
|
|
190
|
+
const result = await rlm.completion(
|
|
191
|
+
'Summarize all key points from this document',
|
|
192
|
+
veryLargeDocument
|
|
193
|
+
);
|
|
194
|
+
```
|
|
195
|
+
|
|
177
196
|
### Other Providers
|
|
178
197
|
|
|
179
198
|
See the [LiteLLM documentation](https://docs.litellm.ai/docs/providers) for the complete list of supported providers and their configuration.
|
package/dist/rlm-bridge.d.ts
CHANGED
package/dist/rlm-bridge.js
CHANGED
|
@@ -41,6 +41,17 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
|
|
|
41
41
|
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
|
42
42
|
});
|
|
43
43
|
};
|
|
44
|
+
var __rest = (this && this.__rest) || function (s, e) {
|
|
45
|
+
var t = {};
|
|
46
|
+
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
|
|
47
|
+
t[p] = s[p];
|
|
48
|
+
if (s != null && typeof Object.getOwnPropertySymbols === "function")
|
|
49
|
+
for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
|
|
50
|
+
if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
|
|
51
|
+
t[p[i]] = s[p[i]];
|
|
52
|
+
}
|
|
53
|
+
return t;
|
|
54
|
+
};
|
|
44
55
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
45
56
|
exports.RLMBridge = void 0;
|
|
46
57
|
const pythonia_1 = require("pythonia");
|
|
@@ -67,11 +78,15 @@ class RLMBridge {
|
|
|
67
78
|
return __awaiter(this, arguments, void 0, function* (model, query, context, rlmConfig = {}) {
|
|
68
79
|
yield this.ensureRLMModule();
|
|
69
80
|
try {
|
|
70
|
-
//
|
|
81
|
+
// Extract pythonia timeout (default: 100000ms)
|
|
82
|
+
const pythoniaTimeout = rlmConfig.pythonia_timeout || 100000;
|
|
83
|
+
// Remove pythonia_timeout from config passed to Python
|
|
84
|
+
const { pythonia_timeout } = rlmConfig, pythonConfig = __rest(rlmConfig, ["pythonia_timeout"]);
|
|
85
|
+
// Create RLM instance with config, passing timeout to pythonia
|
|
71
86
|
const RLMClass = yield this.rlmModule.RLM;
|
|
72
|
-
const rlmInstance = yield RLMClass(model, rlmConfig);
|
|
73
|
-
// Call completion method
|
|
74
|
-
const result = yield rlmInstance.completion(query, context);
|
|
87
|
+
const rlmInstance = yield RLMClass(model, Object.assign(Object.assign({}, pythonConfig), { $timeout: pythoniaTimeout }));
|
|
88
|
+
// Call completion method with timeout
|
|
89
|
+
const result = yield rlmInstance.completion(query, context, { $timeout: pythoniaTimeout });
|
|
75
90
|
const stats = yield rlmInstance.stats;
|
|
76
91
|
// Convert Python stats dict to JS object
|
|
77
92
|
const statsObj = {
|
package/package.json
CHANGED
package/recursive-llm/src/rlm/core.py
CHANGED
|
@@ -54,31 +54,14 @@ class RLM:
|
|
|
54
54
|
_current_depth: Internal current depth tracker
|
|
55
55
|
**llm_kwargs: Additional LiteLLM parameters
|
|
56
56
|
"""
|
|
57
|
-
# Patch for recursive-llm-ts bug where config is passed as 2nd positional arg
|
|
58
|
-
if isinstance(recursive_model, dict):
|
|
59
|
-
config = recursive_model
|
|
60
|
-
# Reset recursive_model default
|
|
61
|
-
self.recursive_model = config.get('recursive_model', model)
|
|
62
|
-
self.api_base = config.get('api_base', api_base)
|
|
63
|
-
self.api_key = config.get('api_key', api_key)
|
|
64
|
-
self.max_depth = config.get('max_depth', max_depth)
|
|
65
|
-
self.max_iterations = config.get('max_iterations', max_iterations)
|
|
66
|
-
|
|
67
|
-
# Extract other llm kwargs
|
|
68
|
-
excluded = {'recursive_model', 'api_base', 'api_key', 'max_depth', 'max_iterations'}
|
|
69
|
-
self.llm_kwargs = {k: v for k, v in config.items() if k not in excluded}
|
|
70
|
-
# Merge with any actual kwargs passed
|
|
71
|
-
self.llm_kwargs.update(llm_kwargs)
|
|
72
|
-
else:
|
|
73
|
-
self.recursive_model = recursive_model or model
|
|
74
|
-
self.api_base = api_base
|
|
75
|
-
self.api_key = api_key
|
|
76
|
-
self.max_depth = max_depth
|
|
77
|
-
self.max_iterations = max_iterations
|
|
78
|
-
self.llm_kwargs = llm_kwargs
|
|
79
|
-
|
|
80
|
-
self._current_depth = _current_depth
|
|
81
57
|
self.model = model
|
|
58
|
+
self.recursive_model = recursive_model or model
|
|
59
|
+
self.api_base = api_base
|
|
60
|
+
self.api_key = api_key
|
|
61
|
+
self.max_depth = max_depth
|
|
62
|
+
self.max_iterations = max_iterations
|
|
63
|
+
self._current_depth = _current_depth
|
|
64
|
+
self.llm_kwargs = llm_kwargs
|
|
82
65
|
|
|
83
66
|
self.repl = REPLExecutor()
|
|
84
67
|
|