jasper-context-compactor 0.3.0 → 0.3.1

Files changed (2)
  1. package/cli.js +23 -5
  2. package/package.json +4 -2
package/cli.js CHANGED
@@ -58,9 +58,9 @@ async function detectModelContextWindow(config) {
   'openai/gpt-4': 128000,
   'openai/gpt-4-turbo': 128000,
   'openai/gpt-3.5-turbo': 16000,
-  'mlx': 8000,
-  'ollama': 8000,
-  'llama': 8000,
+  'mlx': 32000,    // Most MLX models support 32K+
+  'ollama': 32000, // Most Ollama models support 32K+
+  'llama': 32000,
   'mistral': 32000,
   'qwen': 32000,
 };
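The bumped defaults above feed `detectModelContextWindow()`. As a rough sketch of how such a defaults table is typically consulted — the helper below and its exact-then-substring matching order are assumptions for illustration, not code taken from cli.js:

```js
// Hypothetical sketch (not from cli.js) of consulting the defaults table.
// Assumption: exact ids are checked first, then keys are matched as
// substrings of the configured model id.
const CONTEXT_WINDOW_DEFAULTS = {
  'openai/gpt-4': 128000,
  'openai/gpt-4-turbo': 128000,
  'openai/gpt-3.5-turbo': 16000,
  'mlx': 32000,    // Most MLX models support 32K+
  'ollama': 32000, // Most Ollama models support 32K+
  'llama': 32000,
  'mistral': 32000,
  'qwen': 32000,
};

function lookupContextWindow(modelId, fallback = 16000) {
  if (modelId in CONTEXT_WINDOW_DEFAULTS) {
    return CONTEXT_WINDOW_DEFAULTS[modelId]; // exact match, e.g. 'openai/gpt-4'
  }
  const id = modelId.toLowerCase();
  for (const [key, tokens] of Object.entries(CONTEXT_WINDOW_DEFAULTS)) {
    if (id.includes(key)) return tokens; // first substring match wins
  }
  return fallback; // unknown model: fall back to the OpenClaw minimum
}

// e.g. lookupContextWindow('ollama/qwen2.5:7b') → 32000 (matches 'ollama')
```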
@@ -165,7 +165,7 @@ async function setup() {
 
   const checkConfig = await prompt('  Check your config for model info? (y/n): ');
 
-  let maxTokens = 8000;
+  let maxTokens = 16000; // OpenClaw minimum
   let detectedInfo = null;
 
   if (checkConfig.toLowerCase() === 'y' || checkConfig.toLowerCase() === 'yes') {
@@ -202,16 +202,34 @@ async function setup() {
     console.log('    • Mistral / Qwen (medium): 32,000');
     console.log('    • Claude / GPT-4 (large): 128,000+');
     console.log('');
+    console.log('  ⚠️  Minimum recommended: 16,000 tokens (OpenClaw requirement)');
+    console.log('');
     console.log('  Check your model\'s docs or LM Studio/Ollama settings.');
     console.log('  Config location: ~/.openclaw/openclaw.json');
     console.log('');
 
-    const customTokens = await prompt('  Enter maxTokens (default 8000): ');
+    const customTokens = await prompt('  Enter maxTokens (default 16000, minimum 16000): ');
     if (/^\d+$/.test(customTokens)) {
       maxTokens = parseInt(customTokens, 10);
+    } else {
+      maxTokens = 16000;
     }
   }
 
+  // Enforce minimum
+  const MIN_TOKENS = 16000;
+  if (maxTokens < MIN_TOKENS) {
+    console.log('');
+    console.log(`  ⚠️  Warning: ${maxTokens} tokens is below OpenClaw's minimum of ${MIN_TOKENS}.`);
+    console.log(`  Increasing to ${MIN_TOKENS} to prevent agent failures.`);
+    console.log('');
+    console.log('  If your model truly has a smaller context window, consider:');
+    console.log('    • Using a larger model (Qwen 7B+ or Mistral 7B+)');
+    console.log('    • Using the cloud fallback for complex tasks');
+    console.log('');
+    maxTokens = MIN_TOKENS;
+  }
+
   // Calculate derived values
   const keepRecentTokens = Math.floor(maxTokens * 0.25);
   const summaryMaxTokens = Math.floor(maxTokens * 0.125);
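The new setup flow therefore clamps user input to a 16,000-token floor before deriving the compaction budgets as fixed fractions of maxTokens. A self-contained sketch of that arithmetic, with the 25% and 12.5% ratios taken from the hunk above (the helper name is ours, not cli.js's):

```js
// Illustrative helper mirroring the clamp and budget math added in 0.3.1.
const MIN_TOKENS = 16000; // OpenClaw minimum

function computeBudgets(maxTokens) {
  const clamped = Math.max(maxTokens, MIN_TOKENS); // enforce the floor
  return {
    maxTokens: clamped,
    keepRecentTokens: Math.floor(clamped * 0.25),  // 25% kept verbatim
    summaryMaxTokens: Math.floor(clamped * 0.125), // 12.5% summary budget
  };
}

// At the floor: computeBudgets(8000)
// → { maxTokens: 16000, keepRecentTokens: 4000, summaryMaxTokens: 2000 }
```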
package/package.json CHANGED
@@ -1,13 +1,15 @@
 {
   "name": "jasper-context-compactor",
-  "version": "0.3.0",
+  "version": "0.3.1",
   "description": "Context compaction plugin for OpenClaw - works with local models (MLX, llama.cpp) that don't report token limits",
   "main": "index.ts",
   "bin": {
     "context-compactor": "./cli.js"
   },
   "openclaw": {
-    "extensions": ["./index.ts"]
+    "extensions": [
+      "./index.ts"
+    ]
   },
   "keywords": [
     "openclaw",