jasper-context-compactor 0.3.0 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/cli.js +88 -29
  2. package/package.json +4 -2
package/cli.js CHANGED
@@ -48,30 +48,63 @@ function backupConfig() {
 }
 
 async function detectModelContextWindow(config) {
-  const model = config?.agents?.defaults?.model?.primary;
-  if (!model) return null;
+  const modelId = config?.agents?.defaults?.model?.primary;
+  if (!modelId) return null;
 
+  // Parse provider/model from the model ID (e.g., "anthropic/claude-opus-4-5")
+  const [providerName, ...modelParts] = modelId.split('/');
+  const modelName = modelParts.join('/');
+
+  // Look up in config's models.providers
+  const providers = config?.models?.providers || {};
+  const provider = providers[providerName];
+
+  if (provider?.models) {
+    // Find the model in the provider's models array
+    const modelConfig = provider.models.find(m =>
+      m.id === modelName || m.id === modelId
+    );
+
+    if (modelConfig?.contextWindow) {
+      return {
+        model: modelId,
+        tokens: modelConfig.contextWindow,
+        source: 'config',
+        maxTokens: modelConfig.maxTokens
+      };
+    }
+  }
+
+  // Fallback: check for ollama models with contextWindow in the config
+  for (const [pName, pConfig] of Object.entries(providers)) {
+    if (pConfig?.models) {
+      for (const m of pConfig.models) {
+        if (m.id && modelId.includes(m.id) && m.contextWindow) {
+          return {
+            model: modelId,
+            tokens: m.contextWindow,
+            source: 'config',
+            maxTokens: m.maxTokens
+          };
+        }
+      }
+    }
+  }
+
+  // Final fallback: known defaults for common model families
   const knownContexts = {
-    'anthropic/claude-opus': 200000,
-    'anthropic/claude-sonnet': 200000,
-    'anthropic/claude-haiku': 200000,
+    'anthropic/claude': 200000,
     'openai/gpt-4': 128000,
-    'openai/gpt-4-turbo': 128000,
-    'openai/gpt-3.5-turbo': 16000,
-    'mlx': 8000,
-    'ollama': 8000,
-    'llama': 8000,
-    'mistral': 32000,
-    'qwen': 32000,
+    'openai/gpt-3.5': 16000,
   };
 
   for (const [pattern, tokens] of Object.entries(knownContexts)) {
-    if (model.toLowerCase().includes(pattern.toLowerCase())) {
-      return { model, tokens, source: 'detected' };
+    if (modelId.toLowerCase().includes(pattern.toLowerCase())) {
+      return { model: modelId, tokens, source: 'fallback' };
     }
   }
 
-  return { model, tokens: null, source: 'unknown' };
+  return { model: modelId, tokens: null, source: 'unknown' };
 }
 
 async function setup() {
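
Note: the config lookup added above assumes ~/.openclaw/openclaw.json is shaped roughly like the sketch below. This example is illustrative only (the "ollama" provider and "qwen2.5-coder" entry are hypothetical placeholders), but the paths it exercises are the ones the new code actually reads: agents.defaults.model.primary, models.providers.<provider>.models[].contextWindow, and the optional maxTokens.

  {
    "agents": {
      "defaults": {
        "model": { "primary": "ollama/qwen2.5-coder" }
      }
    },
    "models": {
      "providers": {
        "ollama": {
          "models": [
            { "id": "qwen2.5-coder", "contextWindow": 32000, "maxTokens": 8192 }
          ]
        }
      }
    }
  }

With this config, detectModelContextWindow splits "ollama/qwen2.5-coder" into provider "ollama" and model "qwen2.5-coder", finds the matching entry, and returns { model: 'ollama/qwen2.5-coder', tokens: 32000, source: 'config', maxTokens: 8192 }.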
@@ -165,7 +198,7 @@ async function setup() {
 
   const checkConfig = await prompt(' Check your config for model info? (y/n): ');
 
-  let maxTokens = 8000;
+  let maxTokens = 16000; // OpenClaw minimum
   let detectedInfo = null;
 
   if (checkConfig.toLowerCase() === 'y' || checkConfig.toLowerCase() === 'yes') {
@@ -174,44 +207,70 @@ async function setup() {
     if (detectedInfo && detectedInfo.tokens) {
       console.log('');
       console.log(` ✓ Detected model: ${detectedInfo.model}`);
-      console.log(` ✓ Context window: ~${detectedInfo.tokens.toLocaleString()} tokens`);
+      console.log(` ✓ Context window: ${detectedInfo.tokens.toLocaleString()} tokens (from ${detectedInfo.source})`);
 
-      const suggested = Math.floor(detectedInfo.tokens * 0.8);
-      console.log(` → Suggested maxTokens: ${suggested.toLocaleString()} (80% of context)`);
+      // Use the actual contextWindow, apply minimum
+      let suggested = detectedInfo.tokens;
+      if (suggested < 16000) {
+        console.log(` ⚠ Model context (${suggested}) is below OpenClaw minimum (16000)`);
+        console.log(` → Will use 16,000 tokens to prevent agent failures`);
+        suggested = 16000;
+      }
       console.log('');
 
       const useDetected = await prompt(` Use ${suggested.toLocaleString()} tokens? (y/n, or enter custom): `);
 
-      if (useDetected.toLowerCase() === 'y' || useDetected.toLowerCase() === 'yes') {
+      if (useDetected.toLowerCase() === 'y' || useDetected.toLowerCase() === 'yes' || useDetected === '') {
         maxTokens = suggested;
       } else if (/^\d+$/.test(useDetected)) {
         maxTokens = parseInt(useDetected, 10);
+      } else {
+        maxTokens = suggested; // Default to suggested on invalid input
       }
     } else if (detectedInfo && detectedInfo.model) {
       console.log('');
       console.log(` ⚠ Found model: ${detectedInfo.model}`);
-      console.log(' ⚠ Could not determine context window automatically.');
+      console.log(' ⚠ No contextWindow defined in your config for this model.');
+      console.log(' 💡 Add contextWindow to your model config in openclaw.json');
     }
   }
 
-  // Manual entry if needed
-  if (maxTokens === 8000 && (!detectedInfo || !detectedInfo.tokens)) {
+  // Manual entry if no context window was detected or user declined
+  if (!detectedInfo?.tokens || maxTokens === 16000) {
+    console.log('');
+    console.log(' Could not auto-detect context window from your config.');
     console.log('');
     console.log(' Common context windows:');
-    console.log(' • MLX / llama.cpp (small): 4,000 - 8,000');
-    console.log(' • Mistral / Qwen (medium): 32,000');
-    console.log(' • Claude / GPT-4 (large): 128,000+');
+    console.log(' • Ollama / llama.cpp (small): 8,000 - 16,000');
+    console.log(' • Mistral / Qwen (medium): 32,000');
+    console.log(' • Claude / GPT-4 (large): 128,000+');
     console.log('');
-    console.log(' Check your model\'s docs or LM Studio/Ollama settings.');
-    console.log(' Config location: ~/.openclaw/openclaw.json');
+    console.log(' 💡 Tip: Check your model config in ~/.openclaw/openclaw.json');
+    console.log(' Look for: models.providers.<provider>.models[].contextWindow');
+    console.log('');
+    console.log(' ⚠️ Minimum: 16,000 tokens (OpenClaw requirement)');
     console.log('');
 
-    const customTokens = await prompt(' Enter maxTokens (default 8000): ');
+    const customTokens = await prompt(' Enter maxTokens (default 16000, minimum 16000): ');
     if (/^\d+$/.test(customTokens)) {
       maxTokens = parseInt(customTokens, 10);
     }
   }
 
+  // Enforce minimum
+  const MIN_TOKENS = 16000;
+  if (maxTokens < MIN_TOKENS) {
+    console.log('');
+    console.log(` ⚠️ Warning: ${maxTokens} tokens is below OpenClaw's minimum of ${MIN_TOKENS}.`);
+    console.log(` Increasing to ${MIN_TOKENS} to prevent agent failures.`);
+    console.log('');
+    console.log(' If your model truly has a smaller context window, consider:');
+    console.log(' • Using a larger model (Qwen 7B+ or Mistral 7B+)');
+    console.log(' • Using the cloud fallback for complex tasks');
+    console.log('');
+    maxTokens = MIN_TOKENS;
+  }
+
   // Calculate derived values
   const keepRecentTokens = Math.floor(maxTokens * 0.25);
   const summaryMaxTokens = Math.floor(maxTokens * 0.125);
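
For a concrete sense of the derived budgets at the new floor, the snippet below mirrors the two Math.floor lines above with maxTokens at the enforced minimum of 16,000. It is a standalone sketch, and the interpretation of each value in the trailing comments is inferred from the variable names rather than stated in the diff.

  // Derived-budget arithmetic at the enforced 16,000-token minimum (standalone sketch)
  const maxTokens = 16000;
  const keepRecentTokens = Math.floor(maxTokens * 0.25);   // 4000, presumably the recent-history budget
  const summaryMaxTokens = Math.floor(maxTokens * 0.125);  // 2000, presumably the summary budget
  console.log(keepRecentTokens, summaryMaxTokens);         // prints: 4000 2000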
package/package.json CHANGED
@@ -1,13 +1,15 @@
 {
   "name": "jasper-context-compactor",
-  "version": "0.3.0",
+  "version": "0.3.2",
   "description": "Context compaction plugin for OpenClaw - works with local models (MLX, llama.cpp) that don't report token limits",
   "main": "index.ts",
   "bin": {
     "context-compactor": "./cli.js"
   },
   "openclaw": {
-    "extensions": ["./index.ts"]
+    "extensions": [
+      "./index.ts"
+    ]
   },
   "keywords": [
     "openclaw",