jasper-context-compactor 0.3.1 → 0.3.2

Files changed (2)
  1. package/cli.js +71 -30
  2. package/package.json +1 -1
package/cli.js CHANGED
@@ -48,30 +48,63 @@ function backupConfig() {
 }
 
 async function detectModelContextWindow(config) {
-  const model = config?.agents?.defaults?.model?.primary;
-  if (!model) return null;
+  const modelId = config?.agents?.defaults?.model?.primary;
+  if (!modelId) return null;
 
+  // Parse provider/model from the model ID (e.g., "anthropic/claude-opus-4-5")
+  const [providerName, ...modelParts] = modelId.split('/');
+  const modelName = modelParts.join('/');
+
+  // Look up in config's models.providers
+  const providers = config?.models?.providers || {};
+  const provider = providers[providerName];
+
+  if (provider?.models) {
+    // Find the model in the provider's models array
+    const modelConfig = provider.models.find(m =>
+      m.id === modelName || m.id === modelId
+    );
+
+    if (modelConfig?.contextWindow) {
+      return {
+        model: modelId,
+        tokens: modelConfig.contextWindow,
+        source: 'config',
+        maxTokens: modelConfig.maxTokens
+      };
+    }
+  }
+
+  // Fallback: check for ollama models with contextWindow in the config
+  for (const [pName, pConfig] of Object.entries(providers)) {
+    if (pConfig?.models) {
+      for (const m of pConfig.models) {
+        if (m.id && modelId.includes(m.id) && m.contextWindow) {
+          return {
+            model: modelId,
+            tokens: m.contextWindow,
+            source: 'config',
+            maxTokens: m.maxTokens
+          };
+        }
+      }
+    }
+  }
+
+  // Final fallback: known defaults for common model families
   const knownContexts = {
-    'anthropic/claude-opus': 200000,
-    'anthropic/claude-sonnet': 200000,
-    'anthropic/claude-haiku': 200000,
+    'anthropic/claude': 200000,
     'openai/gpt-4': 128000,
-    'openai/gpt-4-turbo': 128000,
-    'openai/gpt-3.5-turbo': 16000,
-    'mlx': 32000, // Most MLX models support 32K+
-    'ollama': 32000, // Most Ollama models support 32K+
-    'llama': 32000,
-    'mistral': 32000,
-    'qwen': 32000,
+    'openai/gpt-3.5': 16000,
   };
 
   for (const [pattern, tokens] of Object.entries(knownContexts)) {
-    if (model.toLowerCase().includes(pattern.toLowerCase())) {
-      return { model, tokens, source: 'detected' };
+    if (modelId.toLowerCase().includes(pattern.toLowerCase())) {
+      return { model: modelId, tokens, source: 'fallback' };
     }
   }
 
-  return { model, tokens: null, source: 'unknown' };
+  return { model: modelId, tokens: null, source: 'unknown' };
 }
 
 async function setup() {
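The new lookup walks models.providers.<provider>.models[].contextWindow, the same path the setup tips below point users at. For illustration, a hypothetical ~/.openclaw/openclaw.json fragment in the shape the code expects (provider and model names here are made up, not from the package):

{
  "agents": {
    "defaults": {
      "model": { "primary": "ollama/qwen2.5-coder" }
    }
  },
  "models": {
    "providers": {
      "ollama": {
        "models": [
          { "id": "qwen2.5-coder", "contextWindow": 32768, "maxTokens": 8192 }
        ]
      }
    }
  }
}
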
@@ -174,45 +207,53 @@ async function setup() {
     if (detectedInfo && detectedInfo.tokens) {
       console.log('');
       console.log(` ✓ Detected model: ${detectedInfo.model}`);
-      console.log(` ✓ Context window: ~${detectedInfo.tokens.toLocaleString()} tokens`);
+      console.log(` ✓ Context window: ${detectedInfo.tokens.toLocaleString()} tokens (from ${detectedInfo.source})`);
 
-      const suggested = Math.floor(detectedInfo.tokens * 0.8);
-      console.log(` → Suggested maxTokens: ${suggested.toLocaleString()} (80% of context)`);
+      // Use the actual contextWindow, apply minimum
+      let suggested = detectedInfo.tokens;
+      if (suggested < 16000) {
+        console.log(` ⚠ Model context (${suggested}) is below OpenClaw minimum (16000)`);
+        console.log(` → Will use 16,000 tokens to prevent agent failures`);
+        suggested = 16000;
+      }
       console.log('');
 
       const useDetected = await prompt(` Use ${suggested.toLocaleString()} tokens? (y/n, or enter custom): `);
 
-      if (useDetected.toLowerCase() === 'y' || useDetected.toLowerCase() === 'yes') {
+      if (useDetected.toLowerCase() === 'y' || useDetected.toLowerCase() === 'yes' || useDetected === '') {
         maxTokens = suggested;
       } else if (/^\d+$/.test(useDetected)) {
         maxTokens = parseInt(useDetected, 10);
+      } else {
+        maxTokens = suggested; // Default to suggested on invalid input
       }
     } else if (detectedInfo && detectedInfo.model) {
       console.log('');
       console.log(` ⚠ Found model: ${detectedInfo.model}`);
-      console.log(' ⚠ Could not determine context window automatically.');
+      console.log(' ⚠ No contextWindow defined in your config for this model.');
+      console.log(' 💡 Add contextWindow to your model config in openclaw.json');
     }
   }
 
-  // Manual entry if needed
-  if (maxTokens === 8000 && (!detectedInfo || !detectedInfo.tokens)) {
+  // Manual entry if no context window was detected or user declined
+  if (!detectedInfo?.tokens || maxTokens === 16000) {
+    console.log('');
+    console.log(' Could not auto-detect context window from your config.');
     console.log('');
     console.log(' Common context windows:');
-    console.log(' • MLX / llama.cpp (small): 4,000 - 8,000');
-    console.log(' • Mistral / Qwen (medium): 32,000');
-    console.log(' • Claude / GPT-4 (large): 128,000+');
+    console.log(' • Ollama / llama.cpp (small): 8,000 - 16,000');
+    console.log(' • Mistral / Qwen (medium): 32,000');
+    console.log(' • Claude / GPT-4 (large): 128,000+');
     console.log('');
-    console.log(' ⚠️ Minimum recommended: 16,000 tokens (OpenClaw requirement)');
+    console.log(' 💡 Tip: Check your model config in ~/.openclaw/openclaw.json');
+    console.log(' Look for: models.providers.<provider>.models[].contextWindow');
     console.log('');
-    console.log(' Check your model\'s docs or LM Studio/Ollama settings.');
-    console.log(' Config location: ~/.openclaw/openclaw.json');
+    console.log(' ⚠️ Minimum: 16,000 tokens (OpenClaw requirement)');
     console.log('');
 
     const customTokens = await prompt(' Enter maxTokens (default 16000, minimum 16000): ');
     if (/^\d+$/.test(customTokens)) {
       maxTokens = parseInt(customTokens, 10);
-    } else {
-      maxTokens = 16000;
     }
   }
 
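Given a config shaped like the fragment above, detection now resolves in order: exact provider/model match, substring scan across all providers, then the hard-coded family defaults. A minimal sketch of the resulting behavior, assuming detectModelContextWindow is in scope (the diff does not show how cli.js exposes it):

// Hypothetical sample config; names are illustrative, not from the package.
const sampleConfig = {
  agents: { defaults: { model: { primary: 'ollama/qwen2.5-coder' } } },
  models: {
    providers: {
      ollama: {
        models: [{ id: 'qwen2.5-coder', contextWindow: 32768, maxTokens: 8192 }]
      }
    }
  }
};

// Exact provider/model match wins: source is 'config'.
detectModelContextWindow(sampleConfig).then(info => {
  console.log(info);
  // { model: 'ollama/qwen2.5-coder', tokens: 32768, source: 'config', maxTokens: 8192 }
});

// No providers entry: falls through to the known-family table, source 'fallback'.
detectModelContextWindow({
  agents: { defaults: { model: { primary: 'anthropic/claude-sonnet-4' } } }
}).then(info => {
  console.log(info);
  // { model: 'anthropic/claude-sonnet-4', tokens: 200000, source: 'fallback' }
});
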
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "jasper-context-compactor",
-  "version": "0.3.1",
+  "version": "0.3.2",
   "description": "Context compaction plugin for OpenClaw - works with local models (MLX, llama.cpp) that don't report token limits",
   "main": "index.ts",
   "bin": {