jasper-context-compactor 0.3.4 → 0.3.6
This diff shows the changes between publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
- package/README.md +14 -3
- package/cli.js +90 -44
- package/package.json +1 -1
package/README.md
CHANGED
@@ -25,9 +25,20 @@ npx jasper-context-compactor setup
 
 1. ✅ **Back up your config** — Saves `openclaw.json` to `~/.openclaw/backups/` with restore instructions
 2. ✅ **Ask permission** — Won't read your config without consent
-3. ✅ **Detect
-4. ✅ **
-5. ✅ **
+3. ✅ **Detect local models** — Automatically identifies Ollama, llama.cpp, MLX, LM Studio providers
+4. ✅ **Suggest token limits** — Based on your model's contextWindow from config
+5. ✅ **Let you customize** — Enter your own values if auto-detection doesn't match
+6. ✅ **Update config safely** — Adds the plugin with your chosen settings
+
+### Supported Local Providers
+
+The setup automatically detects these providers (primary or fallback):
+- **Ollama** — `/ollama` endpoint
+- **llama.cpp** — llamacpp provider
+- **MLX** — mlx provider
+- **LM Studio** — lmstudio provider
+- **friend-gpu** — Custom GPU servers
+- **OpenRouter** — When routing to local models
 
 Then restart OpenClaw:
 ```bash
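For context on steps 3 and 4 above, the setup reads a handful of `openclaw.json` fields. The sketch below is hypothetical (shown as a JavaScript object): the paths `agents.defaults.model` and `models.providers` match what the cli.js changes below consult, while the concrete provider and model values are illustrative only.

```js
// Hypothetical openclaw.json fragment; the field paths mirror what cli.js reads,
// the provider/model values are made up for illustration.
const exampleConfig = {
  agents: {
    defaults: {
      model: {
        primary: 'ollama/qwen2.5',             // "provider/model", a local provider
        fallbacks: ['anthropic/claude-sonnet'] // hypothetical cloud fallback
      }
    }
  },
  models: {
    providers: {
      ollama: {
        models: [
          { id: 'qwen2.5', contextWindow: 32768, maxTokens: 8192 }
        ]
      }
    }
  }
};
```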
package/cli.js
CHANGED
@@ -47,58 +47,83 @@ function backupConfig() {
   return backupPath;
 }
 
+// Local model providers that benefit from context compaction
+const LOCAL_PROVIDERS = ['ollama', 'lmstudio', 'llamacpp', 'mlx', 'friend-gpu', 'openrouter'];
+
+function isLocalProvider(providerId) {
+  if (!providerId) return false;
+  const lower = providerId.toLowerCase();
+  return LOCAL_PROVIDERS.some(p => lower.includes(p));
+}
+
 async function detectModelContextWindow(config) {
-  const
-  if (!
+  const modelConfig = config?.agents?.defaults?.model;
+  if (!modelConfig) return null;
 
-  // Parse provider/model from the model ID (e.g., "ollama/qwen2.5")
-  const [providerName, ...modelParts] = modelId.split('/');
-  const modelName = modelParts.join('/'); // e.g., "qwen2.5"
-
-  // Look up in config's models.providers
   const providers = config?.models?.providers || {};
-
-
-
-
-
-
-
-
-
-
+
+  // Collect all model candidates: primary first, then fallbacks
+  const candidates = [];
+  if (modelConfig.primary) candidates.push(modelConfig.primary);
+  if (modelConfig.fallbacks) candidates.push(...modelConfig.fallbacks);
+
+  // Track if any local models are in the chain
+  let hasLocalModel = false;
+  let localModelInfo = null;
+
+  // Find the first candidate that has a contextWindow defined in its provider
+  for (const modelId of candidates) {
+    if (!modelId.includes('/')) continue; // Skip if no provider prefix
+
+    const [providerName, ...modelParts] = modelId.split('/');
+    const modelName = modelParts.join('/'); // e.g., "qwen2.5"
 
-  if
-
-
-
+    // Check if this is a local provider
+    if (isLocalProvider(providerName)) {
+      hasLocalModel = true;
+
+      const provider = providers[providerName];
+      const found = provider?.models?.find(m => m.id === modelName);
+
+      if (!localModelInfo || found?.contextWindow) {
+        localModelInfo = {
+          model: modelId,
+          tokens: found?.contextWindow || null,
+          source: found?.contextWindow ? 'config' : 'unknown',
+          maxTokens: found?.maxTokens,
+          isLocal: true
+        };
+      }
+    }
+
+    const provider = providers[providerName];
+    if (!provider?.models) continue;
+
+    // Find model by ID in this provider's models array
+    const found = provider.models.find(m => m.id === modelName);
+
+    if (found?.contextWindow) {
+      return {
+        model: modelId,
+        tokens: found.contextWindow,
         source: 'config',
-        maxTokens:
+        maxTokens: found.maxTokens,
+        hasLocalModel,
+        localModelInfo
       };
     }
   }
 
-  //
-
-
-
-
-
-      if (m.id === modelName || modelName.includes(m.id) || m.id.includes(modelName)) {
-        if (m.contextWindow) {
-          return {
-            model: modelId,
-            tokens: m.contextWindow,
-            source: 'config',
-            maxTokens: m.maxTokens
-          };
-        }
-      }
-    }
-  }
+  // If we have local model info but no contextWindow from config, return that
+  if (localModelInfo) {
+    return {
+      ...localModelInfo,
+      hasLocalModel: true
+    };
   }
 
-  //
+  // No contextWindow found in config - try known defaults
+  const primaryId = modelConfig.primary || '';
   const knownContexts = {
     'anthropic/claude': 200000,
     'openai/gpt-4': 128000,
@@ -106,12 +131,12 @@ async function detectModelContextWindow(config) {
   };
 
   for (const [pattern, tokens] of Object.entries(knownContexts)) {
-    if (
-      return { model:
+    if (primaryId.toLowerCase().includes(pattern.toLowerCase())) {
+      return { model: primaryId, tokens, source: 'fallback', hasLocalModel };
    }
  }
 
-  return { model:
+  return { model: primaryId, tokens: null, source: 'unknown', hasLocalModel };
 }
 
 async function setup() {
@@ -216,6 +241,27 @@ async function setup() {
     console.log(' [DEBUG] detectedInfo:', JSON.stringify(detectedInfo, null, 2));
   }
 
+  // Show local model recommendation
+  if (detectedInfo?.hasLocalModel || detectedInfo?.isLocal) {
+    console.log('');
+    console.log(' 🎯 Local model detected in your config!');
+    const localModel = detectedInfo.localModelInfo?.model || detectedInfo.model;
+    console.log(` → ${localModel}`);
+    console.log('');
+    console.log(' Local models (Ollama, llama.cpp, MLX, LM Studio) don\'t report');
+    console.log(' context overflow errors — they silently truncate or produce garbage.');
+    console.log(' This plugin is HIGHLY recommended for your setup.');
+  } else if (detectedInfo?.model) {
+    // Cloud-only config
+    const providerName = detectedInfo.model.split('/')[0] || '';
+    if (['anthropic', 'openai', 'google'].includes(providerName.toLowerCase())) {
+      console.log('');
+      console.log(' ℹ️ Cloud-only config detected (no local models).');
+      console.log(' Cloud APIs report context limits properly, so this plugin');
+      console.log(' is less critical — but can still help with token costs.');
+    }
+  }
+
   if (detectedInfo && detectedInfo.tokens) {
     console.log('');
     console.log(` ✓ Detected model: ${detectedInfo.model}`);
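To make the new resolution order concrete, here is a condensed, standalone sketch of what `detectModelContextWindow` now does. This is not the package's public API (the real function is internal to `cli.js`); the name `resolveContextWindow` and the sample values are hypothetical.

```js
// Condensed illustration of the detection flow added in 0.3.6 (not the real export).
const LOCAL_PROVIDERS = ['ollama', 'lmstudio', 'llamacpp', 'mlx', 'friend-gpu', 'openrouter'];
const isLocal = (id) => !!id && LOCAL_PROVIDERS.some(p => id.toLowerCase().includes(p));

function resolveContextWindow(config) {
  const modelConfig = config?.agents?.defaults?.model;
  if (!modelConfig) return null;
  const providers = config?.models?.providers || {};

  // Primary first, then fallbacks: the same candidate order as cli.js.
  const candidates = [modelConfig.primary, ...(modelConfig.fallbacks || [])].filter(Boolean);

  for (const modelId of candidates) {
    if (!modelId.includes('/')) continue; // needs a "provider/model" prefix
    const [providerName, ...rest] = modelId.split('/');
    const found = providers[providerName]?.models?.find(m => m.id === rest.join('/'));
    if (found?.contextWindow) {
      return { model: modelId, tokens: found.contextWindow, source: 'config', isLocal: isLocal(providerName) };
    }
  }
  return { model: modelConfig.primary || '', tokens: null, source: 'unknown' };
}

// With the hypothetical exampleConfig sketched after the README diff:
// resolveContextWindow(exampleConfig)
//   → { model: 'ollama/qwen2.5', tokens: 32768, source: 'config', isLocal: true }
```

Note that matching is substring-based: `isLocalProvider` flags any provider id that contains one of the known names (so a hypothetical provider called `my-ollama-proxy` would also count as local), and `openrouter` is included because it can route to local models, as the README section notes.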
package/package.json
CHANGED