jasper-context-compactor 0.3.5 → 0.3.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +14 -3
  2. package/cli.js +86 -3
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -25,9 +25,20 @@ npx jasper-context-compactor setup
 
  1. ✅ **Back up your config** — Saves `openclaw.json` to `~/.openclaw/backups/` with restore instructions
  2. ✅ **Ask permission** — Won't read your config without consent
- 3. ✅ **Detect your model** — Suggests appropriate token limits based on your setup
- 4. ✅ **Let you customize** — Enter your own values if auto-detection doesn't match
- 5. ✅ **Update config safely** — Adds the plugin with your chosen settings
+ 3. ✅ **Detect local models** — Automatically identifies Ollama, llama.cpp, MLX, LM Studio providers
+ 4. ✅ **Suggest token limits** — Based on your model's contextWindow from config
+ 5. ✅ **Let you customize** — Enter your own values if auto-detection doesn't match
+ 6. ✅ **Update config safely** — Adds the plugin with your chosen settings
+
+ ### Supported Local Providers
+
+ The setup automatically detects these providers (primary or fallback):
+ - **Ollama** — `/ollama` endpoint
+ - **llama.cpp** — llamacpp provider
+ - **MLX** — mlx provider
+ - **LM Studio** — lmstudio provider
+ - **friend-gpu** — Custom GPU servers
+ - **OpenRouter** — When routing to local models
 
  Then restart OpenClaw:
  ```bash
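
For context, the detection described above reads a handful of fields from `openclaw.json`. Below is a minimal sketch of that shape, assembled from the accessors in the `cli.js` diff that follows (`agents.defaults.model`, `providers`, `baseUrl`, `models[].contextWindow`, `models[].maxTokens`); the surrounding structure and sample values are illustrative, not an official OpenClaw schema:

```json
{
  "agents": {
    "defaults": {
      "model": {
        "primary": "ollama/qwen2.5",
        "fallbacks": []
      }
    }
  },
  "providers": {
    "ollama": {
      "baseUrl": "http://localhost:11434",
      "models": [
        { "id": "qwen2.5", "contextWindow": 32768, "maxTokens": 4096 }
      ]
    }
  }
}
```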
package/cli.js CHANGED
@@ -47,6 +47,45 @@ function backupConfig() {
    return backupPath;
  }
 
+ // Local model providers that benefit from context compaction
+ const LOCAL_PROVIDER_NAMES = ['ollama', 'lmstudio', 'llamacpp', 'mlx', 'friend-gpu', 'openrouter'];
+
+ // URLs that indicate local/Ollama endpoints
+ const LOCAL_URL_PATTERNS = [
+   ':11434',             // Ollama default port
+   'localhost',
+   '127.0.0.1',
+   '0.0.0.0',
+   /100\.\d+\.\d+\.\d+/, // Tailscale
+   /192\.168\.\d+\.\d+/, // Local network
+   /10\.\d+\.\d+\.\d+/,  // Private network
+ ];
+
+ function isLocalProvider(providerId, providers = {}) {
+   if (!providerId) return false;
+   const lower = providerId.toLowerCase();
+
+   // Check provider name
+   if (LOCAL_PROVIDER_NAMES.some(p => lower.includes(p))) {
+     return true;
+   }
+
+   // Check provider's baseUrl for local patterns
+   const provider = providers[providerId];
+   if (provider?.baseUrl) {
+     const url = provider.baseUrl.toLowerCase();
+     for (const pattern of LOCAL_URL_PATTERNS) {
+       if (pattern instanceof RegExp) {
+         if (pattern.test(url)) return true;
+       } else {
+         if (url.includes(pattern)) return true;
+       }
+     }
+   }
+
+   return false;
+ }
+
  async function detectModelContextWindow(config) {
    const modelConfig = config?.agents?.defaults?.model;
    if (!modelConfig) return null;
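
A quick sketch of how the new `isLocalProvider` helper behaves. The provider ids and URLs here are hypothetical; the results follow directly from `LOCAL_PROVIDER_NAMES` and `LOCAL_URL_PATTERNS` as defined above:

```js
// Hypothetical provider map; the helper only consults each entry's baseUrl.
const providers = {
  'my-llamacpp': { baseUrl: 'http://127.0.0.1:8080/v1' },
  'acme-cloud': { baseUrl: 'https://api.acme.example' },
};

isLocalProvider('ollama', providers);      // true: name matches LOCAL_PROVIDER_NAMES
isLocalProvider('my-llamacpp', providers); // true: contains 'llamacpp' (baseUrl never checked)
isLocalProvider('acme-cloud', providers);  // false: no name match, no local URL pattern
isLocalProvider(undefined, providers);     // false: guarded by the early return
```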
@@ -58,6 +97,10 @@ async function detectModelContextWindow(config) {
    if (modelConfig.primary) candidates.push(modelConfig.primary);
    if (modelConfig.fallbacks) candidates.push(...modelConfig.fallbacks);
 
+   // Track if any local models are in the chain
+   let hasLocalModel = false;
+   let localModelInfo = null;
+
    // Find the first candidate that has a contextWindow defined in its provider
    for (const modelId of candidates) {
      if (!modelId.includes('/')) continue; // Skip if no provider prefix
@@ -65,6 +108,24 @@ async function detectModelContextWindow(config) {
      const [providerName, ...modelParts] = modelId.split('/');
      const modelName = modelParts.join('/'); // e.g., "qwen2.5"
 
+     // Check if this is a local provider (by name or baseUrl)
+     if (isLocalProvider(providerName, providers)) {
+       hasLocalModel = true;
+
+       const provider = providers[providerName];
+       const found = provider?.models?.find(m => m.id === modelName);
+
+       if (!localModelInfo || found?.contextWindow) {
+         localModelInfo = {
+           model: modelId,
+           tokens: found?.contextWindow || null,
+           source: found?.contextWindow ? 'config' : 'unknown',
+           maxTokens: found?.maxTokens,
+           isLocal: true
+         };
+       }
+     }
+
      const provider = providers[providerName];
      if (!provider?.models) continue;
 
@@ -76,11 +137,21 @@ async function detectModelContextWindow(config) {
          model: modelId,
          tokens: found.contextWindow,
          source: 'config',
-         maxTokens: found.maxTokens
+         maxTokens: found.maxTokens,
+         hasLocalModel,
+         localModelInfo
        };
      }
    }
 
+   // If we have local model info but no contextWindow from config, return that
+   if (localModelInfo) {
+     return {
+       ...localModelInfo,
+       hasLocalModel: true
+     };
+   }
+
    // No contextWindow found in config - try known defaults
    const primaryId = modelConfig.primary || '';
    const knownContexts = {
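
To make the new return shape concrete, here is a hypothetical call using the config sketch shown after the README diff above, and assuming the `providers` map inside the function is populated from the config's provider section (those lines are elided from this diff):

```js
const info = await detectModelContextWindow(config);
// With ollama/qwen2.5 declaring contextWindow: 32768 and maxTokens: 4096:
// info = {
//   model: 'ollama/qwen2.5', tokens: 32768, source: 'config', maxTokens: 4096,
//   hasLocalModel: true,
//   localModelInfo: { model: 'ollama/qwen2.5', tokens: 32768, source: 'config',
//                     maxTokens: 4096, isLocal: true }
// }
//
// If the model entry declared no contextWindow, the new early return would yield:
// { model: 'ollama/qwen2.5', tokens: null, source: 'unknown',
//   maxTokens: undefined, isLocal: true, hasLocalModel: true }
```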
@@ -91,11 +162,11 @@ async function detectModelContextWindow(config) {
 
    for (const [pattern, tokens] of Object.entries(knownContexts)) {
      if (primaryId.toLowerCase().includes(pattern.toLowerCase())) {
-       return { model: primaryId, tokens, source: 'fallback' };
+       return { model: primaryId, tokens, source: 'fallback', hasLocalModel };
      }
    }
 
-   return { model: primaryId, tokens: null, source: 'unknown' };
+   return { model: primaryId, tokens: null, source: 'unknown', hasLocalModel };
  }
 
  async function setup() {
@@ -200,6 +271,18 @@ async function setup() {
      console.log(' [DEBUG] detectedInfo:', JSON.stringify(detectedInfo, null, 2));
    }
 
+   // Show local model recommendation
+   if (detectedInfo?.hasLocalModel || detectedInfo?.isLocal) {
+     console.log('');
+     console.log(' 🎯 Local model detected in your config!');
+     const localModel = detectedInfo.localModelInfo?.model || detectedInfo.model;
+     console.log(` → ${localModel}`);
+     console.log('');
+     console.log(' Local models (Ollama, llama.cpp, MLX, LM Studio) don\'t report');
+     console.log(' context overflow errors — they silently truncate or produce garbage.');
+     console.log(' This plugin is HIGHLY recommended for your setup.');
+   }
+
    if (detectedInfo && detectedInfo.tokens) {
      console.log('');
      console.log(` ✓ Detected model: ${detectedInfo.model}`);
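
When the local-model branch fires, the setup output looks roughly like this (assembled from the `console.log` calls above, with a hypothetical `ollama/qwen2.5` model; exact spacing may differ):

```
 🎯 Local model detected in your config!
 → ollama/qwen2.5

 Local models (Ollama, llama.cpp, MLX, LM Studio) don't report
 context overflow errors — they silently truncate or produce garbage.
 This plugin is HIGHLY recommended for your setup.
```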
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "jasper-context-compactor",
-   "version": "0.3.5",
+   "version": "0.3.7",
    "description": "Context compaction plugin for OpenClaw - works with local models (MLX, llama.cpp) that don't report token limits",
    "main": "index.ts",
    "bin": {