jasper-context-compactor 0.3.6 → 0.3.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/README.md +5 -2
  2. package/cli.js +35 -14
  3. package/index.ts +19 -1
  4. package/package.json +1 -1
package/README.md CHANGED
@@ -33,12 +33,13 @@ npx jasper-context-compactor setup
  ### Supported Local Providers

  The setup automatically detects these providers (primary or fallback):
- - **Ollama** — `/ollama` endpoint
+ - **Ollama** — Any provider with `ollama` in name or `:11434` in baseUrl
  - **llama.cpp** — llamacpp provider
  - **MLX** — mlx provider
  - **LM Studio** — lmstudio provider
  - **friend-gpu** — Custom GPU servers
  - **OpenRouter** — When routing to local models
+ - **Local network** — Any provider with localhost, 127.0.0.1, or Tailscale IP in baseUrl

  Then restart OpenClaw:
  ```bash
@@ -81,7 +82,8 @@ The setup configures these values in `~/.openclaw/openclaw.json`:
        "maxTokens": 8000,
        "keepRecentTokens": 2000,
        "summaryMaxTokens": 1000,
-       "charsPerToken": 4
+       "charsPerToken": 4,
+       "modelFilter": ["ollama", "lmstudio"]
      }
    }
  }
@@ -95,6 +97,7 @@ The setup configures these values in `~/.openclaw/openclaw.json`:
  | `keepRecentTokens` | Recent context to preserve (default: 25% of max) |
  | `summaryMaxTokens` | Max tokens for the summary (default: 12.5% of max) |
  | `charsPerToken` | Token estimation ratio (4 works for English) |
+ | `modelFilter` | (Optional) Only compact for these providers. If not set, compacts all sessions. |

  ## Restoring Your Config

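For reference, a sketch of how these options sit in `~/.openclaw/openclaw.json` once setup writes them. The diff hunk starts mid-file, so the outer `plugins` key and the plugin entry name below are assumed rather than confirmed by the diff:

```jsonc
// Hypothetical outer nesting; only the option keys and values come from the diff.
{
  "plugins": {
    "jasper-context-compactor": {
      "maxTokens": 8000,
      "keepRecentTokens": 2000,
      "summaryMaxTokens": 1000,
      "charsPerToken": 4,
      "modelFilter": ["ollama", "lmstudio"] // new in 0.3.8; omit to compact all sessions
    }
  }
}
```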
package/cli.js CHANGED
@@ -48,12 +48,42 @@ function backupConfig() {
  }

  // Local model providers that benefit from context compaction
- const LOCAL_PROVIDERS = ['ollama', 'lmstudio', 'llamacpp', 'mlx', 'friend-gpu', 'openrouter'];
+ const LOCAL_PROVIDER_NAMES = ['ollama', 'lmstudio', 'llamacpp', 'mlx', 'friend-gpu', 'openrouter'];

- function isLocalProvider(providerId) {
+ // URLs that indicate local/Ollama endpoints
+ const LOCAL_URL_PATTERNS = [
+   ':11434',              // Ollama default port
+   'localhost',
+   '127.0.0.1',
+   '0.0.0.0',
+   /100\.\d+\.\d+\.\d+/,  // Tailscale
+   /192\.168\.\d+\.\d+/,  // Local network
+   /10\.\d+\.\d+\.\d+/,   // Private network
+ ];
+
+ function isLocalProvider(providerId, providers = {}) {
    if (!providerId) return false;
    const lower = providerId.toLowerCase();
-   return LOCAL_PROVIDERS.some(p => lower.includes(p));
+
+   // Check provider name
+   if (LOCAL_PROVIDER_NAMES.some(p => lower.includes(p))) {
+     return true;
+   }
+
+   // Check provider's baseUrl for local patterns
+   const provider = providers[providerId];
+   if (provider?.baseUrl) {
+     const url = provider.baseUrl.toLowerCase();
+     for (const pattern of LOCAL_URL_PATTERNS) {
+       if (pattern instanceof RegExp) {
+         if (pattern.test(url)) return true;
+       } else {
+         if (url.includes(pattern)) return true;
+       }
+     }
+   }
+
+   return false;
  }

  async function detectModelContextWindow(config) {
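To see what the new baseUrl path buys, here is a minimal sketch of calling the rewritten `isLocalProvider` with a hypothetical providers map; the provider names and URLs below are illustrative, not from the package:

```js
// Hypothetical providers map, in the shape cli.js reads from openclaw.json.
const providers = {
  'my-gpu-box': { baseUrl: 'http://100.64.21.7:11434/v1' }, // Tailscale IP, Ollama port
  'anthropic':  { baseUrl: 'https://api.anthropic.com' },
};

isLocalProvider('ollama');                // true: name match, as in 0.3.6
isLocalProvider('my-gpu-box', providers); // true: new in 0.3.8, baseUrl hits ':11434' and the Tailscale regex
isLocalProvider('anthropic', providers);  // false: no name or URL pattern matches
```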
@@ -78,8 +108,8 @@ async function detectModelContextWindow(config) {
    const [providerName, ...modelParts] = modelId.split('/');
    const modelName = modelParts.join('/'); // e.g., "qwen2.5"

-   // Check if this is a local provider
-   if (isLocalProvider(providerName)) {
+   // Check if this is a local provider (by name or baseUrl)
+   if (isLocalProvider(providerName, providers)) {
      hasLocalModel = true;

      const provider = providers[providerName];
@@ -251,15 +281,6 @@ async function setup() {
      console.log(' Local models (Ollama, llama.cpp, MLX, LM Studio) don\'t report');
      console.log(' context overflow errors — they silently truncate or produce garbage.');
      console.log(' This plugin is HIGHLY recommended for your setup.');
-   } else if (detectedInfo?.model) {
-     // Cloud-only config
-     const providerName = detectedInfo.model.split('/')[0] || '';
-     if (['anthropic', 'openai', 'google'].includes(providerName.toLowerCase())) {
-       console.log('');
-       console.log(' ℹ️ Cloud-only config detected (no local models).');
-       console.log(' Cloud APIs report context limits properly, so this plugin');
-       console.log(' is less critical — but can still help with token costs.');
-     }
    }

    if (detectedInfo && detectedInfo.tokens) {
package/index.ts CHANGED
@@ -24,6 +24,12 @@ interface PluginConfig {
    charsPerToken?: number;
    summaryModel?: string;
    logLevel?: 'debug' | 'info' | 'warn' | 'error';
+   /**
+    * Optional: Only run compaction when session model matches these providers.
+    * Example: ['ollama', 'lmstudio', 'friend-gpu']
+    * If not set, compaction runs for all sessions when enabled.
+    */
+   modelFilter?: string[];
  }

  interface Message {
@@ -162,8 +168,9 @@ export default function register(api: PluginApi) {
    const summaryMaxTokens = cfg.summaryMaxTokens ?? 1000;
    const charsPerToken = cfg.charsPerToken ?? 4;
    const summaryModel = cfg.summaryModel;
+   const modelFilter = cfg.modelFilter; // Optional: ['ollama', 'lmstudio', etc.]

-   api.logger.info(`[context-compactor] Initialized (maxTokens=${maxTokens}, keepRecent=${keepRecentTokens})`);
+   api.logger.info(`[context-compactor] Initialized (maxTokens=${maxTokens}, keepRecent=${keepRecentTokens}${modelFilter ? `, filter=${modelFilter.join(',')}` : ''})`);

    // ============================================================================
    // Core: before_agent_start hook
@@ -173,12 +180,23 @@
      prompt?: string;
      sessionKey?: string;
      sessionId?: string;
+     model?: string;
      context?: {
        sessionFile?: string;
        messages?: Message[];
      };
    }) => {
      try {
+       // If modelFilter is set, only run for matching providers
+       if (modelFilter && modelFilter.length > 0 && event.model) {
+         const modelLower = event.model.toLowerCase();
+         const matches = modelFilter.some(filter => modelLower.includes(filter.toLowerCase()));
+         if (!matches) {
+           api.logger.debug?.(`[context-compactor] Skipping - model ${event.model} not in filter`);
+           return;
+         }
+       }
+
        // Get current messages from context or session file
        let messages: Message[] = event.context?.messages ?? [];

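The filter gate reduces to a case-insensitive substring check against the session's `provider/model` id; a quick sketch with hypothetical ids:

```js
const modelFilter = ['ollama', 'lmstudio'];
const matches = (model) =>
  modelFilter.some(f => model.toLowerCase().includes(f.toLowerCase()));

matches('ollama/qwen2.5');        // true: compaction runs
matches('lmstudio/llama-3.1-8b'); // true
matches('anthropic/some-model');  // false: the hook logs and returns early
```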
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "jasper-context-compactor",
-   "version": "0.3.6",
+   "version": "0.3.8",
    "description": "Context compaction plugin for OpenClaw - works with local models (MLX, llama.cpp) that don't report token limits",
    "main": "index.ts",
    "bin": {