@opencompress/opencompress 1.3.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/dist/index.js +45 -34
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -5,40 +5,44 @@ function getApiKey(api) {
5
5
  const auth = api.config.auth;
6
6
  return auth?.profiles?.opencompress?.credentials?.["api-key"]?.apiKey;
7
7
  }
8
- var OPENCOMPRESS_MODELS = [
9
- // OpenAI
10
- { id: "gpt-4o", name: "GPT-4o (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
11
- { id: "gpt-4o-mini", name: "GPT-4o Mini (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
12
- { id: "gpt-4.1", name: "GPT-4.1 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
13
- { id: "gpt-4.1-mini", name: "GPT-4.1 Mini (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
14
- { id: "gpt-4.1-nano", name: "GPT-4.1 Nano (Compressed)", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
15
- { id: "o3", name: "O3 (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 1e5 },
16
- { id: "o4-mini", name: "O4 Mini (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 1e5 },
17
- // Anthropic
18
- { id: "claude-sonnet-4-6", name: "Claude Sonnet 4.6 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
19
- { id: "claude-opus-4-6", name: "Claude Opus 4.6 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
20
- { id: "claude-haiku-4-5-20251001", name: "Claude Haiku 4.5 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
21
- // Google
22
- { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
23
- { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
24
- { id: "google/gemini-2.5-pro-preview", name: "Gemini 2.5 Pro Preview (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
25
- // DeepSeek
26
- { id: "deepseek/deepseek-chat-v3-0324", name: "DeepSeek V3 (Compressed)", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
27
- { id: "deepseek/deepseek-reasoner", name: "DeepSeek Reasoner (Compressed)", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
28
- // Meta
29
- { id: "meta-llama/llama-4-maverick", name: "Llama 4 Maverick (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
30
- { id: "meta-llama/llama-4-scout", name: "Llama 4 Scout (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 524288, maxTokens: 65536 },
31
- // Qwen
32
- { id: "qwen/qwen3-235b-a22b", name: "Qwen3 235B (Compressed)", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
33
- { id: "qwen/qwen3-32b", name: "Qwen3 32B (Compressed)", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
34
- // Mistral
35
- { id: "mistralai/mistral-large-2411", name: "Mistral Large (Compressed)", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 }
8
+ var FALLBACK_MODELS = [
9
+ { id: "gpt-4o", name: "GPT-4o", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
10
+ { id: "gpt-4o-mini", name: "GPT-4o Mini", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
11
+ { id: "gpt-4.1", name: "GPT-4.1", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
12
+ { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
13
+ { id: "claude-sonnet-4-6", name: "Claude Sonnet 4.6", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
14
+ { id: "claude-haiku-4-5-20251001", name: "Claude Haiku 4.5", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
15
+ { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
16
+ { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
17
+ { id: "deepseek/deepseek-chat-v3-0324", name: "DeepSeek V3", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
18
+ { id: "deepseek/deepseek-reasoner", name: "DeepSeek Reasoner", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 }
36
19
  ];
37
- function buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl) {
20
+ function readExistingModels(api) {
21
+ const providers = api.config.models?.providers;
22
+ if (!providers) return null;
23
+ const seen = /* @__PURE__ */ new Set();
24
+ const models = [];
25
+ for (const [providerId, providerConfig] of Object.entries(providers)) {
26
+ if (providerId === "opencompress") continue;
27
+ const providerModels = providerConfig.models || [];
28
+ for (const m of providerModels) {
29
+ if (m.name?.includes("\u2192")) continue;
30
+ if (seen.has(m.id)) continue;
31
+ seen.add(m.id);
32
+ models.push({
33
+ ...m,
34
+ // Zero out cost — billing handled by OpenCompress proxy
35
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }
36
+ });
37
+ }
38
+ }
39
+ return models.length > 0 ? models : null;
40
+ }
41
+ function buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl, models) {
38
42
  const config = {
39
43
  baseUrl: `${baseUrl}/v1`,
40
44
  api: "openai-completions",
41
- models: OPENCOMPRESS_MODELS
45
+ models: models || FALLBACK_MODELS
42
46
  };
43
47
  if (upstreamKey || upstreamBaseUrl) {
44
48
  config.headers = {};
@@ -115,6 +119,7 @@ var opencompressProvider = {
115
119
  });
116
120
  } catch {
117
121
  }
122
+ const modelCount = FALLBACK_MODELS.length;
118
123
  return {
119
124
  profiles: [
120
125
  {
@@ -131,7 +136,7 @@ var opencompressProvider = {
131
136
  },
132
137
  defaultModel: "gpt-4o-mini",
133
138
  notes: [
134
- `OpenCompress is ready! Connected to ${provider}.`,
139
+ `OpenCompress is ready! Connected to ${provider} (${modelCount} models).`,
135
140
  "Your LLM key is stored locally only \u2014 never on our server.",
136
141
  `Free credit: ${data.freeCredit}. Dashboard: opencompress.ai/dashboard`
137
142
  ]
@@ -154,7 +159,8 @@ var plugin = {
154
159
  const existingHeaders = api.config.models?.providers?.opencompress?.headers;
155
160
  const existingUpstreamKey = existingHeaders?.["X-Upstream-Key"];
156
161
  const existingUpstreamBaseUrl = existingHeaders?.["X-Upstream-Base-Url"];
157
- const providerModels = buildProviderModels(baseUrl, existingUpstreamKey, existingUpstreamBaseUrl);
162
+ const existingModels = readExistingModels(api);
163
+ const providerModels = buildProviderModels(baseUrl, existingUpstreamKey, existingUpstreamBaseUrl, existingModels || void 0);
158
164
  opencompressProvider.models = providerModels;
159
165
  api.registerProvider(opencompressProvider);
160
166
  if (!api.config.models) {
@@ -164,7 +170,9 @@ var plugin = {
164
170
  api.config.models.providers = {};
165
171
  }
166
172
  api.config.models.providers.opencompress = providerModels;
167
- api.logger.info("OpenCompress provider registered (20 models, 5-layer compression)");
173
+ const modelCount = existingModels ? existingModels.length : FALLBACK_MODELS.length;
174
+ const source = existingModels ? "from existing providers" : "fallback";
175
+ api.logger.info(`OpenCompress provider registered (${modelCount} models ${source}, 5-layer compression)`);
168
176
  api.registerCommand({
169
177
  name: "compress-stats",
170
178
  description: "Show OpenCompress usage statistics and savings",
@@ -280,7 +288,8 @@ var plugin = {
280
288
  provider = "google";
281
289
  upstreamBaseUrl = "https://generativelanguage.googleapis.com/v1beta/openai";
282
290
  }
283
- const updatedModels = buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl);
291
+ const existingModels2 = readExistingModels(api);
292
+ const updatedModels = buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl, existingModels2 || void 0);
284
293
  if (api.config.models?.providers) {
285
294
  api.config.models.providers.opencompress = updatedModels;
286
295
  }
@@ -295,9 +304,11 @@ var plugin = {
295
304
  });
296
305
  } catch {
297
306
  }
307
+ const modelCount2 = existingModels2 ? existingModels2.length : FALLBACK_MODELS.length;
298
308
  return {
299
309
  text: [
300
310
  `Switched to **BYOK mode** (${provider}).`,
311
+ `Loaded **${modelCount2} models** from your ${provider} account.`,
301
312
  "",
302
313
  "Your key is stored **locally only** \u2014 never sent to our server for storage.",
303
314
  "It's passed through on each request via header and discarded immediately.",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@opencompress/opencompress",
3
- "version": "1.3.0",
3
+ "version": "1.5.0",
4
4
  "description": "OpenCompress plugin for OpenClaw — automatic 5-layer prompt compression for any LLM",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",