@opencompress/opencompress 1.2.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +126 -46
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -5,40 +5,66 @@ function getApiKey(api) {
5
5
  const auth = api.config.auth;
6
6
  return auth?.profiles?.opencompress?.credentials?.["api-key"]?.apiKey;
7
7
  }
8
- var OPENCOMPRESS_MODELS = [
9
- // OpenAI
10
- { id: "gpt-4o", name: "GPT-4o (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
11
- { id: "gpt-4o-mini", name: "GPT-4o Mini (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
12
- { id: "gpt-4.1", name: "GPT-4.1 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
13
- { id: "gpt-4.1-mini", name: "GPT-4.1 Mini (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
14
- { id: "gpt-4.1-nano", name: "GPT-4.1 Nano (Compressed)", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
15
- { id: "o3", name: "O3 (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 1e5 },
16
- { id: "o4-mini", name: "O4 Mini (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 1e5 },
17
- // Anthropic
18
- { id: "claude-sonnet-4-6", name: "Claude Sonnet 4.6 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
19
- { id: "claude-opus-4-6", name: "Claude Opus 4.6 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
20
- { id: "claude-haiku-4-5-20251001", name: "Claude Haiku 4.5 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
21
- // Google
22
- { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
23
- { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
24
- { id: "google/gemini-2.5-pro-preview", name: "Gemini 2.5 Pro Preview (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
25
- // DeepSeek
26
- { id: "deepseek/deepseek-chat-v3-0324", name: "DeepSeek V3 (Compressed)", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
27
- { id: "deepseek/deepseek-reasoner", name: "DeepSeek Reasoner (Compressed)", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
28
- // Meta
29
- { id: "meta-llama/llama-4-maverick", name: "Llama 4 Maverick (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
30
- { id: "meta-llama/llama-4-scout", name: "Llama 4 Scout (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 524288, maxTokens: 65536 },
31
- // Qwen
32
- { id: "qwen/qwen3-235b-a22b", name: "Qwen3 235B (Compressed)", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
33
- { id: "qwen/qwen3-32b", name: "Qwen3 32B (Compressed)", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
34
- // Mistral
35
- { id: "mistralai/mistral-large-2411", name: "Mistral Large (Compressed)", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 }
8
+ var FALLBACK_MODELS = [
9
+ { id: "gpt-4o", name: "GPT-4o", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
10
+ { id: "gpt-4o-mini", name: "GPT-4o Mini", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
11
+ { id: "gpt-4.1", name: "GPT-4.1", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
12
+ { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
13
+ { id: "claude-sonnet-4-6", name: "Claude Sonnet 4.6", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
14
+ { id: "claude-haiku-4-5-20251001", name: "Claude Haiku 4.5", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
15
+ { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
16
+ { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
17
+ { id: "deepseek/deepseek-chat-v3-0324", name: "DeepSeek V3", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
18
+ { id: "deepseek/deepseek-reasoner", name: "DeepSeek Reasoner", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 }
36
19
  ];
37
- function buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl) {
20
+ function upstreamToModels(data) {
21
+ return data.map((m) => {
22
+ const promptPrice = m.pricing?.prompt ? parseFloat(m.pricing.prompt) * 1e6 : 0;
23
+ const completionPrice = m.pricing?.completion ? parseFloat(m.pricing.completion) * 1e6 : 0;
24
+ const isReasoning = /\bo[34]\b|reasoner|thinking|deepthink/i.test(m.id);
25
+ const inputMods = ["text"];
26
+ const archInputs = m.architecture?.input_modalities || [];
27
+ if (archInputs.includes("image") || /vision|4o|gemini|claude-.*[34]/i.test(m.id)) {
28
+ inputMods.push("image");
29
+ }
30
+ return {
31
+ id: m.id,
32
+ name: m.id,
33
+ reasoning: isReasoning,
34
+ input: inputMods,
35
+ cost: {
36
+ input: promptPrice,
37
+ output: completionPrice,
38
+ cacheRead: 0,
39
+ cacheWrite: 0
40
+ },
41
+ contextWindow: m.context_length || 128e3,
42
+ maxTokens: Math.min(m.context_length || 128e3, 65536)
43
+ };
44
+ });
45
+ }
46
+ async function fetchUpstreamModels(baseUrl, apiKey, upstreamKey, upstreamBaseUrl) {
47
+ try {
48
+ const headers = {
49
+ Authorization: `Bearer ${apiKey}`
50
+ };
51
+ if (upstreamKey) headers["X-Upstream-Key"] = upstreamKey;
52
+ if (upstreamBaseUrl) headers["X-Upstream-Base-Url"] = upstreamBaseUrl;
53
+ const res = await fetch(`${baseUrl}/v1/models`, { headers });
54
+ if (!res.ok) return null;
55
+ const data = await res.json();
56
+ const models = data.data || [];
57
+ if (models.length === 0) return null;
58
+ return upstreamToModels(models);
59
+ } catch {
60
+ return null;
61
+ }
62
+ }
63
+ function buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl, models) {
38
64
  const config = {
39
65
  baseUrl: `${baseUrl}/v1`,
40
66
  api: "openai-completions",
41
- models: OPENCOMPRESS_MODELS
67
+ models: models || FALLBACK_MODELS
42
68
  };
43
69
  if (upstreamKey || upstreamBaseUrl) {
44
70
  config.headers = {};
@@ -59,18 +85,41 @@ var opencompressProvider = {
59
85
  {
60
86
  id: "api-key",
61
87
  label: "OpenCompress",
62
- hint: "One-click setup \u2014 no API key needed",
88
+ hint: "Connect your LLM key \u2014 compress every call, save 40-70%",
63
89
  kind: "custom",
64
90
  run: async (ctx) => {
65
91
  ctx.prompter.note(
66
- "OpenCompress compresses all LLM prompts automatically.\n53% fewer tokens, 62% faster, 96% quality preserved.\nWe'll create your account now \u2014 $1 free credit included."
92
+ "OpenCompress compresses all LLM prompts automatically.\n53% fewer tokens, 62% faster, 96% quality preserved.\n\nConnect your existing LLM API key to get started.\nSupported: OpenAI, Anthropic, OpenRouter, Google"
67
93
  );
94
+ const llmKey = await ctx.prompter.text({
95
+ message: "Enter your LLM API key (OpenAI/Anthropic/OpenRouter):",
96
+ validate: (val) => {
97
+ if (!val || val.length < 10) return "Please enter a valid API key";
98
+ if (val.startsWith("sk-occ-")) return "Enter your LLM provider key, not an OpenCompress key";
99
+ return void 0;
100
+ }
101
+ });
102
+ if (typeof llmKey === "symbol") {
103
+ throw new Error("Setup cancelled");
104
+ }
105
+ let provider = "openrouter";
106
+ let upstreamBaseUrl = "https://openrouter.ai/api/v1";
107
+ if (llmKey.startsWith("sk-proj-") || llmKey.startsWith("sk-") && !llmKey.startsWith("sk-ant-") && !llmKey.startsWith("sk-or-")) {
108
+ provider = "openai";
109
+ upstreamBaseUrl = "https://api.openai.com/v1";
110
+ } else if (llmKey.startsWith("sk-ant-")) {
111
+ provider = "anthropic";
112
+ upstreamBaseUrl = "https://api.anthropic.com/v1";
113
+ } else if (llmKey.startsWith("AIza")) {
114
+ provider = "google";
115
+ upstreamBaseUrl = "https://generativelanguage.googleapis.com/v1beta/openai";
116
+ }
68
117
  const spinner = ctx.prompter.progress("Creating account...");
69
118
  try {
70
119
  const res = await fetch(`${DEFAULT_BASE_URL}/v1/provision`, {
71
120
  method: "POST",
72
121
  headers: { "Content-Type": "application/json" },
73
- body: "{}"
122
+ body: JSON.stringify({ upstreamApiKey: llmKey })
74
123
  });
75
124
  if (!res.ok) {
76
125
  const err = await res.json().catch(() => ({ error: { message: "Unknown error" } }));
@@ -81,6 +130,19 @@ var opencompressProvider = {
81
130
  }
82
131
  const data = await res.json();
83
132
  spinner.stop("Account created");
133
+ try {
134
+ await fetch(`${DEFAULT_BASE_URL}/v1/byok`, {
135
+ method: "POST",
136
+ headers: {
137
+ Authorization: `Bearer ${data.apiKey}`,
138
+ "Content-Type": "application/json"
139
+ },
140
+ body: JSON.stringify({ provider, passthrough: true })
141
+ });
142
+ } catch {
143
+ }
144
+ const upstreamModels = await fetchUpstreamModels(DEFAULT_BASE_URL, data.apiKey, llmKey, upstreamBaseUrl);
145
+ const modelCount = upstreamModels ? upstreamModels.length : FALLBACK_MODELS.length;
84
146
  return {
85
147
  profiles: [
86
148
  {
@@ -88,10 +150,18 @@ var opencompressProvider = {
88
150
  credential: { apiKey: data.apiKey }
89
151
  }
90
152
  ],
153
+ configPatch: {
154
+ models: {
155
+ providers: {
156
+ opencompress: buildProviderModels(DEFAULT_BASE_URL, llmKey, upstreamBaseUrl, upstreamModels || void 0)
157
+ }
158
+ }
159
+ },
91
160
  defaultModel: "gpt-4o-mini",
92
161
  notes: [
93
- "OpenCompress is ready! All LLM calls are now compressed automatically.",
94
- `Free credit: ${data.freeCredit}. Add more: POST /api/v1/topup or visit opencompress.ai/dashboard`
162
+ `OpenCompress is ready! Connected to ${provider} (${modelCount} models).`,
163
+ "Your LLM key is stored locally only \u2014 never on our server.",
164
+ `Free credit: ${data.freeCredit}. Dashboard: opencompress.ai/dashboard`
95
165
  ]
96
166
  };
97
167
  } catch (err) {
@@ -112,7 +182,12 @@ var plugin = {
112
182
  const existingHeaders = api.config.models?.providers?.opencompress?.headers;
113
183
  const existingUpstreamKey = existingHeaders?.["X-Upstream-Key"];
114
184
  const existingUpstreamBaseUrl = existingHeaders?.["X-Upstream-Base-Url"];
115
- const providerModels = buildProviderModels(baseUrl, existingUpstreamKey, existingUpstreamBaseUrl);
185
+ const apiKey = getApiKey(api);
186
+ let dynamicModels = null;
187
+ if (apiKey && existingUpstreamKey) {
188
+ dynamicModels = await fetchUpstreamModels(baseUrl, apiKey, existingUpstreamKey, existingUpstreamBaseUrl);
189
+ }
190
+ const providerModels = buildProviderModels(baseUrl, existingUpstreamKey, existingUpstreamBaseUrl, dynamicModels || void 0);
116
191
  opencompressProvider.models = providerModels;
117
192
  api.registerProvider(opencompressProvider);
118
193
  if (!api.config.models) {
@@ -122,22 +197,24 @@ var plugin = {
122
197
  api.config.models.providers = {};
123
198
  }
124
199
  api.config.models.providers.opencompress = providerModels;
125
- api.logger.info("OpenCompress provider registered (20 models, 5-layer compression)");
200
+ const modelCount = dynamicModels ? dynamicModels.length : FALLBACK_MODELS.length;
201
+ const source = dynamicModels ? "upstream" : "fallback";
202
+ api.logger.info(`OpenCompress provider registered (${modelCount} ${source} models, 5-layer compression)`);
126
203
  api.registerCommand({
127
204
  name: "compress-stats",
128
205
  description: "Show OpenCompress usage statistics and savings",
129
206
  acceptsArgs: true,
130
207
  requireAuth: false,
131
208
  handler: async () => {
132
- const apiKey = getApiKey(api);
133
- if (!apiKey) {
209
+ const apiKey2 = getApiKey(api);
210
+ if (!apiKey2) {
134
211
  return {
135
212
  text: "No API key found. Run `openclaw onboard opencompress` to set up."
136
213
  };
137
214
  }
138
215
  try {
139
216
  const res = await fetch(`${baseUrl}/user/stats`, {
140
- headers: { Authorization: `Bearer ${apiKey}` }
217
+ headers: { Authorization: `Bearer ${apiKey2}` }
141
218
  });
142
219
  if (!res.ok) {
143
220
  return { text: `Failed to fetch stats: HTTP ${res.status}` };
@@ -177,14 +254,14 @@ var plugin = {
177
254
  acceptsArgs: true,
178
255
  requireAuth: false,
179
256
  handler: async (ctx) => {
180
- const apiKey = getApiKey(api);
181
- if (!apiKey) {
257
+ const apiKey2 = getApiKey(api);
258
+ if (!apiKey2) {
182
259
  return { text: "Not set up. Run `openclaw onboard opencompress` first." };
183
260
  }
184
261
  const upstreamKey = ctx.args?.trim();
185
262
  if (!upstreamKey) {
186
263
  const res = await fetch(`${baseUrl}/v1/topup`, {
187
- headers: { Authorization: `Bearer ${apiKey}` }
264
+ headers: { Authorization: `Bearer ${apiKey2}` }
188
265
  });
189
266
  const data = res.ok ? await res.json() : null;
190
267
  return {
@@ -211,7 +288,7 @@ var plugin = {
211
288
  try {
212
289
  await fetch(`${baseUrl}/v1/byok`, {
213
290
  method: "DELETE",
214
- headers: { Authorization: `Bearer ${apiKey}` }
291
+ headers: { Authorization: `Bearer ${apiKey2}` }
215
292
  });
216
293
  } catch {
217
294
  }
@@ -238,7 +315,8 @@ var plugin = {
238
315
  provider = "google";
239
316
  upstreamBaseUrl = "https://generativelanguage.googleapis.com/v1beta/openai";
240
317
  }
241
- const updatedModels = buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl);
318
+ const upstreamModels = await fetchUpstreamModels(baseUrl, apiKey2, upstreamKey, upstreamBaseUrl);
319
+ const updatedModels = buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl, upstreamModels || void 0);
242
320
  if (api.config.models?.providers) {
243
321
  api.config.models.providers.opencompress = updatedModels;
244
322
  }
@@ -246,16 +324,18 @@ var plugin = {
246
324
  await fetch(`${baseUrl}/v1/byok`, {
247
325
  method: "POST",
248
326
  headers: {
249
- Authorization: `Bearer ${apiKey}`,
327
+ Authorization: `Bearer ${apiKey2}`,
250
328
  "Content-Type": "application/json"
251
329
  },
252
330
  body: JSON.stringify({ provider, passthrough: true })
253
331
  });
254
332
  } catch {
255
333
  }
334
+ const modelCount2 = upstreamModels ? upstreamModels.length : FALLBACK_MODELS.length;
256
335
  return {
257
336
  text: [
258
337
  `Switched to **BYOK mode** (${provider}).`,
338
+ `Loaded **${modelCount2} models** from your ${provider} account.`,
259
339
  "",
260
340
  "Your key is stored **locally only** \u2014 never sent to our server for storage.",
261
341
  "It's passed through on each request via header and discarded immediately.",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@opencompress/opencompress",
3
- "version": "1.2.0",
3
+ "version": "1.4.0",
4
4
  "description": "OpenCompress plugin for OpenClaw — automatic 5-layer prompt compression for any LLM",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",