@opencompress/opencompress 1.7.0 → 1.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/dist/index.js +35 -383
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -6,7 +6,7 @@ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require
6
6
  });
7
7
 
8
8
  // src/index.ts
9
- var VERSION = "1.7.0";
9
+ var VERSION = "1.8.0";
10
10
  var DEFAULT_BASE_URL = "https://www.opencompress.ai/api";
11
11
  function getApiKey(api) {
12
12
  const auth = api.config.auth;
@@ -35,96 +35,7 @@ function getApiKey(api) {
35
35
  }
36
36
  return void 0;
37
37
  }
38
- var FALLBACK_MODELS = [
39
- { id: "gpt-4o", name: "GPT-4o", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
40
- { id: "gpt-4o-mini", name: "GPT-4o Mini", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
41
- { id: "gpt-4.1", name: "GPT-4.1", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
42
- { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
43
- { id: "claude-sonnet-4-6", name: "Claude Sonnet 4.6", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
44
- { id: "claude-haiku-4-5-20251001", name: "Claude Haiku 4.5", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
45
- { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
46
- { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
47
- { id: "deepseek/deepseek-chat-v3-0324", name: "DeepSeek V3", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
48
- { id: "deepseek/deepseek-reasoner", name: "DeepSeek Reasoner", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 }
49
- ];
50
- function readExistingModels(api) {
51
- const providers = api.config.models?.providers;
52
- if (!providers) return null;
53
- const seen = /* @__PURE__ */ new Set();
54
- const models = [];
55
- for (const [providerId, providerConfig] of Object.entries(providers)) {
56
- if (providerId === "opencompress") continue;
57
- const providerModels = providerConfig.models || [];
58
- for (const m of providerModels) {
59
- if (m.name?.includes("\u2192")) continue;
60
- const rawId = m.id.includes("/") ? m.id.split("/").slice(1).join("/") : m.id;
61
- if (seen.has(rawId)) continue;
62
- seen.add(rawId);
63
- const upstreamId = m.id.startsWith(`${providerId}/`) ? rawId : m.id;
64
- models.push({
65
- ...m,
66
- id: upstreamId,
67
- name: m.name || upstreamId,
68
- // Zero out cost — billing handled by OpenCompress proxy
69
- cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }
70
- });
71
- }
72
- }
73
- return models.length > 0 ? models : null;
74
- }
75
- function buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl, models, apiKey) {
76
- const config = {
77
- baseUrl: `${baseUrl}/v1`,
78
- api: "openai-completions",
79
- apiKey: apiKey || void 0,
80
- models: models || FALLBACK_MODELS
81
- };
82
- if (upstreamKey || upstreamBaseUrl) {
83
- config.headers = {};
84
- if (upstreamKey) config.headers["X-Upstream-Key"] = upstreamKey;
85
- if (upstreamBaseUrl) config.headers["X-Upstream-Base-Url"] = upstreamBaseUrl;
86
- }
87
- return config;
88
- }
89
- function persistModelsConfig(providerModels) {
90
- try {
91
- const os = __require("os");
92
- const fs = __require("fs");
93
- const path = __require("path");
94
- const configPath = path.join(os.homedir(), ".openclaw", "openclaw.json");
95
- if (!fs.existsSync(configPath)) return;
96
- const raw = fs.readFileSync(configPath, "utf-8");
97
- let config;
98
- try {
99
- config = JSON.parse(raw);
100
- } catch {
101
- return;
102
- }
103
- if (!config.models) config.models = {};
104
- const models = config.models;
105
- if (!models.providers) models.providers = {};
106
- const providers = models.providers;
107
- const configSafeModels = providerModels.models.map((m) => ({
108
- id: m.id,
109
- name: m.name
110
- }));
111
- const configEntry = {
112
- baseUrl: providerModels.baseUrl,
113
- api: providerModels.api || "openai-completions",
114
- models: configSafeModels
115
- };
116
- if (providerModels.apiKey) {
117
- configEntry.apiKey = providerModels.apiKey;
118
- }
119
- if (providerModels.headers) {
120
- configEntry.headers = providerModels.headers;
121
- }
122
- providers.opencompress = configEntry;
123
- fs.writeFileSync(configPath, JSON.stringify(config, null, 2) + "\n");
124
- } catch {
125
- }
126
- }
127
- function persistAgentModelsJson(providerModels) {
38
+ function persistAuthProfile(apiKey) {
128
39
  try {
129
40
  const os = __require("os");
130
41
  const fs = __require("fs");
@@ -133,37 +44,27 @@ function persistAgentModelsJson(providerModels) {
133
44
  if (!fs.existsSync(agentsDir)) return;
134
45
  const agentDirs = fs.readdirSync(agentsDir);
135
46
  for (const agent of agentDirs) {
136
- const modelsPath = path.join(agentsDir, agent, "agent", "models.json");
137
- const modelsDir = path.dirname(modelsPath);
138
- if (!fs.existsSync(modelsDir)) continue;
139
- let data = { providers: {} };
140
- if (fs.existsSync(modelsPath)) {
47
+ const authPath = path.join(agentsDir, agent, "agent", "auth-profiles.json");
48
+ const authDir = path.dirname(authPath);
49
+ if (!fs.existsSync(authDir)) {
50
+ fs.mkdirSync(authDir, { recursive: true });
51
+ }
52
+ let profiles = {
53
+ version: 1,
54
+ profiles: {}
55
+ };
56
+ if (fs.existsSync(authPath)) {
141
57
  try {
142
- data = JSON.parse(fs.readFileSync(modelsPath, "utf-8"));
143
- if (!data.providers || typeof data.providers !== "object") {
144
- data.providers = {};
145
- }
58
+ profiles = JSON.parse(fs.readFileSync(authPath, "utf-8"));
146
59
  } catch {
147
- data = { providers: {} };
148
60
  }
149
61
  }
150
- data.providers.opencompress = {
151
- baseUrl: providerModels.baseUrl,
152
- api: providerModels.api || "openai-completions",
153
- apiKey: providerModels.apiKey || void 0,
154
- models: providerModels.models.map((m) => ({
155
- id: m.id,
156
- name: m.name,
157
- api: m.api || "openai-completions",
158
- reasoning: m.reasoning ?? false,
159
- input: m.input || ["text"],
160
- cost: m.cost || { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
161
- contextWindow: m.contextWindow || 2e5,
162
- maxTokens: m.maxTokens || 8192
163
- })),
164
- ...providerModels.headers ? { headers: providerModels.headers } : {}
62
+ profiles.profiles["opencompress:default"] = {
63
+ type: "api_key",
64
+ provider: "opencompress",
65
+ key: apiKey
165
66
  };
166
- fs.writeFileSync(modelsPath, JSON.stringify(data, null, 2) + "\n");
67
+ fs.writeFileSync(authPath, JSON.stringify(profiles, null, 2) + "\n");
167
68
  }
168
69
  } catch {
169
70
  }
@@ -197,78 +98,6 @@ function persistAgentAuthJson(apiKey) {
197
98
  } catch {
198
99
  }
199
100
  }
200
- function injectModelsAllowlist(models) {
201
- try {
202
- const os = __require("os");
203
- const fs = __require("fs");
204
- const path = __require("path");
205
- const configPath = path.join(os.homedir(), ".openclaw", "openclaw.json");
206
- if (!fs.existsSync(configPath)) return;
207
- const raw = fs.readFileSync(configPath, "utf-8");
208
- let config;
209
- try {
210
- config = JSON.parse(raw);
211
- } catch {
212
- return;
213
- }
214
- if (!config.agents) config.agents = {};
215
- const agents = config.agents;
216
- if (!agents.defaults) agents.defaults = {};
217
- const defaults = agents.defaults;
218
- if (!defaults.models) defaults.models = {};
219
- const allowlist = defaults.models;
220
- const existingKeys = Object.keys(allowlist);
221
- if (existingKeys.length === 0) return;
222
- let changed = false;
223
- for (const m of models) {
224
- const fullId = `opencompress/${m.id}`;
225
- if (!allowlist[fullId]) {
226
- allowlist[fullId] = {};
227
- changed = true;
228
- }
229
- }
230
- if (changed) {
231
- const tmpPath = `${configPath}.tmp.${process.pid}`;
232
- fs.writeFileSync(tmpPath, JSON.stringify(config, null, 2) + "\n");
233
- fs.renameSync(tmpPath, configPath);
234
- }
235
- } catch {
236
- }
237
- }
238
- function persistAuthProfile(apiKey) {
239
- try {
240
- const os = __require("os");
241
- const fs = __require("fs");
242
- const path = __require("path");
243
- const agentsDir = path.join(os.homedir(), ".openclaw", "agents");
244
- if (!fs.existsSync(agentsDir)) return;
245
- const agentDirs = fs.readdirSync(agentsDir);
246
- for (const agent of agentDirs) {
247
- const authPath = path.join(agentsDir, agent, "agent", "auth-profiles.json");
248
- const authDir = path.dirname(authPath);
249
- if (!fs.existsSync(authDir)) {
250
- fs.mkdirSync(authDir, { recursive: true });
251
- }
252
- let profiles = {
253
- version: 1,
254
- profiles: {}
255
- };
256
- if (fs.existsSync(authPath)) {
257
- try {
258
- profiles = JSON.parse(fs.readFileSync(authPath, "utf-8"));
259
- } catch {
260
- }
261
- }
262
- profiles.profiles["opencompress:default"] = {
263
- type: "api_key",
264
- provider: "opencompress",
265
- key: apiKey
266
- };
267
- fs.writeFileSync(authPath, JSON.stringify(profiles, null, 2) + "\n");
268
- }
269
- } catch {
270
- }
271
- }
272
101
  function proxyStatePath() {
273
102
  const os = __require("os");
274
103
  const path = __require("path");
@@ -420,47 +249,30 @@ var opencompressProvider = {
420
249
  docsPath: "https://docs.opencompress.ai",
421
250
  aliases: ["oc", "compress"],
422
251
  envVars: ["OPENCOMPRESS_API_KEY"],
423
- models: buildProviderModels(DEFAULT_BASE_URL),
252
+ // No models — we're a transparent proxy, not a router.
253
+ // Users keep their existing providers; we just compress their traffic.
254
+ models: {
255
+ baseUrl: `${DEFAULT_BASE_URL}/v1`,
256
+ api: "openai-completions",
257
+ models: []
258
+ },
424
259
  formatApiKey: (cred) => cred.apiKey || "",
425
260
  auth: [
426
261
  {
427
262
  id: "api-key",
428
263
  label: "OpenCompress",
429
- hint: "Connect your LLM key \u2014 compress every call, save 40-70%",
264
+ hint: "Compress all LLM calls automatically \u2014 save 40-60% on any provider",
430
265
  kind: "custom",
431
266
  run: async (ctx) => {
432
267
  ctx.prompter.note(
433
- "OpenCompress compresses all LLM prompts automatically.\n53% fewer tokens, 62% faster, 96% quality preserved.\n\nConnect your existing LLM API key to get started.\nSupported: OpenAI, Anthropic, OpenRouter, Google"
268
+ "OpenCompress compresses all LLM prompts automatically.\nWorks with any provider you already have (OpenAI, Anthropic, OpenRouter, etc.).\n\nAfter setup, run `/compress on` to enable compression for all providers."
434
269
  );
435
- const llmKey = await ctx.prompter.text({
436
- message: "Enter your LLM API key (OpenAI/Anthropic/OpenRouter):",
437
- validate: (val) => {
438
- if (!val || val.length < 10) return "Please enter a valid API key";
439
- if (val.startsWith("sk-occ-")) return "Enter your LLM provider key, not an OpenCompress key";
440
- return void 0;
441
- }
442
- });
443
- if (typeof llmKey === "symbol") {
444
- throw new Error("Setup cancelled");
445
- }
446
- let provider = "openrouter";
447
- let upstreamBaseUrl = "https://openrouter.ai/api/v1";
448
- if (llmKey.startsWith("sk-proj-") || llmKey.startsWith("sk-") && !llmKey.startsWith("sk-ant-") && !llmKey.startsWith("sk-or-")) {
449
- provider = "openai";
450
- upstreamBaseUrl = "https://api.openai.com/v1";
451
- } else if (llmKey.startsWith("sk-ant-")) {
452
- provider = "anthropic";
453
- upstreamBaseUrl = "https://api.anthropic.com/v1";
454
- } else if (llmKey.startsWith("AIza")) {
455
- provider = "google";
456
- upstreamBaseUrl = "https://generativelanguage.googleapis.com/v1beta/openai";
457
- }
458
- const spinner = ctx.prompter.progress("Creating account...");
270
+ const spinner = ctx.prompter.progress("Creating OpenCompress account...");
459
271
  try {
460
272
  const res = await fetch(`${DEFAULT_BASE_URL}/v1/provision`, {
461
273
  method: "POST",
462
274
  headers: { "Content-Type": "application/json" },
463
- body: JSON.stringify({ upstreamApiKey: llmKey })
275
+ body: JSON.stringify({})
464
276
  });
465
277
  if (!res.ok) {
466
278
  const err = await res.json().catch(() => ({ error: { message: "Unknown error" } }));
@@ -471,21 +283,6 @@ var opencompressProvider = {
471
283
  }
472
284
  const data = await res.json();
473
285
  spinner.stop("Account created");
474
- try {
475
- await fetch(`${DEFAULT_BASE_URL}/v1/byok`, {
476
- method: "POST",
477
- headers: {
478
- Authorization: `Bearer ${data.apiKey}`,
479
- "Content-Type": "application/json"
480
- },
481
- body: JSON.stringify({ provider, passthrough: true })
482
- });
483
- } catch {
484
- }
485
- const onboardModels = buildProviderModels(DEFAULT_BASE_URL, llmKey, upstreamBaseUrl, void 0, data.apiKey);
486
- const modelCount = FALLBACK_MODELS.length;
487
- persistModelsConfig(onboardModels);
488
- persistAgentModelsJson(onboardModels);
489
286
  persistAuthProfile(data.apiKey);
490
287
  persistAgentAuthJson(data.apiKey);
491
288
  return {
@@ -495,18 +292,11 @@ var opencompressProvider = {
495
292
  credential: { apiKey: data.apiKey }
496
293
  }
497
294
  ],
498
- configPatch: {
499
- models: {
500
- providers: {
501
- opencompress: onboardModels
502
- }
503
- }
504
- },
505
- defaultModel: "gpt-4o-mini",
506
295
  notes: [
507
- `OpenCompress is ready! Connected to ${provider} (${modelCount} models).`,
508
- "Your LLM key is stored locally only \u2014 never on our server.",
509
- `Free credit: ${data.freeCredit}. Dashboard: opencompress.ai/dashboard`
296
+ "OpenCompress is ready!",
297
+ `Free credit: ${data.freeCredit}`,
298
+ "Run `/compress on` to enable compression for all your LLM providers.",
299
+ "Dashboard: opencompress.ai/dashboard"
510
300
  ]
511
301
  };
512
302
  } catch (err) {
@@ -520,53 +310,17 @@ var opencompressProvider = {
520
310
  var plugin = {
521
311
  id: "opencompress",
522
312
  name: "OpenCompress",
523
- description: "5-layer prompt compression \u2014 40-60% cost reduction, 62% latency cut, 6ms CompactClassifier",
313
+ description: "Transparent prompt compression \u2014 save 40-60% on any LLM provider",
524
314
  version: VERSION,
525
315
  register(api) {
526
316
  const baseUrl = api.pluginConfig?.baseUrl || DEFAULT_BASE_URL;
527
- const existingHeaders = api.config.models?.providers?.opencompress?.headers;
528
- const existingUpstreamKey = existingHeaders?.["X-Upstream-Key"];
529
- const existingUpstreamBaseUrl = existingHeaders?.["X-Upstream-Base-Url"];
530
- const existingApiKey = api.config.models?.providers?.opencompress?.apiKey || getApiKey(api);
531
- const existingModels = readExistingModels(api);
532
- const providerModels = buildProviderModels(baseUrl, existingUpstreamKey, existingUpstreamBaseUrl, existingModels || void 0, existingApiKey);
533
- opencompressProvider.models = providerModels;
534
317
  api.registerProvider(opencompressProvider);
535
- if (!api.config.models) {
536
- api.config.models = { providers: {} };
537
- }
538
- if (!api.config.models.providers) {
539
- api.config.models.providers = {};
540
- }
541
- api.config.models.providers.opencompress = providerModels;
542
- persistModelsConfig(providerModels);
543
- persistAgentModelsJson(providerModels);
544
- {
545
- if (!api.config.agents) api.config.agents = {};
546
- const agents = api.config.agents;
547
- if (!agents.defaults) agents.defaults = {};
548
- const defaults = agents.defaults;
549
- if (!defaults.models) defaults.models = {};
550
- const allowlist = defaults.models;
551
- if (Object.keys(allowlist).length > 0) {
552
- for (const m of providerModels.models) {
553
- const fullId = `opencompress/${m.id}`;
554
- if (!allowlist[fullId]) {
555
- allowlist[fullId] = {};
556
- }
557
- }
558
- }
559
- }
560
- const modelsForAllowlist = providerModels.models;
561
- setTimeout(() => injectModelsAllowlist(modelsForAllowlist), 3e3);
562
318
  const apiKey = getApiKey(api);
563
319
  if (apiKey) {
564
320
  persistAuthProfile(apiKey);
565
321
  persistAgentAuthJson(apiKey);
566
322
  }
567
- const modelCount = existingModels ? existingModels.length : FALLBACK_MODELS.length;
568
- const source = existingModels ? "from existing providers" : "fallback";
569
- api.logger.info(`OpenCompress provider registered (${modelCount} models ${source}, 5-layer compression)`);
323
+ api.logger.info("OpenCompress registered (transparent proxy mode)");
570
324
  api.registerCommand({
571
325
  name: "compress-stats",
572
326
  description: "Show OpenCompress usage statistics and savings",
@@ -615,108 +369,6 @@ var plugin = {
615
369
  }
616
370
  });
617
371
  api.logger.info("Registered /compress-stats command");
618
- api.registerCommand({
619
- name: "compress-byok",
620
- description: "Connect your own LLM key (OpenAI/Anthropic/OpenRouter) to save more",
621
- acceptsArgs: true,
622
- requireAuth: false,
623
- handler: async (ctx) => {
624
- const apiKey2 = getApiKey(api);
625
- if (!apiKey2) {
626
- return { text: "Not set up. Run `openclaw onboard opencompress` first." };
627
- }
628
- const upstreamKey = ctx.args?.trim();
629
- if (!upstreamKey) {
630
- const res = await fetch(`${baseUrl}/v1/topup`, {
631
- headers: { Authorization: `Bearer ${apiKey2}` }
632
- });
633
- const data = res.ok ? await res.json() : null;
634
- return {
635
- text: [
636
- "**BYOK (Bring Your Own Key)**",
637
- "",
638
- "Connect your LLM provider key to pay them directly \u2014 we only charge the compression fee (20% of savings).",
639
- "",
640
- "**Usage:**",
641
- " `/compress-byok sk-proj-xxx` \u2014 Connect OpenAI key",
642
- " `/compress-byok sk-ant-xxx` \u2014 Connect Anthropic key",
643
- " `/compress-byok sk-or-xxx` \u2014 Connect OpenRouter key",
644
- " `/compress-byok off` \u2014 Switch back to router mode",
645
- "",
646
- data ? `**Balance:** $${Number(data.balance || 0).toFixed(2)}` : ""
647
- ].join("\n")
648
- };
649
- }
650
- if (upstreamKey === "off" || upstreamKey === "disable" || upstreamKey === "router") {
651
- const cleanModels = buildProviderModels(baseUrl);
652
- if (api.config.models?.providers) {
653
- api.config.models.providers.opencompress = cleanModels;
654
- }
655
- persistModelsConfig(cleanModels);
656
- persistAgentModelsJson(cleanModels);
657
- try {
658
- await fetch(`${baseUrl}/v1/byok`, {
659
- method: "DELETE",
660
- headers: { Authorization: `Bearer ${apiKey2}` }
661
- });
662
- } catch {
663
- }
664
- return { text: "Switched back to **router mode**. Your upstream key has been removed from local config." };
665
- }
666
- if (upstreamKey.startsWith("sk-occ-")) {
667
- return { text: "That's an OpenCompress key. Provide your LLM provider key (OpenAI, Anthropic, etc.)." };
668
- }
669
- if (upstreamKey.length < 10) {
670
- return { text: "Key looks too short. Provide your full LLM API key." };
671
- }
672
- let provider = "unknown";
673
- let upstreamBaseUrl = "";
674
- if (upstreamKey.startsWith("sk-proj-") || upstreamKey.startsWith("sk-") && !upstreamKey.startsWith("sk-ant-") && !upstreamKey.startsWith("sk-or-")) {
675
- provider = "openai";
676
- upstreamBaseUrl = "https://api.openai.com/v1";
677
- } else if (upstreamKey.startsWith("sk-ant-")) {
678
- provider = "anthropic";
679
- upstreamBaseUrl = "https://api.anthropic.com/v1";
680
- } else if (upstreamKey.startsWith("sk-or-")) {
681
- provider = "openrouter";
682
- upstreamBaseUrl = "https://openrouter.ai/api/v1";
683
- } else if (upstreamKey.startsWith("AIza")) {
684
- provider = "google";
685
- upstreamBaseUrl = "https://generativelanguage.googleapis.com/v1beta/openai";
686
- }
687
- const existingModels2 = readExistingModels(api);
688
- const updatedModels = buildProviderModels(baseUrl, upstreamKey, upstreamBaseUrl, existingModels2 || void 0);
689
- if (api.config.models?.providers) {
690
- api.config.models.providers.opencompress = updatedModels;
691
- }
692
- persistModelsConfig(updatedModels);
693
- persistAgentModelsJson(updatedModels);
694
- try {
695
- await fetch(`${baseUrl}/v1/byok`, {
696
- method: "POST",
697
- headers: {
698
- Authorization: `Bearer ${apiKey2}`,
699
- "Content-Type": "application/json"
700
- },
701
- body: JSON.stringify({ provider, passthrough: true })
702
- });
703
- } catch {
704
- }
705
- const modelCount2 = existingModels2 ? existingModels2.length : FALLBACK_MODELS.length;
706
- return {
707
- text: [
708
- `Switched to **BYOK mode** (${provider}).`,
709
- `Loaded **${modelCount2} models** from your ${provider} account.`,
710
- "",
711
- "Your key is stored **locally only** \u2014 never sent to our server for storage.",
712
- "It's passed through on each request via header and discarded immediately.",
713
- "",
714
- "To switch back: `/compress-byok off`"
715
- ].join("\n")
716
- };
717
- }
718
- });
719
- api.logger.info("Registered /compress-byok command");
720
372
  api.registerCommand({
721
373
  name: "compress",
722
374
  description: "Toggle transparent compression for all LLM providers",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@opencompress/opencompress",
3
- "version": "1.7.0",
3
+ "version": "1.8.0",
4
4
  "description": "OpenCompress plugin for OpenClaw — automatic 5-layer prompt compression for any LLM",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",