@opencompress/opencompress 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,112 @@
1
+ # OpenCompress Plugin for OpenClaw
2
+
3
+ Automatic 5-layer prompt compression for any LLM. Drop-in replacement for your current provider — same models, same API, 40-70% cheaper and faster.
4
+
5
+ ## How it works
6
+
7
+ ```
8
+ Your Agent → OpenCompress (compress) → Your LLM provider (OpenRouter/OpenAI/Anthropic)
9
+ ```
10
+
11
+ OpenCompress sits between OpenClaw and your existing LLM provider. It compresses prompts through a 5-layer pipeline (distilled model + dict aliasing + output shaping + adaptive rate + closed-loop control), then forwards to your provider using your own API key (BYOK).
12
+
13
+ - **53%** average input token reduction
14
+ - **62%** latency improvement
15
+ - **96%** quality preservation (SQuALITY benchmark)
16
+
17
+ ## Install
18
+
19
+ ```bash
20
+ openclaw plugins install /path/to/opencompress
21
+ ```
22
+
23
+ Or from npm (coming soon):
24
+
25
+ ```bash
26
+ openclaw plugins install @opencompress/opencompress
27
+ ```
28
+
29
+ ## Setup
30
+
31
+ ### 1. Get an OpenCompress API key
32
+
33
+ 1. Go to [opencompress.ai/dashboard](https://www.opencompress.ai/dashboard)
34
+ 2. Sign up and create a **BYOK key** — enter your existing OpenRouter or OpenAI API key
35
+ 3. Copy the `sk-occ-...` key
36
+
37
+ ### 2. Onboard in OpenClaw
38
+
39
+ ```bash
40
+ openclaw onboard opencompress
41
+ ```
42
+
43
+ The wizard will prompt for your API key and verify it.
44
+
45
+ ### 3. Use it
46
+
47
+ Switch to the OpenCompress provider:
48
+
49
+ ```
50
+ /model opencompress/gpt-4o-mini
51
+ ```
52
+
53
+ All your requests now go through compression automatically. Model IDs are identical to OpenRouter/OpenAI — no config changes needed.
54
+
55
+ ## Commands
56
+
57
+ | Command | Description |
58
+ |---------|-------------|
59
+ | `/compress-stats` | Show compression savings (calls, tokens saved, cost saved) |
+ | `/compress-byok` | Connect your own LLM provider key, or `/compress-byok off` to return to router mode |
60
+
61
+ ## Supported models (20)
62
+
63
+ | Model | ID |
64
+ |-------|-----|
65
+ | GPT-4o | `gpt-4o` |
66
+ | GPT-4o Mini | `gpt-4o-mini` |
67
+ | GPT-4.1 | `gpt-4.1` |
68
+ | GPT-4.1 Mini | `gpt-4.1-mini` |
69
+ | GPT-4.1 Nano | `gpt-4.1-nano` |
70
+ | O3 | `o3` |
71
+ | O4 Mini | `o4-mini` |
72
+ | Claude Sonnet 4.6 | `claude-sonnet-4-6` |
73
+ | Claude Opus 4.6 | `claude-opus-4-6` |
74
+ | Claude Haiku 4.5 | `claude-haiku-4-5-20251001` |
75
+ | Gemini 2.5 Pro | `gemini-2.5-pro` |
76
+ | Gemini 2.5 Flash | `gemini-2.5-flash` |
77
+ | DeepSeek V3 | `deepseek/deepseek-chat-v3-0324` |
78
+ | DeepSeek Reasoner | `deepseek/deepseek-reasoner` |
79
+ | Llama 4 Maverick | `meta-llama/llama-4-maverick` |
80
+ | Llama 4 Scout | `meta-llama/llama-4-scout` |
81
+ | Qwen3 235B | `qwen/qwen3-235b-a22b` |
82
+ | Qwen3 32B | `qwen/qwen3-32b` |
83
+ | Mistral Large | `mistralai/mistral-large-2411` |
84
+ | Gemini 2.5 Pro Preview | `google/gemini-2.5-pro-preview` |
85
+
86
+ ## Pricing
87
+
88
+ OpenCompress charges **20% of what you save**. If compression saves you $1.00, you pay $0.20 — net saving $0.80.
89
+
90
+ BYOK mode: you pay your LLM provider directly + the compression fee.
91
+
92
+ ## Configuration
93
+
94
+ Plugin config options in `openclaw.plugin.json`:
95
+
96
+ | Key | Default | Description |
97
+ |-----|---------|-------------|
98
+ | `apiKey` | — | Your `sk-occ-...` API key (optional, set during onboard) |
99
+ | `baseUrl` | `https://www.opencompress.ai/api` | Custom API endpoint |
100
+
101
+ ## Development
102
+
103
+ ```bash
104
+ npm install
105
+ npm run build # Build with tsup
106
+ npm run dev # Watch mode
107
+ npm run typecheck # Type check
108
+ ```
109
+
110
+ ## License
111
+
112
+ MIT
@@ -0,0 +1,143 @@
/**
 * OpenClaw Plugin Types (locally defined)
 *
 * OpenClaw's plugin SDK uses duck typing — these match the shapes
 * expected by registerProvider() and the plugin system.
 * Defined locally to avoid depending on internal OpenClaw paths.
 */

// Protocol dialect used to call a model endpoint.
type ModelApi = "openai-completions" | "openai-responses" | "anthropic-messages" | "google-generative-ai";

// Static metadata for a single model entry in a provider's catalog.
type ModelDefinitionConfig = {
  id: string;
  name: string;
  api?: ModelApi;
  reasoning: boolean;
  input: Array<"text" | "image">;
  // Per-token pricing buckets (units are defined by the host — not visible here).
  cost: {
    input: number;
    output: number;
    cacheRead: number;
    cacheWrite: number;
  };
  contextWindow: number;
  maxTokens: number;
  headers?: Record<string, string>;
};

// Endpoint plus model catalog that a provider advertises to the host.
type ModelProviderConfig = {
  baseUrl: string;
  apiKey?: string;
  api?: ModelApi;
  headers?: Record<string, string>;
  authHeader?: boolean;
  models: ModelDefinitionConfig[];
};

// Stored credential for an auth profile; extra fields are allowed.
type AuthProfileCredential = {
  apiKey?: string;
  type?: string;
  [key: string]: unknown;
};

// Result of a successful auth flow: credentials plus optional config hints.
type ProviderAuthResult = {
  profiles: Array<{
    profileId: string;
    credential: AuthProfileCredential;
  }>;
  configPatch?: Record<string, unknown>;
  defaultModel?: string;
  notes?: string[];
};

// Minimal interactive-prompt surface available during onboarding.
type WizardPrompter = {
  text: (opts: {
    message: string;
    // Return an error message to reject the input, undefined to accept.
    validate?: (value: string) => string | undefined;
  }) => Promise<string | symbol>;
  note: (message: string) => void;
  progress: (message: string) => {
    stop: (message?: string) => void;
  };
};

// Context handed to an auth method's run() implementation.
type ProviderAuthContext = {
  config: Record<string, unknown>;
  agentDir?: string;
  workspaceDir?: string;
  prompter: WizardPrompter;
  runtime: {
    log: (message: string) => void;
  };
  isRemote: boolean;
  openUrl: (url: string) => Promise<void>;
};

// One way to authenticate against the provider (API key, OAuth, ...).
type ProviderAuthMethod = {
  id: string;
  label: string;
  hint?: string;
  kind: "oauth" | "api_key" | "token" | "device_code" | "custom";
  run: (ctx: ProviderAuthContext) => Promise<ProviderAuthResult>;
};

// Shape expected by api.registerProvider().
type ProviderPlugin = {
  id: string;
  label: string;
  docsPath?: string;
  aliases?: string[];
  envVars?: string[];
  models?: ModelProviderConfig;
  auth: ProviderAuthMethod[];
  formatApiKey?: (cred: AuthProfileCredential) => string;
};

// Host-provided logger; debug may be absent.
type PluginLogger = {
  debug?: (message: string) => void;
  info: (message: string) => void;
  warn: (message: string) => void;
  error: (message: string) => void;
};

// Long-running background service managed by the host.
type OpenClawPluginService = {
  id: string;
  start: () => void | Promise<void>;
  stop?: () => void | Promise<void>;
};

// Shape expected by api.registerCommand() for slash commands.
type CommandHandler = {
  name: string;
  description: string;
  acceptsArgs?: boolean;
  requireAuth?: boolean;
  handler: (ctx: {
    args?: string;
  }) => Promise<{
    text: string;
  }>;
};

// The API object passed to a plugin's register()/activate() hooks.
type OpenClawPluginApi = {
  id: string;
  name: string;
  version?: string;
  description?: string;
  source: string;
  // Host configuration; plugins may read and extend models/agents.
  config: Record<string, unknown> & {
    models?: {
      providers?: Record<string, ModelProviderConfig>;
    };
    agents?: Record<string, unknown>;
  };
  // This plugin's own settings (from openclaw.plugin.json configSchema).
  pluginConfig?: Record<string, unknown>;
  logger: PluginLogger;
  registerProvider: (provider: ProviderPlugin) => void;
  registerTool: (tool: unknown, opts?: unknown) => void;
  registerHook: (events: string | string[], handler: unknown, opts?: unknown) => void;
  registerHttpRoute: (params: {
    path: string;
    handler: unknown;
  }) => void;
  registerService: (service: OpenClawPluginService) => void;
  registerCommand: (command: CommandHandler) => void;
  resolvePath: (input: string) => string;
  on: (hookName: string, handler: unknown, opts?: unknown) => void;
};

// Top-level plugin module shape; register/activate are both optional hooks.
type OpenClawPluginDefinition = {
  id?: string;
  name?: string;
  description?: string;
  version?: string;
  register?: (api: OpenClawPluginApi) => void | Promise<void>;
  activate?: (api: OpenClawPluginApi) => void | Promise<void>;
};

// The built module's default export, typed for consumers of the package.
declare const plugin: OpenClawPluginDefinition;

export { plugin as default };
package/dist/index.js ADDED
@@ -0,0 +1,244 @@
// src/index.ts
// Plugin version string, surfaced in the plugin definition below.
const VERSION = "1.0.0";
// Default OpenCompress API root; overridable via pluginConfig.baseUrl.
const DEFAULT_BASE_URL = "https://www.opencompress.ai/api";
4
+ function getApiKey(api) {
5
+ const auth = api.config.auth;
6
+ return auth?.profiles?.opencompress?.credentials?.["api-key"]?.apiKey;
7
+ }
8
+ var OPENCOMPRESS_MODELS = [
9
+ // OpenAI
10
+ { id: "gpt-4o", name: "GPT-4o (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
11
+ { id: "gpt-4o-mini", name: "GPT-4o Mini (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 128e3, maxTokens: 16384 },
12
+ { id: "gpt-4.1", name: "GPT-4.1 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
13
+ { id: "gpt-4.1-mini", name: "GPT-4.1 Mini (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
14
+ { id: "gpt-4.1-nano", name: "GPT-4.1 Nano (Compressed)", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1047576, maxTokens: 32768 },
15
+ { id: "o3", name: "O3 (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 1e5 },
16
+ { id: "o4-mini", name: "O4 Mini (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 1e5 },
17
+ // Anthropic
18
+ { id: "claude-sonnet-4-6", name: "Claude Sonnet 4.6 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
19
+ { id: "claude-opus-4-6", name: "Claude Opus 4.6 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
20
+ { id: "claude-haiku-4-5-20251001", name: "Claude Haiku 4.5 (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 2e5, maxTokens: 128e3 },
21
+ // Google
22
+ { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
23
+ { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
24
+ { id: "google/gemini-2.5-pro-preview", name: "Gemini 2.5 Pro Preview (Compressed)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
25
+ // DeepSeek
26
+ { id: "deepseek/deepseek-chat-v3-0324", name: "DeepSeek V3 (Compressed)", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
27
+ { id: "deepseek/deepseek-reasoner", name: "DeepSeek Reasoner (Compressed)", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
28
+ // Meta
29
+ { id: "meta-llama/llama-4-maverick", name: "Llama 4 Maverick (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 1048576, maxTokens: 65536 },
30
+ { id: "meta-llama/llama-4-scout", name: "Llama 4 Scout (Compressed)", reasoning: false, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 524288, maxTokens: 65536 },
31
+ // Qwen
32
+ { id: "qwen/qwen3-235b-a22b", name: "Qwen3 235B (Compressed)", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
33
+ { id: "qwen/qwen3-32b", name: "Qwen3 32B (Compressed)", reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 },
34
+ // Mistral
35
+ { id: "mistralai/mistral-large-2411", name: "Mistral Large (Compressed)", reasoning: false, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 131072, maxTokens: 8192 }
36
+ ];
/**
 * Build the provider's ModelProviderConfig for a given API root.
 * Appends "/v1" to baseUrl and advertises the full OpenCompress catalog
 * over the OpenAI chat-completions wire protocol.
 */
function buildProviderModels(baseUrl) {
  const providerConfig = {
    baseUrl: baseUrl + "/v1",
    api: "openai-completions",
    models: OPENCOMPRESS_MODELS
  };
  return providerConfig;
}
/**
 * Provider registration object passed to api.registerProvider().
 *
 * Auth exposes a single "custom" method that provisions an account by
 * POSTing to {DEFAULT_BASE_URL}/v1/provision and storing the returned
 * API key as the "default" profile credential.
 *
 * Fixes vs. original:
 *  - the spinner was stopped twice on the !res.ok path (once in the branch,
 *    once in catch); the catch block now owns spinner cleanup exclusively;
 *  - the provision response is validated — a 200 without an apiKey
 *    previously stored `undefined` as the credential silently.
 */
var opencompressProvider = {
  id: "opencompress",
  label: "OpenCompress",
  docsPath: "https://docs.opencompress.ai",
  aliases: ["oc", "compress"],
  envVars: ["OPENCOMPRESS_API_KEY"],
  models: buildProviderModels(DEFAULT_BASE_URL),
  formatApiKey: (cred) => cred.apiKey || "",
  auth: [
    {
      id: "api-key",
      label: "OpenCompress",
      hint: "One-click setup \u2014 no API key needed",
      kind: "custom",
      run: async (ctx) => {
        ctx.prompter.note(
          "OpenCompress compresses all LLM prompts automatically.\n53% fewer tokens, 62% faster, 96% quality preserved.\nWe'll create your account now \u2014 $1 free credit included."
        );
        const spinner = ctx.prompter.progress("Creating account...");
        try {
          const res = await fetch(`${DEFAULT_BASE_URL}/v1/provision`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: "{}"
          });
          if (!res.ok) {
            // Best-effort parse of the error body; fall back to a stub.
            const err = await res.json().catch(() => ({ error: { message: "Unknown error" } }));
            // Throw only — the catch block below stops the spinner once.
            throw new Error(
              `Provisioning failed: ${err.error?.message || res.statusText}`
            );
          }
          const data = await res.json();
          // Validate before storing: a malformed 200 must not persist an
          // undefined credential.
          if (typeof data?.apiKey !== "string" || data.apiKey.length === 0) {
            throw new Error("Provisioning failed: response did not include an API key");
          }
          spinner.stop("Account created");
          return {
            profiles: [
              {
                profileId: "default",
                credential: { apiKey: data.apiKey }
              }
            ],
            defaultModel: "gpt-4o-mini",
            notes: [
              "OpenCompress is ready! All LLM calls are now compressed automatically.",
              `Free credit: ${data.freeCredit}. Add more: POST /api/v1/topup or visit opencompress.ai/dashboard`
            ]
          };
        } catch (err) {
          spinner.stop("Setup failed");
          throw err instanceof Error ? err : new Error(String(err));
        }
      }
    }
  ]
};
/**
 * Plugin definition consumed by OpenClaw's loader.
 *
 * register() wires everything up:
 *   1. registers the "opencompress" provider (re-pointed at a custom
 *      baseUrl when pluginConfig.baseUrl is set),
 *   2. mirrors the provider model config into api.config.models.providers,
 *   3. registers the /compress-stats and /compress-byok chat commands.
 *
 * Fix vs. original: the BYOK help text crashed with a TypeError when the
 * /v1/topup response lacked a numeric `balance` (only data2's nullness was
 * checked before calling .toFixed()).
 */
var plugin = {
  id: "opencompress",
  name: "OpenCompress",
  description: "5-layer prompt compression \u2014 53% input reduction, 62% latency cut, 96% quality",
  version: VERSION,
  async register(api) {
    // Resolve the API endpoint: plugin config wins over the default.
    const baseUrl = api.pluginConfig?.baseUrl || DEFAULT_BASE_URL;
    if (baseUrl !== DEFAULT_BASE_URL) {
      // Rebuild the provider's model config so requests hit the override.
      opencompressProvider.models = buildProviderModels(baseUrl);
    }
    api.registerProvider(opencompressProvider);
    // Ensure the nested models.providers map exists before writing into it.
    if (!api.config.models) {
      api.config.models = { providers: {} };
    }
    if (!api.config.models.providers) {
      api.config.models.providers = {};
    }
    api.config.models.providers.opencompress = buildProviderModels(baseUrl);
    api.logger.info("OpenCompress provider registered (20 models, 5-layer compression)");

    // /compress-stats — read-only usage and savings report.
    api.registerCommand({
      name: "compress-stats",
      description: "Show OpenCompress usage statistics and savings",
      acceptsArgs: true, // args are accepted but currently ignored by the handler
      requireAuth: false,
      handler: async () => {
        const apiKey = getApiKey(api);
        if (!apiKey) {
          return {
            text: "No API key found. Run `openclaw onboard opencompress` to set up."
          };
        }
        try {
          // NOTE(review): every other endpoint in this file lives under /v1;
          // confirm that /user/stats (without the /v1 prefix) is intentional.
          const res = await fetch(`${baseUrl}/user/stats`, {
            headers: { Authorization: `Bearer ${apiKey}` }
          });
          if (!res.ok) {
            return { text: `Failed to fetch stats: HTTP ${res.status}` };
          }
          const stats = await res.json();
          // Every field may be absent; fall back to neutral display values.
          const calls = stats.totalCalls ?? 0;
          const savings = stats.totalSavings?.toFixed(4) ?? "0.0000";
          const rate = stats.avgCompressionRate ? `${(stats.avgCompressionRate * 100).toFixed(1)}%` : "N/A";
          const origTokens = stats.totalOriginalTokens?.toLocaleString() ?? "N/A";
          const compTokens = stats.totalCompressedTokens?.toLocaleString() ?? "N/A";
          return {
            text: [
              "```",
              "OpenCompress Stats",
              "==================",
              `API calls: ${calls}`,
              `Avg compression: ${rate}`,
              `Original tokens: ${origTokens}`,
              `Compressed tokens: ${compTokens}`,
              `Total savings: $${savings}`,
              "```",
              "",
              "Dashboard: https://www.opencompress.ai/dashboard"
            ].join("\n")
          };
        } catch (err) {
          return {
            text: `Error fetching stats: ${err instanceof Error ? err.message : String(err)}`
          };
        }
      }
    });
    api.logger.info("Registered /compress-stats command");

    // /compress-byok — connect or disconnect an upstream LLM provider key.
    api.registerCommand({
      name: "compress-byok",
      description: "Connect your own LLM key (OpenAI/Anthropic/OpenRouter) to save more",
      acceptsArgs: true,
      requireAuth: false,
      handler: async (ctx) => {
        const apiKey = getApiKey(api);
        if (!apiKey) {
          return { text: "Not set up. Run `openclaw onboard opencompress` first." };
        }
        const upstreamKey = ctx.args?.trim();
        if (!upstreamKey) {
          // No argument: show usage help plus the current balance (best effort).
          const res2 = await fetch(`${baseUrl}/v1/topup`, {
            headers: { Authorization: `Bearer ${apiKey}` }
          });
          const data2 = res2.ok ? await res2.json() : null;
          return {
            text: [
              "**BYOK (Bring Your Own Key)**",
              "",
              "Connect your LLM provider key to pay them directly \u2014 we only charge the compression fee (20% of savings).",
              "",
              "**Usage:**",
              " `/compress-byok sk-proj-xxx` \u2014 Connect OpenAI key",
              " `/compress-byok sk-ant-xxx` \u2014 Connect Anthropic key",
              " `/compress-byok sk-or-xxx` \u2014 Connect OpenRouter key",
              " `/compress-byok off` \u2014 Switch back to router mode",
              "",
              // Fix: require a numeric balance before formatting — a 200
              // response without one previously threw on .toFixed(2).
              data2 && typeof data2.balance === "number" ? `**Balance:** $${data2.balance.toFixed(2)}` : ""
            ].join("\n")
          };
        }
        if (upstreamKey === "off" || upstreamKey === "disable" || upstreamKey === "router") {
          const res2 = await fetch(`${baseUrl}/v1/byok`, {
            method: "DELETE",
            headers: { Authorization: `Bearer ${apiKey}` }
          });
          if (!res2.ok) {
            return { text: `Failed to switch: HTTP ${res2.status}` };
          }
          return { text: "Switched back to **router mode**. We handle LLM routing via OpenRouter." };
        }
        // Client-side sanity checks before sending the key upstream.
        if (upstreamKey.startsWith("sk-occ-")) {
          return { text: "That's an OpenCompress key. Provide your LLM provider key (OpenAI, Anthropic, etc.)." };
        }
        if (upstreamKey.length < 10) {
          return { text: "Key looks too short. Provide your full LLM API key." };
        }
        const res = await fetch(`${baseUrl}/v1/byok`, {
          method: "POST",
          headers: {
            Authorization: `Bearer ${apiKey}`,
            "Content-Type": "application/json"
          },
          body: JSON.stringify({ upstreamApiKey: upstreamKey })
        });
        if (!res.ok) {
          const err = await res.json().catch(() => ({ error: { message: "Unknown error" } }));
          return { text: `Failed: ${err.error?.message || res.statusText}` };
        }
        const data = await res.json();
        return {
          text: [
            `Switched to **BYOK mode** (${data.provider}).`,
            "",
            data.billing,
            "",
            "To switch back: `/compress-byok off`"
          ].join("\n")
        };
      }
    });
    api.logger.info("Registered /compress-byok command");
  }
};
241
+ var index_default = plugin;
242
+ export {
243
+ index_default as default
244
+ };
@@ -0,0 +1,20 @@
1
+ {
2
+ "id": "opencompress",
3
+ "name": "OpenCompress",
4
+ "description": "5-layer prompt compression — 53% input reduction, 62% latency cut, 96% quality. Save 40-70% on any LLM.",
5
+ "configSchema": {
6
+ "type": "object",
7
+ "properties": {
8
+ "apiKey": {
9
+ "type": "string",
10
+ "description": "OpenCompress API key (sk-occ-...)"
11
+ },
12
+ "baseUrl": {
13
+ "type": "string",
14
+ "default": "https://www.opencompress.ai/api",
15
+ "description": "OpenCompress API base URL"
16
+ }
17
+ },
18
+ "required": []
19
+ }
20
+ }
package/package.json ADDED
@@ -0,0 +1,60 @@
1
+ {
2
+ "name": "@opencompress/opencompress",
3
+ "version": "1.0.0",
4
+ "description": "OpenCompress plugin for OpenClaw — automatic 5-layer prompt compression for any LLM",
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "types": "dist/index.d.ts",
8
+ "openclaw": {
9
+ "extensions": [
10
+ "./dist/index.js"
11
+ ]
12
+ },
13
+ "exports": {
14
+ ".": {
15
+ "import": "./dist/index.js",
16
+ "types": "./dist/index.d.ts"
17
+ }
18
+ },
19
+ "files": [
20
+ "dist",
21
+ "openclaw.plugin.json"
22
+ ],
23
+ "keywords": [
24
+ "openclaw",
25
+ "plugin",
26
+ "compression",
27
+ "llm",
28
+ "opencompress",
29
+ "prompt-compression",
30
+ "cost-optimization",
31
+ "byok"
32
+ ],
33
+ "author": "OpenCompress <hello@opencompress.ai>",
34
+ "license": "MIT",
35
+ "repository": {
36
+ "type": "git",
37
+ "url": "git+https://github.com/claw-compactor/openclaw-plugin.git"
38
+ },
39
+ "peerDependencies": {
40
+ "openclaw": ">=2025.1.0"
41
+ },
42
+ "peerDependenciesMeta": {
43
+ "openclaw": {
44
+ "optional": true
45
+ }
46
+ },
47
+ "devDependencies": {
48
+ "@types/node": "^25.3.3",
49
+ "tsup": "^8.0.0",
50
+ "typescript": "^5.7.0"
51
+ },
52
+ "engines": {
53
+ "node": ">=20"
54
+ },
55
+ "scripts": {
56
+ "build": "tsup",
57
+ "dev": "tsup --watch",
58
+ "typecheck": "tsc --noEmit"
59
+ }
60
+ }