openclaw-triage-gate 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +120 -0
- package/openclaw.plugin.json +63 -0
- package/package.json +35 -0
- package/src/config.ts +82 -0
- package/src/index.ts +98 -0
- package/src/providers.ts +149 -0
- package/src/triage.ts +142 -0
package/README.md
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
# openclaw-triage-gate
|
|
2
|
+
|
|
3
|
+
A lightweight triage gate for OpenClaw group chats. Uses a cheap model to decide if the bot should respond before the expensive main model runs.
|
|
4
|
+
|
|
5
|
+
## The Problem
|
|
6
|
+
|
|
7
|
+
When an OpenClaw bot is in group chats with `requireMention: false`, **every message** triggers the main model (e.g. Opus at $15/M input, $75/M output) — even when the bot decides not to respond (`NO_REPLY`). In active groups, this wastes most of the token budget on messages the bot ignores anyway.
|
|
8
|
+
|
|
9
|
+
## The Solution
|
|
10
|
+
|
|
11
|
+
This plugin intercepts group messages **before** the main model runs. It calls a cheap triage model (e.g. Haiku at $1/M input, $5/M output) to make a quick RESPOND/SKIP decision. If the triage model says SKIP, the main model never fires.
|
|
12
|
+
|
|
13
|
+
```
|
|
14
|
+
Group message → Triage model (cheap) → SKIP? → Done ($0.001)
|
|
15
|
+
→ RESPOND? → Main model (expensive) → Reply
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
DMs always pass through without triage.
|
|
19
|
+
|
|
20
|
+
## Estimated Savings
|
|
21
|
+
|
|
22
|
+
Assuming ~80% of group messages don't need a response:
|
|
23
|
+
|
|
24
|
+
| Scenario | Cost per 100 messages |
|
|
25
|
+
|----------|----------------------|
|
|
26
|
+
| Without plugin (all Opus) | $5-22 |
|
|
27
|
+
| With plugin (Haiku triage) | $1-5 |
|
|
28
|
+
|
|
29
|
+
**75-90% reduction in group chat token costs.**
|
|
30
|
+
|
|
31
|
+
## Install
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
openclaw plugins install openclaw-triage-gate
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Configure
|
|
38
|
+
|
|
39
|
+
Add to your OpenClaw config:
|
|
40
|
+
|
|
41
|
+
```json
|
|
42
|
+
{
|
|
43
|
+
"plugins": {
|
|
44
|
+
"entries": {
|
|
45
|
+
"triage-gate": {
|
|
46
|
+
"enabled": true,
|
|
47
|
+
"config": {
|
|
48
|
+
"triageModel": "anthropic/claude-haiku-4-5-20251001"
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
Or via CLI:
|
|
57
|
+
|
|
58
|
+
```bash
|
|
59
|
+
openclaw config set plugins.entries.triage-gate.enabled true
|
|
60
|
+
openclaw config set plugins.entries.triage-gate.config.triageModel "anthropic/claude-haiku-4-5-20251001"
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
## Config Options
|
|
64
|
+
|
|
65
|
+
| Option | Type | Default | Description |
|
|
66
|
+
|--------|------|---------|-------------|
|
|
67
|
+
| `triageModel` | string | `anthropic/claude-haiku-4-5-20251001` | Model for triage decisions, in `provider/model` format |
|
|
68
|
+
| `triagePrompt` | string | (built-in) | Custom prompt — must instruct the model to reply RESPOND or SKIP |
|
|
69
|
+
| `groups` | string[] | all groups | Only apply triage to these group IDs |
|
|
70
|
+
| `excludeGroups` | string[] | none | Skip triage for these group IDs |
|
|
71
|
+
| `maxTriageTokens` | number | 10 | Max output tokens for triage response |
|
|
72
|
+
| `logDecisions` | boolean | true | Log each triage decision |
|
|
73
|
+
|
|
74
|
+
### Model Examples
|
|
75
|
+
|
|
76
|
+
Use any model your OpenClaw instance has configured:
|
|
77
|
+
|
|
78
|
+
```json
|
|
79
|
+
{ "triageModel": "anthropic/claude-haiku-4-5-20251001" }
|
|
80
|
+
{ "triageModel": "minimax/minimax-m2.5" }
|
|
81
|
+
{ "triageModel": "openai/gpt-4.1-mini" }
|
|
82
|
+
{ "triageModel": "openrouter/meta-llama/llama-4-scout" }
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
The plugin resolves API keys from OpenClaw's existing provider config — no need to add API keys to the plugin config.
|
|
86
|
+
|
|
87
|
+
### Custom Triage Prompt
|
|
88
|
+
|
|
89
|
+
You can customize when the bot responds by providing your own prompt:
|
|
90
|
+
|
|
91
|
+
```json
|
|
92
|
+
{
|
|
93
|
+
"triagePrompt": "You are a triage system. Reply RESPOND if the message needs the bot's attention, SKIP otherwise.\n\nAlways RESPOND to: questions, requests for help, mentions of health or emergencies.\nAlways SKIP: greetings, emoji reactions, casual chat."
|
|
94
|
+
}
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
## How It Works
|
|
98
|
+
|
|
99
|
+
1. The plugin registers a `before_dispatch` hook in OpenClaw
|
|
100
|
+
2. When a group message arrives, the hook fires before the main agent
|
|
101
|
+
3. The plugin calls the configured triage model with the message content
|
|
102
|
+
4. If the model says SKIP, the plugin returns `{ handled: true }` — the main agent never runs
|
|
103
|
+
5. If the model says RESPOND (or anything unclear), the message proceeds to the main agent normally
|
|
104
|
+
|
|
105
|
+
On any error (API timeout, missing key, etc.), the plugin defaults to letting the message through. Better to waste some tokens than silently drop messages.
|
|
106
|
+
|
|
107
|
+
## Future Enhancements
|
|
108
|
+
|
|
109
|
+
These are planned but not yet implemented:
|
|
110
|
+
|
|
111
|
+
- Recent message history in triage context for better accuracy
|
|
112
|
+
- Per-group custom triage prompts
|
|
113
|
+
- Confidence scores with configurable thresholds
|
|
114
|
+
- Analytics dashboard (hit/miss ratio, tokens saved)
|
|
115
|
+
- Keyword bypass list (always respond to certain words)
|
|
116
|
+
- Rate-based bypass (skip triage in quiet groups)
|
|
117
|
+
|
|
118
|
+
## License
|
|
119
|
+
|
|
120
|
+
MIT
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
{
|
|
2
|
+
"id": "triage-gate",
|
|
3
|
+
"name": "Triage Gate",
|
|
4
|
+
"description": "Uses a cheap model to decide if the bot should respond in group chats before the main model runs. Saves 75-90% of group chat token costs.",
|
|
5
|
+
"configSchema": {
|
|
6
|
+
"type": "object",
|
|
7
|
+
"additionalProperties": false,
|
|
8
|
+
"properties": {
|
|
9
|
+
"triageModel": {
|
|
10
|
+
"type": "string",
|
|
11
|
+
"description": "Model to use for triage decisions, in provider/model format."
|
|
12
|
+
},
|
|
13
|
+
"triagePrompt": {
|
|
14
|
+
"type": "string",
|
|
15
|
+
"description": "Custom prompt for triage decisions. Must instruct the model to reply with RESPOND or SKIP."
|
|
16
|
+
},
|
|
17
|
+
"groups": {
|
|
18
|
+
"type": "array",
|
|
19
|
+
"items": { "type": "string" },
|
|
20
|
+
"description": "Only apply triage to these group IDs. Empty means all groups."
|
|
21
|
+
},
|
|
22
|
+
"excludeGroups": {
|
|
23
|
+
"type": "array",
|
|
24
|
+
"items": { "type": "string" },
|
|
25
|
+
"description": "Skip triage for these group IDs (always let messages through)."
|
|
26
|
+
},
|
|
27
|
+
"maxTriageTokens": {
|
|
28
|
+
"type": "number",
|
|
29
|
+
"description": "Max output tokens for the triage model response."
|
|
30
|
+
},
|
|
31
|
+
"logDecisions": {
|
|
32
|
+
"type": "boolean",
|
|
33
|
+
"description": "Whether to log each triage decision."
|
|
34
|
+
}
|
|
35
|
+
}
|
|
36
|
+
},
|
|
37
|
+
"uiHints": {
|
|
38
|
+
"triageModel": {
|
|
39
|
+
"label": "Triage Model",
|
|
40
|
+
"help": "Model for triage decisions (e.g. anthropic/claude-haiku-4-5, minimax/minimax-m2.5). Default: anthropic/claude-haiku-4-5-20251001"
|
|
41
|
+
},
|
|
42
|
+
"triagePrompt": {
|
|
43
|
+
"label": "Triage Prompt",
|
|
44
|
+
"help": "Custom prompt that tells the triage model when to RESPOND vs SKIP"
|
|
45
|
+
},
|
|
46
|
+
"groups": {
|
|
47
|
+
"label": "Groups",
|
|
48
|
+
"help": "Group IDs to apply triage to (empty = all groups)"
|
|
49
|
+
},
|
|
50
|
+
"excludeGroups": {
|
|
51
|
+
"label": "Exclude Groups",
|
|
52
|
+
"help": "Group IDs to skip triage for (messages always proceed to main model)"
|
|
53
|
+
},
|
|
54
|
+
"maxTriageTokens": {
|
|
55
|
+
"label": "Max Triage Tokens",
|
|
56
|
+
"help": "Max output tokens for triage response (default: 10)"
|
|
57
|
+
},
|
|
58
|
+
"logDecisions": {
|
|
59
|
+
"label": "Log Decisions",
|
|
60
|
+
"help": "Log each triage decision to the plugin logger (default: true)"
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
}
|
package/package.json
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "openclaw-triage-gate",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "A lightweight triage gate for OpenClaw group chats. Uses a cheap model to decide if the bot should respond before the expensive main model runs.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "src/index.ts",
|
|
7
|
+
"license": "MIT",
|
|
8
|
+
"repository": {
|
|
9
|
+
"type": "git",
|
|
10
|
+
"url": "git+https://github.com/as3445/openclaw-triage-gate.git"
|
|
11
|
+
},
|
|
12
|
+
"keywords": [
|
|
13
|
+
"openclaw",
|
|
14
|
+
"plugin",
|
|
15
|
+
"triage",
|
|
16
|
+
"token-optimization",
|
|
17
|
+
"group-chat"
|
|
18
|
+
],
|
|
19
|
+
"peerDependencies": {
|
|
20
|
+
"openclaw": ">=2026.3.22"
|
|
21
|
+
},
|
|
22
|
+
"peerDependenciesMeta": {
|
|
23
|
+
"openclaw": {
|
|
24
|
+
"optional": true
|
|
25
|
+
}
|
|
26
|
+
},
|
|
27
|
+
"openclaw": {
|
|
28
|
+
"extensions": [
|
|
29
|
+
"./src/index.ts"
|
|
30
|
+
],
|
|
31
|
+
"install": {
|
|
32
|
+
"npmSpec": "openclaw-triage-gate"
|
|
33
|
+
}
|
|
34
|
+
}
|
|
35
|
+
}
|
package/src/config.ts
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Configuration for the triage-gate plugin.
|
|
3
|
+
*
|
|
4
|
+
* All fields are optional — sensible defaults are used when omitted.
|
|
5
|
+
*/
|
|
6
|
+
export type TriageGateConfig = {
|
|
7
|
+
/**
|
|
8
|
+
* Model to use for triage decisions, in "provider/model" format.
|
|
9
|
+
*
|
|
10
|
+
* Examples:
|
|
11
|
+
* "anthropic/claude-haiku-4-5-20251001"
|
|
12
|
+
* "minimax/minimax-m2.5"
|
|
13
|
+
* "openai/gpt-4.1-mini"
|
|
14
|
+
*
|
|
15
|
+
* The plugin resolves the API key from OpenClaw's configured providers —
|
|
16
|
+
* no need to put API keys in the plugin config.
|
|
17
|
+
*
|
|
18
|
+
* Default: "anthropic/claude-haiku-4-5-20251001"
|
|
19
|
+
*/
|
|
20
|
+
triageModel?: string;
|
|
21
|
+
|
|
22
|
+
/**
|
|
23
|
+
* Custom prompt for triage decisions. Replaces the built-in default.
|
|
24
|
+
* Must instruct the model to reply with exactly "RESPOND" or "SKIP".
|
|
25
|
+
*
|
|
26
|
+
* The message content is appended after this prompt.
|
|
27
|
+
*/
|
|
28
|
+
triagePrompt?: string;
|
|
29
|
+
|
|
30
|
+
/**
|
|
31
|
+
* Only apply triage to these group IDs. When empty or omitted,
|
|
32
|
+
* triage applies to ALL group chats.
|
|
33
|
+
*/
|
|
34
|
+
groups?: string[];
|
|
35
|
+
|
|
36
|
+
/**
|
|
37
|
+
* Skip triage for these group IDs — messages in these groups
|
|
38
|
+
* always proceed to the main model.
|
|
39
|
+
*/
|
|
40
|
+
excludeGroups?: string[];
|
|
41
|
+
|
|
42
|
+
/**
|
|
43
|
+
* Max output tokens for the triage model response.
|
|
44
|
+
* Keep this low — we only need "RESPOND" or "SKIP".
|
|
45
|
+
* Default: 10
|
|
46
|
+
*/
|
|
47
|
+
maxTriageTokens?: number;
|
|
48
|
+
|
|
49
|
+
/**
|
|
50
|
+
* Whether to log each triage decision.
|
|
51
|
+
* Default: true
|
|
52
|
+
*/
|
|
53
|
+
logDecisions?: boolean;
|
|
54
|
+
};
|
|
55
|
+
|
|
56
|
+
/**
 * Default model for triage calls when `triageModel` is not configured.
 * "provider/model" format; split by parseModelString in providers.ts.
 */
export const DEFAULT_TRIAGE_MODEL = "anthropic/claude-haiku-4-5-20251001";

/**
 * Default cap on triage output tokens. The triage model only needs to
 * emit a single word ("RESPOND" or "SKIP"), so 10 is plenty.
 */
export const DEFAULT_MAX_TRIAGE_TOKENS = 10;

/**
 * Built-in triage prompt, used when `triagePrompt` is not configured.
 * Instructs the model to answer with exactly "RESPOND" or "SKIP".
 * triage.ts sends this as the system prompt, appends the message
 * content as the user message, and parses the one-word reply with
 * parseTriageDecision (which fails open to RESPOND on anything unclear).
 */
export const DEFAULT_TRIAGE_PROMPT = `You are a message triage system for a group chat bot.
Decide if the bot should respond to this message.

Reply ONLY with "RESPOND" or "SKIP". Nothing else.

RESPOND when:
- The bot is directly addressed by name or asked a question
- The bot can add genuine value (information, help, insight)
- Something urgent or important is happening
- Correcting significant misinformation

SKIP when:
- Casual banter between people
- Someone already answered the question
- A response would just be acknowledgment ("nice", "yeah", "lol")
- The conversation is flowing fine without the bot
- The message is a reaction, emoji, or sticker`;
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* openclaw-triage-gate — A lightweight triage gate for OpenClaw group chats.
|
|
3
|
+
*
|
|
4
|
+
* Registers a `before_dispatch` hook that intercepts group chat messages.
|
|
5
|
+
* For each group message, it calls a cheap model (e.g. Haiku) to decide
|
|
6
|
+
* whether the bot should respond. If the triage model says "SKIP", the
|
|
7
|
+
* main model (e.g. Opus) never runs — saving tokens and money.
|
|
8
|
+
*
|
|
9
|
+
* DMs always pass through without triage.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { definePluginEntry, type OpenClawPluginApi } from "openclaw/plugin-sdk/plugin-entry";
|
|
13
|
+
import { evaluateMessage } from "./triage.js";
|
|
14
|
+
import { type TriageGateConfig } from "./config.js";
|
|
15
|
+
|
|
16
|
+
export default definePluginEntry({
|
|
17
|
+
id: "triage-gate",
|
|
18
|
+
name: "Triage Gate",
|
|
19
|
+
description:
|
|
20
|
+
"Uses a cheap model to decide if the bot should respond in group chats, saving 75-90% of group chat token costs.",
|
|
21
|
+
|
|
22
|
+
register(api: OpenClawPluginApi) {
|
|
23
|
+
const config = (api.pluginConfig ?? {}) as TriageGateConfig;
|
|
24
|
+
const logDecisions = config.logDecisions !== false; // default: true
|
|
25
|
+
|
|
26
|
+
// Pre-compute the set of groups to include/exclude for fast lookups
|
|
27
|
+
const includeGroups = config.groups?.length
|
|
28
|
+
? new Set(config.groups)
|
|
29
|
+
: null; // null = all groups
|
|
30
|
+
const excludeGroups = config.excludeGroups?.length
|
|
31
|
+
? new Set(config.excludeGroups)
|
|
32
|
+
: null;
|
|
33
|
+
|
|
34
|
+
/**
|
|
35
|
+
* Resolve an API key for a provider/model using OpenClaw's auth system.
|
|
36
|
+
* This keeps the plugin model-agnostic — it works with any provider
|
|
37
|
+
* the user has configured in OpenClaw.
|
|
38
|
+
*/
|
|
39
|
+
async function resolveApiKey(provider: string, _model: string): Promise<string> {
|
|
40
|
+
try {
|
|
41
|
+
const result = await api.runtime.modelAuth.resolveApiKeyForProvider({
|
|
42
|
+
provider,
|
|
43
|
+
});
|
|
44
|
+
return result?.apiKey ?? "";
|
|
45
|
+
} catch {
|
|
46
|
+
return "";
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
// Register the before_dispatch hook. This fires for every inbound message
|
|
51
|
+
// BEFORE the main agent (Opus, Sonnet, etc.) processes it.
|
|
52
|
+
api.on("before_dispatch", async (event) => {
|
|
53
|
+
// Only triage group messages — DMs always go through
|
|
54
|
+
if (!event.isGroup) {
|
|
55
|
+
return; // undefined = no decision, proceed normally
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
// Check if this group should be triaged
|
|
59
|
+
const groupId = event.sessionKey ?? "";
|
|
60
|
+
if (excludeGroups?.has(groupId)) {
|
|
61
|
+
return; // This group is excluded from triage
|
|
62
|
+
}
|
|
63
|
+
if (includeGroups && !includeGroups.has(groupId)) {
|
|
64
|
+
return; // This group isn't in the include list
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
// Skip triage for very short messages (likely reactions or stickers)
|
|
68
|
+
if (!event.content || event.content.trim().length < 2) {
|
|
69
|
+
return { handled: true }; // Skip silently
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
// Run the triage model
|
|
73
|
+
const result = await evaluateMessage({
|
|
74
|
+
content: event.content,
|
|
75
|
+
config,
|
|
76
|
+
resolveApiKey,
|
|
77
|
+
logger: logDecisions ? api.logger : undefined,
|
|
78
|
+
});
|
|
79
|
+
|
|
80
|
+
if (logDecisions) {
|
|
81
|
+
const decision = result.shouldRespond ? "RESPOND" : "SKIP";
|
|
82
|
+
api.logger.info?.(
|
|
83
|
+
`triage-gate: ${decision} (${result.durationMs}ms) — "${event.content.slice(0, 80)}"`,
|
|
84
|
+
);
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
if (!result.shouldRespond) {
|
|
88
|
+
// Tell OpenClaw to skip the main agent for this message
|
|
89
|
+
return { handled: true };
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
// Let the message through to the main agent
|
|
93
|
+
return; // undefined = proceed normally
|
|
94
|
+
});
|
|
95
|
+
|
|
96
|
+
api.logger.info?.("triage-gate: plugin loaded");
|
|
97
|
+
},
|
|
98
|
+
});
|
package/src/providers.ts
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Thin adapter that maps a provider name to its API endpoint and request format.
|
|
3
|
+
*
|
|
4
|
+
* Supports two API formats:
|
|
5
|
+
* 1. Anthropic Messages API (for Anthropic models)
|
|
6
|
+
* 2. OpenAI-compatible Chat Completions API (for everything else)
|
|
7
|
+
*
|
|
8
|
+
* Future: add more provider-specific formats here as needed.
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
// ---------------------------------------------------------------------------
|
|
12
|
+
// Types
|
|
13
|
+
// ---------------------------------------------------------------------------
|
|
14
|
+
|
|
15
|
+
export type ProviderAdapter = {
|
|
16
|
+
/** Full URL to POST the request to. */
|
|
17
|
+
endpoint: string;
|
|
18
|
+
|
|
19
|
+
/** Build the fetch request body for this provider. */
|
|
20
|
+
buildRequestBody: (params: {
|
|
21
|
+
model: string;
|
|
22
|
+
systemPrompt: string;
|
|
23
|
+
userMessage: string;
|
|
24
|
+
maxTokens: number;
|
|
25
|
+
}) => string;
|
|
26
|
+
|
|
27
|
+
/** Build the request headers (including auth). */
|
|
28
|
+
buildHeaders: (apiKey: string) => Record<string, string>;
|
|
29
|
+
|
|
30
|
+
/** Extract the text response from the API response body. */
|
|
31
|
+
extractResponse: (body: unknown) => string;
|
|
32
|
+
};
|
|
33
|
+
|
|
34
|
+
// ---------------------------------------------------------------------------
|
|
35
|
+
// Anthropic Messages API
|
|
36
|
+
// ---------------------------------------------------------------------------
|
|
37
|
+
|
|
38
|
+
const anthropicAdapter: ProviderAdapter = {
|
|
39
|
+
endpoint: "https://api.anthropic.com/v1/messages",
|
|
40
|
+
|
|
41
|
+
buildRequestBody({ model, systemPrompt, userMessage, maxTokens }) {
|
|
42
|
+
return JSON.stringify({
|
|
43
|
+
model,
|
|
44
|
+
max_tokens: maxTokens,
|
|
45
|
+
system: systemPrompt,
|
|
46
|
+
messages: [{ role: "user", content: userMessage }],
|
|
47
|
+
});
|
|
48
|
+
},
|
|
49
|
+
|
|
50
|
+
buildHeaders(apiKey) {
|
|
51
|
+
return {
|
|
52
|
+
"Content-Type": "application/json",
|
|
53
|
+
"x-api-key": apiKey,
|
|
54
|
+
"anthropic-version": "2023-06-01",
|
|
55
|
+
};
|
|
56
|
+
},
|
|
57
|
+
|
|
58
|
+
extractResponse(body) {
|
|
59
|
+
const msg = body as { content?: Array<{ type: string; text?: string }> };
|
|
60
|
+
const textBlock = msg.content?.find((b) => b.type === "text");
|
|
61
|
+
return textBlock?.text?.trim() ?? "";
|
|
62
|
+
},
|
|
63
|
+
};
|
|
64
|
+
|
|
65
|
+
// ---------------------------------------------------------------------------
|
|
66
|
+
// OpenAI-compatible Chat Completions API
|
|
67
|
+
// Used for OpenAI, OpenRouter, MiniMax, and other compatible providers.
|
|
68
|
+
// ---------------------------------------------------------------------------
|
|
69
|
+
|
|
70
|
+
/**
|
|
71
|
+
* Known base URLs for providers that use the OpenAI-compatible format.
|
|
72
|
+
* If a provider isn't listed here, we fall back to OpenAI's endpoint.
|
|
73
|
+
*/
|
|
74
|
+
const OPENAI_COMPATIBLE_ENDPOINTS: Record<string, string> = {
|
|
75
|
+
openai: "https://api.openai.com/v1/chat/completions",
|
|
76
|
+
openrouter: "https://openrouter.ai/api/v1/chat/completions",
|
|
77
|
+
minimax: "https://openrouter.ai/api/v1/chat/completions",
|
|
78
|
+
};
|
|
79
|
+
|
|
80
|
+
function createOpenAICompatibleAdapter(provider: string): ProviderAdapter {
|
|
81
|
+
const endpoint =
|
|
82
|
+
OPENAI_COMPATIBLE_ENDPOINTS[provider] ??
|
|
83
|
+
"https://api.openai.com/v1/chat/completions";
|
|
84
|
+
|
|
85
|
+
return {
|
|
86
|
+
endpoint,
|
|
87
|
+
|
|
88
|
+
buildRequestBody({ model, systemPrompt, userMessage, maxTokens }) {
|
|
89
|
+
return JSON.stringify({
|
|
90
|
+
model,
|
|
91
|
+
max_tokens: maxTokens,
|
|
92
|
+
messages: [
|
|
93
|
+
{ role: "system", content: systemPrompt },
|
|
94
|
+
{ role: "user", content: userMessage },
|
|
95
|
+
],
|
|
96
|
+
});
|
|
97
|
+
},
|
|
98
|
+
|
|
99
|
+
buildHeaders(apiKey) {
|
|
100
|
+
return {
|
|
101
|
+
"Content-Type": "application/json",
|
|
102
|
+
Authorization: `Bearer ${apiKey}`,
|
|
103
|
+
};
|
|
104
|
+
},
|
|
105
|
+
|
|
106
|
+
extractResponse(body) {
|
|
107
|
+
const resp = body as {
|
|
108
|
+
choices?: Array<{ message?: { content?: string } }>;
|
|
109
|
+
};
|
|
110
|
+
return resp.choices?.[0]?.message?.content?.trim() ?? "";
|
|
111
|
+
},
|
|
112
|
+
};
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
// ---------------------------------------------------------------------------
|
|
116
|
+
// Public API
|
|
117
|
+
// ---------------------------------------------------------------------------
|
|
118
|
+
|
|
119
|
+
/**
|
|
120
|
+
* Parse a "provider/model" string into its two parts.
|
|
121
|
+
*
|
|
122
|
+
* Examples:
|
|
123
|
+
* "anthropic/claude-haiku-4-5-20251001" → { provider: "anthropic", model: "claude-haiku-4-5-20251001" }
|
|
124
|
+
* "openai/gpt-4.1-mini" → { provider: "openai", model: "gpt-4.1-mini" }
|
|
125
|
+
* "minimax/minimax/minimax-m2.5" → { provider: "minimax", model: "minimax/minimax-m2.5" }
|
|
126
|
+
*/
|
|
127
|
+
export function parseModelString(modelString: string): {
|
|
128
|
+
provider: string;
|
|
129
|
+
model: string;
|
|
130
|
+
} {
|
|
131
|
+
const slashIndex = modelString.indexOf("/");
|
|
132
|
+
if (slashIndex === -1) {
|
|
133
|
+
return { provider: "anthropic", model: modelString };
|
|
134
|
+
}
|
|
135
|
+
return {
|
|
136
|
+
provider: modelString.slice(0, slashIndex),
|
|
137
|
+
model: modelString.slice(slashIndex + 1),
|
|
138
|
+
};
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
/**
|
|
142
|
+
* Get the appropriate API adapter for a given provider.
|
|
143
|
+
*/
|
|
144
|
+
export function getProviderAdapter(provider: string): ProviderAdapter {
|
|
145
|
+
if (provider === "anthropic") {
|
|
146
|
+
return anthropicAdapter;
|
|
147
|
+
}
|
|
148
|
+
return createOpenAICompatibleAdapter(provider);
|
|
149
|
+
}
|
package/src/triage.ts
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Core triage logic: calls a cheap model to decide if the bot should respond.
|
|
3
|
+
*
|
|
4
|
+
* The flow is simple:
|
|
5
|
+
* 1. Build a prompt with the triage instructions + the message content
|
|
6
|
+
* 2. Call the triage model via its provider's API
|
|
7
|
+
* 3. Parse the response as RESPOND or SKIP
|
|
8
|
+
* 4. Return true (should respond) or false (skip)
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import {
|
|
12
|
+
DEFAULT_TRIAGE_MODEL,
|
|
13
|
+
DEFAULT_TRIAGE_PROMPT,
|
|
14
|
+
DEFAULT_MAX_TRIAGE_TOKENS,
|
|
15
|
+
type TriageGateConfig,
|
|
16
|
+
} from "./config.js";
|
|
17
|
+
import { parseModelString, getProviderAdapter } from "./providers.js";
|
|
18
|
+
|
|
19
|
+
// ---------------------------------------------------------------------------
|
|
20
|
+
// Types
|
|
21
|
+
// ---------------------------------------------------------------------------
|
|
22
|
+
|
|
23
|
+
type TriageParams = {
|
|
24
|
+
/** The message content to evaluate. */
|
|
25
|
+
content: string;
|
|
26
|
+
|
|
27
|
+
/** Plugin config. */
|
|
28
|
+
config: TriageGateConfig;
|
|
29
|
+
|
|
30
|
+
/**
|
|
31
|
+
* Resolves an API key for a given provider/model.
|
|
32
|
+
* Injected from the OpenClaw plugin runtime so the plugin stays model-agnostic.
|
|
33
|
+
*/
|
|
34
|
+
resolveApiKey: (provider: string, model: string) => Promise<string>;
|
|
35
|
+
|
|
36
|
+
/** Optional logger for debug output. */
|
|
37
|
+
logger?: {
|
|
38
|
+
info?: (msg: string) => void;
|
|
39
|
+
warn?: (msg: string) => void;
|
|
40
|
+
};
|
|
41
|
+
};
|
|
42
|
+
|
|
43
|
+
export type TriageResult = {
|
|
44
|
+
/** Whether the bot should respond to this message. */
|
|
45
|
+
shouldRespond: boolean;
|
|
46
|
+
|
|
47
|
+
/** The raw response from the triage model (for debugging). */
|
|
48
|
+
rawResponse: string;
|
|
49
|
+
|
|
50
|
+
/** How long the triage call took in milliseconds. */
|
|
51
|
+
durationMs: number;
|
|
52
|
+
};
|
|
53
|
+
|
|
54
|
+
// ---------------------------------------------------------------------------
|
|
55
|
+
// Main function
|
|
56
|
+
// ---------------------------------------------------------------------------
|
|
57
|
+
|
|
58
|
+
/**
|
|
59
|
+
* Ask a cheap model whether the bot should respond to a group chat message.
|
|
60
|
+
*
|
|
61
|
+
* Returns `shouldRespond: true` if the model says RESPOND,
|
|
62
|
+
* `shouldRespond: false` if it says SKIP.
|
|
63
|
+
*
|
|
64
|
+
* On error (API failure, timeout, etc.), defaults to `shouldRespond: true`
|
|
65
|
+
* so the main model still runs — we'd rather waste some tokens than
|
|
66
|
+
* silently drop messages.
|
|
67
|
+
*/
|
|
68
|
+
export async function evaluateMessage(params: TriageParams): Promise<TriageResult> {
|
|
69
|
+
const { content, config, resolveApiKey, logger } = params;
|
|
70
|
+
const startTime = Date.now();
|
|
71
|
+
|
|
72
|
+
const modelString = config.triageModel ?? DEFAULT_TRIAGE_MODEL;
|
|
73
|
+
const { provider, model } = parseModelString(modelString);
|
|
74
|
+
const prompt = config.triagePrompt ?? DEFAULT_TRIAGE_PROMPT;
|
|
75
|
+
const maxTokens = config.maxTriageTokens ?? DEFAULT_MAX_TRIAGE_TOKENS;
|
|
76
|
+
|
|
77
|
+
try {
|
|
78
|
+
// Resolve the API key from OpenClaw's provider config
|
|
79
|
+
const apiKey = await resolveApiKey(provider, model);
|
|
80
|
+
if (!apiKey) {
|
|
81
|
+
logger?.warn?.(`triage-gate: no API key found for provider "${provider}", allowing message through`);
|
|
82
|
+
return { shouldRespond: true, rawResponse: "", durationMs: Date.now() - startTime };
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
// Get the right adapter for this provider's API format
|
|
86
|
+
const adapter = getProviderAdapter(provider);
|
|
87
|
+
|
|
88
|
+
// Make the API call
|
|
89
|
+
const response = await fetch(adapter.endpoint, {
|
|
90
|
+
method: "POST",
|
|
91
|
+
headers: adapter.buildHeaders(apiKey),
|
|
92
|
+
body: adapter.buildRequestBody({
|
|
93
|
+
model,
|
|
94
|
+
systemPrompt: prompt,
|
|
95
|
+
userMessage: `Message: ${content}`,
|
|
96
|
+
maxTokens,
|
|
97
|
+
}),
|
|
98
|
+
signal: AbortSignal.timeout(5000), // 5s timeout — triage should be fast
|
|
99
|
+
});
|
|
100
|
+
|
|
101
|
+
if (!response.ok) {
|
|
102
|
+
const errorText = await response.text().catch(() => "unknown error");
|
|
103
|
+
logger?.warn?.(
|
|
104
|
+
`triage-gate: API returned ${response.status} from ${provider}, allowing message through. Error: ${errorText}`,
|
|
105
|
+
);
|
|
106
|
+
return { shouldRespond: true, rawResponse: errorText, durationMs: Date.now() - startTime };
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
const body = await response.json();
|
|
110
|
+
const rawResponse = adapter.extractResponse(body);
|
|
111
|
+
const shouldRespond = parseTriageDecision(rawResponse);
|
|
112
|
+
|
|
113
|
+
return { shouldRespond, rawResponse, durationMs: Date.now() - startTime };
|
|
114
|
+
} catch (error) {
|
|
115
|
+
// On any error, default to letting the message through.
|
|
116
|
+
// Better to waste tokens than silently drop a message.
|
|
117
|
+
logger?.warn?.(`triage-gate: error during triage (${String(error)}), allowing message through`);
|
|
118
|
+
return { shouldRespond: true, rawResponse: "", durationMs: Date.now() - startTime };
|
|
119
|
+
}
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
// ---------------------------------------------------------------------------
|
|
123
|
+
// Helpers
|
|
124
|
+
// ---------------------------------------------------------------------------
|
|
125
|
+
|
|
126
|
+
/**
|
|
127
|
+
* Parse the triage model's response into a boolean decision.
|
|
128
|
+
*
|
|
129
|
+
* Looks for "RESPOND" or "SKIP" in the response text.
|
|
130
|
+
* If unclear, defaults to true (respond) to avoid dropping messages.
|
|
131
|
+
*/
|
|
132
|
+
export function parseTriageDecision(response: string): boolean {
|
|
133
|
+
const normalized = response.toUpperCase().trim();
|
|
134
|
+
|
|
135
|
+
if (normalized.startsWith("SKIP")) return false;
|
|
136
|
+
if (normalized.startsWith("RESPOND")) return true;
|
|
137
|
+
|
|
138
|
+
// If the response doesn't clearly match either, default to responding.
|
|
139
|
+
// This is intentional — false negatives (bot stays silent when it shouldn't)
|
|
140
|
+
// are worse than false positives (bot responds when it didn't need to).
|
|
141
|
+
return true;
|
|
142
|
+
}
|