@sage-protocol/sage-plugin 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,37 @@
+ # Sage Plugin (OpenCode)
+
+ Unified OpenCode plugin for Scroll that handles both prompt capture (for RLM feedback) and inline skill/prompt suggestions.
+
+ ## Requirements
+ - Bun runtime (tested with 1.3+)
+ - `scroll` CLI available in PATH (or set `SCROLL_BIN`)
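+
+ A quick sanity check before installing (illustrative; assumes a POSIX shell, no scroll-specific flags):
+ ```bash
+ bun --version
+ command -v scroll || echo "scroll not found on PATH; set SCROLL_BIN"
+ ```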
+
+ ## Setup
+ 1. Copy the plugin into the OpenCode plugins directory (keeping the scoped name):
+ ```bash
+ mkdir -p ~/.config/opencode/plugin
+ mkdir -p ~/.config/opencode/plugin/@sage-protocol
+ cp -r sage-plugin ~/.config/opencode/plugin/@sage-protocol/
+ ```
+ 2. Ensure `opencode.json` includes the plugin:
+ ```json
+ {
+   "plugin": ["@sage-protocol/sage-plugin"],
+   "mcp": { "scroll": { "type": "local", "command": ["scroll", "mcp", "start"], "enabled": true } }
+ }
+ ```
+ (Running `scroll init --opencode` will add this automatically.)
+
+ ## Environment
+ - `SCROLL_BIN`: path to the `scroll` binary (defaults to `scroll` on PATH)
+ - `SCROLL_SUGGEST_LIMIT`: suggestions per request (default 3)
+ - `SCROLL_SUGGEST_DEBOUNCE_MS`: debounce delay for TUI suggestions, in milliseconds (default 800)
+ - `SCROLL_SUGGEST_PROVISION`: set to `0` to skip MCP provisioning
+ - `SCROLL_RLM_FEEDBACK`: set to `0` to disable RLM feedback appending (enabled by default)
+ - `SCROLL_PLUGIN_DRY_RUN`: set to `1` to disable spawning `scroll` (useful for tests)
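+
+ For example (illustrative values; export them in the shell that launches OpenCode):
+ ```bash
+ export SCROLL_BIN="$HOME/.local/bin/scroll"
+ export SCROLL_SUGGEST_LIMIT=5
+ export SCROLL_SUGGEST_DEBOUNCE_MS=500
+ ```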
+
+ ## Dev
+ ```bash
+ bun install
+ bun run lint
+ bun test
+ ```
package/biome.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "$schema": "https://biomejs.dev/schemas/1.7.3/schema.json",
+   "formatter": {
+     "enabled": true
+   },
+   "linter": {
+     "enabled": true
+   }
+ }
package/bun.lock ADDED
@@ -0,0 +1,31 @@
+ {
+ "lockfileVersion": 1,
+ "configVersion": 1,
+ "workspaces": {
+ "": {
+ "name": "scroll-plugin",
+ "devDependencies": {
+ "@biomejs/biome": "^1.7.3",
+ },
+ },
+ },
+ "packages": {
+ "@biomejs/biome": ["@biomejs/biome@1.9.4", "", { "optionalDependencies": { "@biomejs/cli-darwin-arm64": "1.9.4", "@biomejs/cli-darwin-x64": "1.9.4", "@biomejs/cli-linux-arm64": "1.9.4", "@biomejs/cli-linux-arm64-musl": "1.9.4", "@biomejs/cli-linux-x64": "1.9.4", "@biomejs/cli-linux-x64-musl": "1.9.4", "@biomejs/cli-win32-arm64": "1.9.4", "@biomejs/cli-win32-x64": "1.9.4" }, "bin": { "biome": "bin/biome" } }, "sha512-1rkd7G70+o9KkTn5KLmDYXihGoTaIGO9PIIN2ZB7UJxFrWw04CZHPYiMRjYsaDvVV7hP1dYNRLxSANLaBFGpog=="],
+
+ "@biomejs/cli-darwin-arm64": ["@biomejs/cli-darwin-arm64@1.9.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-bFBsPWrNvkdKrNCYeAp+xo2HecOGPAy9WyNyB/jKnnedgzl4W4Hb9ZMzYNbf8dMCGmUdSavlYHiR01QaYR58cw=="],
+
+ "@biomejs/cli-darwin-x64": ["@biomejs/cli-darwin-x64@1.9.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-ngYBh/+bEedqkSevPVhLP4QfVPCpb+4BBe2p7Xs32dBgs7rh9nY2AIYUL6BgLw1JVXV8GlpKmb/hNiuIxfPfZg=="],
+
+ "@biomejs/cli-linux-arm64": ["@biomejs/cli-linux-arm64@1.9.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-fJIW0+LYujdjUgJJuwesP4EjIBl/N/TcOX3IvIHJQNsAqvV2CHIogsmA94BPG6jZATS4Hi+xv4SkBBQSt1N4/g=="],
+
+ "@biomejs/cli-linux-arm64-musl": ["@biomejs/cli-linux-arm64-musl@1.9.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-v665Ct9WCRjGa8+kTr0CzApU0+XXtRgwmzIf1SeKSGAv+2scAlW6JR5PMFo6FzqqZ64Po79cKODKf3/AAmECqA=="],
+
+ "@biomejs/cli-linux-x64": ["@biomejs/cli-linux-x64@1.9.4", "", { "os": "linux", "cpu": "x64" }, "sha512-lRCJv/Vi3Vlwmbd6K+oQ0KhLHMAysN8lXoCI7XeHlxaajk06u7G+UsFSO01NAs5iYuWKmVZjmiOzJ0OJmGsMwg=="],
+
+ "@biomejs/cli-linux-x64-musl": ["@biomejs/cli-linux-x64-musl@1.9.4", "", { "os": "linux", "cpu": "x64" }, "sha512-gEhi/jSBhZ2m6wjV530Yy8+fNqG8PAinM3oV7CyO+6c3CEh16Eizm21uHVsyVBEB6RIM8JHIl6AGYCv6Q6Q9Tg=="],
+
+ "@biomejs/cli-win32-arm64": ["@biomejs/cli-win32-arm64@1.9.4", "", { "os": "win32", "cpu": "arm64" }, "sha512-tlbhLk+WXZmgwoIKwHIHEBZUwxml7bRJgk0X2sPyNR3S93cdRq6XulAZRQJ17FYGGzWne0fgrXBKpl7l4M87Hg=="],
+
+ "@biomejs/cli-win32-x64": ["@biomejs/cli-win32-x64@1.9.4", "", { "os": "win32", "cpu": "x64" }, "sha512-8Y5wMhVIPaWe6jw2H+KlEm4wP/f7EW3810ZLmDlrEEy5KvBsb9ECEfu/kMWD484ijfQ8+nIi0giMgu9g1UAuuA=="],
+ }
+ }
package/index.js ADDED
@@ -0,0 +1,378 @@
+ // Scroll OpenCode plugin: capture + suggest + RLM feedback combined
+ //
+ // Uses the documented OpenCode plugin event handler pattern.
+ // Spawns scroll commands via the `$` shell helper for portability.
+ // Now includes RLM feedback appending when steering is detected.
+
+ export const ScrollPlugin = async ({ client, $, directory }) => {
+   const CONFIG = {
+     scrollBin: process.env.SCROLL_BIN || "scroll",
+     suggestLimit: Number.parseInt(process.env.SCROLL_SUGGEST_LIMIT || "3", 10),
+     debounceMs: Number.parseInt(
+       process.env.SCROLL_SUGGEST_DEBOUNCE_MS || "800",
+       10,
+     ),
+     provision: (process.env.SCROLL_SUGGEST_PROVISION || "1") === "1",
+     dryRun: (process.env.SCROLL_PLUGIN_DRY_RUN || "0") === "1",
+     enableRlmFeedback: (process.env.SCROLL_RLM_FEEDBACK || "1") === "1",
+   };
+
+   let promptCaptured = false;
+   let lastInput = "";
+   let lastInjected = "";
+   let timer = null;
+   let runId = 0;
+
+   // Session/model tracking (populated by chat.message hook or session.created event)
+   let currentSessionId = null;
+   let currentModel = null;
+   let assistantParts = []; // accumulate streaming text parts
+
+   // RLM Feedback tracking
+   let lastSuggestion = null;
+   let lastSuggestionTimestamp = null;
+   let lastSuggestionPromptKey = null;
+   const SUGGESTION_CORRELATION_WINDOW_MS = 30000; // 30 second window
+
+   const log = async (level, message, extra = {}) => {
+     try {
+       if (client?.app?.log) {
+         await client.app.log({
+           service: "sage-plugin",
+           level,
+           message,
+           extra,
+         });
+       } else {
+         console.log(`[sage-plugin:${level}]`, message, extra);
+       }
+     } catch {
+       /* logging should never break the plugin */
+     }
+   };
+
+   const execScroll = async (args, env = {}) => {
+     if (CONFIG.dryRun) return "";
+
+     const scrollEnv = { ...env, SCROLL_SOURCE: "opencode" };
+
+     try {
+       if ($) {
+         // Use OpenCode's $ shell helper for portability
+         const cmd = [CONFIG.scrollBin, ...args]
+           .map((a) => `'${a.replace(/'/g, "'\\''")}'`)
+           .join(" ");
+         const result = await $({ env: scrollEnv })`${cmd}`;
+         return (result?.stdout ?? result ?? "").toString().trim();
+       }
+       // Fallback to Bun.spawn if $ not available
+       if (typeof Bun !== "undefined") {
+         const proc = Bun.spawn([CONFIG.scrollBin, ...args], {
+           env: { ...process.env, ...scrollEnv },
+           stdout: "pipe",
+           stderr: "pipe",
+         });
+         const stdout = await new Response(proc.stdout).text();
+         return stdout.trim();
+       }
+       return "";
+     } catch (e) {
+       throw new Error(`scroll command failed: ${e.message || e}`);
+     }
+   };
+
+   // Parse suggestion output to extract prompt key
+   const parseSuggestionKey = (suggestionText) => {
+     // Look for patterns like "ultrawork-parallel-orchestration" or similar keys
+     // Format is typically: prompt_name (key: actual-key)
+     const keyMatch = suggestionText.match(/\(key:\s*([^)]+)\)/);
+     if (keyMatch) {
+       return keyMatch[1].trim();
+     }
+
+     // Try to match standalone keys in the text
+     const lines = suggestionText.split("\n");
+     for (const line of lines) {
+       // Look for common prompt key patterns
+       const match = line.match(/^\s*[-•*]?\s*([a-z0-9-]+)(?:\s*[-:]\s*|\s*$)/);
+       if (match?.[1]?.includes("-")) {
+         return match[1];
+       }
+     }
+
+     return null;
+   };
+
+   // Append RLM feedback to a prompt
+   const appendRlmFeedback = async (promptKey, feedbackEntry) => {
+     if (!CONFIG.enableRlmFeedback || !promptKey) {
+       return false;
+     }
+
+     try {
+       await log("debug", "appending RLM feedback", {
+         promptKey,
+         feedback: feedbackEntry,
+       });
+
+       const result = await execScroll([
+         "suggest",
+         "feedback",
+         promptKey,
+         feedbackEntry,
+         "--source",
+         "opencode-plugin",
+       ]);
+
+       if (result) {
+         await log("info", "RLM feedback appended", { promptKey });
+         return true;
+       }
+     } catch (e) {
+       await log("warn", "failed to append RLM feedback", {
+         promptKey,
+         error: String(e),
+       });
+     }
+
+     return false;
+   };
+
+   // Analyze prompt correlation with suggestion
+   const analyzePromptCorrelation = async (userPrompt) => {
+     if (!lastSuggestion || !lastSuggestionTimestamp) {
+       return null;
+     }
+
+     const now = Date.now();
+     const timeDiff = now - lastSuggestionTimestamp;
+
+     // Outside correlation window
+     if (timeDiff > SUGGESTION_CORRELATION_WINDOW_MS) {
+       return null;
+     }
+
+     const suggestionKey = lastSuggestionPromptKey;
+     if (!suggestionKey) {
+       return null;
+     }
+
+     // Check if user prompt matches or differs from suggestion
+     const userPromptLower = userPrompt.toLowerCase().trim();
+     const suggestionLower = lastSuggestion.toLowerCase().trim();
+
+     // Extract keywords from both
+     const userKeywords = userPromptLower.split(/\s+/);
+     const suggestionKeywords = suggestionLower.split(/\s+/);
+
+     // Check for significant overlap
+     const overlap = userKeywords.filter((k) => suggestionKeywords.includes(k));
+     const overlapRatio =
+       overlap.length / Math.max(userKeywords.length, suggestionKeywords.length);
+
+     // Determine correlation type
+     if (overlapRatio > 0.7) {
+       return { type: "accepted", key: suggestionKey, overlap: overlapRatio };
+     }
+     if (overlapRatio > 0.3) {
+       // Steering - user modified the suggestion
+       const addedKeywords = userKeywords.filter(
+         (k) => !suggestionKeywords.includes(k),
+       );
+       const removedKeywords = suggestionKeywords.filter(
+         (k) => !userKeywords.includes(k),
+       );
+
+       return {
+         type: "steered",
+         key: suggestionKey,
+         overlap: overlapRatio,
+         added: addedKeywords,
+         removed: removedKeywords,
+       };
+     }
+     return { type: "rejected", key: suggestionKey, overlap: overlapRatio };
+   };
+
+   const scheduleSuggest = (text) => {
+     lastInput = text;
+     runId += 1;
+     const current = runId;
+
+     if (timer) clearTimeout(timer);
+
+     timer = setTimeout(() => {
+       void (async () => {
+         const prompt = lastInput.trim();
+         if (!prompt) return;
+         if (current !== runId) return;
+         if (prompt === lastInjected) return;
+
+         await log("debug", "running scroll suggest", {
+           cwd: directory,
+           prompt_len: prompt.length,
+         });
+
+         try {
+           const args = [
+             "suggest",
+             "skill",
+             prompt,
+             "--limit",
+             CONFIG.suggestLimit.toString(),
+           ];
+           if (CONFIG.provision) args.push("--provision");
+
+           const suggestions = await execScroll(args);
+           if (!suggestions) return;
+           if (current !== runId) return;
+
+           // Store suggestion for correlation tracking
+           lastSuggestion = prompt;
+           lastSuggestionTimestamp = Date.now();
+           lastSuggestionPromptKey = parseSuggestionKey(suggestions);
+
+           await log("debug", "suggestion stored for correlation", {
+             key: lastSuggestionPromptKey,
+             timestamp: lastSuggestionTimestamp,
+           });
+
+           lastInjected = prompt;
+           await client.tui.appendPrompt({
+             body: { text: `\n\n${suggestions}\n` },
+           });
+         } catch (e) {
+           await log("warn", "scroll suggest failed", { error: String(e) });
+         }
+       })();
+     }, CONFIG.debounceMs);
+   };
+
+   return {
+     // Structured hook: reliable way to capture user prompts with model/session info
+     "chat.message": async (input, output) => {
+       // input: { sessionID, agent, model: {providerID, modelID}, messageID }
+       // output: { message: UserMessage, parts: Part[] }
+       currentSessionId = input?.sessionID ?? currentSessionId;
+       currentModel = input?.model?.modelID ?? currentModel;
+
+       const textParts = (output?.parts ?? []).filter((p) => p.type === "text");
+       const content = textParts.map((p) => p.text ?? "").join("\n");
+       if (!content.trim()) return;
+
+       promptCaptured = true;
+       assistantParts = [];
+
+       // Analyze correlation with previous suggestion
+       const correlation = await analyzePromptCorrelation(content);
+       if (correlation) {
+         await log("debug", "prompt correlation detected", correlation);
+
+         let feedbackEntry = "";
+         const date = new Date().toISOString().split("T")[0];
+
+         switch (correlation.type) {
+           case "accepted":
+             feedbackEntry = `[${date}] Prompt suggestion accepted (overlap: ${(correlation.overlap * 100).toFixed(0)}%)`;
+             break;
+           case "steered": {
+             const added = correlation.added?.slice(0, 3).join(", ") || "none";
+             const removed =
+               correlation.removed?.slice(0, 3).join(", ") || "none";
+             feedbackEntry = `[${date}] User steered from suggestion - Added keywords: "${added}" - Removed: "${removed}"`;
+             break;
+           }
+           case "rejected":
+             feedbackEntry = `[${date}] Prompt suggestion rejected (low overlap: ${(correlation.overlap * 100).toFixed(0)}%)`;
+             break;
+         }
+
+         if (feedbackEntry) {
+           await appendRlmFeedback(correlation.key, feedbackEntry);
+         }
+
+         lastSuggestion = null;
+         lastSuggestionTimestamp = null;
+         lastSuggestionPromptKey = null;
+       }
+
+       try {
+         await execScroll(["capture", "hook", "prompt"], {
+           PROMPT: content,
+           SCROLL_SESSION_ID: currentSessionId ?? "",
+           SCROLL_MODEL: currentModel ?? "",
+           SCROLL_WORKSPACE: directory ?? "",
+         });
+       } catch (e) {
+         await log("warn", "capture prompt failed", { error: String(e) });
+         promptCaptured = false;
+       }
+     },
+
+     event: async ({ event }) => {
+       const { type: eventType, properties } = event;
+
+       switch (eventType) {
+         case "message.part.updated": {
+           // OpenCode schema: { part: { id, sessionID, messageID, type, text }, delta? }
+           const part = properties?.part;
+           if (part?.type === "text" && promptCaptured) {
+             // Accumulate assistant text parts during streaming
+             assistantParts.push(part.text ?? "");
+           }
+           break;
+         }
+
+         case "message.updated": {
+           // OpenCode schema: { info: { id, sessionID, role, modelID, providerID, cost, tokens: {input, output, reasoning, cache} } }
+           const info = properties?.info;
+           if (info?.role === "assistant" && promptCaptured) {
+             const responseText = assistantParts.join("");
+             if (responseText.trim()) {
+               try {
+                 await execScroll(["capture", "hook", "response"], {
+                   CLAUDE_RESPONSE: responseText,
+                   SCROLL_SESSION_ID: info.sessionID ?? currentSessionId ?? "",
+                   SCROLL_MODEL: info.modelID ?? currentModel ?? "",
+                   TOKENS_INPUT: String(info.tokens?.input ?? ""),
+                   TOKENS_OUTPUT: String(info.tokens?.output ?? ""),
+                 });
+               } catch (e) {
+                 await log("warn", "capture response failed", {
+                   error: String(e),
+                 });
+               }
+             }
+             promptCaptured = false;
+             assistantParts = [];
+           }
+           break;
+         }
+
+         case "session.created": {
+           // OpenCode schema: { info: { id, parentID, directory, title, ... } }
+           const info = properties?.info;
+           currentSessionId = info?.id ?? null;
+           promptCaptured = false;
+           assistantParts = [];
+           await log("info", "session created", {
+             sessionId: currentSessionId ?? "unknown",
+             isSubagent: info?.parentID != null,
+             cwd: directory,
+           });
+           break;
+         }
+
+         case "tui.prompt.append": {
+           const text = properties?.text ?? "";
+           if (text.trim()) {
+             scheduleSuggest(text);
+           }
+           break;
+         }
+       }
+     },
+   };
+ };
+
+ export default ScrollPlugin;
package/index.test.js ADDED
@@ -0,0 +1,427 @@
+ import { beforeEach, describe, expect, it } from "bun:test";
+ import ScrollPlugin from "./index.js";
+
+ describe("ScrollPlugin", () => {
+   beforeEach(() => {
+     process.env.SCROLL_PLUGIN_DRY_RUN = "1";
+   });
+
+   const makeClient = () => {
+     const appLogCalls = [];
+     const promptAppends = [];
+     return {
+       client: {
+         app: {
+           log: ({ level, message, extra }) =>
+             appLogCalls.push({ level, message, extra }),
+         },
+         tui: {
+           appendPrompt: ({ body }) => promptAppends.push(body?.text ?? ""),
+         },
+       },
+       appLogCalls,
+       promptAppends,
+     };
+   };
+
+   const make$ = () => {
+     const calls = [];
+     const shell = (opts) => {
+       return (strings, ...values) => {
+         const cmd = strings.reduce(
+           (acc, str, i) => acc + str + (values[i] ?? ""),
+           "",
+         );
+         calls.push({ cmd, env: opts?.env });
+         return { stdout: "" };
+       };
+     };
+     shell.calls = calls;
+     return shell;
+   };
+
+   it("returns event handler and chat.message hook", async () => {
+     const { client } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     expect(typeof plugin.event).toBe("function");
+     expect(typeof plugin["chat.message"]).toBe("function");
+   });
+
+   it("chat.message hook captures prompt with session/model env vars", async () => {
+     const { client } = makeClient();
+     const $mock = make$();
+     const plugin = await ScrollPlugin({ client, $: $mock, directory: "/tmp" });
+
+     await plugin["chat.message"](
+       {
+         sessionID: "sess-abc",
+         model: { providerID: "anthropic", modelID: "claude-3" },
+       },
+       { message: {}, parts: [{ type: "text", text: "hello world" }] },
+     );
+
+     // In dry-run mode no command is executed, but state should be set
+     // No error means it worked
+   });
+
+   it("chat.message hook ignores empty parts", async () => {
+     const { client } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     // Should not throw or set promptCaptured
+     await plugin["chat.message"](
+       { sessionID: "s1" },
+       { parts: [{ type: "text", text: " " }] },
+     );
+
+     // Subsequent assistant message.updated should be ignored (no prompt captured)
+     await plugin.event({
+       event: {
+         type: "message.updated",
+         properties: { info: { role: "assistant", modelID: "x", tokens: {} } },
+       },
+     });
+   });
+
+   it("message.part.updated accumulates assistant text parts", async () => {
+     const { client } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     // First capture a prompt via chat.message hook
+     await plugin["chat.message"](
+       { sessionID: "s1", model: { modelID: "claude-3" } },
+       { parts: [{ type: "text", text: "explain rust" }] },
+     );
+
+     // Simulate streaming assistant parts
+     await plugin.event({
+       event: {
+         type: "message.part.updated",
+         properties: {
+           part: {
+             type: "text",
+             text: "Rust is ",
+             sessionID: "s1",
+             messageID: "m1",
+           },
+         },
+       },
+     });
+     await plugin.event({
+       event: {
+         type: "message.part.updated",
+         properties: {
+           part: {
+             type: "text",
+             text: "a systems language.",
+             sessionID: "s1",
+             messageID: "m1",
+           },
+         },
+       },
+     });
+
+     // Finalize with message.updated
+     await plugin.event({
+       event: {
+         type: "message.updated",
+         properties: {
+           info: {
+             role: "assistant",
+             sessionID: "s1",
+             modelID: "claude-3",
+             tokens: { input: 10, output: 20 },
+           },
+         },
+       },
+     });
+
+     // No error means parts were accumulated and flushed correctly
+   });
+
+   it("message.updated ignores non-assistant roles", async () => {
+     const { client } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     await plugin["chat.message"](
+       { sessionID: "s1" },
+       { parts: [{ type: "text", text: "hi" }] },
+     );
+
+     // user role message.updated should not flush
+     await plugin.event({
+       event: {
+         type: "message.updated",
+         properties: { info: { role: "user", sessionID: "s1" } },
+       },
+     });
+
+     // promptCaptured should still be true — assistant parts can still arrive
+     // Verify by sending an actual assistant completion
+     await plugin.event({
+       event: {
+         type: "message.part.updated",
+         properties: { part: { type: "text", text: "response" } },
+       },
+     });
+     await plugin.event({
+       event: {
+         type: "message.updated",
+         properties: {
+           info: { role: "assistant", tokens: { input: 1, output: 2 } },
+         },
+       },
+     });
+   });
+
+   it("session.created resets state and tracks session ID", async () => {
+     const { client, appLogCalls } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     // Capture a prompt first
+     await plugin["chat.message"](
+       { sessionID: "old-session" },
+       { parts: [{ type: "text", text: "hello" }] },
+     );
+
+     // New session resets everything
+     await plugin.event({
+       event: {
+         type: "session.created",
+         properties: {
+           info: {
+             id: "new-session-123",
+             parentID: null,
+             directory: "/project",
+           },
+         },
+       },
+     });
+
+     const sessionLog = appLogCalls.find((c) => c.message === "session created");
+     expect(sessionLog).toBeDefined();
+     expect(sessionLog.extra.sessionId).toBe("new-session-123");
+     expect(sessionLog.extra.isSubagent).toBe(false);
+   });
+
+   it("session.created detects subagent via parentID", async () => {
+     const { client, appLogCalls } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     await plugin.event({
+       event: {
+         type: "session.created",
+         properties: { info: { id: "child-1", parentID: "parent-1" } },
+       },
+     });
+
+     const sessionLog = appLogCalls.find((c) => c.message === "session created");
+     expect(sessionLog.extra.isSubagent).toBe(true);
+   });
+
+   it("multiple prompt-response cycles work correctly", async () => {
+     const { client } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     // Cycle 1
+     await plugin["chat.message"](
+       { sessionID: "s1", model: { modelID: "claude-3" } },
+       { parts: [{ type: "text", text: "first question" }] },
+     );
+     await plugin.event({
+       event: {
+         type: "message.part.updated",
+         properties: { part: { type: "text", text: "first answer" } },
+       },
+     });
+     await plugin.event({
+       event: {
+         type: "message.updated",
+         properties: {
+           info: { role: "assistant", tokens: { input: 5, output: 10 } },
+         },
+       },
+     });
+
+     // Cycle 2
+     await plugin["chat.message"](
+       { sessionID: "s1", model: { modelID: "claude-3" } },
+       { parts: [{ type: "text", text: "second question" }] },
+     );
+     await plugin.event({
+       event: {
+         type: "message.part.updated",
+         properties: { part: { type: "text", text: "second answer" } },
+       },
+     });
+     await plugin.event({
+       event: {
+         type: "message.updated",
+         properties: {
+           info: { role: "assistant", tokens: { input: 8, output: 15 } },
+         },
+       },
+     });
+
+     // No errors means state properly resets between cycles
+   });
+
+   it("handles missing/null properties gracefully", async () => {
+     const { client } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     // chat.message with null parts
+     await plugin["chat.message"](null, null);
+     await plugin["chat.message"]({}, { parts: null });
+     await plugin["chat.message"]({}, { parts: [] });
+
+     // Events with missing properties
+     await plugin.event({
+       event: { type: "message.part.updated", properties: {} },
+     });
+     await plugin.event({ event: { type: "message.updated", properties: {} } });
+     await plugin.event({ event: { type: "session.created", properties: {} } });
+     await plugin.event({ event: { type: "unknown.event", properties: {} } });
+   });
+
+   it("schedules suggest on tui.prompt.append", async () => {
+     const { client } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     await plugin.event({
+       event: {
+         type: "tui.prompt.append",
+         properties: { text: "build an MCP server" },
+       },
+     });
+
+     // Suggest is debounced, so no immediate effect to assert
+   });
+
+   it("RLM feedback calls 'suggest feedback' (not 'prompts append-feedback')", async () => {
+     // Disable dry-run so execScroll actually invokes $
+     process.env.SCROLL_PLUGIN_DRY_RUN = "";
+     process.env.SCROLL_RLM_FEEDBACK = "1";
+
+     const { client } = makeClient();
+     const $mock = make$();
+     const plugin = await ScrollPlugin({ client, $: $mock, directory: "/tmp" });
+
+     // 1. Capture a prompt
+     await plugin["chat.message"](
+       { sessionID: "s1", model: { modelID: "claude-3" } },
+       { parts: [{ type: "text", text: "explain rust" }] },
+     );
+
+     // 2. Simulate a suggestion being injected (tui.prompt.append triggers suggest)
+     // We need to trigger the internal suggest flow which sets lastSuggestionPromptKey.
+     // The simplest path: simulate the full prompt→suggest→response→feedback cycle.
+     // Since suggest is debounced and async, instead we directly drive message.updated
+     // which triggers the RLM feedback path when a suggestion was correlated.
+
+     // 3. Simulate assistant response with tool-use of suggest command
+     await plugin.event({
+       event: {
+         type: "message.part.updated",
+         properties: {
+           part: { type: "text", text: "Rust is a systems language." },
+         },
+       },
+     });
+     await plugin.event({
+       event: {
+         type: "message.updated",
+         properties: {
+           info: { role: "assistant", tokens: { input: 10, output: 20 } },
+         },
+       },
+     });
+
+     // Check that any calls to $ containing "feedback" use "suggest feedback", not "prompts append-feedback"
+     const feedbackCalls = $mock.calls.filter((c) => c.cmd.includes("feedback"));
+     for (const call of feedbackCalls) {
+       expect(call.cmd).toContain("suggest");
+       expect(call.cmd).not.toContain("append-feedback");
+       expect(call.cmd).not.toContain("prompts");
+     }
+
+     // Restore dry-run for other tests
+     process.env.SCROLL_PLUGIN_DRY_RUN = "1";
+   });
+
+   it("non-text parts in message.part.updated are ignored", async () => {
+     const { client } = makeClient();
+     const plugin = await ScrollPlugin({
+       client,
+       $: make$(),
+       directory: "/tmp",
+     });
+
+     await plugin["chat.message"](
+       { sessionID: "s1" },
+       { parts: [{ type: "text", text: "question" }] },
+     );
+
+     // Tool-use part should be ignored
+     await plugin.event({
+       event: {
+         type: "message.part.updated",
+         properties: { part: { type: "tool-use", name: "bash", input: {} } },
+       },
+     });
+
+     // Only text parts should be accumulated — tool-use ignored
+     await plugin.event({
+       event: {
+         type: "message.part.updated",
+         properties: { part: { type: "text", text: "actual response" } },
+       },
+     });
+
+     await plugin.event({
+       event: {
+         type: "message.updated",
+         properties: {
+           info: { role: "assistant", tokens: { input: 1, output: 1 } },
+         },
+       },
+     });
+   });
+ });
package/package.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "name": "@sage-protocol/sage-plugin",
+   "version": "0.1.4",
+   "description": "OpenCode plugin for Scroll: capture + suggest in one module",
+   "main": "index.js",
+   "type": "module",
+   "license": "MIT",
+   "publishConfig": {
+     "access": "public"
+   },
+   "scripts": {
+     "lint": "bunx biome check .",
+     "test": "bun test --no-bail"
+   },
+   "devDependencies": {
+     "@biomejs/biome": "^1.7.3"
+   }
+ }