@sureshsankaran/ralph-wiggum 0.1.11 → 0.1.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +0 -12
- package/dist/index.js +115 -79
- package/package.json +1 -1
- package/scripts/setup-loop.js +1 -0
package/dist/index.d.ts
CHANGED

```diff
@@ -1,15 +1,3 @@
 import type { Plugin } from "@opencode-ai/plugin";
-/**
- * Ralph Wiggum Plugin - Iterative AI Development
- *
- * This plugin implements the Ralph Wiggum technique: continuously feeding
- * the same prompt to the AI until the task is complete.
- *
- * Usage:
- * 1. Start a loop: /ralph-loop "Your task description" --max-iterations 10 --completion-promise "DONE"
- * 2. Cancel a loop: /cancel-ralph
- *
- * The AI should output <promise>DONE</promise> when the task is complete.
- */
 export declare const RalphWiggumPlugin: Plugin;
 export default RalphWiggumPlugin;
```
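The only change to the declaration file is the removal of the JSDoc header; the exported surface is unchanged. For orientation, a minimal consumer sketch based only on the declarations above (hypothetical: it assumes the published entry points resolve as usual, and how OpenCode discovers plugins is not part of this diff):

```ts
// Hypothetical consumer sketch; names come from the .d.ts above.
import type { Plugin } from "@opencode-ai/plugin";
import RalphWiggumPlugin from "@sureshsankaran/ralph-wiggum";

// The default export and the named export refer to the same Plugin value.
const plugin: Plugin = RalphWiggumPlugin;
```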
package/dist/index.js
CHANGED

```diff
@@ -1,21 +1,27 @@
-import { existsSync, readFileSync, writeFileSync, unlinkSync, mkdirSync } from "node:fs";
+import { existsSync, readFileSync, writeFileSync, unlinkSync, mkdirSync, appendFileSync } from "node:fs";
 import { join } from "node:path";
 import { homedir } from "node:os";
-// Use a global state directory that won't be affected by project snapshot/revert
 function getStateFilePath() {
     const configDir = process.env.XDG_CONFIG_HOME || join(homedir(), ".config");
     const stateDir = join(configDir, "opencode", "state");
-    // Ensure directory exists
     mkdirSync(stateDir, { recursive: true });
     return join(stateDir, "ralph-loop.local.md");
 }
+function getLogPath() {
+    const configDir = process.env.XDG_CONFIG_HOME || join(homedir(), ".config");
+    return join(configDir, "opencode", "state", "ralph-debug.log");
+}
+function log(msg) {
+    const ts = new Date().toISOString();
+    appendFileSync(getLogPath(), `[${ts}] ${msg}\n`);
+}
 function parseState(content) {
     const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---\n([\s\S]*)$/);
     if (!frontmatterMatch)
         return null;
     const [, frontmatter, prompt] = frontmatterMatch;
     const lines = frontmatter.split("\n");
-    const state = { prompt: prompt.trim() };
+    const state = { prompt: prompt.trim(), last_processed_id: null };
     for (const line of lines) {
         const [key, ...valueParts] = line.split(":");
         if (!key)
@@ -37,6 +43,9 @@ function parseState(content) {
             case "started_at":
                 state.started_at = value.replace(/^"|"$/g, "");
                 break;
+            case "last_processed_id":
+                state.last_processed_id = value === "null" ? null : value.replace(/^"|"$/g, "");
+                break;
         }
     }
     if (state.active === undefined ||
@@ -51,100 +60,127 @@ function extractPromiseText(text) {
     const match = text.match(/<promise>(.*?)<\/promise>/s);
     return match ? match[1].trim().replace(/\s+/g, " ") : null;
 }
-/**
- * Ralph Wiggum Plugin - Iterative AI Development
- *
- * This plugin implements the Ralph Wiggum technique: continuously feeding
- * the same prompt to the AI until the task is complete.
- *
- * Usage:
- * 1. Start a loop: /ralph-loop "Your task description" --max-iterations 10 --completion-promise "DONE"
- * 2. Cancel a loop: /cancel-ralph
- *
- * The AI should output <promise>DONE</promise> when the task is complete.
- */
 export const RalphWiggumPlugin = async ({ client }) => {
+    const version = "0.1.12";
+    log(`Plugin loaded, version=${version}`);
     const stateFilePath = getStateFilePath();
-    //
+    // Clear log file on plugin load
+    try {
+        writeFileSync(getLogPath(), "");
+    }
+    catch { }
     let completionDetected = false;
-    let
-    let
-
+    let lastProcessedId = null;
+    let iterationInProgress = false;
+    let callCount = 0;
+    log(`Plugin initialized`);
     const hooks = {
-        // Stop hook: called when main session loop is about to exit
         "experimental.session.stop": async (input, output) => {
-
-
+            const myCallId = ++callCount;
+            log(`[${myCallId}] Hook entry - iterationInProgress=${iterationInProgress}, lastProcessedId=${lastProcessedId}`);
+            // FIRST: Check lock synchronously before any async operations
+            if (iterationInProgress) {
+                log(`[${myCallId}] Blocked by lock`);
+                output.decision = "block";
                 return;
+            }
+            if (!existsSync(stateFilePath)) {
+                log(`[${myCallId}] No state file`);
+                return;
+            }
             const content = readFileSync(stateFilePath, "utf-8");
             const state = parseState(content);
-            if (!state || !state.active)
-
-            // If completion already detected, allow normal exit
-            if (completionDetected)
+            if (!state || !state.active) {
+                log(`[${myCallId}] No active state`);
                 return;
-
-
-
-                .reverse()
-                .find((m) => m.info.role === "assistant" && m.parts.some((p) => p.type === "text"));
-            if (!lastAssistant)
-                return;
-            // Prevent double-triggering: if we already sent a prompt for this iteration
-            // and haven't seen a new assistant message, skip
-            if (pendingIteration === state.iteration && lastAssistant.info.id === lastAssistantId) {
-                output.decision = "block"; // Still block, we're waiting for AI to respond
+            }
+            if (completionDetected) {
+                log(`[${myCallId}] Completion already detected`);
                 return;
             }
-            //
-
-
-
-
-
-
-
-
-
+            // Acquire lock BEFORE async operation
+            iterationInProgress = true;
+            log(`[${myCallId}] Lock acquired, state.iteration=${state.iteration}`);
+            try {
+                const messages = await client.session.messages({ path: { id: input.sessionID } }).then((res) => res.data ?? []);
+                const lastAssistant = [...messages]
+                    .reverse()
+                    .find((m) => m.info.role === "assistant" && m.parts.some((p) => p.type === "text"));
+                if (!lastAssistant) {
+                    log(`[${myCallId}] No assistant message, releasing lock`);
+                    iterationInProgress = false;
+                    output.decision = "block";
+                    return;
+                }
+                const assistantId = lastAssistant.info.id;
+                log(`[${myCallId}] Found assistant id=${assistantId}, lastProcessedId=${lastProcessedId}`);
+                // Check if we already processed this message
+                if (lastProcessedId === assistantId) {
+                    log(`[${myCallId}] Already processed, keeping lock, blocking`);
+                    output.decision = "block";
+                    return;
+                }
+                // Mark this message as processed IMMEDIATELY
+                lastProcessedId = assistantId;
+                log(`[${myCallId}] Processing new message, set lastProcessedId=${assistantId}`);
+                const textParts = lastAssistant.parts.filter((p) => p.type === "text");
+                const fullText = textParts.map((p) => p.text).join("\n");
+                // Check completion promise
+                if (state.completion_promise) {
+                    const promiseText = extractPromiseText(fullText);
+                    if (promiseText === state.completion_promise) {
+                        completionDetected = true;
+                        console.log(`\nRalph loop complete! Detected <promise>${state.completion_promise}</promise>`);
+                        try {
+                            unlinkSync(stateFilePath);
+                        }
+                        catch { }
+                        iterationInProgress = false;
+                        log(`[${myCallId}] Completion detected, allowing exit`);
+                        return;
+                    }
+                }
+                // Max-iteration safety
+                if (state.max_iterations > 0 && state.iteration >= state.max_iterations) {
+                    console.log(`\nRalph loop: Max iterations (${state.max_iterations}) reached.`);
                     try {
                         unlinkSync(stateFilePath);
                     }
                     catch { }
+                    iterationInProgress = false;
+                    log(`[${myCallId}] Max iterations reached, allowing exit`);
                     return;
                 }
-
-
-
-
-
-
+                const nextIteration = state.iteration + 1;
+                log(`[${myCallId}] Advancing to iteration ${nextIteration}`);
+                // Update state file
+                let updatedContent = content.replace(/^iteration: \d+$/m, `iteration: ${nextIteration}`);
+                if (updatedContent.includes("last_processed_id:")) {
+                    updatedContent = updatedContent.replace(/^last_processed_id: .*$/m, `last_processed_id: "${assistantId}"`);
                 }
-
-
+                else {
+                    updatedContent = updatedContent.replace(/^(started_at: .*)$/m, `$1\nlast_processed_id: "${assistantId}"`);
+                }
+                writeFileSync(stateFilePath, updatedContent);
+                const systemMsg = state.completion_promise
+                    ? `Ralph iteration ${nextIteration} | To stop: output <promise>${state.completion_promise}</promise> (ONLY when TRUE)`
+                    : `Ralph iteration ${nextIteration} | No completion promise set`;
+                console.log(`\n${systemMsg}`);
+                await client.session.promptAsync({
+                    path: { id: input.sessionID },
+                    body: {
+                        parts: [{ type: "text", text: `[${systemMsg}]\n\n${state.prompt}` }],
+                    },
+                });
+                iterationInProgress = false;
+                log(`[${myCallId}] Prompt sent, lock released, blocking exit`);
+                output.decision = "block";
+            }
+            catch (err) {
+                log(`[${myCallId}] Error: ${err}`);
+                iterationInProgress = false;
+                throw err;
            }
-            const nextIteration = state.iteration + 1;
-            pendingIteration = nextIteration;
-            // Update state file with new iteration
-            const updated = content.replace(/^iteration: \d+$/m, `iteration: ${nextIteration}`);
-            writeFileSync(stateFilePath, updated);
-            const systemMsg = state.completion_promise
-                ? `Ralph iteration ${nextIteration} | To stop: output <promise>${state.completion_promise}</promise> (ONLY when TRUE)`
-                : `Ralph iteration ${nextIteration} | No completion promise set`;
-            console.log(`\n${systemMsg}`);
-            // Enqueue next iteration by sending same prompt back into main session
-            await client.session.promptAsync({
-                path: { id: input.sessionID },
-                body: {
-                    parts: [
-                        {
-                            type: "text",
-                            text: `[${systemMsg}]\n\n${state.prompt}`,
-                        },
-                    ],
-                },
-            });
-            // Block loop exit so we can continue iterating
-            output.decision = "block";
         },
     };
     return hooks;
```
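For context on what the new `last_processed_id` bookkeeping attaches to: `parseState` reads `ralph-loop.local.md` as a `---`-delimited frontmatter block followed by the loop prompt, and the stop hook rewrites individual frontmatter lines in place. A hypothetical example of that state file, inferred only from the keys and regexes visible in this diff (the exact file written by `scripts/setup-loop.js` is not shown here, and the values below are placeholders):

```ts
// Hypothetical contents of ~/.config/opencode/state/ralph-loop.local.md,
// reconstructed from the keys parseState() reads and the regexes the stop hook
// rewrites; not copied from the package itself.
const exampleStateFile = `---
active: true
iteration: 3
max_iterations: 10
completion_promise: "DONE"
started_at: "2026-02-03T04:05:06.000Z"
last_processed_id: "msg_abc123"
---
Your task description`;
```

On each pass the hook bumps `iteration:` via `content.replace(/^iteration: \d+$/m, ...)` and either rewrites `last_processed_id:` or inserts it directly after the `started_at:` line, which is what the two `replace` calls added in 0.1.12 do.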
package/package.json
CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@sureshsankaran/ralph-wiggum",
-  "version": "0.1.11",
+  "version": "0.1.12",
   "description": "Ralph Wiggum iterative AI development plugin for OpenCode - continuously loops the same prompt until task completion",
   "type": "module",
   "main": "dist/index.js",
```