discoclaw 0.2.4 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/.context/pa.md +1 -1
  2. package/.context/runtime.md +48 -4
  3. package/.env.example +6 -0
  4. package/.env.example.full +7 -0
  5. package/README.md +5 -1
  6. package/dist/config.js +2 -0
  7. package/dist/cron/cron-sync-coordinator.js +4 -0
  8. package/dist/cron/cron-sync-coordinator.test.js +8 -0
  9. package/dist/cron/executor.js +36 -1
  10. package/dist/cron/executor.test.js +157 -0
  11. package/dist/cron/forum-sync.js +47 -0
  12. package/dist/cron/forum-sync.test.js +234 -0
  13. package/dist/cron/run-stats.js +10 -3
  14. package/dist/cron/run-stats.test.js +67 -3
  15. package/dist/discord/actions-config.js +41 -8
  16. package/dist/discord/actions-config.test.js +130 -8
  17. package/dist/discord/actions-crons.js +18 -0
  18. package/dist/discord/actions-crons.test.js +12 -0
  19. package/dist/discord/models-command.js +5 -0
  20. package/dist/index.js +28 -0
  21. package/dist/mcp-detect.js +74 -0
  22. package/dist/mcp-detect.test.js +160 -0
  23. package/dist/runtime/openai-compat.js +224 -90
  24. package/dist/runtime/openai-compat.test.js +409 -2
  25. package/dist/runtime/openai-tool-exec.js +433 -0
  26. package/dist/runtime/openai-tool-exec.test.js +267 -0
  27. package/dist/runtime/openai-tool-schemas.js +174 -0
  28. package/dist/runtime/openai-tool-schemas.test.js +74 -0
  29. package/dist/runtime/tools/fs-glob.js +102 -0
  30. package/dist/runtime/tools/fs-glob.test.js +67 -0
  31. package/dist/runtime/tools/fs-read-file.js +49 -0
  32. package/dist/runtime/tools/fs-read-file.test.js +51 -0
  33. package/dist/runtime/tools/fs-realpath.js +51 -0
  34. package/dist/runtime/tools/fs-realpath.test.js +72 -0
  35. package/dist/runtime/tools/fs-write-file.js +45 -0
  36. package/dist/runtime/tools/fs-write-file.test.js +56 -0
  37. package/dist/runtime/tools/image-download.js +138 -0
  38. package/dist/runtime/tools/image-download.test.js +106 -0
  39. package/dist/runtime/tools/path-security.js +72 -0
  40. package/dist/runtime/tools/types.js +4 -0
  41. package/dist/workspace-bootstrap.js +0 -1
  42. package/dist/workspace-bootstrap.test.js +0 -2
  43. package/package.json +1 -1
  44. package/templates/mcp.json +8 -0
  45. package/templates/workspace/TOOLS.md +70 -1
  46. package/templates/workspace/HEARTBEAT.md +0 -10
@@ -1,3 +1,30 @@
1
+ import { buildToolSchemas, OPENAI_TO_DISCO_NAME } from './openai-tool-schemas.js';
2
+ import { executeToolCall } from './openai-tool-exec.js';
3
+ const TOOL_LOOP_CAP = 25;
4
+ const SYSTEM_SENTINEL = '---\nThe sections above are internal system context.';
5
+ /**
6
+ * Split a combined prompt into system + user messages.
7
+ * If `params.systemPrompt` is explicitly set, use that directly.
8
+ * Otherwise, auto-detect by scanning for the sentinel delimiter.
9
+ */
10
+ export function splitSystemPrompt(params) {
11
+ if (params.systemPrompt) {
12
+ return { system: params.systemPrompt, user: params.prompt };
13
+ }
14
+ const idx = params.prompt.indexOf(SYSTEM_SENTINEL);
15
+ if (idx === -1) {
16
+ return { system: undefined, user: params.prompt };
17
+ }
18
+ const splitPoint = idx + SYSTEM_SENTINEL.length;
19
+ // Skip a single trailing newline after the sentinel if present
20
+ const afterSentinel = splitPoint < params.prompt.length && params.prompt[splitPoint] === '\n'
21
+ ? splitPoint + 1
22
+ : splitPoint;
23
+ return {
24
+ system: params.prompt.slice(0, splitPoint),
25
+ user: params.prompt.slice(afterSentinel),
26
+ };
27
+ }
1
28
  /**
2
29
  * Returns true for models that require `max_completion_tokens` instead of `max_tokens`.
3
30
  * Strips any provider namespace (e.g. "openai/") before matching.
@@ -20,7 +47,41 @@ function parseSSEData(line) {
20
47
  return undefined;
21
48
  }
22
49
  export function createOpenAICompatRuntime(opts) {
23
- const capabilities = new Set(['streaming_text']);
50
+ const caps = ['streaming_text'];
51
+ if (opts.enableTools) {
52
+ caps.push('tools_fs', 'tools_exec');
53
+ }
54
+ const capabilities = new Set(caps);
55
+ /** Shared fetch with OAuth 401 retry logic. Used by both streaming and tool-loop paths. */
56
+ async function fetchWithAuth(url, body, signal) {
57
+ let bearerToken = opts.auth === 'chatgpt_oauth'
58
+ ? await opts.tokenProvider.getAccessToken()
59
+ : opts.apiKey;
60
+ let response = await fetch(url, {
61
+ method: 'POST',
62
+ headers: {
63
+ 'Authorization': `Bearer ${bearerToken}`,
64
+ 'Content-Type': 'application/json',
65
+ },
66
+ body,
67
+ signal,
68
+ });
69
+ // On 401 with OAuth, force-refresh the token and retry once
70
+ if (!response.ok && response.status === 401 && opts.auth === 'chatgpt_oauth') {
71
+ opts.log?.debug('openai-compat: 401 received, force-refreshing OAuth token');
72
+ bearerToken = await opts.tokenProvider.getAccessToken(true);
73
+ response = await fetch(url, {
74
+ method: 'POST',
75
+ headers: {
76
+ 'Authorization': `Bearer ${bearerToken}`,
77
+ 'Content-Type': 'application/json',
78
+ },
79
+ body,
80
+ signal,
81
+ });
82
+ }
83
+ return response;
84
+ }
24
85
  return {
25
86
  id: opts.id ?? 'openai',
26
87
  capabilities,
@@ -34,12 +95,10 @@ export function createOpenAICompatRuntime(opts) {
34
95
  ? { max_completion_tokens: params.maxTokens }
35
96
  : { max_tokens: params.maxTokens })
36
97
  : {};
37
- const body = JSON.stringify({
38
- model,
39
- messages: [{ role: 'user', content: params.prompt }],
40
- stream: true,
41
- ...tokenField,
42
- });
98
+ // Determine whether to enter the tool loop
99
+ const toolsRequested = opts.enableTools && params.tools && params.tools.length > 0;
100
+ const toolSchemas = toolsRequested ? buildToolSchemas(params.tools) : [];
101
+ const useTools = toolSchemas.length > 0;
43
102
  const controller = new AbortController();
44
103
  let timer;
45
104
  if (params.timeoutMs) {
@@ -50,83 +109,169 @@ export function createOpenAICompatRuntime(opts) {
50
109
  params.signal?.addEventListener('abort', onCallerAbort, { once: true });
51
110
  if (params.signal?.aborted)
52
111
  controller.abort();
53
- let accumulated = '';
112
+ const { system: sysContent, user: userContent } = splitSystemPrompt(params);
54
113
  try {
55
114
  opts.log?.debug({ url, model }, 'openai-compat: request');
56
- // Resolve bearer token: static key or dynamic OAuth
57
- let bearerToken = opts.auth === 'chatgpt_oauth'
58
- ? await opts.tokenProvider.getAccessToken()
59
- : opts.apiKey;
60
- let response = await fetch(url, {
61
- method: 'POST',
62
- headers: {
63
- 'Authorization': `Bearer ${bearerToken}`,
64
- 'Content-Type': 'application/json',
65
- },
66
- body,
67
- signal: controller.signal,
68
- });
69
- // On 401 with OAuth, force-refresh the token and retry once
70
- if (!response.ok && response.status === 401 && opts.auth === 'chatgpt_oauth') {
71
- opts.log?.debug('openai-compat: 401 received, force-refreshing OAuth token');
72
- bearerToken = await opts.tokenProvider.getAccessToken(true);
73
- response = await fetch(url, {
74
- method: 'POST',
75
- headers: {
76
- 'Authorization': `Bearer ${bearerToken}`,
77
- 'Content-Type': 'application/json',
78
- },
79
- body,
80
- signal: controller.signal,
81
- });
82
- }
83
- if (!response.ok) {
84
- yield { type: 'error', message: `OpenAI API error: ${response.status} ${response.statusText}` };
85
- yield { type: 'done' };
86
- return;
87
- }
88
- if (!response.body) {
89
- yield { type: 'error', message: 'OpenAI API returned no response body' };
115
+ if (useTools) {
116
+ // ── Tool-loop path (non-streaming rounds) ──────────────────
117
+ const allowedRoots = [params.cwd, ...(params.addDirs ?? [])].filter(s => s !== '');
118
+ const messages = [];
119
+ if (sysContent)
120
+ messages.push({ role: 'system', content: sysContent });
121
+ messages.push({ role: 'user', content: userContent });
122
+ for (let round = 0; round < TOOL_LOOP_CAP; round++) {
123
+ const body = JSON.stringify({
124
+ model,
125
+ messages,
126
+ stream: false,
127
+ tools: toolSchemas,
128
+ ...tokenField,
129
+ });
130
+ const response = await fetchWithAuth(url, body, controller.signal);
131
+ if (!response.ok) {
132
+ let detail = '';
133
+ try {
134
+ const errBody = await response.json();
135
+ detail = `: ${JSON.stringify(errBody.error ?? errBody)}`;
136
+ }
137
+ catch { /* ignore */ }
138
+ yield { type: 'error', message: `OpenAI API error: ${response.status} ${response.statusText}${detail}` };
139
+ yield { type: 'done' };
140
+ return;
141
+ }
142
+ const json = await response.json();
143
+ const choice = json.choices?.[0];
144
+ const assistantMsg = choice?.message;
145
+ if (!assistantMsg) {
146
+ yield { type: 'error', message: 'No response from model' };
147
+ yield { type: 'done' };
148
+ return;
149
+ }
150
+ const toolCalls = assistantMsg.tool_calls;
151
+ if (!toolCalls || toolCalls.length === 0) {
152
+ // Model returned a final text response — emit and exit
153
+ const content = assistantMsg.content ?? '';
154
+ if (content)
155
+ yield { type: 'text_delta', text: content };
156
+ yield { type: 'text_final', text: content };
157
+ yield { type: 'done' };
158
+ return;
159
+ }
160
+ // Append the assistant message (with tool_calls) to the conversation
161
+ messages.push(assistantMsg);
162
+ // Execute each tool call
163
+ for (const tc of toolCalls) {
164
+ const fnName = tc.function?.name ?? '';
165
+ const tcId = tc.id;
166
+ const discoName = OPENAI_TO_DISCO_NAME[fnName] ?? fnName;
167
+ let args;
168
+ try {
169
+ args = JSON.parse(tc.function?.arguments ?? '{}');
170
+ }
171
+ catch {
172
+ // Malformed JSON — feed error back to model instead of crashing
173
+ yield { type: 'tool_start', name: discoName, input: tc.function?.arguments };
174
+ yield { type: 'tool_end', name: discoName, output: 'Malformed JSON in tool call arguments', ok: false };
175
+ messages.push({
176
+ role: 'tool',
177
+ tool_call_id: tcId ?? 'unknown',
178
+ content: 'Malformed JSON in tool call arguments',
179
+ });
180
+ continue;
181
+ }
182
+ yield { type: 'tool_start', name: discoName, input: args };
183
+ const result = await executeToolCall(fnName, args, allowedRoots);
184
+ yield { type: 'tool_end', name: discoName, output: result.result, ok: result.ok };
185
+ messages.push({
186
+ role: 'tool',
187
+ tool_call_id: tcId ?? 'unknown',
188
+ content: result.result,
189
+ });
190
+ }
191
+ }
192
+ // Safety cap reached
193
+ yield { type: 'error', message: 'Tool loop safety cap reached (25 iterations)' };
90
194
  yield { type: 'done' };
91
- return;
92
195
  }
93
- const reader = response.body.getReader();
94
- const decoder = new TextDecoder();
95
- let buffer = '';
96
- // Process a single SSE line, returning 'done' if [DONE] sentinel was hit
97
- const processLine = function* (line) {
98
- const data = parseSSEData(line);
99
- if (data === undefined)
100
- return false;
101
- if (data === '[DONE]') {
102
- yield { type: 'text_final', text: accumulated };
196
+ else {
197
+ // ── Streaming text path (no tools) ─────────────────────────
198
+ const streamMessages = [];
199
+ if (sysContent)
200
+ streamMessages.push({ role: 'system', content: sysContent });
201
+ streamMessages.push({ role: 'user', content: userContent });
202
+ const body = JSON.stringify({
203
+ model,
204
+ messages: streamMessages,
205
+ stream: true,
206
+ ...tokenField,
207
+ });
208
+ let accumulated = '';
209
+ const response = await fetchWithAuth(url, body, controller.signal);
210
+ if (!response.ok) {
211
+ let detail = '';
212
+ try {
213
+ const errBody = await response.json();
214
+ detail = `: ${JSON.stringify(errBody.error ?? errBody)}`;
215
+ }
216
+ catch { /* ignore */ }
217
+ yield { type: 'error', message: `OpenAI API error: ${response.status} ${response.statusText}${detail}` };
103
218
  yield { type: 'done' };
104
- return true;
219
+ return;
105
220
  }
106
- try {
107
- const parsed = JSON.parse(data);
108
- const content = parsed?.choices?.[0]?.delta?.content;
109
- if (content) {
110
- accumulated += content;
111
- yield { type: 'text_delta', text: content };
112
- }
221
+ if (!response.body) {
222
+ yield { type: 'error', message: 'OpenAI API returned no response body' };
223
+ yield { type: 'done' };
224
+ return;
113
225
  }
114
- catch {
115
- // Skip unparseable lines
226
+ const reader = response.body.getReader();
227
+ const decoder = new TextDecoder();
228
+ let buffer = '';
229
+ // Process a single SSE line, returning 'done' if [DONE] sentinel was hit
230
+ const processLine = function* (line) {
231
+ const data = parseSSEData(line);
232
+ if (data === undefined)
233
+ return false;
234
+ if (data === '[DONE]') {
235
+ yield { type: 'text_final', text: accumulated };
236
+ yield { type: 'done' };
237
+ return true;
238
+ }
239
+ try {
240
+ const parsed = JSON.parse(data);
241
+ const content = parsed?.choices?.[0]?.delta?.content;
242
+ if (content) {
243
+ accumulated += content;
244
+ yield { type: 'text_delta', text: content };
245
+ }
246
+ }
247
+ catch {
248
+ // Skip unparseable lines
249
+ }
250
+ return false;
251
+ };
252
+ while (true) {
253
+ const { done, value } = await reader.read();
254
+ if (done)
255
+ break;
256
+ buffer += decoder.decode(value, { stream: true });
257
+ // Process complete lines
258
+ const lines = buffer.split('\n');
259
+ // Keep the last (possibly incomplete) line in the buffer
260
+ buffer = lines.pop() ?? '';
261
+ for (const line of lines) {
262
+ const result = processLine(line);
263
+ let step = result.next();
264
+ while (!step.done) {
265
+ yield step.value;
266
+ step = result.next();
267
+ }
268
+ if (step.value)
269
+ return; // [DONE] hit
270
+ }
116
271
  }
117
- return false;
118
- };
119
- while (true) {
120
- const { done, value } = await reader.read();
121
- if (done)
122
- break;
123
- buffer += decoder.decode(value, { stream: true });
124
- // Process complete lines
125
- const lines = buffer.split('\n');
126
- // Keep the last (possibly incomplete) line in the buffer
127
- buffer = lines.pop() ?? '';
128
- for (const line of lines) {
129
- const result = processLine(line);
272
+ // Process any remaining buffered content (stream ended without trailing newline)
273
+ if (buffer.trim()) {
274
+ const result = processLine(buffer);
130
275
  let step = result.next();
131
276
  while (!step.done) {
132
277
  yield step.value;
@@ -135,21 +280,10 @@ export function createOpenAICompatRuntime(opts) {
135
280
  if (step.value)
136
281
  return; // [DONE] hit
137
282
  }
283
+ // Stream ended without [DONE] — emit what we have
284
+ yield { type: 'text_final', text: accumulated };
285
+ yield { type: 'done' };
138
286
  }
139
- // Process any remaining buffered content (stream ended without trailing newline)
140
- if (buffer.trim()) {
141
- const result = processLine(buffer);
142
- let step = result.next();
143
- while (!step.done) {
144
- yield step.value;
145
- step = result.next();
146
- }
147
- if (step.value)
148
- return; // [DONE] hit
149
- }
150
- // Stream ended without [DONE] — emit what we have
151
- yield { type: 'text_final', text: accumulated };
152
- yield { type: 'done' };
153
287
  }
154
288
  catch (err) {
155
289
  if (timer)