openclaw-codex-app-server 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AGENTS.md +22 -0
- package/LICENSE +21 -0
- package/README.md +129 -0
- package/index.ts +69 -0
- package/openclaw.plugin.json +105 -0
- package/package.json +28 -0
- package/src/client.test.ts +332 -0
- package/src/client.ts +2914 -0
- package/src/config.ts +103 -0
- package/src/controller.test.ts +1177 -0
- package/src/controller.ts +3232 -0
- package/src/format.test.ts +502 -0
- package/src/format.ts +869 -0
- package/src/openclaw-plugin-sdk.d.ts +237 -0
- package/src/pending-input.test.ts +298 -0
- package/src/pending-input.ts +785 -0
- package/src/state.test.ts +228 -0
- package/src/state.ts +354 -0
- package/src/thread-picker.test.ts +47 -0
- package/src/thread-picker.ts +98 -0
- package/src/thread-selection.test.ts +89 -0
- package/src/thread-selection.ts +106 -0
- package/src/types.ts +372 -0
- package/tsconfig.json +24 -0
|
@@ -0,0 +1,332 @@
|
|
|
1
|
+
import { describe, expect, it } from "vitest";
|
|
2
|
+
import { __testing } from "./client.js";
|
|
3
|
+
|
|
4
|
+
describe("buildTurnStartPayloads", () => {
|
|
5
|
+
it("keeps legacy text and message input fallbacks for normal turns", () => {
|
|
6
|
+
expect(
|
|
7
|
+
__testing.buildTurnStartPayloads({
|
|
8
|
+
threadId: "thread-123",
|
|
9
|
+
prompt: "ship it",
|
|
10
|
+
model: "gpt-5.4",
|
|
11
|
+
}),
|
|
12
|
+
).toEqual([
|
|
13
|
+
{
|
|
14
|
+
threadId: "thread-123",
|
|
15
|
+
input: [{ type: "text", text: "ship it" }],
|
|
16
|
+
model: "gpt-5.4",
|
|
17
|
+
},
|
|
18
|
+
{
|
|
19
|
+
thread_id: "thread-123",
|
|
20
|
+
input: [{ type: "text", text: "ship it" }],
|
|
21
|
+
model: "gpt-5.4",
|
|
22
|
+
},
|
|
23
|
+
{
|
|
24
|
+
threadId: "thread-123",
|
|
25
|
+
input: [
|
|
26
|
+
{
|
|
27
|
+
type: "message",
|
|
28
|
+
role: "user",
|
|
29
|
+
content: [{ type: "input_text", text: "ship it" }],
|
|
30
|
+
},
|
|
31
|
+
],
|
|
32
|
+
model: "gpt-5.4",
|
|
33
|
+
},
|
|
34
|
+
{
|
|
35
|
+
thread_id: "thread-123",
|
|
36
|
+
input: [
|
|
37
|
+
{
|
|
38
|
+
type: "message",
|
|
39
|
+
role: "user",
|
|
40
|
+
content: [{ type: "input_text", text: "ship it" }],
|
|
41
|
+
},
|
|
42
|
+
],
|
|
43
|
+
model: "gpt-5.4",
|
|
44
|
+
},
|
|
45
|
+
]);
|
|
46
|
+
});
|
|
47
|
+
|
|
48
|
+
it("prefers text-only collaboration payloads and preserves explicit null developer instructions", () => {
|
|
49
|
+
expect(
|
|
50
|
+
__testing.buildTurnStartPayloads({
|
|
51
|
+
threadId: "thread-123",
|
|
52
|
+
prompt: "plan it",
|
|
53
|
+
model: "gpt-5.4",
|
|
54
|
+
collaborationMode: {
|
|
55
|
+
mode: "plan",
|
|
56
|
+
settings: {
|
|
57
|
+
model: "gpt-5.4",
|
|
58
|
+
developerInstructions: null,
|
|
59
|
+
},
|
|
60
|
+
},
|
|
61
|
+
}),
|
|
62
|
+
).toEqual([
|
|
63
|
+
{
|
|
64
|
+
threadId: "thread-123",
|
|
65
|
+
input: [{ type: "text", text: "plan it" }],
|
|
66
|
+
model: "gpt-5.4",
|
|
67
|
+
collaborationMode: {
|
|
68
|
+
mode: "plan",
|
|
69
|
+
settings: {
|
|
70
|
+
model: "gpt-5.4",
|
|
71
|
+
developerInstructions: null,
|
|
72
|
+
},
|
|
73
|
+
},
|
|
74
|
+
},
|
|
75
|
+
{
|
|
76
|
+
thread_id: "thread-123",
|
|
77
|
+
input: [{ type: "text", text: "plan it" }],
|
|
78
|
+
model: "gpt-5.4",
|
|
79
|
+
collaboration_mode: {
|
|
80
|
+
mode: "plan",
|
|
81
|
+
settings: {
|
|
82
|
+
model: "gpt-5.4",
|
|
83
|
+
developer_instructions: null,
|
|
84
|
+
},
|
|
85
|
+
},
|
|
86
|
+
},
|
|
87
|
+
{
|
|
88
|
+
threadId: "thread-123",
|
|
89
|
+
input: [{ type: "text", text: "plan it" }],
|
|
90
|
+
model: "gpt-5.4",
|
|
91
|
+
collaborationMode: {
|
|
92
|
+
mode: "plan",
|
|
93
|
+
},
|
|
94
|
+
},
|
|
95
|
+
{
|
|
96
|
+
thread_id: "thread-123",
|
|
97
|
+
input: [{ type: "text", text: "plan it" }],
|
|
98
|
+
model: "gpt-5.4",
|
|
99
|
+
collaboration_mode: {
|
|
100
|
+
mode: "plan",
|
|
101
|
+
},
|
|
102
|
+
},
|
|
103
|
+
{
|
|
104
|
+
threadId: "thread-123",
|
|
105
|
+
input: [{ type: "text", text: "plan it" }],
|
|
106
|
+
model: "gpt-5.4",
|
|
107
|
+
},
|
|
108
|
+
{
|
|
109
|
+
thread_id: "thread-123",
|
|
110
|
+
input: [{ type: "text", text: "plan it" }],
|
|
111
|
+
model: "gpt-5.4",
|
|
112
|
+
},
|
|
113
|
+
]);
|
|
114
|
+
});
|
|
115
|
+
});
|
|
116
|
+
|
|
117
|
+
describe("extractThreadTokenUsageSnapshot", () => {
  // Checks normalization of token-usage notifications into a single snapshot
  // with derived remaining-context figures.
  it("prefers current-context usage over cumulative totals when both are present", () => {
    expect(
      __testing.extractThreadTokenUsageSnapshot({
        threadId: "thread-123",
        tokenUsage: {
          // `last` = usage of the current context; should win over `total`.
          last: {
            totalTokens: 139_000,
            inputTokens: 120_000,
            cachedInputTokens: 9_000,
            outputTokens: 10_000,
          },
          // Cumulative totals across the thread — must be ignored here.
          total: {
            totalTokens: 56_100_000,
            inputTokens: 55_000_000,
            cachedInputTokens: 300_000,
            outputTokens: 1_100_000,
          },
          modelContextWindow: 258_000,
        },
      }),
    ).toEqual({
      totalTokens: 139_000,
      inputTokens: 120_000,
      cachedInputTokens: 9_000,
      outputTokens: 10_000,
      reasoningOutputTokens: undefined,
      contextWindow: 258_000,
      // remaining = 258_000 - 139_000; percent rounds to 46.
      remainingTokens: 119_000,
      remainingPercent: 46,
    });
  });

  it("normalizes thread/tokenUsage/updated notifications into a context snapshot", () => {
    expect(
      __testing.extractThreadTokenUsageSnapshot({
        threadId: "thread-123",
        turnId: "turn-123",
        tokenUsage: {
          // No `last` block here — `total` is used as the snapshot source.
          total: {
            totalTokens: 54_000,
            inputTokens: 49_000,
            cachedInputTokens: 3_000,
            outputTokens: 5_000,
            reasoningOutputTokens: 1_000,
          },
          modelContextWindow: 272_000,
        },
      }),
    ).toEqual({
      totalTokens: 54_000,
      inputTokens: 49_000,
      cachedInputTokens: 3_000,
      outputTokens: 5_000,
      reasoningOutputTokens: 1_000,
      contextWindow: 272_000,
      // remaining = 272_000 - 54_000; percent rounds to 80.
      remainingTokens: 218_000,
      remainingPercent: 80,
    });
  });
});
describe("extractFileChangePathsFromReadResult", () => {
  // Locates a fileChange item by id inside a thread/read result and formats
  // its paths relative to the given workspace root.
  it("formats in-workspace files as relative paths and keeps outside files absolute", () => {
    expect(
      __testing.extractFileChangePathsFromReadResult(
        {
          thread: {
            turns: [
              {
                id: "turn-1",
                items: [
                  {
                    type: "fileChange",
                    id: "item-1",
                    changes: [
                      // Two paths under the workspace root, one outside it.
                      { path: "/repo/openclaw/src/a.ts", kind: "update" },
                      { path: "/repo/openclaw/docs/b.md", kind: "add" },
                      { path: "/tmp/outside.txt", kind: "delete" },
                    ],
                  },
                ],
              },
            ],
          },
        },
        "item-1",
        "/repo/openclaw", // workspace root used for relativization
      ),
    ).toEqual(["src/a.ts", "docs/b.md", "/tmp/outside.txt"]);
  });
});
describe("extractRateLimitSummaries", () => {
  // Verifies conversion of server rate-limit payloads into per-window summary
  // rows with human-readable names and derived remaining percentages.
  it("extracts primary and secondary window snapshots from rateLimitsByLimitId", () => {
    expect(
      __testing.extractRateLimitSummaries({
        rateLimitsByLimitId: {
          codex: {
            limitName: "Codex",
            // 300 min = 5h window
            primary: {
              usedPercent: 15,
              windowDurationMins: 300,
              resetsAt: "2026-03-13T10:03:00-04:00",
            },
            // 10_080 min = 7 days (weekly) window
            secondary: {
              usedPercent: 9,
              windowDurationMins: 10_080,
              resetsAt: "2026-03-14T10:03:00-04:00",
            },
          },
        },
      }),
    ).toEqual([
      {
        // Window name derived from duration, not from limitName.
        name: "5h limit",
        limitId: "codex",
        usedPercent: 15,
        remaining: 85, // 100 - usedPercent
        resetAt: new Date("2026-03-13T10:03:00-04:00").getTime(),
        windowSeconds: 18_000,
        windowMinutes: 300,
      },
      {
        name: "Weekly limit",
        limitId: "codex",
        usedPercent: 9,
        remaining: 91,
        resetAt: new Date("2026-03-14T10:03:00-04:00").getTime(),
        windowSeconds: 604_800,
        windowMinutes: 10_080,
      },
    ]);
  });

  it("merges generic rows into existing named windows without losing used percentages", () => {
    expect(
      __testing.extractRateLimitSummaries({
        // A pre-named row without usedPercent...
        rateLimits: [
          {
            name: "5h limit",
            resetAt: "2026-03-13T10:03:00-04:00",
            windowSeconds: 18_000,
          },
        ],
        // ...and a by-id entry carrying the usage for the same 5h window.
        rateLimitsByLimitId: {
          codex: {
            primary: {
              usedPercent: 15,
              windowDurationMins: 300,
            },
          },
        },
      }),
    ).toEqual([
      // Expect a single merged row: name/resetAt from the generic entry,
      // usedPercent/remaining from the by-id entry.
      {
        name: "5h limit",
        limitId: "codex",
        remaining: 85,
        usedPercent: 15,
        resetAt: new Date("2026-03-13T10:03:00-04:00").getTime(),
        windowSeconds: 18_000,
        windowMinutes: 300,
      },
    ]);
  });
});
describe("createPendingInputCoordinator", () => {
  // The coordinator queues approval requests and surfaces them one at a time:
  // the second request must not be shown until the first one is settled.
  it("surfaces only one pending approval at a time", async () => {
    // Records every onPendingInput callback: request ids when a request is
    // surfaced, null when the current request is cleared.
    const surfaced: Array<string | null> = [];
    const coordinator = __testing.createPendingInputCoordinator({
      inputTimeoutMs: 60_000,
      onPendingInput: async (state) => {
        surfaced.push(state?.requestId ?? null);
      },
    });

    const first = coordinator.enqueue({
      state: {
        requestId: "req-1",
        options: ["approve"],
        expiresAt: Date.now() + 60_000,
        method: "item/exec/requestApproval",
      },
      options: ["approve"],
      actions: [],
      methodLower: "item/exec/requestapproval",
    });
    const second = coordinator.enqueue({
      state: {
        requestId: "req-2",
        options: ["approve"],
        expiresAt: Date.now() + 60_000,
        method: "item/exec/requestApproval",
      },
      options: ["approve"],
      actions: [],
      methodLower: "item/exec/requestapproval",
    });

    // Flush microtasks so the async onPendingInput callback runs.
    await new Promise((resolve) => setTimeout(resolve, 0));
    // Only req-1 surfaced; req-2 is queued behind it.
    expect(surfaced).toEqual(["req-1"]);
    expect(coordinator.current()?.state.requestId).toBe("req-1");

    await coordinator.settleCurrent({ index: 0, option: "approve" });
    await expect(first.response).resolves.toEqual({ index: 0, option: "approve" });
    await new Promise((resolve) => setTimeout(resolve, 0));

    // Settling req-1 clears it (null) and then surfaces req-2.
    expect(surfaced).toEqual(["req-1", null, "req-2"]);
    expect(coordinator.current()?.state.requestId).toBe("req-2");

    await coordinator.settleCurrent({ index: 0, option: "approve" });
    await expect(second.response).resolves.toEqual({ index: 0, option: "approve" });
  });
});