@llblab/pi-telegram 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AGENTS.md +90 -0
- package/BACKLOG.md +5 -0
- package/CHANGELOG.md +17 -0
- package/README.md +202 -0
- package/docs/README.md +9 -0
- package/docs/architecture.md +148 -0
- package/index.ts +1968 -0
- package/lib/api.ts +222 -0
- package/lib/attachments.ts +98 -0
- package/lib/media.ts +234 -0
- package/lib/menu.ts +951 -0
- package/lib/model-switch.ts +62 -0
- package/lib/polling.ts +122 -0
- package/lib/queue.ts +534 -0
- package/lib/registration.ts +163 -0
- package/lib/rendering.ts +697 -0
- package/lib/replies.ts +313 -0
- package/lib/setup.ts +41 -0
- package/lib/status.ts +109 -0
- package/lib/turns.ts +144 -0
- package/lib/updates.ts +397 -0
- package/package.json +40 -0
- package/screenshot.png +0 -0
- package/tests/api.test.ts +89 -0
- package/tests/attachments.test.ts +132 -0
- package/tests/config.test.ts +80 -0
- package/tests/media.test.ts +77 -0
- package/tests/menu.test.ts +645 -0
- package/tests/polling.test.ts +129 -0
- package/tests/queue.test.ts +2982 -0
- package/tests/registration.test.ts +268 -0
- package/tests/rendering.test.ts +308 -0
- package/tests/replies.test.ts +362 -0
- package/tests/turns.test.ts +132 -0
- package/tests/updates.test.ts +366 -0
|
@@ -0,0 +1,645 @@
|
|
|
1
|
+
import assert from "node:assert/strict";
|
|
2
|
+
import test from "node:test";
|
|
3
|
+
|
|
4
|
+
import {
|
|
5
|
+
applyTelegramModelPageSelection,
|
|
6
|
+
applyTelegramModelScopeSelection,
|
|
7
|
+
buildModelMenuReplyMarkup,
|
|
8
|
+
buildStatusReplyMarkup,
|
|
9
|
+
buildTelegramModelCallbackPlan,
|
|
10
|
+
buildTelegramModelMenuRenderPayload,
|
|
11
|
+
buildTelegramModelMenuState,
|
|
12
|
+
buildTelegramStatusMenuRenderPayload,
|
|
13
|
+
buildTelegramThinkingMenuRenderPayload,
|
|
14
|
+
buildThinkingMenuReplyMarkup,
|
|
15
|
+
buildThinkingMenuText,
|
|
16
|
+
formatScopedModelButtonText,
|
|
17
|
+
getCanonicalModelId,
|
|
18
|
+
getTelegramModelMenuPage,
|
|
19
|
+
getTelegramModelSelection,
|
|
20
|
+
getModelMenuItems,
|
|
21
|
+
handleTelegramMenuCallbackEntry,
|
|
22
|
+
handleTelegramModelMenuCallbackAction,
|
|
23
|
+
handleTelegramStatusMenuCallbackAction,
|
|
24
|
+
handleTelegramThinkingMenuCallbackAction,
|
|
25
|
+
isThinkingLevel,
|
|
26
|
+
MODEL_MENU_TITLE,
|
|
27
|
+
modelsMatch,
|
|
28
|
+
parseTelegramMenuCallbackAction,
|
|
29
|
+
resolveScopedModelPatterns,
|
|
30
|
+
sendTelegramModelMenuMessage,
|
|
31
|
+
sendTelegramStatusMessage,
|
|
32
|
+
sortScopedModels,
|
|
33
|
+
TELEGRAM_MODEL_PAGE_SIZE,
|
|
34
|
+
updateTelegramModelMenuMessage,
|
|
35
|
+
updateTelegramStatusMessage,
|
|
36
|
+
updateTelegramThinkingMenuMessage,
|
|
37
|
+
type TelegramModelMenuState,
|
|
38
|
+
} from "../lib/menu.ts";
|
|
39
|
+
|
|
40
|
+
test("Menu helpers match models, detect thinking levels, and expose constants", () => {
|
|
41
|
+
assert.equal(MODEL_MENU_TITLE, "<b>Choose a model:</b>");
|
|
42
|
+
assert.equal(TELEGRAM_MODEL_PAGE_SIZE, 6);
|
|
43
|
+
assert.equal(
|
|
44
|
+
modelsMatch(
|
|
45
|
+
{ provider: "openai", id: "gpt-5" },
|
|
46
|
+
{ provider: "openai", id: "gpt-5" },
|
|
47
|
+
),
|
|
48
|
+
true,
|
|
49
|
+
);
|
|
50
|
+
assert.equal(
|
|
51
|
+
modelsMatch(
|
|
52
|
+
{ provider: "openai", id: "gpt-5" },
|
|
53
|
+
{ provider: "anthropic", id: "gpt-5" },
|
|
54
|
+
),
|
|
55
|
+
false,
|
|
56
|
+
);
|
|
57
|
+
assert.equal(
|
|
58
|
+
getCanonicalModelId({ provider: "openai", id: "gpt-5" }),
|
|
59
|
+
"openai/gpt-5",
|
|
60
|
+
);
|
|
61
|
+
assert.equal(isThinkingLevel("high"), true);
|
|
62
|
+
assert.equal(isThinkingLevel("impossible"), false);
|
|
63
|
+
});
|
|
64
|
+
|
|
65
|
+
test("Menu helpers resolve scoped model patterns and sort current models first", () => {
|
|
66
|
+
const models = [
|
|
67
|
+
{ provider: "openai", id: "gpt-5", name: "GPT 5" },
|
|
68
|
+
{ provider: "openai", id: "gpt-5-latest", name: "GPT 5 Latest" },
|
|
69
|
+
{
|
|
70
|
+
provider: "anthropic",
|
|
71
|
+
id: "claude-sonnet-20250101",
|
|
72
|
+
name: "Claude Sonnet",
|
|
73
|
+
},
|
|
74
|
+
] as const;
|
|
75
|
+
const resolved = resolveScopedModelPatterns(
|
|
76
|
+
["gpt-5:high", "anthropic/*:low"],
|
|
77
|
+
models as never,
|
|
78
|
+
);
|
|
79
|
+
assert.deepEqual(
|
|
80
|
+
resolved.map((entry) => ({
|
|
81
|
+
id: entry.model.id,
|
|
82
|
+
thinking: entry.thinkingLevel,
|
|
83
|
+
})),
|
|
84
|
+
[
|
|
85
|
+
{ id: "gpt-5", thinking: "high" },
|
|
86
|
+
{ id: "claude-sonnet-20250101", thinking: "low" },
|
|
87
|
+
],
|
|
88
|
+
);
|
|
89
|
+
const sorted = sortScopedModels(resolved, models[0] as never);
|
|
90
|
+
assert.equal(sorted[0]?.model.id, "gpt-5");
|
|
91
|
+
});
|
|
92
|
+
|
|
93
|
+
test("Menu helpers build model menu state and parse callback actions", () => {
|
|
94
|
+
const modelA = { provider: "openai", id: "gpt-5", reasoning: true } as const;
|
|
95
|
+
const modelB = {
|
|
96
|
+
provider: "anthropic",
|
|
97
|
+
id: "claude-3",
|
|
98
|
+
reasoning: false,
|
|
99
|
+
} as const;
|
|
100
|
+
const state = buildTelegramModelMenuState({
|
|
101
|
+
chatId: 1,
|
|
102
|
+
activeModel: modelA as never,
|
|
103
|
+
availableModels: [modelA, modelB] as never,
|
|
104
|
+
configuredScopedModelPatterns: ["missing-model"],
|
|
105
|
+
cliScopedModelPatterns: ["missing-model"],
|
|
106
|
+
});
|
|
107
|
+
assert.equal(state.chatId, 1);
|
|
108
|
+
assert.equal(state.scope, "all");
|
|
109
|
+
assert.match(state.note ?? "", /No CLI scoped models matched/);
|
|
110
|
+
assert.deepEqual(parseTelegramMenuCallbackAction("status:model"), {
|
|
111
|
+
kind: "status",
|
|
112
|
+
action: "model",
|
|
113
|
+
});
|
|
114
|
+
assert.deepEqual(parseTelegramMenuCallbackAction("thinking:set:high"), {
|
|
115
|
+
kind: "thinking:set",
|
|
116
|
+
level: "high",
|
|
117
|
+
});
|
|
118
|
+
assert.deepEqual(parseTelegramMenuCallbackAction("model:pick:2"), {
|
|
119
|
+
kind: "model",
|
|
120
|
+
action: "pick",
|
|
121
|
+
value: "2",
|
|
122
|
+
});
|
|
123
|
+
assert.deepEqual(parseTelegramMenuCallbackAction("unknown"), {
|
|
124
|
+
kind: "ignore",
|
|
125
|
+
});
|
|
126
|
+
});
|
|
127
|
+
|
|
128
|
+
// Exercises the stateful selection helpers. The assertion order matters:
// each "changed" call mutates `state`, and the following "unchanged" call
// depends on that mutation having happened.
test("Menu helpers apply menu mutations and resolve model selections", () => {
  const modelA = { provider: "openai", id: "gpt-5", reasoning: true } as const;
  // Minimal hand-built state; cast because the test only populates the
  // fields the helpers under test actually read.
  const state = {
    chatId: 1,
    messageId: 2,
    page: 0,
    scope: "all" as const,
    scopedModels: [{ model: modelA, thinkingLevel: "high" as const }],
    allModels: [{ model: modelA }],
    mode: "status" as const,
  } as unknown as TelegramModelMenuState;
  // Scope selection: first switch mutates, repeat is a no-op, bad input is rejected.
  assert.equal(applyTelegramModelScopeSelection(state, "scoped"), "changed");
  assert.equal(state.scope, "scoped");
  assert.equal(applyTelegramModelScopeSelection(state, "scoped"), "unchanged");
  assert.equal(applyTelegramModelScopeSelection(state, "bad"), "invalid");
  // Page selection follows the same changed/unchanged/invalid contract.
  assert.equal(applyTelegramModelPageSelection(state, "2"), "changed");
  assert.equal(state.page, 2);
  assert.equal(applyTelegramModelPageSelection(state, "2"), "unchanged");
  assert.equal(applyTelegramModelPageSelection(state, "bad"), "invalid");
  // Selection lookup: non-numeric → invalid, out-of-range index → missing,
  // a valid index resolves to a selected entry.
  assert.deepEqual(getTelegramModelSelection(state, "bad"), { kind: "invalid" });
  assert.deepEqual(getTelegramModelSelection(state, "9"), { kind: "missing" });
  assert.equal(getTelegramModelSelection(state, "0").kind, "selected");
});
|
|
151
|
+
|
|
152
|
+
test("Menu helpers derive normalized menu pages without mutating state", () => {
|
|
153
|
+
const modelA = { provider: "openai", id: "gpt-5" } as const;
|
|
154
|
+
const modelB = { provider: "anthropic", id: "claude-3" } as const;
|
|
155
|
+
const state = {
|
|
156
|
+
chatId: 1,
|
|
157
|
+
messageId: 2,
|
|
158
|
+
page: 99,
|
|
159
|
+
scope: "all" as const,
|
|
160
|
+
scopedModels: [],
|
|
161
|
+
allModels: [{ model: modelA }, { model: modelB }],
|
|
162
|
+
mode: "model" as const,
|
|
163
|
+
} as unknown as TelegramModelMenuState;
|
|
164
|
+
const menuPage = getTelegramModelMenuPage(state, 1);
|
|
165
|
+
assert.equal(menuPage.page, 1);
|
|
166
|
+
assert.equal(menuPage.pageCount, 2);
|
|
167
|
+
assert.equal(menuPage.start, 1);
|
|
168
|
+
assert.deepEqual(menuPage.items, [{ model: modelB }]);
|
|
169
|
+
assert.equal(state.page, 99);
|
|
170
|
+
const markup = buildModelMenuReplyMarkup(state, modelA as never, 1);
|
|
171
|
+
assert.equal(markup.inline_keyboard[1]?.[1]?.text, "2/2");
|
|
172
|
+
assert.equal(state.page, 99);
|
|
173
|
+
});
|
|
174
|
+
|
|
175
|
+
// Verifies the pure planning function that turns callback data plus run
// state into an action plan, across four situations: paging, picking the
// already-active model, picking another model while a restart is possible,
// and picking while busy with no restart available.
test("Menu helpers build model callback plans for paging, selection, and restart modes", () => {
  const modelA = { provider: "openai", id: "gpt-5", reasoning: true } as const;
  const modelB = { provider: "anthropic", id: "claude-3", reasoning: false } as const;
  // Minimal state cast; only fields read by the planner are populated.
  const state = {
    chatId: 1,
    messageId: 2,
    page: 0,
    scope: "all" as const,
    scopedModels: [{ model: modelA, thinkingLevel: "high" as const }],
    allModels: [{ model: modelA }, { model: modelB }],
    mode: "model" as const,
  } as unknown as TelegramModelMenuState;
  // Page navigation → just redraw the menu.
  assert.deepEqual(
    buildTelegramModelCallbackPlan({
      data: "model:page:1",
      state,
      activeModel: modelA as never,
      currentThinkingLevel: "medium",
      isIdle: true,
      canRestartBusyRun: false,
      hasActiveToolExecutions: false,
    }),
    { kind: "update-menu" },
  );
  // Picking the already-active model while idle → refresh status only.
  assert.deepEqual(
    buildTelegramModelCallbackPlan({
      data: "model:pick:0",
      state,
      activeModel: modelA as never,
      currentThinkingLevel: "medium",
      isIdle: true,
      canRestartBusyRun: false,
      hasActiveToolExecutions: false,
    }),
    {
      kind: "refresh-status",
      selection: state.allModels[0],
      callbackText: "Model: gpt-5",
      shouldApplyThinkingLevel: false,
    },
  );
  // Busy, restartable, tool still running → switch deferred until the tool ends.
  assert.deepEqual(
    buildTelegramModelCallbackPlan({
      data: "model:pick:1",
      state,
      activeModel: modelA as never,
      currentThinkingLevel: "medium",
      isIdle: false,
      canRestartBusyRun: true,
      hasActiveToolExecutions: true,
    }),
    {
      kind: "switch-model",
      selection: state.allModels[1],
      mode: "restart-after-tool",
      callbackText: "Switched to claude-3. Restarting after the current tool finishes…",
    },
  );
  // Busy and not restartable → refuse and tell the user to /stop first.
  assert.deepEqual(
    buildTelegramModelCallbackPlan({
      data: "model:pick:1",
      state,
      activeModel: modelA as never,
      currentThinkingLevel: "medium",
      isIdle: false,
      canRestartBusyRun: false,
      hasActiveToolExecutions: false,
    }),
    { kind: "answer", text: "Pi is busy. Send /stop first." },
  );
});
|
|
246
|
+
|
|
247
|
+
// Verifies entry-point routing: missing callback data, data with no menu
// state, and a full dispatch to the status handler. `events` records every
// mock invocation in order, and the final deepEqual pins that order.
test("Menu helpers route callback entry states before action handlers", async () => {
  const events: string[] = [];
  // No callback data → the query is answered with empty text.
  await handleTelegramMenuCallbackEntry("callback-1", undefined, undefined, {
    handleStatusAction: async () => false,
    handleThinkingAction: async () => false,
    handleModelAction: async () => false,
    answerCallbackQuery: async (_id, text) => {
      events.push(`answer:${text ?? ""}`);
    },
  });
  // Data present but no menu state → "expired" answer, no handler runs.
  await handleTelegramMenuCallbackEntry("callback-2", "status:model", undefined, {
    handleStatusAction: async () => false,
    handleThinkingAction: async () => false,
    handleModelAction: async () => false,
    answerCallbackQuery: async (_id, text) => {
      events.push(`answer:${text ?? ""}`);
    },
  });
  // Data plus live state → the status handler is invoked and consumes the event.
  await handleTelegramMenuCallbackEntry(
    "callback-3",
    "status:model",
    {
      chatId: 1,
      messageId: 2,
      page: 0,
      scope: "all",
      scopedModels: [],
      allModels: [],
      mode: "status",
    },
    {
      handleStatusAction: async () => {
        events.push("status");
        return true;
      },
      handleThinkingAction: async () => false,
      handleModelAction: async () => false,
      answerCallbackQuery: async (_id, text) => {
        events.push(`answer:${text ?? ""}`);
      },
    },
  );
  assert.deepEqual(events, [
    "answer:",
    "answer:Interactive message expired.",
    "status",
  ]);
});
|
|
295
|
+
|
|
296
|
+
// Executes the model-menu action handler end to end with mocked side
// effects, across three paths: paging (update-menu), switching while a tool
// is still running (restart-after-tool), and switching with an immediate
// restart. `events` captures every mock call; the tail assertions pin the
// exact sequence across all three invocations.
test("Menu helpers execute model callback actions across update, switch, and restart paths", async () => {
  const events: string[] = [];
  const modelA = { provider: "openai", id: "gpt-5", reasoning: true } as const;
  const modelB = { provider: "anthropic", id: "claude-3", reasoning: false } as const;
  // Minimal state cast; only fields the handler reads are populated.
  const state = {
    chatId: 1,
    messageId: 2,
    page: 0,
    scope: "all" as const,
    scopedModels: [],
    allModels: [{ model: modelA }, { model: modelB }],
    mode: "model" as const,
  } as unknown as TelegramModelMenuState;
  // Path 1: page navigation while idle → menu redraw, then empty answer.
  assert.equal(
    await handleTelegramModelMenuCallbackAction(
      "callback-1",
      {
        data: "model:page:1",
        state,
        activeModel: modelA as never,
        currentThinkingLevel: "medium",
        isIdle: true,
        canRestartBusyRun: false,
        hasActiveToolExecutions: false,
      },
      {
        updateModelMenuMessage: async () => {
          events.push("update-menu");
        },
        updateStatusMessage: async () => {
          events.push("status");
        },
        answerCallbackQuery: async (_id, text) => {
          events.push(`answer:${text ?? ""}`);
        },
        setModel: async () => true,
        setCurrentModel: (model) => {
          events.push(`current:${model.id}`);
        },
        setThinkingLevel: (level) => {
          events.push(`thinking:${level}`);
        },
        stagePendingModelSwitch: (selection) => {
          events.push(`pending:${selection.model.id}`);
        },
        restartInterruptedTelegramTurn: (selection) => {
          events.push(`restart:${selection.model.id}`);
          return true;
        },
      },
    ),
    true,
  );
  // Path 2: busy with an active tool and a restartable run → the switch is
  // staged as pending; "unexpected:update" would flag a stray menu redraw.
  assert.equal(
    await handleTelegramModelMenuCallbackAction(
      "callback-2",
      {
        data: "model:pick:1",
        state,
        activeModel: modelA as never,
        currentThinkingLevel: "medium",
        isIdle: false,
        canRestartBusyRun: true,
        hasActiveToolExecutions: true,
      },
      {
        updateModelMenuMessage: async () => {
          events.push("unexpected:update");
        },
        updateStatusMessage: async () => {
          events.push("status");
        },
        answerCallbackQuery: async (_id, text) => {
          events.push(`answer:${text ?? ""}`);
        },
        setModel: async () => true,
        setCurrentModel: (model) => {
          events.push(`current:${model.id}`);
        },
        setThinkingLevel: (level) => {
          events.push(`thinking:${level}`);
        },
        stagePendingModelSwitch: (selection) => {
          events.push(`pending:${selection.model.id}`);
        },
        restartInterruptedTelegramTurn: (selection) => {
          events.push(`restart:${selection.model.id}`);
          return true;
        },
      },
    ),
    true,
  );
  // Path 3: busy, restartable, no tool running → restart immediately.
  assert.equal(
    await handleTelegramModelMenuCallbackAction(
      "callback-3",
      {
        data: "model:pick:1",
        state,
        activeModel: modelA as never,
        currentThinkingLevel: "medium",
        isIdle: false,
        canRestartBusyRun: true,
        hasActiveToolExecutions: false,
      },
      {
        updateModelMenuMessage: async () => {
          events.push("unexpected:update");
        },
        updateStatusMessage: async () => {
          events.push("status");
        },
        answerCallbackQuery: async (_id, text) => {
          events.push(`answer:${text ?? ""}`);
        },
        setModel: async () => true,
        setCurrentModel: (model) => {
          events.push(`current:${model.id}`);
        },
        setThinkingLevel: (level) => {
          events.push(`thinking:${level}`);
        },
        stagePendingModelSwitch: (selection) => {
          events.push(`pending:${selection.model.id}`);
        },
        restartInterruptedTelegramTurn: (selection) => {
          events.push(`restart:${selection.model.id}`);
          return true;
        },
      },
    ),
    true,
  );
  // Full event sequence across the three calls, in order.
  assert.equal(events[0], "update-menu");
  assert.equal(events[1], "answer:");
  assert.equal(events[2], "current:claude-3");
  assert.equal(events[3], "status");
  assert.equal(events[4], "pending:claude-3");
  assert.equal(
    events[5],
    "answer:Switched to claude-3. Restarting after the current tool finishes…",
  );
  assert.equal(events[6], "current:claude-3");
  assert.equal(events[7], "status");
  assert.equal(events[8], "restart:claude-3");
  assert.equal(events[9], "answer:Switching to claude-3 and continuing…");
});
|
|
443
|
+
|
|
444
|
+
// Drives the status-menu and thinking-menu action handlers with mocked
// transports: opening the model menu from status, applying a thinking level,
// and requesting thinking controls on a non-reasoning model (which only
// answers the query). `events` records call order; tail assertions pin it.
test("Menu helpers handle status and thinking callback actions", async () => {
  const events: string[] = [];
  const reasoningModel = {
    provider: "openai",
    id: "gpt-5",
    reasoning: true,
  } as const;
  const plainModel = {
    provider: "openai",
    id: "gpt-4o",
    reasoning: false,
  } as const;
  // "status:model" on a reasoning model → model menu is rendered.
  assert.equal(
    await handleTelegramStatusMenuCallbackAction(
      "callback-1",
      "status:model",
      reasoningModel as never,
      {
        updateModelMenuMessage: async () => {
          events.push("status:model");
        },
        updateThinkingMenuMessage: async () => {
          events.push("status:thinking");
        },
        answerCallbackQuery: async (_id, text) => {
          events.push(`answer:${text ?? ""}`);
        },
      },
    ),
    true,
  );
  // "thinking:set:high" → level is applied, status redrawn, answer confirms.
  assert.equal(
    await handleTelegramThinkingMenuCallbackAction(
      "callback-2",
      "thinking:set:high",
      reasoningModel as never,
      {
        setThinkingLevel: (level) => {
          events.push(`set:${level}`);
        },
        getCurrentThinkingLevel: () => "high",
        updateStatusMessage: async () => {
          events.push("status:update");
        },
        answerCallbackQuery: async (_id, text) => {
          events.push(`answer:${text ?? ""}`);
        },
      },
    ),
    true,
  );
  // "status:thinking" on a non-reasoning model → no menu updates at all
  // ("unexpected:*" would flag them), just an explanatory answer.
  assert.equal(
    await handleTelegramStatusMenuCallbackAction(
      "callback-3",
      "status:thinking",
      plainModel as never,
      {
        updateModelMenuMessage: async () => {
          events.push("unexpected:model");
        },
        updateThinkingMenuMessage: async () => {
          events.push("unexpected:thinking");
        },
        answerCallbackQuery: async (_id, text) => {
          events.push(`answer:${text ?? ""}`);
        },
      },
    ),
    true,
  );
  assert.equal(events[0], "status:model");
  assert.equal(events[1], "answer:");
  assert.equal(events[2], "set:high");
  assert.equal(events[3], "status:update");
  assert.equal(events[4], "answer:Thinking: high");
  assert.equal(events[5], "answer:This model has no reasoning controls.");
});
|
|
521
|
+
|
|
522
|
+
// Verifies the pure payload builders that describe what to render (text,
// parse mode, and the menu mode to transition into) before any transport
// call happens — and that building them never mutates the state.
test("Menu helpers build pure render payloads before transport", () => {
  const modelA = { provider: "openai", id: "gpt-5", reasoning: true } as const;
  // Minimal state cast; only fields the builders read are populated.
  const state = {
    chatId: 1,
    messageId: 2,
    page: 0,
    scope: "all" as const,
    scopedModels: [],
    allModels: [{ model: modelA }],
    mode: "status" as const,
  } as unknown as TelegramModelMenuState;
  const modelPayload = buildTelegramModelMenuRenderPayload(state, modelA as never);
  const thinkingPayload = buildTelegramThinkingMenuRenderPayload(modelA as never, "medium");
  const statusPayload = buildTelegramStatusMenuRenderPayload(
    "<b>Status</b>",
    modelA as never,
    "medium",
  );
  // Model menu renders as HTML and transitions the menu into "model" mode.
  assert.equal(modelPayload.nextMode, "model");
  assert.equal(modelPayload.text, "<b>Choose a model:</b>");
  assert.equal(modelPayload.mode, "html");
  // Thinking menu renders as plain text.
  assert.equal(thinkingPayload.nextMode, "thinking");
  assert.match(thinkingPayload.text, /^Choose a thinking level/);
  assert.equal(thinkingPayload.mode, "plain");
  // Status payload echoes the provided HTML text verbatim.
  assert.equal(statusPayload.nextMode, "status");
  assert.equal(statusPayload.text, "<b>Status</b>");
  assert.equal(statusPayload.mode, "html");
  // Building payloads is side-effect free: state.mode is untouched.
  assert.equal(state.mode, "status");
});
|
|
551
|
+
|
|
552
|
+
// Exercises the transport-facing helpers: three in-place edits of an
// existing interactive message followed by two fresh sends. The mocked
// transport records each call so the exact order and payloads can be pinned.
test("Menu helpers update and send interactive menu messages", async () => {
  const events: string[] = [];
  const modelA = { provider: "openai", id: "gpt-5", reasoning: true } as const;
  // Minimal state cast; only fields the helpers read are populated.
  const state = {
    chatId: 1,
    messageId: 2,
    page: 0,
    scope: "all" as const,
    scopedModels: [],
    allModels: [{ model: modelA }],
    mode: "status" as const,
  } as unknown as TelegramModelMenuState;
  // Mock transport: edits log their target and payload; sends also return a
  // fixed new message id (99) so the senders' return value can be checked.
  const deps = {
    editInteractiveMessage: async (
      chatId: number,
      messageId: number,
      text: string,
      mode: "html" | "plain",
    ) => {
      events.push(`edit:${chatId}:${messageId}:${mode}:${text}`);
    },
    sendInteractiveMessage: async (
      chatId: number,
      text: string,
      mode: "html" | "plain",
    ) => {
      events.push(`send:${chatId}:${mode}:${text}`);
      return 99;
    },
  };
  await updateTelegramModelMenuMessage(state, modelA as never, deps);
  await updateTelegramThinkingMenuMessage(state, modelA as never, "medium", deps);
  await updateTelegramStatusMessage(
    state,
    "<b>Status</b>",
    modelA as never,
    "medium",
    deps,
  );
  const sentStatusId = await sendTelegramStatusMessage(
    state,
    "<b>Status</b>",
    modelA as never,
    "medium",
    deps,
  );
  const sentModelId = await sendTelegramModelMenuMessage(state, modelA as never, deps);
  // Senders surface the transport's message id.
  assert.equal(sentStatusId, 99);
  assert.equal(sentModelId, 99);
  // Updates edit chat 1 / message 2 with the expected text and parse mode;
  // sends target chat 1 with fresh messages.
  assert.equal(events[0], "edit:1:2:html:<b>Choose a model:</b>");
  assert.match(events[1] ?? "", /^edit:1:2:plain:Choose a thinking level/);
  assert.equal(events[2], "edit:1:2:html:<b>Status</b>");
  assert.equal(events[3], "send:1:html:<b>Status</b>");
  assert.equal(events[4], "send:1:html:<b>Choose a model:</b>");
});
|
|
607
|
+
|
|
608
|
+
// Verifies the inline-keyboard and text builders: scoped-model item
// selection, the active-model checkmark, callback data layout, thinking-menu
// text/markup, and how the status keyboard shrinks for non-reasoning models.
test("Menu helpers build model, thinking, and status UI payloads", () => {
  const modelA = { provider: "openai", id: "gpt-5", reasoning: true } as const;
  const modelB = {
    provider: "anthropic",
    id: "claude-3",
    reasoning: false,
  } as const;
  // State in "scoped" scope: menu items must come from scopedModels, not allModels.
  const state = {
    chatId: 1,
    messageId: 2,
    page: 0,
    scope: "scoped" as const,
    scopedModels: [{ model: modelA, thinkingLevel: "high" as const }],
    allModels: [{ model: modelB }],
    mode: "model" as const,
  } as unknown as TelegramModelMenuState;
  assert.deepEqual(getModelMenuItems(state), state.scopedModels);
  // The currently-active model's button is prefixed with a checkmark.
  assert.match(
    formatScopedModelButtonText(state.scopedModels[0], modelA as never),
    /^✅ /,
  );
  // First keyboard button carries the pick callback for index 0.
  const modelMarkup = buildModelMenuReplyMarkup(state, modelA as never, 6);
  assert.equal(
    modelMarkup.inline_keyboard[0]?.[0]?.callback_data,
    "model:pick:0",
  );
  // Thinking menu text names the canonical model id.
  const thinkingText = buildThinkingMenuText(modelA as never, "medium");
  assert.match(thinkingText, /Model: openai\/gpt-5/);
  // The current thinking level's row is checkmarked.
  const thinkingMarkup = buildThinkingMenuReplyMarkup("medium");
  assert.equal(
    thinkingMarkup.inline_keyboard.some((row) => row[0]?.text === "✅ medium"),
    true,
  );
  // Reasoning models get an extra keyboard row (2 vs 1 for plain models).
  const statusMarkup = buildStatusReplyMarkup(modelA as never, "medium");
  assert.equal(statusMarkup.inline_keyboard.length, 2);
  const noReasoningMarkup = buildStatusReplyMarkup(modelB as never, "medium");
  assert.equal(noReasoningMarkup.inline_keyboard.length, 1);
});
|