@www.hyperlinks.space/program-kit 1.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +53 -0
- package/api/ai.ts +111 -0
- package/api/base.ts +117 -0
- package/api/blockchain.ts +58 -0
- package/api/bot.ts +19 -0
- package/api/ping.ts +41 -0
- package/api/releases.ts +162 -0
- package/api/telegram.ts +65 -0
- package/api/tsconfig.json +17 -0
- package/app/_layout.tsx +135 -0
- package/app/ai.tsx +39 -0
- package/app/components/GlobalBottomBar.tsx +447 -0
- package/app/components/GlobalBottomBarWeb.tsx +362 -0
- package/app/components/GlobalLogoBar.tsx +108 -0
- package/app/components/GlobalLogoBarFallback.tsx +66 -0
- package/app/components/GlobalLogoBarWithFallback.tsx +24 -0
- package/app/components/HyperlinksSpaceLogo.tsx +29 -0
- package/app/components/Telegram.tsx +648 -0
- package/app/components/telegramWebApp.ts +359 -0
- package/app/fonts.ts +12 -0
- package/app/index.tsx +102 -0
- package/app/theme.ts +117 -0
- package/app.json +60 -0
- package/assets/icon.ico +0 -0
- package/assets/images/favicon.png +0 -0
- package/blockchain/coffee.ts +217 -0
- package/blockchain/router.ts +44 -0
- package/bot/format.ts +143 -0
- package/bot/grammy.ts +52 -0
- package/bot/responder.ts +620 -0
- package/bot/webhook.ts +262 -0
- package/database/messages.ts +128 -0
- package/database/start.ts +133 -0
- package/database/users.ts +46 -0
- package/docs/ai_and_search_bar_input.md +94 -0
- package/docs/ai_bot_messages.md +124 -0
- package/docs/backlogs/medium_term_backlog.md +26 -0
- package/docs/backlogs/short_term_backlog.md +39 -0
- package/docs/blue_bar_tackling.md +143 -0
- package/docs/bot_async_streaming.md +174 -0
- package/docs/build_and_install.md +129 -0
- package/docs/database_messages.md +34 -0
- package/docs/fonts.md +18 -0
- package/docs/releases.md +201 -0
- package/docs/releases_github_actions.md +188 -0
- package/docs/scalability.md +34 -0
- package/docs/security_plan_raw.md +244 -0
- package/docs/security_raw.md +345 -0
- package/docs/timing_raw.md +63 -0
- package/docs/tma_logo_bar_jump_investigation.md +69 -0
- package/docs/update.md +205 -0
- package/docs/wallets_hosting_architecture.md +257 -0
- package/eas.json +47 -0
- package/eslint.config.js +10 -0
- package/fullREADME.md +159 -0
- package/global.css +67 -0
- package/npmReadMe.md +53 -0
- package/package.json +214 -0
- package/scripts/load-env.ts +17 -0
- package/scripts/migrate-db.ts +16 -0
- package/scripts/program-kit-init.cjs +58 -0
- package/scripts/run-bot-local.ts +30 -0
- package/scripts/set-webhook.ts +67 -0
- package/scripts/test-api-base.ts +12 -0
- package/telegram/post.ts +328 -0
- package/tsconfig.json +17 -0
- package/vercel.json +7 -0
- package/windows/after-sign-windows-icon.cjs +13 -0
- package/windows/build-layout.cjs +72 -0
- package/windows/build-with-progress.cjs +88 -0
- package/windows/build.cjs +2247 -0
- package/windows/cleanup-legacy-appdata-installs.ps1 +91 -0
- package/windows/cleanup-legacy-windows-shortcuts.ps1 +46 -0
- package/windows/cleanup.cjs +200 -0
- package/windows/embed-windows-exe-icon.cjs +55 -0
- package/windows/extractAppPackage.nsh +150 -0
- package/windows/forge/README.md +41 -0
- package/windows/forge/forge.config.js +138 -0
- package/windows/forge/make-with-stamp.cjs +65 -0
- package/windows/forge-cleanup.cjs +255 -0
- package/windows/hsp-app-process.ps1 +63 -0
- package/windows/installer-hooks.nsi +373 -0
- package/windows/product-brand.cjs +42 -0
- package/windows/remove-orphan-uninstall-registry.ps1 +67 -0
- package/windows/run-installed-with-icon-debug.cmd +20 -0
- package/windows/run-win-electron-builder.cjs +46 -0
- package/windows/updater-dialog.html +143 -0
package/bot/responder.ts
ADDED
|
@@ -0,0 +1,620 @@
|
|
|
1
|
+
import type { Context } from "grammy";
|
|
2
|
+
import { normalizeSymbol } from "../blockchain/coffee.js";
|
|
3
|
+
import { transmit, transmitStream } from "../ai/transmitter.js";
|
|
4
|
+
import { normalizeUsername } from "../database/users.js";
|
|
5
|
+
import { getMaxTelegramUpdateIdForThread, insertMessage } from "../database/messages.js";
|
|
6
|
+
import {
|
|
7
|
+
closeOpenTelegramHtml,
|
|
8
|
+
mdToTelegramHtml,
|
|
9
|
+
stripUnpairedMarkdownDelimiters,
|
|
10
|
+
truncateTelegramHtmlSafe,
|
|
11
|
+
} from "./format.js";
|
|
12
|
+
|
|
13
|
+
/** Telegram text message length limit. Chunking and HTML-safe truncation below keep every outgoing payload within this bound. */
const MAX_MESSAGE_TEXT_LENGTH = 4096;

/** Instruction passed to AI when the message comes from the bot: keep replies under 4096 chars and mention TMA for long answers. */
// NOTE: this string is sent verbatim to the AI provider as an instruction — do not reword casually.
const TELEGRAM_BOT_LENGTH_INSTRUCTION =
  "Please give an answer in less than 4096 chars. If user asks for a long message or a message with more than 4096 chars add a sentence that full responses are available only in TMA and your bot you can give just a short answer that follows.";
|
|
19
|
+
|
|
20
|
+
/** Split text into chunks of at most maxLen, preferring to break at newlines. */
|
|
21
|
+
function chunkText(text: string, maxLen: number): string[] {
|
|
22
|
+
if (text.length <= maxLen) return [text];
|
|
23
|
+
const chunks: string[] = [];
|
|
24
|
+
let start = 0;
|
|
25
|
+
while (start < text.length) {
|
|
26
|
+
let end = Math.min(start + maxLen, text.length);
|
|
27
|
+
if (end < text.length) {
|
|
28
|
+
const lastNewline = text.lastIndexOf("\n", end - 1);
|
|
29
|
+
if (lastNewline >= start) end = lastNewline + 1;
|
|
30
|
+
}
|
|
31
|
+
chunks.push(text.slice(start, end));
|
|
32
|
+
start = end;
|
|
33
|
+
}
|
|
34
|
+
return chunks;
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
/** Send long text as multiple messages (each ≤ MAX_MESSAGE_TEXT_LENGTH). First chunk replies to replyToMessageId or uses replyOptions; rest reply to previous sent message. */
|
|
38
|
+
async function sendLongMessage(
|
|
39
|
+
api: Context["api"],
|
|
40
|
+
chatId: number,
|
|
41
|
+
fullText: string,
|
|
42
|
+
replyOptions: { message_thread_id?: number; reply_parameters?: { message_id: number } },
|
|
43
|
+
replyOptionsWithHtml: { message_thread_id?: number; reply_parameters?: { message_id: number }; parse_mode: "HTML" },
|
|
44
|
+
opts: { replyToMessageId?: number },
|
|
45
|
+
): Promise<void> {
|
|
46
|
+
const chunks = chunkText(fullText, MAX_MESSAGE_TEXT_LENGTH);
|
|
47
|
+
if (chunks.length === 0) return;
|
|
48
|
+
let lastSentId: number | undefined = opts.replyToMessageId;
|
|
49
|
+
for (let i = 0; i < chunks.length; i++) {
|
|
50
|
+
const formatted = truncateTelegramHtmlSafe(
|
|
51
|
+
closeOpenTelegramHtml(
|
|
52
|
+
stripUnpairedMarkdownDelimiters(mdToTelegramHtml(chunks[i])),
|
|
53
|
+
),
|
|
54
|
+
MAX_MESSAGE_TEXT_LENGTH,
|
|
55
|
+
);
|
|
56
|
+
const partOptions =
|
|
57
|
+
i === 0 && lastSentId === undefined
|
|
58
|
+
? replyOptionsWithHtml
|
|
59
|
+
: {
|
|
60
|
+
...(replyOptions.message_thread_id !== undefined ? { message_thread_id: replyOptions.message_thread_id } : {}),
|
|
61
|
+
...(lastSentId !== undefined ? { reply_parameters: { message_id: lastSentId } } : {}),
|
|
62
|
+
parse_mode: "HTML" as const,
|
|
63
|
+
};
|
|
64
|
+
try {
|
|
65
|
+
const sent = await api.sendMessage(chatId, formatted, partOptions);
|
|
66
|
+
const id = (sent as { message_id?: number }).message_id;
|
|
67
|
+
if (typeof id === "number") lastSentId = id;
|
|
68
|
+
} catch (e) {
|
|
69
|
+
console.error("[bot][sendLongMessage]", (e as Error)?.message ?? e);
|
|
70
|
+
try {
|
|
71
|
+
const markdown = toTelegramMarkdown(chunks[i]);
|
|
72
|
+
const sent = await api.sendMessage(chatId, markdown, {
|
|
73
|
+
...partOptions,
|
|
74
|
+
parse_mode: "Markdown",
|
|
75
|
+
});
|
|
76
|
+
const id = (sent as { message_id?: number }).message_id;
|
|
77
|
+
if (typeof id === "number") lastSentId = id;
|
|
78
|
+
} catch {
|
|
79
|
+
const sent = await api.sendMessage(chatId, chunks[i], {
|
|
80
|
+
...(replyOptions.message_thread_id !== undefined ? { message_thread_id: replyOptions.message_thread_id } : {}),
|
|
81
|
+
...(lastSentId !== undefined ? { reply_parameters: { message_id: lastSentId } } : {}),
|
|
82
|
+
});
|
|
83
|
+
const id = (sent as { message_id?: number }).message_id;
|
|
84
|
+
if (typeof id === "number") lastSentId = id;
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
/** Convert AI-style markdown to Telegram Markdown (* bold, _ italic, ` code) for parse_mode fallback. */
|
|
91
|
+
function toTelegramMarkdown(s: string): string {
|
|
92
|
+
return s.replace(/\*\*/g, "*");
|
|
93
|
+
}
|
|
94
|
+
/** Throttle editMessageText to avoid Telegram 429 rate limits. */
const EDIT_THROTTLE_MS = 500;
/** If content grew by more than this many chars, edit immediately so long tail doesn't stick. */
const EDIT_MIN_CHARS_TO_SEND_NOW = 20;

/** Track latest generation per chat so newer messages cancel older streams.
 *  Key: numeric chat id; value: monotonically increasing counter bumped by
 *  handleBotAiResponse at the start of each turn. An in-flight turn whose
 *  generation no longer matches the map treats itself as cancelled.
 *  NOTE(review): module-level state — per-process only; assumes one process
 *  handles a given chat at a time (the DB update-id check covers the rest). */
const chatGenerations = new Map<number, number>();
|
|
101
|
+
|
|
102
|
+
/** Source metadata forwarded to the AI layer for bot-originated requests. */
type BotSourceContext = {
  // Discriminator: requests built here always come from the bot.
  source: "bot";
  // Sender's Telegram username; null when Telegram did not provide one.
  username?: string | null;
  // Sender's language_code; null when absent or not a string.
  locale?: string | null;
};
|
|
107
|
+
|
|
108
|
+
function buildBotContext(ctx: Context): BotSourceContext {
|
|
109
|
+
const from = ctx.from;
|
|
110
|
+
return {
|
|
111
|
+
source: "bot",
|
|
112
|
+
username: from?.username ?? null,
|
|
113
|
+
locale:
|
|
114
|
+
typeof from?.language_code === "string" ? from.language_code : null,
|
|
115
|
+
};
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
function extractPlainText(ctx: Context): string | null {
|
|
119
|
+
const msg = ctx.message;
|
|
120
|
+
if (!msg) return null;
|
|
121
|
+
if ("text" in msg && typeof msg.text === "string") {
|
|
122
|
+
return msg.text.trim();
|
|
123
|
+
}
|
|
124
|
+
if ("caption" in msg && typeof (msg as any).caption === "string") {
|
|
125
|
+
return (msg as any).caption.trim();
|
|
126
|
+
}
|
|
127
|
+
return null;
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
/** True if message looks like a single token ticker (e.g. DOGS, TON, $USDT). */
|
|
131
|
+
function looksLikeTicker(text: string): boolean {
|
|
132
|
+
const parts = text.split(/\s+/);
|
|
133
|
+
const first = parts[0]?.replace(/^\$/g, "") ?? "";
|
|
134
|
+
return parts.length === 1 && normalizeSymbol(first).length > 0;
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
/**
 * Handle one incoming bot message end-to-end: extract the text, route it to
 * the AI (streaming send-early-then-edit in private chats, single-shot
 * otherwise), and deliver the answer while respecting topic/thread targeting,
 * the 4096-char message limit, edit throttling, and two cancellation
 * mechanisms (in-process generation counter, cross-process DB update-id check).
 */
export async function handleBotAiResponse(ctx: Context): Promise<void> {
  const from = ctx.from;
  const userId = from ? String(from.id) : undefined;
  const context = buildBotContext(ctx);

  const text = extractPlainText(ctx);
  /** When the user writes in a topic/thread, we must send drafts and replies to the same thread. */
  const messageThreadId =
    typeof (ctx.message as { message_thread_id?: number } | undefined)?.message_thread_id === "number"
      ? (ctx.message as { message_thread_id: number }).message_thread_id
      : undefined;
  const replyToMessageId =
    ctx.message && typeof (ctx.message as { message_id?: number }).message_id === "number"
      ? (ctx.message as { message_id: number }).message_id
      : undefined;
  // Shared send options: target the same topic and quote the user's message when known.
  const replyOptions: { message_thread_id?: number; reply_parameters?: { message_id: number } } = {
    ...(messageThreadId !== undefined ? { message_thread_id: messageThreadId } : {}),
    ...(replyToMessageId !== undefined ? { reply_parameters: { message_id: replyToMessageId } } : {}),
  };
  const replyOptionsWithHtml = { ...replyOptions, parse_mode: "HTML" as const };

  if (!text) {
    // Prompt only when the update actually carried a text/caption field that
    // trimmed to empty; silently ignore media-only updates with neither field.
    const msg = ctx.message;
    const hasTextOrCaption =
      (msg && "text" in msg) || (msg && "caption" in (msg as any));
    if (hasTextOrCaption) {
      await ctx.reply("Send me a message or token ticker (e.g. USDT).", replyOptions);
    }
    return;
  }

  // Persistence/claim context — only built when both username and update_id are known.
  const user_telegram = normalizeUsername(from?.username);
  const thread_id = messageThreadId ?? 0;
  const update_id = typeof (ctx.update as { update_id?: number }).update_id === "number"
    ? (ctx.update as { update_id: number }).update_id
    : undefined;
  const threadContext =
    user_telegram && update_id !== undefined
      ? { user_telegram, thread_id, type: "bot" as const, telegram_update_id: update_id }
      : undefined;

  const mode = looksLikeTicker(text) ? "token_info" : "chat";
  const chatId = ctx.chat?.id;
  const isPrivate = ctx.chat?.type === "private";
  // Streaming (send one message early, then edit it) is only used in private chats.
  const canStream = isPrivate && typeof chatId === "number";
  /** When streaming we send one message early then edit it; used to detect streaming path. */
  let streamSentMessageId: number | null = null;

  // Bump this chat's generation; any older in-flight turn sees the mismatch and cancels.
  const numericChatId =
    typeof chatId === "number" ? chatId : undefined;
  let generation = 0;
  if (numericChatId !== undefined) {
    const prev = chatGenerations.get(numericChatId) ?? 0;
    generation = prev + 1;
    chatGenerations.set(numericChatId, generation);
  }
  const isCancelled = (): boolean =>
    numericChatId !== undefined &&
    chatGenerations.get(numericChatId) !== generation;

  // Cross-process guard: if the DB already holds a newer update for this thread,
  // a newer turn is in flight elsewhere and this one must stop sending.
  const shouldAbortSend = async (): Promise<boolean> => {
    if (!threadContext) return false;
    const max = await getMaxTelegramUpdateIdForThread(
      threadContext.user_telegram,
      threadContext.thread_id,
      "bot",
    );
    return max !== null && max !== threadContext.telegram_update_id;
  };

  let result: Awaited<ReturnType<typeof transmit>>;
  /** Set in streaming path; when cancelled send partial and persist. When aborted by newer message, persist only (no send) to avoid flash. */
  let interruptedReplyCallback: ((opts: { sendToChat: boolean }) => Promise<void>) | null = null;

  if (canStream && chatId !== undefined) {
    // ---- Streaming path: draft message sent early, then repeatedly edited. ----
    let sentMessageId: number | null = null;
    let lastEdited = "";
    let lastSendTime = 0;
    let pending: string | null = null;
    let throttleTimer: ReturnType<typeof setTimeout> | null = null;
    // Once Telegram rejects an edit for a non-retryable reason, stop editing for this turn.
    let editsDisabled = false;
    /** Latest accumulated text from stream; used for interrupted reply and persist. */
    let streamedAccumulated = "";

    /** When turn is interrupted: message already exists (we sent early); optionally final edit, always persist. HTML only (format pipeline is strict). */
    const sendInterruptedReply = async (opts: { sendToChat: boolean }): Promise<void> => {
      const content = streamedAccumulated.trim();
      if (sentMessageId !== null && content.length > 0) {
        // Draft exists: finalize it in place with whatever was streamed so far.
        const toEdit = truncateTelegramHtmlSafe(
          closeOpenTelegramHtml(
            stripUnpairedMarkdownDelimiters(mdToTelegramHtml(content)),
          ),
          MAX_MESSAGE_TEXT_LENGTH,
        );
        try {
          await ctx.api.editMessageText(chatId, sentMessageId, toEdit, { parse_mode: "HTML" });
        } catch (e) {
          console.error("[bot][edit] interrupted reply", (e as Error)?.message ?? e);
        }
      } else if (opts.sendToChat && content.length > 0) {
        // No draft yet but partial content exists: send it as a fresh reply.
        const toSend = truncateTelegramHtmlSafe(
          closeOpenTelegramHtml(
            stripUnpairedMarkdownDelimiters(mdToTelegramHtml(content)),
          ),
          MAX_MESSAGE_TEXT_LENGTH,
        );
        try {
          await ctx.reply(toSend, replyOptionsWithHtml);
        } catch (e) {
          console.error("[bot][reply] interrupted", (e as Error)?.message ?? e);
        }
      } else if (opts.sendToChat && sentMessageId === null) {
        // Nothing streamed at all: best-effort placeholder so the user sees a response.
        try {
          await ctx.reply("…", replyOptions);
        } catch (_) {}
      }
      // Always persist the partial content so TMA/history shows it.
      if (threadContext && content.length > 0) {
        await insertMessage({
          user_telegram: threadContext.user_telegram,
          thread_id: threadContext.thread_id,
          type: "bot",
          role: "assistant",
          content,
        });
      }
    };

    /** One send (first) or edit in flight at a time so we never send multiple messages by race. */
    let sendOrEditQueue = Promise.resolve<void>(undefined);
    // Simple character "spinner" shown in the draft while no content has arrived yet.
    const typingFrames = ["%", "#", "@", "+", "@", "#"];
    let typingIndex = 0;
    let typingInterval: ReturnType<typeof setInterval> | null = null;

    const stopTypingSpinner = (): void => {
      if (!typingInterval) return;
      clearInterval(typingInterval);
      typingInterval = null;
    };

    /** First call sends a message (claims message_id); later calls edit that message. HTML only; format pipeline is strict so Telegram accepts it. */
    const sendOrEditOnce = (formatted: string, _rawSlice: string): Promise<void> => {
      const run = async (): Promise<void> => {
        if (await shouldAbortSend()) return;
        if (isCancelled() || editsDisabled) return;
        const text = truncateTelegramHtmlSafe(formatted.trim() || "…", MAX_MESSAGE_TEXT_LENGTH);
        try {
          if (sentMessageId === null) {
            const sent = await ctx.api.sendMessage(chatId, text, replyOptionsWithHtml);
            const id = (sent as { message_id?: number }).message_id;
            if (typeof id === "number") {
              sentMessageId = id;
              streamSentMessageId = id;
            }
          } else {
            await ctx.api.editMessageText(chatId, sentMessageId, text, { parse_mode: "HTML" });
          }
        } catch (e: unknown) {
          const err = e as { error_code?: number; description?: string; parameters?: { retry_after?: number } };
          // "message is not modified" is benign — content unchanged, nothing to do.
          if (err?.description?.includes("not modified")) return;
          if (err?.error_code === 429) {
            // Rate limited: wait retry_after (capped at 2s), then retry once.
            await new Promise((r) => setTimeout(r, Math.min((err.parameters?.retry_after ?? 1) * 1000, 2000)));
            try {
              if (sentMessageId === null) {
                const sent = await ctx.api.sendMessage(chatId, text, replyOptionsWithHtml);
                const id = (sent as { message_id?: number }).message_id;
                if (typeof id === "number") {
                  sentMessageId = id;
                  streamSentMessageId = id;
                }
              } else {
                await ctx.api.editMessageText(chatId, sentMessageId, text, { parse_mode: "HTML" });
              }
            } catch (e2) {
              console.error("[bot][edit] 429 retry failed", (e2 as Error)?.message ?? e2);
              editsDisabled = true;
            }
          } else {
            // Non-retryable rejection (e.g. bad HTML): disable further edits this turn.
            console.error("[bot][edit] HTML rejected", err?.description ?? (e as Error)?.message ?? e);
            editsDisabled = true;
          }
        }
      };
      // Serialize through the queue so sends/edits never interleave.
      sendOrEditQueue = sendOrEditQueue.then(() => run());
      return sendOrEditQueue;
    };

    // Flush a throttled pending slice; returns the send promise only when awaitSend is set.
    const flushEdit = (awaitSend = false): void | Promise<void> => {
      if (isCancelled()) return;
      if (pending === null) return;
      const slice = pending;
      pending = null;
      throttleTimer = null;
      lastEdited = slice;
      lastSendTime = Date.now();
      const formatted = closeOpenTelegramHtml(
        stripUnpairedMarkdownDelimiters(mdToTelegramHtml(slice)),
      );
      if (!formatted.trim() && !slice.trim()) return;
      const p = sendOrEditOnce(formatted, slice);
      if (awaitSend) return p;
      void p;
    };

    // Per-delta callback from the stream: throttled draft updates.
    const sendOrEdit = (accumulated: string): void => {
      stopTypingSpinner();
      streamedAccumulated = accumulated;
      if (isCancelled()) return;
      // The draft only ever shows the first 4096 chars; overflow is sent later.
      const slice = accumulated.length > MAX_MESSAGE_TEXT_LENGTH
        ? accumulated.slice(0, MAX_MESSAGE_TEXT_LENGTH)
        : accumulated;
      if (slice === lastEdited && (sentMessageId !== null || lastEdited !== "")) return;
      const formatted = closeOpenTelegramHtml(
        stripUnpairedMarkdownDelimiters(mdToTelegramHtml(slice)),
      );
      // Formatter stripped everything from a non-empty slice: record it and skip this edit.
      if (!formatted.trim() && slice.trim()) {
        lastEdited = slice;
        return;
      }
      if (!slice.trim() && sentMessageId !== null) return;
      const now = Date.now();
      const throttleElapsed = now - lastSendTime;
      const bigChunk = slice.length - lastEdited.length >= EDIT_MIN_CHARS_TO_SEND_NOW;
      // Send immediately on: first message, throttle window elapsed, or a big growth burst.
      const shouldSendNow =
        sentMessageId === null ||
        throttleElapsed >= EDIT_THROTTLE_MS ||
        (bigChunk && slice.length > lastEdited.length);
      if (shouldSendNow) {
        lastEdited = slice;
        lastSendTime = now;
        pending = null;
        if (throttleTimer) {
          clearTimeout(throttleTimer);
          throttleTimer = null;
        }
        void sendOrEditOnce(formatted, slice);
      } else {
        // Within the throttle window: stash the slice and schedule a single flush.
        pending = slice;
        if (!throttleTimer) {
          throttleTimer = setTimeout(
            () => void flushEdit(),
            EDIT_THROTTLE_MS - throttleElapsed,
          );
        }
      }
    };

    interruptedReplyCallback = sendInterruptedReply;

    // Claim the draft message up-front with the first spinner frame.
    await sendOrEditOnce(typingFrames[typingIndex], typingFrames[typingIndex]);

    // Animate the spinner until the first real delta arrives (sendOrEdit stops it).
    typingInterval = setInterval(() => {
      if (sentMessageId === null) return;
      typingIndex = (typingIndex + 1) % typingFrames.length;
      ctx.api
        .editMessageText(chatId, sentMessageId, typingFrames[typingIndex])
        .catch(() => {});
    }, 300);
    try {
      result = await transmitStream(
        { input: text, userId, context, mode, threadContext, instructions: TELEGRAM_BOT_LENGTH_INSTRUCTION },
        sendOrEdit,
        {
          isCancelled,
          getAbortSignal: async () => (await shouldAbortSend()) || isCancelled(),
        },
      );
    } catch (e) {
      const errMsg = (e as Error)?.message ?? "AI streaming failed unexpectedly.";
      console.error("[bot][stream]", errMsg);
      // Synthesize a failed result so the shared error path below handles messaging.
      result = {
        ok: false,
        provider: "openai",
        mode,
        error: errMsg,
      };
    } finally {
      stopTypingSpinner();
    }
    if (result.skipped) {
      stopTypingSpinner();
      return;
    }
    if (isCancelled()) {
      stopTypingSpinner();
      // Newer in-process turn took over: finalize/persist the partial; only
      // send to chat if no newer cross-process update exists.
      await sendInterruptedReply({ sendToChat: !(await shouldAbortSend()) });
      return;
    }
    if (throttleTimer) {
      clearTimeout(throttleTimer);
      throttleTimer = null;
    }
    const finalFlush = flushEdit(true);
    if (finalFlush) await finalFlush;

    // Token-info fallback: if the token lookup was temporarily unavailable,
    // retry the same input as a plain chat turn (skipClaim: turn already claimed).
    if (
      mode === "token_info" &&
      (!result.ok || !result.output_text) &&
      result.error?.includes("temporarily unavailable")
    ) {
      if (isCancelled()) {
        stopTypingSpinner();
        return;
      }
      lastEdited = "";
      try {
        result = await transmitStream(
          {
            input: text,
            userId,
            context,
            mode: "chat",
            threadContext: threadContext ? { ...threadContext, skipClaim: true } : undefined,
            instructions: TELEGRAM_BOT_LENGTH_INSTRUCTION,
          },
          sendOrEdit,
          {
            isCancelled,
            getAbortSignal: async () => (await shouldAbortSend()) || isCancelled(),
          },
        );
      } catch (e) {
        const errMsg = (e as Error)?.message ?? "AI fallback streaming failed unexpectedly.";
        console.error("[bot][stream][fallback]", errMsg);
        result = {
          ok: false,
          provider: "openai",
          mode: "chat",
          error: errMsg,
        };
      } finally {
        stopTypingSpinner();
      }
      if (result.skipped) {
        stopTypingSpinner();
        return;
      }
      if (isCancelled()) {
        stopTypingSpinner();
        await sendInterruptedReply({ sendToChat: !(await shouldAbortSend()) });
        return;
      }
      if (throttleTimer) {
        clearTimeout(throttleTimer);
        throttleTimer = null;
      }
      const retryFlush = flushEdit(true);
      if (retryFlush) await retryFlush;
    }
    await sendOrEditQueue;
    // Ensure the streamed message shows the full content: last delta may not be the final snapshot (SDK/stream timing), so do one final edit from result.output_text.
    if (
      result.ok &&
      result.output_text &&
      streamSentMessageId !== null &&
      chatId !== undefined
    ) {
      const fullSlice = result.output_text.slice(0, MAX_MESSAGE_TEXT_LENGTH);
      const finalFormatted = truncateTelegramHtmlSafe(
        closeOpenTelegramHtml(
          stripUnpairedMarkdownDelimiters(mdToTelegramHtml(fullSlice)),
        ),
        MAX_MESSAGE_TEXT_LENGTH,
      );
      if (finalFormatted.trim()) {
        sendOrEditQueue = sendOrEditQueue.then(async () => {
          try {
            await ctx.api.editMessageText(chatId, streamSentMessageId!, finalFormatted, { parse_mode: "HTML" });
          } catch (e: unknown) {
            const err = e as { description?: string; message?: string };
            if (err?.description?.includes("not modified")) return;
            console.error("[bot][edit] final completion edit", err?.description ?? err?.message ?? e);
            // HTML rejected: last-chance plain-text edit of the raw slice.
            try {
              await ctx.api.editMessageText(chatId, streamSentMessageId!, fullSlice, {});
            } catch (e2: unknown) {
              const d2 = (e2 as { description?: string })?.description;
              console.error("[bot][edit] final completion plain fallback", d2 ?? (e2 as Error)?.message ?? e2);
            }
          }
        });
        await sendOrEditQueue;
      }
    }
  } else {
    // ---- Non-streaming path (groups / unknown chat id): single-shot transmit. ----
    result = await transmit({ input: text, userId, context, mode, threadContext, instructions: TELEGRAM_BOT_LENGTH_INSTRUCTION });
    if (result.skipped) return;
    if (isCancelled()) {
      return;
    }

    // Same token-info → chat fallback as the streaming path.
    if (
      mode === "token_info" &&
      (!result.ok || !result.output_text) &&
      result.error?.includes("temporarily unavailable")
    ) {
      if (isCancelled()) {
        return;
      }
      result = await transmit({
        input: text,
        userId,
        context,
        mode: "chat",
        threadContext: threadContext ? { ...threadContext, skipClaim: true } : undefined,
        instructions: TELEGRAM_BOT_LENGTH_INSTRUCTION,
      });
      if (result.skipped) return;
    }
  }

  // ---- Shared delivery/error path for both branches. ----
  if (!result.ok || !result.output_text) {
    if (await shouldAbortSend()) return;
    if (isCancelled()) return;
    const errMsg = result.error ?? "AI returned no output.";
    console.error("[bot][ai]", errMsg);
    // Token-info errors are user-meaningful; generic errors get a stock apology.
    const message: string =
      mode === "token_info" && result.error
        ? result.error
        : "AI is temporarily unavailable. Please try again in a moment.";
    if (streamSentMessageId !== null && chatId !== undefined) {
      // Reuse the draft message for the error when one exists.
      try {
        await ctx.api.editMessageText(chatId, streamSentMessageId, message, {});
      } catch {
        await ctx.reply(message, replyOptions);
      }
    } else {
      await ctx.reply(message, replyOptions);
    }
    return;
  }

  // Late cancellation checks: aborted by newer cross-process update → persist only;
  // cancelled in-process → send partial and persist.
  if (await shouldAbortSend() && interruptedReplyCallback) {
    await interruptedReplyCallback({ sendToChat: false });
    return;
  }
  if (await shouldAbortSend()) return;
  if (isCancelled() && interruptedReplyCallback) {
    await interruptedReplyCallback({ sendToChat: true });
    return;
  }
  if (isCancelled()) return;

  // Streaming path: first message already has up to 4096. Send overflow as continuation if needed; then we're done.
  if (streamSentMessageId !== null && chatId !== undefined) {
    if (result.output_text.length > MAX_MESSAGE_TEXT_LENGTH) {
      await sendLongMessage(
        ctx.api,
        chatId,
        result.output_text.slice(MAX_MESSAGE_TEXT_LENGTH),
        replyOptions,
        replyOptionsWithHtml,
        { replyToMessageId: streamSentMessageId },
      );
    }
    return;
  }

  // Non-streaming, fits in one message: HTML → Markdown → plain fallbacks.
  if (result.output_text.length <= MAX_MESSAGE_TEXT_LENGTH) {
    const textToFormat = result.output_text;
    const formatted = truncateTelegramHtmlSafe(
      closeOpenTelegramHtml(
        stripUnpairedMarkdownDelimiters(mdToTelegramHtml(textToFormat)),
      ),
      MAX_MESSAGE_TEXT_LENGTH,
    );
    try {
      await ctx.reply(formatted, replyOptionsWithHtml);
    } catch (e) {
      console.error("[bot][reply] HTML reply failed", (e as Error)?.message ?? e);
      try {
        await ctx.reply(toTelegramMarkdown(textToFormat), { ...replyOptions, parse_mode: "Markdown" });
      } catch {
        await ctx.reply(textToFormat, replyOptions);
      }
    }
    return;
  }

  // Non-streaming, too long: split into a reply chain (or truncate if chat id unknown).
  if (chatId !== undefined) {
    await sendLongMessage(ctx.api, chatId, result.output_text, replyOptions, replyOptionsWithHtml, {});
  } else {
    const textToFormat = result.output_text.slice(0, MAX_MESSAGE_TEXT_LENGTH);
    await ctx.reply(textToFormat, replyOptions);
  }
}
|