@adriangalilea/utils 0.12.0 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -198,7 +198,7 @@ Every `SourcedError` carries `source`, `operation`, `status`, `context`, and the
  - **XDG**: XDG Base Directory paths — reads env vars set by [xdg-dirs](https://github.com/adriangalilea/xdg-dirs), falls back to spec defaults
  - **Unseen**: Persistent dedup filter — "what's new since last time?" for cron/monitoring workflows
  - **Project Discovery**: Find project/monorepo roots, detect JS/TS projects
- - **Bot plugins (GramIO)**: `kit` (graceful shutdown + admin context), `access-control` (gate + approve/deny menu, backed by sessions), `llm-stream` (streaming LLM markdown to Telegram with graceful degradation)
+ - **Bot plugins (GramIO)**: `kit` (graceful shutdown + admin context), `access-control` (gate + approve/deny menu, backed by sessions), `llm` (OpenAI-compat SSE parser `streamChat` + Telegram streaming output `ctx.startStream` + per-thread conversation history `ctx.llm`), `coalesce`, `language`, `menu`

  ### XDG Base Directories

@@ -305,7 +305,7 @@ Then `pnpm install`. Every `ctx.send` / `ctx.sendDocument` / `ctx.reply` / etc.
  | `@adriangalilea/utils/bot/kit` | `gracefulStart(bot)` — SIGINT/SIGTERM → `bot.stop()` → exit; force-kills if shutdown hangs.<br>`adminContext({ adminId? })` — reads `TELEGRAM_ADMIN_ID` from `kev` (with optional hardcoded fallback), decorates `ctx.adminId` + `ctx.isAdmin`. |
  | `@adriangalilea/utils/bot/access-control` | Personal-bot ACL — gates non-admin/non-default users; admin gets DM with `[✅ Aprobar][❌ Denegar]` on first attempt; `/access` opens a persistent menu (revoke / reapprove / list pending). Backed by `@gramio/session` per-user + a small index. |
  | `@adriangalilea/utils/bot/coalesce` | Joins client-split inbound messages back into one. When a user pastes >4096 chars, Telegram clients fragment it into separate `message` updates with no marker. Middleware detects the burst and emits one combined event. |
- | `@adriangalilea/utils/bot/llm-stream` | `ctx.startStream()` for LLM token streams. Debounced `editMessageText`, splits at 4000 chars on paragraph/line/word boundary, parses Markdown locally so malformed mid-stream markup degrades to plain text instead of failing. |
+ | `@adriangalilea/utils/bot/llm` | The full LLM-chatbot pipeline in one module. **Input:** `streamChat(response)` parses OpenAI-compatible SSE (OpenAI, vllm, mlx-lm, llama.cpp, Together, Groq, …) into a typed `AsyncGenerator<{type: 'content' \| 'reasoning', text}>`. **Output:** `ctx.startStream()` debounces `editMessageText`, splits at 4000 chars on paragraph/line/word boundary, parses Markdown locally so malformed mid-stream markup degrades to plain text. **History:** `llmHistory({...}).plugin` decorates `ctx.llm` with `.add() / .get() / .clear() / .all()` — per-(user, thread) conversation in OpenAI `ChatMessage` shape, persisted in the shared session record so the menu's 🗑 Forget button wipes it together with everything else. |

  Standard wiring:

@@ -314,18 +314,45 @@ import { Bot } from 'gramio'
  import { redisStorage } from '@gramio/storage-redis'
  import { adminContext, gracefulStart } from '@adriangalilea/utils/bot/kit'
  import { accessControl } from '@adriangalilea/utils/bot/access-control'
- import { llmStream } from '@adriangalilea/utils/bot/llm-stream'
+ import { session } from '@gramio/session'
+ import { llmStream, llmHistory, streamChat } from '@adriangalilea/utils/bot/llm'

  const storage = redisStorage() // ONE instance, shared
+ const userSession = session({ storage, key: 'session', initial: () => ({}) })
+ const chat = llmHistory({ session: userSession, maxTurns: 20, retentionDays: 7 })

  const bot = new Bot(process.env.BOT_TOKEN!)
    .extend(adminContext({ adminId: 190202471 })) // KEV.TELEGRAM_ADMIN_ID overrides
-   .extend(accessControl({ storage, defaults: [] })) // gate; depends on adminContext
+   .extend(userSession)
+   .extend(accessControl({ session: userSession, storage, defaults: [] }))
    .extend(llmStream())
-   .command('chat', async (ctx) => {
+   .extend(chat.plugin)
+   .on('message', async (ctx) => {
+     if (!ctx.access.allowed) return
+     ctx.llm.add({ role: 'user', content: ctx.text ?? '' })
+
+     // Any OpenAI-compatible endpoint: vllm-mlx, mlx-lm, llama.cpp, Together, Groq, OpenAI, …
+     const response = await fetch(process.env.LLM_URL!, {
+       method: 'POST',
+       headers: { 'Content-Type': 'application/json' },
+       body: JSON.stringify({
+         model: process.env.LLM_MODEL,
+         messages: [{ role: 'system', content: 'You are helpful.' }, ...ctx.llm.get()],
+         stream: true,
+       }),
+     })
+
      const stream = ctx.startStream()
-     for await (const chunk of yourLLM()) await stream.append(chunk.text)
+     let assistant = ''
+     for await (const chunk of streamChat(response)) {
+       if (chunk.type === 'content') {
+         assistant += chunk.text
+         await stream.append(chunk.text)
+       }
+       // chunk.type === 'reasoning' is also yielded for thinking models
+     }
      await stream.end()
+     ctx.llm.add({ role: 'assistant', content: assistant })
    })

  await gracefulStart(bot)
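For reference, the wire format `streamChat` consumes in the wiring above: an OpenAI-compatible `stream: true` response is a newline-delimited series of `data: <json>` events terminated by `data: [DONE]`, with delta text at `choices[0].delta.content` (reasoning models add `reasoning_content` / `reasoning`, per the module docs further down). A minimal standalone sketch of that constrained parse, illustrative only and not the package's implementation:

```ts
// Hypothetical standalone sketch of the constrained SSE parse the docs
// describe — illustration only, NOT the package's shipped implementation.
async function* parseSSE(response: Response): AsyncGenerator<string> {
  if (!response.body) throw new Error('response has no body (stream: false?)')
  const reader = response.body.pipeThrough(new TextDecoderStream()).getReader()
  let buffered = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) return
    buffered += value
    const lines = buffered.split('\n')
    buffered = lines.pop() ?? '' // keep any trailing partial line for the next read
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue
      const payload = line.slice('data: '.length)
      if (payload.trim() === '[DONE]') return
      try {
        const delta = JSON.parse(payload).choices?.[0]?.delta
        if (delta?.content) yield delta.content // thinking models also carry delta.reasoning_content
      } catch {
        // malformed JSON line: skip silently, as the module docs describe
      }
    }
  }
}
```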
@@ -25,7 +25,7 @@
  * storage[String(userId)] = {
  *   access: { status, approvedAt, … },      // ← this plugin
  *   language: 'es',                         // ← bot/language
- *   history: { items: [...] },              // ← bot/message-history
+ *   llm: { shards: { 'general': [...] } },  // ← bot/llm (history)
  * }
  *
  * Plus one tiny admin-side index so `/access` can list pending /
@@ -25,7 +25,7 @@
  * storage[String(userId)] = {
  *   access: { status, approvedAt, … },      // ← this plugin
  *   language: 'es',                         // ← bot/language
- *   history: { items: [...] },              // ← bot/message-history
+ *   llm: { shards: { 'general': [...] } },  // ← bot/llm (history)
  * }
  *
  * Plus one tiny admin-side index so `/access` can list pending /
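The hunk above (repeated, presumably for the `.d.ts` and `.js` builds of access-control) sketches the shared-record design: each plugin owns one field of a single per-user session value, so one delete wipes all plugin state at once. A hypothetical rendering of that combined shape, with assumed field types, since the package exports no such type:

```ts
// Illustrative only — mirrors the record sketched in the comment above.
// The status values and unknown[] are assumptions; the package exports
// no combined UserSessionRecord type.
type UserSessionRecord = {
  access?: { status: 'pending' | 'approved' | 'denied'; approvedAt?: number } // ← bot/access-control
  language?: string                                                           // ← bot/language
  llm?: { shards: { [threadKey: string]: unknown[] } }                        // ← bot/llm (history)
}

// One record per user: deleting storage[String(userId)] — however the
// storage adapter spells that — clears every plugin's state in one shot.
```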
@@ -6,7 +6,7 @@
  *
  * import { adminContext, gracefulStart } from '@adriangalilea/utils/bot/kit'
  * import { accessControl } from '@adriangalilea/utils/bot/access-control'
- * import { llmStream } from '@adriangalilea/utils/bot/llm-stream'
+ * import { llmStream } from '@adriangalilea/utils/bot/llm'
  *
  * Or all-in-one (pulls every subpath):
  *   import { ... } from '@adriangalilea/utils/bot'
@@ -14,8 +14,7 @@
  export * from './kit.js';
  export * from './access-control.js';
  export * from './coalesce.js';
- export * from './llm-stream.js';
+ export * from './llm.js';
  export * from './menu.js';
  export * from './language.js';
- export * from './message-history.js';
  //# sourceMappingURL=index.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/bot/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AACH,cAAc,UAAU,CAAA;AACxB,cAAc,qBAAqB,CAAA;AACnC,cAAc,eAAe,CAAA;AAC7B,cAAc,iBAAiB,CAAA;AAC/B,cAAc,WAAW,CAAA;AACzB,cAAc,eAAe,CAAA;AAC7B,cAAc,sBAAsB,CAAA"}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/bot/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AACH,cAAc,UAAU,CAAA;AACxB,cAAc,qBAAqB,CAAA;AACnC,cAAc,eAAe,CAAA;AAC7B,cAAc,UAAU,CAAA;AACxB,cAAc,WAAW,CAAA;AACzB,cAAc,eAAe,CAAA"}
package/dist/bot/index.js CHANGED
@@ -6,7 +6,7 @@
  *
  * import { adminContext, gracefulStart } from '@adriangalilea/utils/bot/kit'
  * import { accessControl } from '@adriangalilea/utils/bot/access-control'
- * import { llmStream } from '@adriangalilea/utils/bot/llm-stream'
+ * import { llmStream } from '@adriangalilea/utils/bot/llm'
  *
  * Or all-in-one (pulls every subpath):
  *   import { ... } from '@adriangalilea/utils/bot'
@@ -14,8 +14,7 @@
  export * from './kit.js';
  export * from './access-control.js';
  export * from './coalesce.js';
- export * from './llm-stream.js';
+ export * from './llm.js';
  export * from './menu.js';
  export * from './language.js';
- export * from './message-history.js';
  //# sourceMappingURL=index.js.map
@@ -1 +1 @@
- {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/bot/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AACH,cAAc,UAAU,CAAA;AACxB,cAAc,qBAAqB,CAAA;AACnC,cAAc,eAAe,CAAA;AAC7B,cAAc,iBAAiB,CAAA;AAC/B,cAAc,WAAW,CAAA;AACzB,cAAc,eAAe,CAAA;AAC7B,cAAc,sBAAsB,CAAA"}
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/bot/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AACH,cAAc,UAAU,CAAA;AACxB,cAAc,qBAAqB,CAAA;AACnC,cAAc,eAAe,CAAA;AAC7B,cAAc,UAAU,CAAA;AACxB,cAAc,WAAW,CAAA;AACzB,cAAc,eAAe,CAAA"}
@@ -0,0 +1,426 @@
+ /**
+  * GramIO LLM toolkit — three primitives that together form a complete
+  * Telegram LLM-chatbot pipeline:
+  *
+  * `streamChat(response)`   — INPUT. Parses OpenAI-compatible SSE
+  *                            (OpenAI, vllm, mlx-lm, llama.cpp,
+  *                            Together, Groq, …) into a typed
+  *                            `AsyncGenerator` of `{type, text}` with
+  *                            `content` / `reasoning` separation.
+  *
+  * `ctx.startStream()`      — OUTPUT. Debounced `editMessageText` to
+  *                            Telegram, local Markdown parse via
+  *                            `@gramio/format`, 4000-char split on
+  *                            paragraph / line / word boundary.
+  *
+  * `ctx.llm.add / .get / …` — HISTORY. Per-(user, thread) conversation
+  *                            buffer in OpenAI `ChatMessage` shape.
+  *                            Persisted in the shared `@gramio/session`
+  *                            record under the `llm` field, so the
+  *                            `botMenu` 🗑 Forget button wipes it
+  *                            together with everything else (one
+  *                            record, one delete, no per-plugin
+  *                            registry).
+  *
+  * The trio composes:
+  *
+  *   fetch(...) ──streamChat──> chunks ──ctx.startStream──> Telegram
+  *       ▲                                                     │
+  *       └──── ctx.llm.get() ◀──── ctx.llm.add(assistant) ◀──┘
+  *
+  * Peer deps: `gramio`, `@gramio/session`, `@gramio/format`, `marked`.
+  *
+  * @example
+  * import { Bot } from 'gramio'
+  * import { session } from '@gramio/session'
+  * import {
+  *   llmStream, llmHistory, streamChat,
+  * } from '@adriangalilea/utils/bot/llm'
+  *
+  * const userSession = session({ storage, key: 'session', initial: () => ({}) })
+  * const chat = llmHistory({ session: userSession, maxTurns: 20, retentionDays: 7 })
+  *
+  * const bot = new Bot(process.env.BOT_TOKEN!)
+  *   .extend(userSession)
+  *   .extend(llmStream())
+  *   .extend(chat.plugin)
+  *   .on('message', async (ctx) => {
+  *     ctx.llm.add({ role: 'user', content: ctx.text ?? '' })
+  *     const messages = ctx.llm.get()
+  *
+  *     const response = await fetch(process.env.LLM_URL!, {
+  *       method: 'POST',
+  *       headers: { 'Content-Type': 'application/json' },
+  *       body: JSON.stringify({
+  *         model: process.env.LLM_MODEL,
+  *         messages: [{ role: 'system', content: 'You are helpful.' }, ...messages],
+  *         stream: true,
+  *       }),
+  *     })
+  *
+  *     const stream = ctx.startStream()
+  *     let assistant = ''
+  *     for await (const chunk of streamChat(response)) {
+  *       if (chunk.type === 'content') {
+  *         assistant += chunk.text
+  *         await stream.append(chunk.text)
+  *       }
+  *     }
+  *     await stream.end()
+  *     ctx.llm.add({ role: 'assistant', content: assistant })
+  *   })
+  *
+  * bot.start()
+  */
+ import { type DeriveDefinitions, Plugin } from 'gramio';
+ import { session } from '@gramio/session';
+ /**
+  * A single chunk yielded by `streamChat`. Two kinds:
+  *
+  * - `content`   — the visible reply text the user should see
+  * - `reasoning` — chain-of-thought / "thinking" text from reasoning
+  *                 models. Empty unless the model emits it.
+  *
+  * Most callers care about `content` only. Render `reasoning` separately
+  * (collapsed, italicized) if you want to surface thinking.
+  */
+ export type LLMChunk = {
+     type: 'content';
+     text: string;
+ } | {
+     type: 'reasoning';
+     text: string;
+ };
+ /**
+  * Parse an OpenAI-compatible chat-completions SSE response into a
+  * typed `AsyncGenerator<LLMChunk>`. Reads `response.body` once.
+  *
+  * Recognised reasoning aliases (as of 2026): `reasoning_content`
+  * (vllm, qwen3, DeepSeek-R1, gpt-oss harmony) and `reasoning` (some
+  * mlx-lm forks, gemma builds). If a model surfaces a new key, add it
+  * here — single source of truth for the field.
+  *
+  * Constrained-SSE assumption: lines are `\n`-delimited and each event
+  * is `data: <json>` or `data: [DONE]`. This matches every OpenAI-compat
+  * server in the wild but is NOT the full SSE spec (no comments, no
+  * multi-line `data:`, no `retry`/`id` fields). Swap to
+  * `eventsource-parser` if you hit a producer that needs them.
+  *
+  * Malformed JSON lines are silently skipped; the generator ends when
+  * the stream closes.
+  *
+  * @param response the `fetch` `Response` from a `stream: true` chat
+  *                 completion call. Must not be already consumed.
+  * @throws if `response.body` is null (non-streaming response).
+  *
+  * @example framework-agnostic — parser doesn't know about Telegram
+  * const res = await fetch(url, { method: 'POST', body })
+  * for await (const chunk of streamChat(res)) {
+  *   if (chunk.type === 'content') process.stdout.write(chunk.text)
+  * }
+  */
+ export declare function streamChat(response: Response): AsyncGenerator<LLMChunk>;
+ export type StreamOptions = {
+     /** Debounce window between edits, in ms. Default 800. */
+     debounceMs?: number;
+     /** Initial placeholder shown until the first chunk arrives. Default "…". */
+     placeholder?: string;
+     /** Parse buffer as markdown. Default true. Set false for plain text streaming. */
+     markdown?: boolean;
+     /** Called on edit/send errors after internal recovery (rate limits, etc.). */
+     onError?: (err: unknown) => void;
+ };
+ export declare class MarkdownStreamer {
+     private buffer;
+     private currentMessageId?;
+     private firstSendPromise?;
+     private debounceTimer?;
+     private inFlight;
+     private dirty;
+     private ended;
+     private chatId;
+     private threadId?;
+     private bot;
+     private opts;
+     constructor(ctx: {
+         chat: {
+             id: number;
+         };
+         threadId?: number;
+         bot: MarkdownStreamer['bot'];
+     }, opts: StreamOptions);
+     /** Append a chunk. Schedules a debounced edit. */
+     append(text: string): Promise<void>;
+     /** Flush any pending edit and close the stream. Idempotent. */
+     end(): Promise<void>;
+     private scheduleFlush;
+     private flushNow;
+ }
+ /**
+  * GramIO plugin. Adds `ctx.startStream(opts?)` on every message context.
+  *
+  * Defaults set here apply to every stream; per-call options in
+  * `ctx.startStream({...})` override them.
+  */
+ export declare const llmStream: (defaults?: StreamOptions) => Plugin<{}, DeriveDefinitions & {
+     message: {
+         startStream: (opts?: StreamOptions) => MarkdownStreamer;
+     };
+ }, {}>;
+ /**
+  * Multimodal content shape from OpenAI's chat-completions spec. Either
+  * a plain string or an ordered array of typed parts. Image URLs cover
+  * both http(s) and Telegram `getFile` resolved paths.
+  */
+ export type ChatContent = string | Array<{
+     type: 'text';
+     text: string;
+ } | {
+     type: 'image_url';
+     image_url: {
+         url: string;
+     };
+ }>;
+ /**
+  * One turn in the conversation. The library does NOT filter by role —
+  * if you persist `system` turns, they ride along on every `get()`.
+  * Most callers prepend their system prompt fresh each request and only
+  * persist `user` / `assistant`.
+  */
+ export type ChatMessage = {
+     role: 'system' | 'user' | 'assistant' | 'tool';
+     content: ChatContent;
+     /** Unix seconds when added — used for retention pruning. */
+     date: number;
+ };
+ /** Per-thread shards of `ChatMessage`s, persisted in the session. */
+ type ChatRecord = {
+     shards: {
+         [threadKey: string]: ChatMessage[];
+     };
+ };
+ /** Loose session shape — this plugin only touches the `llm` field. */
+ type LLMSessionLike = {
+     llm?: ChatRecord;
+ };
+ /** @internal — kept unexported so it doesn't clash with peers' refs. */
+ type LLMSessionPluginRef = ReturnType<typeof session<LLMSessionLike, 'session'>>;
+ export type LLMHistoryOptions = {
+     /**
+      * Shared session plugin. This plugin extends it for type flow;
+      * gramio's runtime dedup ensures the session derive runs once.
+      */
+     session: LLMSessionPluginRef;
+     /** Ring buffer cap **per thread**. Oldest entries dropped past this. */
+     maxTurns: number;
+     /** Entries older than this (in days) are dropped on read. */
+     retentionDays: number;
+ };
+ export type LLMHistoryFeature = {
+     plugin: ReturnType<typeof buildHistoryPlugin>;
+ };
+ /**
+  * Methods decorated onto `ctx.llm`. All synchronous — reads/writes the
+  * session record via `@gramio/session`'s Proxy, which auto-persists.
+  *
+  * Thread isolation is automatic: every method operates on the shard
+  * for `ctx.threadId` (or `'general'` when no thread). Different threads
+  * = different conversations, no leakage.
+  */
+ export type LLMHistoryApi = {
+     /** Append one message to the CURRENT thread's shard. */
+     add: (message: Omit<ChatMessage, 'date'> & {
+         date?: number;
+     }) => void;
+     /** Pruned snapshot of the CURRENT thread, oldest-first. */
+     get: () => ReadonlyArray<ChatMessage>;
+     /** Wipe the CURRENT thread's shard. */
+     clear: () => void;
+     /**
+      * Full sharded map, pruned. Use for /export or admin views. Keys are
+      * thread ids (or `'general'`) → ordered messages.
+      */
+     all: () => Readonly<{
+         [threadKey: string]: ReadonlyArray<ChatMessage>;
+     }>;
+     /** Wipe ALL threads for this user. */
+     clearAll: () => void;
+ };
+ type LLMHistoryDerives = {
+     llm: LLMHistoryApi;
+ };
+ /**
+  * Per-(user, thread) LLM conversation history. Opt-in. Persists in the
+  * shared `@gramio/session` record under `llm`, so 🗑 Forget from
+  * `botMenu` wipes it together with everything else — one record, one
+  * delete, no per-plugin registry.
+  *
+  * @example
+  * const chat = llmHistory({ session: userSession, maxTurns: 20, retentionDays: 7 })
+  * bot.extend(chat.plugin)
+  *    .on('message', (ctx) => {
+  *      ctx.llm.add({ role: 'user', content: ctx.text ?? '' })
+  *      const messages = ctx.llm.get() // ChatMessage[] for current thread
+  *      // ... call LLM with messages, then:
+  *      ctx.llm.add({ role: 'assistant', content: reply })
+  *    })
+  */
+ export declare const llmHistory: (opts: LLMHistoryOptions) => LLMHistoryFeature;
+ declare const buildHistoryPlugin: (args: {
+     sessionPlugin: LLMSessionPluginRef;
+     maxTurns: number;
+     retentionDays: number;
+ }) => Plugin<{}, DeriveDefinitions & {
+     global: LLMHistoryDerives;
+ } & {
+     message: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     channel_post: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     inline_query: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     chosen_inline_result: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     callback_query: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     shipping_query: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     pre_checkout_query: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     poll_answer: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     chat_join_request: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     new_chat_members: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     new_chat_title: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     new_chat_photo: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     delete_chat_photo: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     group_chat_created: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     message_auto_delete_timer_changed: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     migrate_to_chat_id: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     migrate_from_chat_id: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     pinned_message: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     invoice: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     successful_payment: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     chat_shared: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     proximity_alert_triggered: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     video_chat_scheduled: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     video_chat_started: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     video_chat_ended: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     video_chat_participants_invited: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     web_app_data: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     location: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+     passport_data: {
+         session: LLMSessionLike & {
+             $clear: () => Promise<void>;
+         };
+     };
+ } & {
+     message: LLMHistoryDerives;
+     callback_query: LLMHistoryDerives;
+ }, {}>;
+ export {};
+ //# sourceMappingURL=llm.d.ts.map
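The `MarkdownStreamer` declarations above document a split at 4000 chars on paragraph/line/word boundary. A hedged sketch of what such a boundary search can look like, with a hypothetical `splitAtBoundary` helper that is not the package's actual code:

```ts
// Hypothetical sketch of a paragraph/line/word boundary split. The
// package documents this behavior; this is NOT its implementation.
const LIMIT = 4000 // under Telegram's 4096-char message cap, leaving headroom

function splitAtBoundary(buffer: string): [head: string, rest: string] {
  if (buffer.length <= LIMIT) return [buffer, '']
  const window = buffer.slice(0, LIMIT)
  // Prefer the rightmost paragraph break, then line break, then space.
  for (const sep of ['\n\n', '\n', ' ']) {
    const cut = window.lastIndexOf(sep)
    if (cut > 0) return [buffer.slice(0, cut), buffer.slice(cut + sep.length)]
  }
  return [window, buffer.slice(LIMIT)] // no boundary at all: hard cut
}
```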
@@ -0,0 +1 @@
+ {"version":3,"file":"llm.d.ts","sourceRoot":"","sources":["../../src/bot/llm.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAyEG;AACH,OAAO,EAAE,KAAK,iBAAiB,EAAE,MAAM,EAAE,MAAM,QAAQ,CAAA;AACvD,OAAO,EAAE,OAAO,EAAE,MAAM,iBAAiB,CAAA;AAKzC;;;;;;;;;GASG;AACH,MAAM,MAAM,QAAQ,GAChB;IAAE,IAAI,EAAE,SAAS,CAAC;IAAC,IAAI,EAAE,MAAM,CAAA;CAAE,GACjC;IAAE,IAAI,EAAE,WAAW,CAAC;IAAC,IAAI,EAAE,MAAM,CAAA;CAAE,CAAA;AAWvC;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2BG;AACH,wBAAuB,UAAU,CAC/B,QAAQ,EAAE,QAAQ,GACjB,cAAc,CAAC,QAAQ,CAAC,CAkC1B;AAOD,MAAM,MAAM,aAAa,GAAG;IAC1B,yDAAyD;IACzD,UAAU,CAAC,EAAE,MAAM,CAAA;IACnB,4EAA4E;IAC5E,WAAW,CAAC,EAAE,MAAM,CAAA;IACpB,kFAAkF;IAClF,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,8EAA8E;IAC9E,OAAO,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,KAAK,IAAI,CAAA;CACjC,CAAA;AAED,qBAAa,gBAAgB;IAC3B,OAAO,CAAC,MAAM,CAAK;IACnB,OAAO,CAAC,gBAAgB,CAAC,CAAQ;IACjC,OAAO,CAAC,gBAAgB,CAAC,CAAe;IACxC,OAAO,CAAC,aAAa,CAAC,CAA+B;IACrD,OAAO,CAAC,QAAQ,CAAQ;IACxB,OAAO,CAAC,KAAK,CAAQ;IACrB,OAAO,CAAC,KAAK,CAAQ;IAErB,OAAO,CAAC,MAAM,CAAQ;IACtB,OAAO,CAAC,QAAQ,CAAC,CAAQ;IAIzB,OAAO,CAAC,GAAG,CAaV;IACD,OAAO,CAAC,IAAI,CAAyB;gBAGnC,GAAG,EAAE;QACH,IAAI,EAAE;YAAE,EAAE,EAAE,MAAM,CAAA;SAAE,CAAA;QACpB,QAAQ,CAAC,EAAE,MAAM,CAAA;QACjB,GAAG,EAAE,gBAAgB,CAAC,KAAK,CAAC,CAAA;KAC7B,EACD,IAAI,EAAE,aAAa;IAgBrB,kDAAkD;IAC5C,MAAM,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IA+CzC,+DAA+D;IACzD,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;IAW1B,OAAO,CAAC,aAAa;YAaP,QAAQ;CAmCvB;AAED;;;;;GAKG;AACH,eAAO,MAAM,SAAS,GAAI,WAAU,aAAkB;;6BAI9B,aAAa;;MAEhC,CAAA;AAgBL;;;;GAIG;AACH,MAAM,MAAM,WAAW,GACnB,MAAM,GACN,KAAK,CACD;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,MAAM,CAAA;CAAE,GAC9B;IAAE,IAAI,EAAE,WAAW,CAAC;IAAC,SAAS,EAAE;QAAE,GAAG,EAAE,MAAM,CAAA;KAAE,CAAA;CAAE,CACpD,CAAA;AAEL;;;;;GAKG;AACH,MAAM,MAAM,WAAW,GAAG;IACxB,IAAI,EAAE,QAAQ,GAAG,MAAM,GAAG,WAAW,GAAG,MAAM,CAAA;IAC9C,OAAO,EAAE,WAAW,CAAA;IACpB,4DAA4D;IAC5D,IAAI,EAAE,MAAM,CAAA;CACb,CAAA;AAED,qEAAqE;AACrE,KAAK,UAAU,GAAG;IAChB,MAAM,EAAE;QAAE,CAAC,SAAS,EAAE,MAAM,GAAG,WAAW,EAAE,CAAA;KAAE,CAAA;CAC/C,CAAA;AAED,sEAAsE;AACtE,KAAK,cAAc,GAAG;IAAE,GAAG,CAAC,EAAE,UAAU,CAAA;CAAE,CAAA;AAE1C,wEAAwE;AACxE,KAAK,mBAAmB,GAAG,UAAU,CAAC,OAAO,OAAO,CAAC,cAAc,EAAE,SAAS,CAAC,CAAC,CAAA;AAEhF,MAAM,MAAM,iBAAiB,GAAG;IAC9B;;;OAGG;IACH,OAAO,EAAE,mBAAmB,CAAA;IAC5B,wEAAwE;IACxE,QAAQ,EAAE,MAAM,CAAA;IAChB,6DAA6D;IAC7D,aAAa,EAAE,MAAM,CAAA;CACtB,CAAA;AAED,MAAM,MAAM,iBAAiB,GAAG;IAC9B,MAAM,EAAE,UAAU,CAAC,OAAO,kBAAkB,CAAC,CAAA;CAC9C,CAAA;AAED;;;;;;;GAOG;AACH,MAAM,MAAM,aAAa,GAAG;IAC1B,wDAAwD;IACxD,GAAG,EAAE,CAAC,OAAO,EAAE,IAAI,CAAC,WAAW,EAAE,MAAM,CAAC,GAAG;QAAE,IAAI,CAAC,EAAE,MAAM,CAAA;KAAE,KAAK,IAAI,CAAA;IACrE,2DAA2D;IAC3D,GAAG,EAAE,MAAM,aAAa,CAAC,WAAW,CAAC,CAAA;IACrC,uCAAuC;IACvC,KAAK,EAAE,MAAM,IAAI,CAAA;IACjB;;;OAGG;IACH,GAAG,EAAE,MAAM,QAAQ,CAAC;QAAE,CAAC,SAAS,EAAE,MAAM,GAAG,aAAa,CAAC,WAAW,CAAC,CAAA;KAAE,CAAC,CAAA;IACxE,sCAAsC;IACtC,QAAQ,EAAE,MAAM,IAAI,CAAA;CACrB,CAAA;AAED,KAAK,iBAAiB,GAAG;IAAE,GAAG,EAAE,aAAa,CAAA;CAAE,CAAA;AAoC/C;;;;;;;;;;;;;;;GAeG;AACH,eAAO,MAAM,UAAU,GAAI,MAAM,iBAAiB,KAAG,iBAWpD,CAAA;AAED,QAAA,MAAM,kBAAkB,GAAI,MAAM;IAChC,aAAa,EAAE,mBAAmB,CAAA;IAClC,QAAQ,EAAE,MAAM,CAAA;IAChB,aAAa,EAAE,MAAM,CAAA;CACtB;YAGqD,iBAAiB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;MA8CtE,CAAA"}