chat 4.20.2 → 4.22.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +479 -332
- package/dist/index.js +389 -69
- package/dist/index.js.map +1 -1
- package/dist/{jsx-runtime-C2ATKxHQ.d.ts → jsx-runtime-DraWieqP.d.ts} +1 -1
- package/dist/jsx-runtime.d.ts +1 -1
- package/docs/api/chat.mdx +2 -0
- package/docs/api/index.mdx +1 -1
- package/docs/api/message.mdx +1 -1
- package/docs/api/postable-message.mdx +1 -1
- package/docs/api/to-ai-messages.mdx +190 -0
- package/docs/concurrency.mdx +223 -0
- package/docs/contributing/building.mdx +15 -1
- package/docs/guides/code-review-hono.mdx +4 -5
- package/docs/handling-events.mdx +2 -0
- package/docs/meta.json +1 -0
- package/docs/posting-messages.mdx +2 -0
- package/docs/streaming.mdx +3 -50
- package/package.json +1 -1
|
@@ -729,4 +729,4 @@ declare namespace JSX {
|
|
|
729
729
|
}
|
|
730
730
|
}
|
|
731
731
|
|
|
732
|
-
export { type DividerProps as $, type ActionsComponent as A, type ButtonComponent as B, type
|
|
732
|
+
export { type DividerProps as $, type ActionsComponent as A, type ButtonComponent as B, type ChatElement as C, type DividerComponent as D, type ImageElement as E, type FieldComponent as F, type LinkButtonElement as G, type LinkButtonOptions as H, type ImageComponent as I, type LinkElement as J, type SectionElement as K, type LinkButtonComponent as L, type ModalElement as M, type TableAlignment as N, type TableElement as O, type TableOptions as P, type TextElement as Q, type RadioSelectComponent as R, type SectionComponent as S, type TextComponent as T, type TextStyle as U, type ButtonProps as V, type CardJSXElement as W, type CardJSXProps as X, type CardLinkProps as Y, type CardProps as Z, type ContainerProps as _, type CardElement as a, type FieldProps as a0, type ImageProps as a1, type LinkButtonProps as a2, type ModalProps as a3, type SelectOptionProps as a4, type SelectProps as a5, type TextInputProps as a6, type TextProps as a7, type ModalChild as a8, type ModalOptions as a9, type RadioSelectElement as aa, type RadioSelectOptions as ab, type SelectElement as ac, type SelectOptionElement as ad, type SelectOptions as ae, type TextInputElement as af, type TextInputOptions as ag, type TableProps as ah, type TableComponent as ai, isCardLinkProps as aj, jsx as ak, jsxs as al, jsxDEV as am, Fragment as an, JSX as ao, type CardChild as b, type CardComponent as c, cardChildToFallbackText as d, type CardLinkComponent as e, type FieldsComponent as f, fromReactElement as g, isJSX as h, isCardElement as i, Table as j, toModalElement as k, fromReactModalElement as l, isModalElement as m, type ModalComponent as n, type SelectComponent as o, type SelectOptionComponent as p, type TextInputComponent as q, type ActionsElement as r, type ButtonElement as s, toCardElement as t, type ButtonOptions as u, type ButtonStyle as v, type CardOptions as w, type DividerElement as x, type FieldElement as y, type FieldsElement as z };
|
package/dist/jsx-runtime.d.ts
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
export { A as ActionsComponent, B as ButtonComponent, V as ButtonProps, c as CardComponent, W as CardJSXElement, X as CardJSXProps, e as CardLinkComponent, Y as CardLinkProps, Z as CardProps,
|
|
1
|
+
export { A as ActionsComponent, B as ButtonComponent, V as ButtonProps, c as CardComponent, W as CardJSXElement, X as CardJSXProps, e as CardLinkComponent, Y as CardLinkProps, Z as CardProps, C as ChatElement, _ as ContainerProps, D as DividerComponent, $ as DividerProps, F as FieldComponent, a0 as FieldProps, f as FieldsComponent, an as Fragment, I as ImageComponent, a1 as ImageProps, ao as JSX, L as LinkButtonComponent, a2 as LinkButtonProps, n as ModalComponent, a3 as ModalProps, R as RadioSelectComponent, S as SectionComponent, o as SelectComponent, p as SelectOptionComponent, a4 as SelectOptionProps, a5 as SelectProps, ai as TableComponent, ah as TableProps, T as TextComponent, q as TextInputComponent, a6 as TextInputProps, a7 as TextProps, aj as isCardLinkProps, h as isJSX, ak as jsx, am as jsxDEV, al as jsxs, t as toCardElement, k as toModalElement } from './jsx-runtime-DraWieqP.js';
|
package/docs/api/chat.mdx
CHANGED
|
@@ -465,6 +465,8 @@ await bot.initialize();
|
|
|
465
465
|
await bot.shutdown();
|
|
466
466
|
```
|
|
467
467
|
|
|
468
|
+
During shutdown, the SDK calls the optional `disconnect()` method on each adapter before disconnecting the state adapter. This lets adapters clean up platform connections, close WebSockets, or tear down subscriptions. If any adapter's `disconnect()` fails, the remaining adapters and state adapter still disconnect gracefully.
|
|
469
|
+
|
|
468
470
|
### reviver
|
|
469
471
|
|
|
470
472
|
Get a `JSON.parse` reviver that deserializes `Thread` and `Message` objects from workflow payloads.
|
package/docs/api/index.mdx
CHANGED
|
@@ -24,7 +24,7 @@ import { Chat, root, paragraph, text, Card, Button, emoji } from "chat";
|
|
|
24
24
|
|
|
25
25
|
| Export | Description |
|
|
26
26
|
|--------|-------------|
|
|
27
|
-
| [`toAiMessages`](/docs/
|
|
27
|
+
| [`toAiMessages`](/docs/api/to-ai-messages) | Convert `Message[]` to AI SDK `{ role, content }[]` format |
|
|
28
28
|
|
|
29
29
|
## Message formats
|
|
30
30
|
|
package/docs/api/message.mdx
CHANGED
|
@@ -186,7 +186,7 @@ Links found in incoming messages are extracted and exposed as `LinkPreview` obje
|
|
|
186
186
|
/>
|
|
187
187
|
|
|
188
188
|
<Callout type="info">
|
|
189
|
-
When using [`toAiMessages()`](/docs/
|
|
189
|
+
When using [`toAiMessages()`](/docs/api/to-ai-messages), link metadata is automatically appended to the message content. Embedded message links are labeled as `[Embedded message: ...]` so the AI model understands the context.
|
|
190
190
|
</Callout>
|
|
191
191
|
|
|
192
192
|
### Platform support
|
|
@@ -150,7 +150,7 @@ await thread.post(result.fullStream);
|
|
|
150
150
|
await thread.post(result.textStream);
|
|
151
151
|
```
|
|
152
152
|
|
|
153
|
-
When using `fullStream`, the SDK auto-detects `text-delta` and `step
|
|
153
|
+
When using `fullStream`, the SDK auto-detects `text-delta` and `finish-step` events, extracting text and inserting paragraph breaks between agent steps.
|
|
154
154
|
|
|
155
155
|
## FileUpload
|
|
156
156
|
|
|
@@ -0,0 +1,190 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: toAiMessages
|
|
3
|
+
description: Convert Chat SDK messages to AI SDK conversation format.
|
|
4
|
+
type: reference
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
Convert an array of `Message` objects into the `{ role, content }[]` format expected by AI SDKs. The output is structurally compatible with AI SDK's `ModelMessage[]`.
|
|
8
|
+
|
|
9
|
+
```typescript
|
|
10
|
+
import { toAiMessages } from "chat";
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Usage
|
|
14
|
+
|
|
15
|
+
```typescript title="lib/bot.ts" lineNumbers
|
|
16
|
+
import { toAiMessages } from "chat";
|
|
17
|
+
|
|
18
|
+
bot.onSubscribedMessage(async (thread, message) => {
|
|
19
|
+
const result = await thread.adapter.fetchMessages(thread.id, { limit: 20 });
|
|
20
|
+
const history = await toAiMessages(result.messages);
|
|
21
|
+
const response = await agent.stream({ prompt: history });
|
|
22
|
+
await thread.post(response.fullStream);
|
|
23
|
+
});
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
## Signature
|
|
27
|
+
|
|
28
|
+
```typescript
|
|
29
|
+
function toAiMessages(
|
|
30
|
+
messages: Message[],
|
|
31
|
+
options?: ToAiMessagesOptions
|
|
32
|
+
): Promise<AiMessage[]>
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
### Parameters
|
|
36
|
+
|
|
37
|
+
<TypeTable
|
|
38
|
+
type={{
|
|
39
|
+
messages: {
|
|
40
|
+
description: 'Array of Chat SDK Message objects. Works with FetchResult.messages, thread.recentMessages, or any collected iterable.',
|
|
41
|
+
type: 'Message[]',
|
|
42
|
+
},
|
|
43
|
+
options: {
|
|
44
|
+
description: 'Optional configuration.',
|
|
45
|
+
type: 'ToAiMessagesOptions',
|
|
46
|
+
default: '{}',
|
|
47
|
+
},
|
|
48
|
+
}}
|
|
49
|
+
/>
|
|
50
|
+
|
|
51
|
+
### Options
|
|
52
|
+
|
|
53
|
+
<TypeTable
|
|
54
|
+
type={{
|
|
55
|
+
includeNames: {
|
|
56
|
+
description: 'Prefix user messages with [username]: for multi-user context.',
|
|
57
|
+
type: 'boolean',
|
|
58
|
+
default: 'false',
|
|
59
|
+
},
|
|
60
|
+
transformMessage: {
|
|
61
|
+
description: 'Transform or filter each message after default processing. Return null to skip the message.',
|
|
62
|
+
type: '(aiMessage: AiMessage, source: Message) => AiMessage | null | Promise<AiMessage | null>',
|
|
63
|
+
},
|
|
64
|
+
onUnsupportedAttachment: {
|
|
65
|
+
description: 'Called when an attachment type is not supported (video, audio).',
|
|
66
|
+
type: '(attachment: Attachment, message: Message) => void',
|
|
67
|
+
default: 'console.warn',
|
|
68
|
+
},
|
|
69
|
+
}}
|
|
70
|
+
/>
|
|
71
|
+
|
|
72
|
+
### Returns
|
|
73
|
+
|
|
74
|
+
`Promise<AiMessage[]>` — an array of messages with `role` and `content` fields, directly assignable to AI SDK's `ModelMessage[]`.
|
|
75
|
+
|
|
76
|
+
## Behavior
|
|
77
|
+
|
|
78
|
+
- **Role mapping** — `author.isMe === true` maps to `"assistant"`, all others to `"user"`
|
|
79
|
+
- **Filtering** — Messages with empty or whitespace-only text are removed
|
|
80
|
+
- **Sorting** — Messages are sorted chronologically (oldest first) by `metadata.dateSent`
|
|
81
|
+
- **Links** — Link metadata (URL, title, description, site name) is appended to message content. Embedded message links are labeled as `[Embedded message: ...]`
|
|
82
|
+
- **Attachments** — Images and text files (JSON, XML, YAML, etc.) are included as multipart content using `fetchData()`. Video and audio attachments trigger `onUnsupportedAttachment`
|
|
83
|
+
|
|
84
|
+
## Return types
|
|
85
|
+
|
|
86
|
+
```typescript
|
|
87
|
+
type AiMessage = AiUserMessage | AiAssistantMessage;
|
|
88
|
+
|
|
89
|
+
interface AiUserMessage {
|
|
90
|
+
role: "user";
|
|
91
|
+
content: string | AiMessagePart[];
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
interface AiAssistantMessage {
|
|
95
|
+
role: "assistant";
|
|
96
|
+
content: string;
|
|
97
|
+
}
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
User messages have multipart `content` when attachments are present:
|
|
101
|
+
|
|
102
|
+
```typescript
|
|
103
|
+
type AiMessagePart = AiTextPart | AiImagePart | AiFilePart;
|
|
104
|
+
|
|
105
|
+
interface AiTextPart {
|
|
106
|
+
type: "text";
|
|
107
|
+
text: string;
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
interface AiImagePart {
|
|
111
|
+
type: "image";
|
|
112
|
+
image: DataContent | URL;
|
|
113
|
+
mediaType?: string;
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
interface AiFilePart {
|
|
117
|
+
type: "file";
|
|
118
|
+
data: DataContent | URL;
|
|
119
|
+
filename?: string;
|
|
120
|
+
mediaType: string;
|
|
121
|
+
}
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
## Examples
|
|
125
|
+
|
|
126
|
+
### Multi-user context
|
|
127
|
+
|
|
128
|
+
Prefix each user message with their username so the AI model can distinguish speakers:
|
|
129
|
+
|
|
130
|
+
```typescript
|
|
131
|
+
const history = await toAiMessages(result.messages, { includeNames: true });
|
|
132
|
+
// [{ role: "user", content: "[alice]: Hello" },
|
|
133
|
+
// { role: "assistant", content: "Hi there!" },
|
|
134
|
+
// { role: "user", content: "[bob]: Thanks" }]
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
### Transforming messages
|
|
138
|
+
|
|
139
|
+
Replace raw user IDs with readable names:
|
|
140
|
+
|
|
141
|
+
```typescript
|
|
142
|
+
const history = await toAiMessages(result.messages, {
|
|
143
|
+
transformMessage: (aiMessage) => {
|
|
144
|
+
if (typeof aiMessage.content === "string") {
|
|
145
|
+
return {
|
|
146
|
+
...aiMessage,
|
|
147
|
+
content: aiMessage.content.replace(/<@U123>/g, "@VercelBot"),
|
|
148
|
+
};
|
|
149
|
+
}
|
|
150
|
+
return aiMessage;
|
|
151
|
+
},
|
|
152
|
+
});
|
|
153
|
+
```
|
|
154
|
+
|
|
155
|
+
### Filtering messages
|
|
156
|
+
|
|
157
|
+
Skip messages from a specific user:
|
|
158
|
+
|
|
159
|
+
```typescript
|
|
160
|
+
const history = await toAiMessages(result.messages, {
|
|
161
|
+
transformMessage: (aiMessage, source) => {
|
|
162
|
+
if (source.author.userId === "U_NOISY_BOT") return null;
|
|
163
|
+
return aiMessage;
|
|
164
|
+
},
|
|
165
|
+
});
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
### Handling unsupported attachments
|
|
169
|
+
|
|
170
|
+
```typescript
|
|
171
|
+
const history = await toAiMessages(result.messages, {
|
|
172
|
+
onUnsupportedAttachment: (attachment, message) => {
|
|
173
|
+
logger.warn(`Skipped ${attachment.type} attachment in message ${message.id}`);
|
|
174
|
+
},
|
|
175
|
+
});
|
|
176
|
+
```
|
|
177
|
+
|
|
178
|
+
## Supported attachment types
|
|
179
|
+
|
|
180
|
+
| Type | MIME types | Included as |
|
|
181
|
+
|------|-----------|-------------|
|
|
182
|
+
| `image` | Any image MIME type | `ImagePart` with base64 data |
|
|
183
|
+
| `file` | `text/*`, `application/json`, `application/xml`, `application/javascript`, `application/typescript`, `application/yaml`, `application/toml` | `FilePart` with base64 data |
|
|
184
|
+
| `video` | Any | Skipped (triggers `onUnsupportedAttachment`) |
|
|
185
|
+
| `audio` | Any | Skipped (triggers `onUnsupportedAttachment`) |
|
|
186
|
+
| `file` | Other (e.g. `application/pdf`) | Silently skipped |
|
|
187
|
+
|
|
188
|
+
<Callout type="info">
|
|
189
|
+
Attachments require `fetchData()` to be available on the attachment object. Attachments without `fetchData()` are silently skipped.
|
|
190
|
+
</Callout>
|
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: Concurrency
|
|
3
|
+
description: Control how overlapping messages on the same thread are handled — queue, debounce, drop, or process concurrently.
|
|
4
|
+
type: guide
|
|
5
|
+
prerequisites:
|
|
6
|
+
- /docs/handling-events
|
|
7
|
+
related:
|
|
8
|
+
- /docs/state
|
|
9
|
+
- /docs/streaming
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
When multiple messages arrive on the same thread while a handler is still processing, the SDK needs a strategy. By default, the incoming message is dropped. The `concurrency` option on `ChatConfig` lets you choose what happens instead.
|
|
13
|
+
|
|
14
|
+
## Strategies
|
|
15
|
+
|
|
16
|
+
### Drop (default)
|
|
17
|
+
|
|
18
|
+
The original behavior. If a handler is already running on a thread, the new message is discarded and a `LockError` is thrown. No queuing, no retries.
|
|
19
|
+
|
|
20
|
+
```typescript title="lib/bot.ts"
|
|
21
|
+
const bot = new Chat({
|
|
22
|
+
concurrency: "drop",
|
|
23
|
+
// ...
|
|
24
|
+
});
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
### Queue
|
|
28
|
+
|
|
29
|
+
Messages that arrive while a handler is running are enqueued. When the current handler finishes, only the **latest** queued message is dispatched. All intermediate messages are provided as `context.skipped`, giving your handler full visibility into what happened while it was busy.
|
|
30
|
+
|
|
31
|
+
```typescript title="lib/bot.ts" lineNumbers
|
|
32
|
+
const bot = new Chat({
|
|
33
|
+
concurrency: "queue",
|
|
34
|
+
// ...
|
|
35
|
+
});
|
|
36
|
+
|
|
37
|
+
bot.onNewMention(async (thread, message, context) => {
|
|
38
|
+
if (context && context.skipped.length > 0) {
|
|
39
|
+
await thread.post(
|
|
40
|
+
`You sent ${context.totalSinceLastHandler} messages while I was thinking. Responding to your latest.`
|
|
41
|
+
);
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
const response = await generateAIResponse(message.text);
|
|
45
|
+
await thread.post(response);
|
|
46
|
+
});
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
**Flow:**
|
|
50
|
+
|
|
51
|
+
```
|
|
52
|
+
A arrives → acquire lock → process A
|
|
53
|
+
B arrives → lock busy → enqueue B
|
|
54
|
+
C arrives → lock busy → enqueue C
|
|
55
|
+
D arrives → lock busy → enqueue D
|
|
56
|
+
A done → drain: [B, C, D] → handler(D, { skipped: [B, C] })
|
|
57
|
+
D done → queue empty → release lock
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
### Debounce
|
|
61
|
+
|
|
62
|
+
Every message starts or resets a debounce timer. Only the **final message in a burst** is processed.
|
|
63
|
+
|
|
64
|
+
This is particularly useful for platforms like **WhatsApp** and **Telegram** where users tend to send a flurry of short messages in quick succession instead of composing a single message — "hey", "quick question", "how do I reset my password?" arriving as three separate webhooks within a few seconds. Without debounce, the bot would respond to "hey" before the actual question even arrives. With debounce, the SDK waits for a pause in the conversation and processes only the final message.
|
|
65
|
+
|
|
66
|
+
```typescript title="lib/bot.ts" lineNumbers
|
|
67
|
+
const bot = new Chat({
|
|
68
|
+
concurrency: { strategy: "debounce", debounceMs: 1500 },
|
|
69
|
+
// ...
|
|
70
|
+
});
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
<Callout type="info">
|
|
74
|
+
WhatsApp and Telegram adapters default to `lockScope: "channel"`, so debounce applies to the entire conversation — not just a single thread.
|
|
75
|
+
</Callout>
|
|
76
|
+
|
|
77
|
+
**Flow:**
|
|
78
|
+
|
|
79
|
+
```
|
|
80
|
+
A arrives → acquire lock → store A as pending → sleep(debounceMs)
|
|
81
|
+
B arrives → lock busy → overwrite pending with B (A dropped)
|
|
82
|
+
C arrives → lock busy → overwrite pending with C (B dropped)
|
|
83
|
+
... debounceMs elapses with no new message ...
|
|
84
|
+
→ process C → release lock
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
Debounce also works well for rapid corrections ("wait, I meant...") and multi-part messages on any platform.
|
|
88
|
+
|
|
89
|
+
### Concurrent
|
|
90
|
+
|
|
91
|
+
No locking at all. Every message is processed immediately in its own handler invocation. Use this for stateless handlers where thread ordering doesn't matter.
|
|
92
|
+
|
|
93
|
+
```typescript title="lib/bot.ts"
|
|
94
|
+
const bot = new Chat({
|
|
95
|
+
concurrency: "concurrent",
|
|
96
|
+
// ...
|
|
97
|
+
});
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
## Configuration
|
|
101
|
+
|
|
102
|
+
For fine-grained control, pass a `ConcurrencyConfig` object instead of a strategy string:
|
|
103
|
+
|
|
104
|
+
```typescript title="lib/bot.ts" lineNumbers
|
|
105
|
+
const bot = new Chat({
|
|
106
|
+
concurrency: {
|
|
107
|
+
strategy: "queue",
|
|
108
|
+
maxQueueSize: 20, // Max queued messages per thread (default: 10)
|
|
109
|
+
onQueueFull: "drop-oldest", // or "drop-newest" (default: "drop-oldest")
|
|
110
|
+
queueEntryTtlMs: 60_000, // Discard stale entries after 60s (default: 90s)
|
|
111
|
+
},
|
|
112
|
+
// ...
|
|
113
|
+
});
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
### All options
|
|
117
|
+
|
|
118
|
+
| Option | Strategies | Default | Description |
|
|
119
|
+
|--------|-----------|---------|-------------|
|
|
120
|
+
| `strategy` | all | `"drop"` | The concurrency strategy to use |
|
|
121
|
+
| `maxQueueSize` | queue, debounce | `10` | Maximum queued messages per thread |
|
|
122
|
+
| `onQueueFull` | queue, debounce | `"drop-oldest"` | Whether to evict the oldest or reject the newest message when the queue is full |
|
|
123
|
+
| `queueEntryTtlMs` | queue, debounce | `90000` | TTL for queued entries in milliseconds. Expired entries are discarded on dequeue |
|
|
124
|
+
| `debounceMs` | debounce | `1500` | Debounce window in milliseconds |
|
|
125
|
+
| `maxConcurrent` | concurrent | `Infinity` | Max concurrent handlers per thread |
|
|
126
|
+
|
|
127
|
+
## MessageContext
|
|
128
|
+
|
|
129
|
+
All handler types (`onNewMention`, `onSubscribedMessage`, `onNewMessage`) accept an optional `MessageContext` as their last parameter. It is only populated when using the `queue` strategy and messages were skipped.
|
|
130
|
+
|
|
131
|
+
```typescript
|
|
132
|
+
interface MessageContext {
|
|
133
|
+
/** Messages that arrived while the previous handler was running, in chronological order. */
|
|
134
|
+
skipped: Message[];
|
|
135
|
+
/** Total messages received since last handler ran (skipped.length + 1). */
|
|
136
|
+
totalSinceLastHandler: number;
|
|
137
|
+
}
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
Existing handlers that don't use `context` are unaffected — the parameter is optional.
|
|
141
|
+
|
|
142
|
+
### Example: Pass all messages to an LLM
|
|
143
|
+
|
|
144
|
+
```typescript title="lib/bot.ts" lineNumbers
|
|
145
|
+
bot.onSubscribedMessage(async (thread, message, context) => {
|
|
146
|
+
// Combine skipped messages with the current one for full context
|
|
147
|
+
const allMessages = [...(context?.skipped ?? []), message];
|
|
148
|
+
|
|
149
|
+
const response = await generateAIResponse(
|
|
150
|
+
allMessages.map((m) => m.text).join("\n\n")
|
|
151
|
+
);
|
|
152
|
+
await thread.post(response);
|
|
153
|
+
});
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
## Lock scope
|
|
157
|
+
|
|
158
|
+
By default, locks are scoped to the thread — messages in different threads are processed independently. For platforms like WhatsApp and Telegram where conversations happen at the channel level rather than in threads, the lock scope defaults to `"channel"`.
|
|
159
|
+
|
|
160
|
+
You can override this globally:
|
|
161
|
+
|
|
162
|
+
```typescript title="lib/bot.ts" lineNumbers
|
|
163
|
+
const bot = new Chat({
|
|
164
|
+
concurrency: "queue",
|
|
165
|
+
lockScope: "channel", // or "thread" (default)
|
|
166
|
+
// ...
|
|
167
|
+
});
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
Or resolve it dynamically per message:
|
|
171
|
+
|
|
172
|
+
```typescript title="lib/bot.ts" lineNumbers
|
|
173
|
+
const bot = new Chat({
|
|
174
|
+
concurrency: "queue",
|
|
175
|
+
lockScope: ({ isDM, adapter }) => {
|
|
176
|
+
// Use channel scope for DMs, thread scope for group channels
|
|
177
|
+
return isDM ? "channel" : "thread";
|
|
178
|
+
},
|
|
179
|
+
// ...
|
|
180
|
+
});
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
## State adapter requirements
|
|
184
|
+
|
|
185
|
+
The `queue` and `debounce` strategies require three additional methods on your state adapter:
|
|
186
|
+
|
|
187
|
+
| Method | Description |
|
|
188
|
+
|--------|-------------|
|
|
189
|
+
| `enqueue(threadId, entry, maxSize)` | Atomically append a message to the thread's queue. Returns new depth. |
|
|
190
|
+
| `dequeue(threadId)` | Pop the next (oldest) message from the queue. Returns `null` if empty. |
|
|
191
|
+
| `queueDepth(threadId)` | Return the current number of queued messages. |
|
|
192
|
+
|
|
193
|
+
All built-in state adapters (`@chat-adapter/state-memory`, `@chat-adapter/state-redis`, `@chat-adapter/state-ioredis`) implement these methods. The Redis adapters use Lua scripts for atomicity.
|
|
194
|
+
|
|
195
|
+
## Observability
|
|
196
|
+
|
|
197
|
+
All strategies emit structured log events at `info` level:
|
|
198
|
+
|
|
199
|
+
| Event | Strategy | Data |
|
|
200
|
+
|-------|----------|------|
|
|
201
|
+
| `message-queued` | queue | threadId, messageId, queueDepth |
|
|
202
|
+
| `message-dequeued` | queue, debounce | threadId, messageId, skippedCount |
|
|
203
|
+
| `message-dropped` | drop, queue | threadId, messageId, reason |
|
|
204
|
+
| `message-expired` | queue, debounce | threadId, messageId |
|
|
205
|
+
| `message-superseded` | debounce | threadId, droppedId |
|
|
206
|
+
| `message-debouncing` | debounce | threadId, messageId, debounceMs |
|
|
207
|
+
| `message-debounce-reset` | debounce | threadId, messageId |
|
|
208
|
+
|
|
209
|
+
## Choosing a strategy
|
|
210
|
+
|
|
211
|
+
| Use case | Strategy | Why |
|
|
212
|
+
|----------|----------|-----|
|
|
213
|
+
| Simple bots, one-shot commands | `drop` | No complexity, no queue overhead |
|
|
214
|
+
| AI chatbots, customer support | `queue` | Never lose messages; handler sees full conversation context |
|
|
215
|
+
| WhatsApp/Telegram bots, rapid corrections | `debounce` | Users send many short messages in quick succession; wait for a pause before responding |
|
|
216
|
+
| Stateless lookups, translations | `concurrent` | Maximum throughput, no ordering needed |
|
|
217
|
+
|
|
218
|
+
## Backward compatibility
|
|
219
|
+
|
|
220
|
+
- The default strategy is `drop` — existing behavior is unchanged.
|
|
221
|
+
- The deprecated `onLockConflict` option continues to work but should be replaced with `concurrency`.
|
|
222
|
+
- Handler signatures are backward-compatible; the new `context` parameter is optional.
|
|
223
|
+
- Deduplication always runs regardless of strategy.
|
|
@@ -31,7 +31,7 @@ Chat SDK ships with Vercel-maintained adapters for Slack, Teams, Google Chat, Di
|
|
|
31
31
|
|
|
32
32
|
#### Qualifications for vendor official tier
|
|
33
33
|
|
|
34
|
-
-
|
|
34
|
+
- Commitment for continued maintenance of the adapter.
|
|
35
35
|
- GitHub hosting in official vendor-owned org.
|
|
36
36
|
- Documentation of the adapter in primary vendor docs.
|
|
37
37
|
- Announcement of the adapter in blog post or changelog and social media.
|
|
@@ -241,6 +241,19 @@ async initialize(chat: ChatInstance): Promise<void> {
|
|
|
241
241
|
}
|
|
242
242
|
```
|
|
243
243
|
|
|
244
|
+
### Disconnect
|
|
245
|
+
|
|
246
|
+
The optional `disconnect()` method is called during `chat.shutdown()` to clean up resources. Use it to close persistent connections, tear down subscriptions, or release any platform-specific resources.
|
|
247
|
+
|
|
248
|
+
```typescript title="src/adapter.ts"
|
|
249
|
+
async disconnect(): Promise<void> {
|
|
250
|
+
// Close WebSocket connections, clean up subscriptions, etc.
|
|
251
|
+
// Example: await this.matrixClient.stop();
|
|
252
|
+
}
|
|
253
|
+
```
|
|
254
|
+
|
|
255
|
+
Adapters that don't hold persistent connections can skip this method entirely.
|
|
256
|
+
|
|
244
257
|
### Thread ID encode/decode
|
|
245
258
|
|
|
246
259
|
Thread IDs typically follow the pattern `{adapter}:{segment1}:{segment2}`, though some adapters use more or fewer segments. The `encodeThreadId` and `decodeThreadId` methods must roundtrip consistently. Use `base64url` encoding for segments that contain special characters.
|
|
@@ -530,6 +543,7 @@ These methods are not required but extend your adapter's capabilities:
|
|
|
530
543
|
|
|
531
544
|
| Method | Purpose |
|
|
532
545
|
|--------|---------|
|
|
546
|
+
| `disconnect()` | Clean up connections and resources during shutdown |
|
|
533
547
|
| `openDM(userId)` | Open a direct message conversation |
|
|
534
548
|
| `isDM(threadId)` | Check if a thread is a DM |
|
|
535
549
|
| `stream(threadId, textStream)` | Stream AI responses in real-time |
|
|
@@ -23,7 +23,7 @@ Scaffold a new Hono project and install dependencies:
|
|
|
23
23
|
```sh title="Terminal"
|
|
24
24
|
pnpm create hono my-review-bot
|
|
25
25
|
cd my-review-bot
|
|
26
|
-
pnpm add @octokit/rest @vercel/sandbox ai bash-tool chat @chat-adapter/github @chat-adapter/state-redis
|
|
26
|
+
pnpm add @octokit/rest @vercel/functions @vercel/sandbox ai bash-tool chat @chat-adapter/github @chat-adapter/state-redis
|
|
27
27
|
```
|
|
28
28
|
|
|
29
29
|
<Callout type="info">
|
|
@@ -198,6 +198,7 @@ Create the Hono app with a single webhook route that delegates to Chat SDK:
|
|
|
198
198
|
|
|
199
199
|
```typescript title="src/index.ts" lineNumbers
|
|
200
200
|
import { Hono } from "hono";
|
|
201
|
+
import { waitUntil } from "@vercel/functions";
|
|
201
202
|
import { bot } from "./bot";
|
|
202
203
|
|
|
203
204
|
const app = new Hono();
|
|
@@ -208,15 +209,13 @@ app.post("/api/webhooks/github", async (c) => {
|
|
|
208
209
|
return c.text("GitHub adapter not configured", 404);
|
|
209
210
|
}
|
|
210
211
|
|
|
211
|
-
return handler(c.req.raw, {
|
|
212
|
-
waitUntil: (task) => c.executionCtx.waitUntil(task),
|
|
213
|
-
});
|
|
212
|
+
return handler(c.req.raw, { waitUntil });
|
|
214
213
|
});
|
|
215
214
|
|
|
216
215
|
export default app;
|
|
217
216
|
```
|
|
218
217
|
|
|
219
|
-
Chat SDK's GitHub adapter handles signature verification, event parsing, and routing internally. The `waitUntil` option ensures the review completes after the HTTP response is sent.
|
|
218
|
+
Chat SDK's GitHub adapter handles signature verification, event parsing, and routing internally. The `waitUntil` option ensures the review completes after the HTTP response is sent — required on serverless platforms where the function would otherwise terminate before your handlers finish.
|
|
220
219
|
|
|
221
220
|
## Test locally
|
|
222
221
|
|
package/docs/handling-events.mdx
CHANGED
|
@@ -111,6 +111,8 @@ bot.onSubscribedMessage(async (thread, message) => {
|
|
|
111
111
|
});
|
|
112
112
|
```
|
|
113
113
|
|
|
114
|
+
See [`toAiMessages`](/docs/api/to-ai-messages) for all options including multi-user name prefixing, message transforms, and attachment handling.
|
|
115
|
+
|
|
114
116
|
### Example: Unsubscribe on keyword
|
|
115
117
|
|
|
116
118
|
```typescript title="lib/bot.ts" lineNumbers
|
package/docs/meta.json
CHANGED
|
@@ -151,6 +151,8 @@ await thread.post(result.fullStream);
|
|
|
151
151
|
|
|
152
152
|
Both `fullStream` and `textStream` are supported. Use `fullStream` with multi-step agents — it preserves paragraph breaks between steps. Any `AsyncIterable<string>` also works for custom streams.
|
|
153
153
|
|
|
154
|
+
For multi-turn conversations, use [`toAiMessages()`](/docs/api/to-ai-messages) to convert thread history into the `{ role, content }[]` format expected by AI SDKs.
|
|
155
|
+
|
|
154
156
|
See the [Streaming](/docs/streaming) page for details on platform behavior and configuration.
|
|
155
157
|
|
|
156
158
|
## Attachments and files
|
package/docs/streaming.mdx
CHANGED
|
@@ -28,7 +28,7 @@ bot.onNewMention(async (thread, message) => {
|
|
|
28
28
|
|
|
29
29
|
### Why `fullStream` over `textStream`?
|
|
30
30
|
|
|
31
|
-
When AI SDK agents make tool calls between text steps, `textStream` concatenates all text without separators — `"hello.how are you?"` instead of `"hello.\n\nhow are you?"`. The `fullStream` contains explicit `step
|
|
31
|
+
When AI SDK agents make tool calls between text steps, `textStream` concatenates all text without separators — `"hello.how are you?"` instead of `"hello.\n\nhow are you?"`. The `fullStream` contains explicit `finish-step` events that Chat SDK uses to inject paragraph breaks between steps automatically.
|
|
32
32
|
|
|
33
33
|
Both stream types are auto-detected:
|
|
34
34
|
|
|
@@ -184,7 +184,7 @@ await thread.stream(textStream, {
|
|
|
184
184
|
## Streaming with conversation history
|
|
185
185
|
|
|
186
186
|
Combine message history with streaming for multi-turn AI conversations.
|
|
187
|
-
Use `toAiMessages()` to convert chat messages into the `{ role, content }` format expected by AI SDKs:
|
|
187
|
+
Use [`toAiMessages()`](/docs/api/to-ai-messages) to convert chat messages into the `{ role, content }` format expected by AI SDKs:
|
|
188
188
|
|
|
189
189
|
```typescript title="lib/bot.ts" lineNumbers
|
|
190
190
|
import { toAiMessages } from "chat";
|
|
@@ -200,51 +200,4 @@ bot.onSubscribedMessage(async (thread, message) => {
|
|
|
200
200
|
});
|
|
201
201
|
```
|
|
202
202
|
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
Converts an array of `Message` objects into AI SDK conversation format:
|
|
206
|
-
|
|
207
|
-
- Maps `author.isMe` to `"assistant"` role, all others to `"user"`
|
|
208
|
-
- Filters out empty messages
|
|
209
|
-
- Sorts chronologically (oldest first)
|
|
210
|
-
- Appends link metadata (URLs, titles, descriptions) when present
|
|
211
|
-
- Labels embedded message links (e.g. shared Slack messages) as `[Embedded message: ...]`
|
|
212
|
-
|
|
213
|
-
| Option | Type | Default | Description |
|
|
214
|
-
|--------|------|---------|-------------|
|
|
215
|
-
| `includeNames` | `boolean` | `false` | Prefix user messages with `[username]: ` for multi-user context |
|
|
216
|
-
| `transformMessage` | `(aiMessage, source) => AiMessage \| Promise<AiMessage \| null> \| null` | — | Transform or filter each message after default processing. Return `null` to skip. |
|
|
217
|
-
| `onUnsupportedAttachment` | `(attachment, message) => void` | `console.warn` | Called when an attachment type is not supported |
|
|
218
|
-
|
|
219
|
-
### Customizing messages with `transformMessage`
|
|
220
|
-
|
|
221
|
-
Use `transformMessage` to modify, enrich, or filter messages after default processing:
|
|
222
|
-
|
|
223
|
-
```typescript title="lib/bot.ts" lineNumbers
|
|
224
|
-
import { toAiMessages } from "chat";
|
|
225
|
-
|
|
226
|
-
const history = await toAiMessages(result.messages, {
|
|
227
|
-
transformMessage: (aiMessage, source) => {
|
|
228
|
-
// Replace bot user IDs with readable names
|
|
229
|
-
if (typeof aiMessage.content === "string") {
|
|
230
|
-
return {
|
|
231
|
-
...aiMessage,
|
|
232
|
-
content: aiMessage.content.replace(/<@U123>/g, "@VercelBot"),
|
|
233
|
-
};
|
|
234
|
-
}
|
|
235
|
-
return aiMessage;
|
|
236
|
-
},
|
|
237
|
-
});
|
|
238
|
-
```
|
|
239
|
-
|
|
240
|
-
Return `null` to skip a message entirely:
|
|
241
|
-
|
|
242
|
-
```typescript
|
|
243
|
-
const history = await toAiMessages(result.messages, {
|
|
244
|
-
transformMessage: (aiMessage, source) => {
|
|
245
|
-
// Skip messages from a specific user
|
|
246
|
-
if (source.author.userId === "U_NOISY_BOT") return null;
|
|
247
|
-
return aiMessage;
|
|
248
|
-
},
|
|
249
|
-
});
|
|
250
|
-
```
|
|
203
|
+
See the [`toAiMessages` API reference](/docs/api/to-ai-messages) for all options including `includeNames`, `transformMessage`, and attachment handling.
|