ai-stream-utils 1.6.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +197 -416
- package/dist/convert-ui-message-stream-to-sse-stream-CcQRXKju.mjs +165 -0
- package/dist/index.d.mts +538 -110
- package/dist/index.mjs +414 -261
- package/dist/types-B4nePmEd.d.mts +53 -0
- package/dist/utils/index.d.mts +2 -46
- package/dist/utils/index.mjs +2 -81
- package/package.json +49 -31
- package/dist/create-async-iterable-stream-x_DKVIDi.mjs +0 -59
package/README.md
CHANGED
|
@@ -9,348 +9,224 @@
|
|
|
9
9
|
|
|
10
10
|
</div>
|
|
11
11
|
|
|
12
|
-
This library provides composable
|
|
12
|
+
This library provides composable filter and transformation utilities for UI message streams created by [`streamText()`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text) in the AI SDK.
|
|
13
13
|
|
|
14
14
|
### Why?
|
|
15
15
|
|
|
16
16
|
The AI SDK UI message stream created by [`toUIMessageStream()`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text#to-ui-message-stream) streams all parts (text, tools, reasoning, etc.) to the client by default. However, you may want to:
|
|
17
17
|
|
|
18
|
-
- **Filter**: Tool calls like database queries often contain large amounts of data or sensitive information that should not be
|
|
18
|
+
- **Filter**: Tool calls like database queries often contain large amounts of data or sensitive information that should not be streamed to the client
|
|
19
19
|
- **Transform**: Modify text or tool outputs while they are streamed to the client
|
|
20
|
+
- **Observe**: Log stream lifecycle events, tool calls, or other chunks without consuming or modifying the stream
|
|
20
21
|
|
|
21
22
|
This library provides type-safe, composable utilities for all these use cases.
|
|
22
23
|
|
|
23
24
|
### Installation
|
|
24
25
|
|
|
25
|
-
This library
|
|
26
|
+
This library supports AI SDK v5 and v6.
|
|
26
27
|
|
|
27
28
|
```bash
|
|
28
29
|
npm install ai-stream-utils
|
|
29
30
|
```
|
|
30
31
|
|
|
31
|
-
## Overview
|
|
32
|
-
|
|
33
|
-
| Function | Input | Returns | Use Case |
|
|
34
|
-
|----------|-------------|---------|----------|
|
|
35
|
-
| [`mapUIMessageStream`](#mapuimessagestream) | [UIMessageChunk](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts) | `chunk \| chunk[] \| null` | Transform or filter chunks in real-time (e.g., smooth streaming) |
|
|
36
|
-
| [`flatMapUIMessageStream`](#flatmapuimessagestream) | [UIMessagePart](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#uimessagepart-types) | `part \| part[] \| null` | Buffer until complete, then transform (e.g., redact tool output) |
|
|
37
|
-
| [`filterUIMessageStream`](#filteruimessagestream) | [UIMessageChunk](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts) | `boolean` | Include/exclude parts by type (e.g., hide reasoning) |
|
|
38
|
-
| [`consumeUIMessageStream`](#consumeuimessagestream) | [UIMessageChunk](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts) | `Promise<UIMessage>` | Consume entire stream and return final message (e.g., server-side processing) |
|
|
39
|
-
|
|
40
32
|
## Usage
|
|
41
33
|
|
|
42
|
-
|
|
34
|
+
The `pipe` function provides a composable pipeline API for filtering, transforming, and observing UI message streams. Multiple operators can be chained together, and type guards automatically narrow chunk and part types, thus enabling type-safe stream transformations with autocomplete.
|
|
43
35
|
|
|
44
|
-
|
|
36
|
+
### `.filter()`
|
|
37
|
+
|
|
38
|
+
Filter chunks by returning `true` to keep or `false` to exclude.
|
|
45
39
|
|
|
46
40
|
```typescript
|
|
47
|
-
|
|
41
|
+
const stream = pipe(result.toUIMessageStream())
|
|
42
|
+
.filter(({ chunk, part }) => {
|
|
43
|
+
// chunk.type: "text-delta" | "text-start" | "tool-input-available" | ...
|
|
44
|
+
// part.type: "text" | "reasoning" | "tool-weather" | ...
|
|
48
45
|
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
({ chunk, part }) => {
|
|
52
|
-
// Transform: modify the chunk
|
|
53
|
-
if (chunk.type === 'text-delta') {
|
|
54
|
-
return { ...chunk, delta: chunk.delta.toUpperCase() };
|
|
55
|
-
}
|
|
56
|
-
// Filter: return null to exclude chunks
|
|
57
|
-
if (part.type === 'tool-weather') {
|
|
58
|
-
return null;
|
|
46
|
+
if (chunk.type === "data-weather") {
|
|
47
|
+
return false; // exclude chunk
|
|
59
48
|
}
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
)
|
|
49
|
+
|
|
50
|
+
return true; // keep chunk
|
|
51
|
+
})
|
|
52
|
+
.toStream();
|
|
63
53
|
```
|
|
64
54
|
|
|
65
|
-
|
|
55
|
+
**Type guards** provide a simpler API for common filtering patterns:
|
|
66
56
|
|
|
67
|
-
|
|
57
|
+
- `includeChunks("text-delta")` or `includeChunks(["text-delta", "text-end"])`: Include specific chunk types
|
|
58
|
+
- `excludeChunks("text-delta")` or `excludeChunks(["text-delta", "text-end"])`: Exclude specific chunk types
|
|
59
|
+
- `includeParts("text")` or `includeParts(["text", "reasoning"])`: Include specific part types
|
|
60
|
+
- `excludeParts("reasoning")` or `excludeParts(["reasoning", "tool-database"])`: Exclude specific part types
|
|
68
61
|
|
|
69
|
-
|
|
70
|
-
import { flatMapUIMessageStream, partTypeIs } from 'ai-stream-utils';
|
|
62
|
+
**Example:** Exclude tool calls from the client.
|
|
71
63
|
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
({ part }) => {
|
|
77
|
-
// Transform: modify the complete part
|
|
78
|
-
if (part.state === 'output-available') {
|
|
79
|
-
return { ...part, output: { ...part.output, temperature: toFahrenheit(part.output.temperature) } };
|
|
80
|
-
}
|
|
81
|
-
// Filter: return null to exclude parts
|
|
82
|
-
return part;
|
|
83
|
-
}
|
|
84
|
-
);
|
|
64
|
+
```typescript
|
|
65
|
+
const stream = pipe(result.toUIMessageStream())
|
|
66
|
+
.filter(excludeParts(["tool-weather", "tool-database"]))
|
|
67
|
+
.toStream();
|
|
85
68
|
```
|
|
86
69
|
|
|
87
|
-
### `
|
|
70
|
+
### `.map()`
|
|
88
71
|
|
|
89
|
-
|
|
72
|
+
Transform chunks by returning a chunk, an array of chunks, or `null` to exclude.
|
|
90
73
|
|
|
91
74
|
```typescript
|
|
92
|
-
|
|
75
|
+
const stream = pipe(result.toUIMessageStream())
|
|
76
|
+
.map(({ chunk, part }) => {
|
|
77
|
+
// chunk.type: "text-delta" | "text-start" | "tool-input-available" | ...
|
|
78
|
+
// part.type: "text" | "reasoning" | "tool-weather" | ...
|
|
93
79
|
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
includeParts(['text', 'tool-weather'])
|
|
98
|
-
);
|
|
80
|
+
if (chunk.type === "text-start") {
|
|
81
|
+
return chunk; // pass through unchanged
|
|
82
|
+
}
|
|
99
83
|
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
excludeParts(['reasoning', 'tool-database'])
|
|
104
|
-
);
|
|
84
|
+
if (chunk.type === "text-delta") {
|
|
85
|
+
return { ...chunk, delta: "modified" }; // transform chunk
|
|
86
|
+
}
|
|
105
87
|
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
({ part, chunk }) => {
|
|
110
|
-
if (part.type === 'text') return true;
|
|
111
|
-
if (chunk.type === 'tool-input-available') return true;
|
|
112
|
-
return false;
|
|
113
|
-
}
|
|
114
|
-
);
|
|
115
|
-
```
|
|
88
|
+
if (chunk.type === "data-weather") {
|
|
89
|
+
return [chunk1, chunk2]; // emit multiple chunks
|
|
90
|
+
}
|
|
116
91
|
|
|
117
|
-
|
|
92
|
+
return null; // exclude chunk (same as filter)
|
|
93
|
+
})
|
|
94
|
+
.toStream();
|
|
95
|
+
```
|
|
118
96
|
|
|
119
|
-
|
|
97
|
+
**Example:** Convert text to uppercase.
|
|
120
98
|
|
|
121
99
|
```typescript
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
});
|
|
128
|
-
|
|
129
|
-
/* Consume the entire stream and get the final message */
|
|
130
|
-
const message = await consumeUIMessageStream(
|
|
131
|
-
result.toUIMessageStream<MyUIMessage>()
|
|
132
|
-
);
|
|
100
|
+
const stream = pipe(result.toUIMessageStream())
|
|
101
|
+
.map(({ chunk }) => {
|
|
102
|
+
if (chunk.type === "text-delta") {
|
|
103
|
+
return { ...chunk, delta: chunk.delta.toUpperCase() };
|
|
104
|
+
}
|
|
133
105
|
|
|
134
|
-
|
|
106
|
+
return chunk;
|
|
107
|
+
})
|
|
108
|
+
.toStream();
|
|
135
109
|
```
|
|
136
110
|
|
|
137
|
-
|
|
111
|
+
### `.on()`
|
|
138
112
|
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
Buffers multiple text chunks into a string, splits at word boundaries and re-emits each word as a separate chunk for smoother UI rendering. See [examples/smooth-streaming.ts](./examples/smooth-streaming.ts) for the full implementation.
|
|
113
|
+
Observe chunks without modifying the stream. The callback is invoked for matching chunks.
|
|
142
114
|
|
|
143
115
|
```typescript
|
|
144
|
-
|
|
116
|
+
const stream = pipe(result.toUIMessageStream())
|
|
117
|
+
.on(
|
|
118
|
+
({ chunk, part }) => {
|
|
119
|
+
// return true to invoke callback, false to skip
|
|
120
|
+
return chunk.type === "text-delta";
|
|
121
|
+
},
|
|
122
|
+
({ chunk, part }) => {
|
|
123
|
+
// callback invoked for matching chunks
|
|
124
|
+
console.log(chunk, part);
|
|
125
|
+
},
|
|
126
|
+
)
|
|
127
|
+
.toStream();
|
|
128
|
+
```
|
|
145
129
|
|
|
146
|
-
|
|
147
|
-
let buffer = '';
|
|
130
|
+
**Type guards** provide a type-safe way to observe specific chunk types:
|
|
148
131
|
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
({ chunk }) => {
|
|
152
|
-
if (chunk.type !== 'text-delta') {
|
|
153
|
-
// Flush buffer on non-text chunks
|
|
154
|
-
if (buffer.length > 0) {
|
|
155
|
-
const flushed = { type: 'text-delta' as const, id: chunk.id, delta: buffer };
|
|
156
|
-
buffer = '';
|
|
157
|
-
return [flushed, chunk];
|
|
158
|
-
}
|
|
159
|
-
return chunk;
|
|
160
|
-
}
|
|
132
|
+
- `chunkType("text-delta")` or `chunkType(["start", "finish"])`: Observe specific chunk types
|
|
133
|
+
- `partType("text")` or `partType(["text", "reasoning"])`: Observe chunks belonging to specific part types
|
|
161
134
|
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
let match;
|
|
167
|
-
while ((match = WORD_REGEX.exec(buffer)) !== null) {
|
|
168
|
-
chunks.push({ type: 'text-delta', id: chunk.id, delta: buffer.slice(0, match.index + match[0].length) });
|
|
169
|
-
buffer = buffer.slice(match.index + match[0].length);
|
|
170
|
-
}
|
|
171
|
-
// Emit the word-by-word chunks
|
|
172
|
-
return chunks;
|
|
173
|
-
}
|
|
174
|
-
);
|
|
135
|
+
> [!NOTE]
|
|
136
|
+
> The `partType` type guard still operates on chunks. That means `partType("text")` will match any text chunks such as `text-start`, `text-delta`, and `text-end`.
|
|
137
|
+
|
|
138
|
+
**Example:** Log stream lifecycle events.
|
|
175
139
|
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
140
|
+
```typescript
|
|
141
|
+
const stream = pipe(result.toUIMessageStream())
|
|
142
|
+
.on(chunkType("start"), () => {
|
|
143
|
+
console.log("Stream started");
|
|
144
|
+
})
|
|
145
|
+
.on(chunkType("finish"), ({ chunk }) => {
|
|
146
|
+
console.log("Stream finished:", chunk.finishReason);
|
|
147
|
+
})
|
|
148
|
+
.on(chunkType("tool-input-available"), ({ chunk }) => {
|
|
149
|
+
console.log("Tool called:", chunk.toolName, chunk.input);
|
|
150
|
+
})
|
|
151
|
+
.toStream();
|
|
180
152
|
```
|
|
181
153
|
|
|
182
|
-
###
|
|
154
|
+
### `.toStream()`
|
|
183
155
|
|
|
184
|
-
|
|
156
|
+
Convert the pipeline back to an `AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>` that can be returned to the client or consumed.
|
|
185
157
|
|
|
186
158
|
```typescript
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
description: 'Look up order details by order ID',
|
|
192
|
-
inputSchema: z.object({
|
|
193
|
-
orderId: z.string().describe('The order ID to look up'),
|
|
194
|
-
}),
|
|
195
|
-
execute: ({ orderId }) => ({
|
|
196
|
-
orderId,
|
|
197
|
-
status: 'shipped',
|
|
198
|
-
items: ['iPhone 15'],
|
|
199
|
-
total: 1299.99,
|
|
200
|
-
email: 'customer@example.com', // Sensitive
|
|
201
|
-
address: '123 Main St, SF, CA 94102', // Sensitive
|
|
202
|
-
}),
|
|
203
|
-
}),
|
|
204
|
-
};
|
|
159
|
+
const stream = pipe(result.toUIMessageStream())
|
|
160
|
+
.filter(({ chunk }) => {})
|
|
161
|
+
.map(({ chunk }) => {})
|
|
162
|
+
.toStream();
|
|
205
163
|
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
});
|
|
164
|
+
// Iterate with for-await-of
|
|
165
|
+
for await (const chunk of stream) {
|
|
166
|
+
console.log(chunk);
|
|
167
|
+
}
|
|
211
168
|
|
|
212
|
-
//
|
|
213
|
-
const
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
({ part }) => {
|
|
217
|
-
if (part.state === 'output-available') {
|
|
218
|
-
return {
|
|
219
|
-
...part,
|
|
220
|
-
output: {
|
|
221
|
-
...part.output,
|
|
222
|
-
email: '[REDACTED]',
|
|
223
|
-
address: '[REDACTED]',
|
|
224
|
-
},
|
|
225
|
-
};
|
|
226
|
-
}
|
|
227
|
-
return part;
|
|
228
|
-
},
|
|
229
|
-
);
|
|
169
|
+
// Consume as ReadableStream
|
|
170
|
+
for await (const message of readUIMessageStream({ stream })) {
|
|
171
|
+
console.log(message);
|
|
172
|
+
}
|
|
230
173
|
|
|
231
|
-
//
|
|
232
|
-
|
|
233
|
-
// { type: 'tool-output-available', output: { orderId: '12345', email: '[REDACTED]', address: '[REDACTED]' } }
|
|
174
|
+
// Return to client with useChat()
|
|
175
|
+
return stream;
|
|
234
176
|
```
|
|
235
177
|
|
|
236
|
-
###
|
|
178
|
+
### Chaining and Type Narrowing
|
|
237
179
|
|
|
238
|
-
|
|
180
|
+
Multiple operators can be chained together. After filtering with type guards, chunk and part types are narrowed automatically.
|
|
239
181
|
|
|
240
182
|
```typescript
|
|
241
|
-
|
|
183
|
+
const stream = pipe<MyUIMessage>(result.toUIMessageStream())
|
|
184
|
+
.filter(includeParts("text"))
|
|
185
|
+
.map(({ chunk, part }) => {
|
|
186
|
+
// chunk is narrowed to text chunks only
|
|
187
|
+
// part.type is narrowed to "text"
|
|
188
|
+
return chunk;
|
|
189
|
+
})
|
|
190
|
+
.toStream();
|
|
191
|
+
```
|
|
242
192
|
|
|
243
|
-
|
|
244
|
-
askForPermission: tool({
|
|
245
|
-
description: 'Ask for permission to access current location',
|
|
246
|
-
inputSchema: z.object({
|
|
247
|
-
message: z.string().describe('The message to ask for permission'),
|
|
248
|
-
}),
|
|
249
|
-
}),
|
|
250
|
-
};
|
|
193
|
+
### Control Chunks
|
|
251
194
|
|
|
252
|
-
|
|
253
|
-
model: openai('gpt-4o'),
|
|
254
|
-
prompt: 'Is it sunny today?',
|
|
255
|
-
tools,
|
|
256
|
-
});
|
|
195
|
+
[Control chunks](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts#L278-L293) always pass through regardless of filter/transform settings:
|
|
257
196
|
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
if (current.part.state === 'input-available') {
|
|
264
|
-
// Check if a text part was already streamed
|
|
265
|
-
const hasTextPart = context.parts.some((p) => p.type === 'text');
|
|
266
|
-
|
|
267
|
-
if (!hasTextPart) {
|
|
268
|
-
// Inject a text part from the tool call message
|
|
269
|
-
return [
|
|
270
|
-
{ type: 'text', text: current.part.input.message },
|
|
271
|
-
current.part,
|
|
272
|
-
];
|
|
273
|
-
}
|
|
274
|
-
}
|
|
275
|
-
return current.part;
|
|
276
|
-
},
|
|
277
|
-
);
|
|
197
|
+
- `start`: Stream start marker
|
|
198
|
+
- `finish`: Stream finish marker
|
|
199
|
+
- `abort`: Stream abort marker
|
|
200
|
+
- `message-metadata`: Message metadata updates
|
|
201
|
+
- `error`: Error messages
|
|
278
202
|
|
|
279
|
-
|
|
280
|
-
// { type: 'text', text: 'May I access your location?' }
|
|
281
|
-
// { type: 'tool-askForPermission', input: { message: 'May I access your location?' } }
|
|
282
|
-
```
|
|
203
|
+
## Stream Utilities
|
|
283
204
|
|
|
284
|
-
|
|
205
|
+
Helper functions for consuming streams and converting between streams, arrays, and async iterables.
|
|
285
206
|
|
|
286
|
-
|
|
207
|
+
### `consumeUIMessageStream`
|
|
208
|
+
|
|
209
|
+
Consumes a UI message stream by fully reading it and returns the final assembled message. Useful for server-side processing without streaming to the client.
|
|
287
210
|
|
|
288
211
|
```typescript
|
|
289
|
-
import {
|
|
290
|
-
|
|
291
|
-
const toFahrenheit = (celsius: number) => (celsius * 9) / 5 + 32;
|
|
292
|
-
|
|
293
|
-
const tools = {
|
|
294
|
-
weather: tool({
|
|
295
|
-
description: 'Get the weather in a location',
|
|
296
|
-
inputSchema: z.object({ location: z.string() }),
|
|
297
|
-
execute: ({ location }) => ({
|
|
298
|
-
location,
|
|
299
|
-
temperature: 22, // Celsius from API
|
|
300
|
-
unit: 'C',
|
|
301
|
-
}),
|
|
302
|
-
}),
|
|
303
|
-
};
|
|
212
|
+
import { consumeUIMessageStream } from "ai-stream-utils";
|
|
304
213
|
|
|
305
214
|
const result = streamText({
|
|
306
|
-
model: openai(
|
|
307
|
-
prompt:
|
|
308
|
-
tools,
|
|
215
|
+
model: openai("gpt-4o"),
|
|
216
|
+
prompt: "Tell me a joke",
|
|
309
217
|
});
|
|
310
218
|
|
|
311
|
-
|
|
312
|
-
const stream = flatMapUIMessageStream(
|
|
313
|
-
result.toUIMessageStream<MyUIMessage>(),
|
|
314
|
-
partTypeIs('tool-weather'),
|
|
315
|
-
({ part }) => {
|
|
316
|
-
if (part.state === 'output-available') {
|
|
317
|
-
return {
|
|
318
|
-
...part,
|
|
319
|
-
output: {
|
|
320
|
-
...part.output,
|
|
321
|
-
temperature: toFahrenheit(part.output.temperature),
|
|
322
|
-
unit: 'F',
|
|
323
|
-
},
|
|
324
|
-
};
|
|
325
|
-
}
|
|
326
|
-
return part;
|
|
327
|
-
},
|
|
328
|
-
);
|
|
219
|
+
const message = await consumeUIMessageStream(result.toUIMessageStream<MyUIMessage>());
|
|
329
220
|
|
|
330
|
-
//
|
|
331
|
-
// { type: 'tool-output-available', output: { location: 'Tokyo', temperature: 71.6, unit: 'F' } }
|
|
221
|
+
console.log(message.parts); // All parts fully assembled
|
|
332
222
|
```
|
|
333
223
|
|
|
334
|
-
## Stream Utilities
|
|
335
|
-
|
|
336
|
-
Helper functions for converting between streams, arrays, and async iterables.
|
|
337
|
-
|
|
338
|
-
| Function | Converts | To |
|
|
339
|
-
|----------|----------|-----|
|
|
340
|
-
| `createAsyncIterableStream` | `ReadableStream<T>` | `AsyncIterableStream<T>` |
|
|
341
|
-
| `convertArrayToStream` | `Array<T>` | `ReadableStream<T>` |
|
|
342
|
-
| `convertAsyncIterableToStream` | `AsyncIterable<T>` | `ReadableStream<T>` |
|
|
343
|
-
| `convertAsyncIterableToArray` | `AsyncIterable<T>` | `Promise<Array<T>>` |
|
|
344
|
-
| `convertStreamToArray` | `ReadableStream<T>` | `Promise<Array<T>>` |
|
|
345
|
-
| `convertUIMessageToSSEStream` | `ReadableStream<UIMessageChunk>` | `ReadableStream<string>` |
|
|
346
|
-
| `convertSSEToUIMessageStream` | `ReadableStream<string>` | `ReadableStream<UIMessageChunk>` |
|
|
347
|
-
|
|
348
224
|
### `createAsyncIterableStream`
|
|
349
225
|
|
|
350
226
|
Adds async iterator protocol to a `ReadableStream`, enabling `for await...of` loops.
|
|
351
227
|
|
|
352
228
|
```typescript
|
|
353
|
-
import { createAsyncIterableStream } from
|
|
229
|
+
import { createAsyncIterableStream } from "ai-stream-utils";
|
|
354
230
|
|
|
355
231
|
const asyncStream = createAsyncIterableStream(readableStream);
|
|
356
232
|
for await (const chunk of asyncStream) {
|
|
@@ -363,7 +239,7 @@ for await (const chunk of asyncStream) {
|
|
|
363
239
|
Converts an array to a `ReadableStream` that emits each element.
|
|
364
240
|
|
|
365
241
|
```typescript
|
|
366
|
-
import { convertArrayToStream } from
|
|
242
|
+
import { convertArrayToStream } from "ai-stream-utils";
|
|
367
243
|
|
|
368
244
|
const stream = convertArrayToStream([1, 2, 3]);
|
|
369
245
|
```
|
|
@@ -373,7 +249,7 @@ const stream = convertArrayToStream([1, 2, 3]);
|
|
|
373
249
|
Converts an async iterable (e.g., async generator) to a `ReadableStream`.
|
|
374
250
|
|
|
375
251
|
```typescript
|
|
376
|
-
import { convertAsyncIterableToStream } from
|
|
252
|
+
import { convertAsyncIterableToStream } from "ai-stream-utils";
|
|
377
253
|
|
|
378
254
|
async function* generator() {
|
|
379
255
|
yield 1;
|
|
@@ -387,7 +263,7 @@ const stream = convertAsyncIterableToStream(generator());
|
|
|
387
263
|
Collects all values from an async iterable into an array.
|
|
388
264
|
|
|
389
265
|
```typescript
|
|
390
|
-
import { convertAsyncIterableToArray } from
|
|
266
|
+
import { convertAsyncIterableToArray } from "ai-stream-utils";
|
|
391
267
|
|
|
392
268
|
const array = await convertAsyncIterableToArray(asyncIterable);
|
|
393
269
|
```
|
|
@@ -397,7 +273,7 @@ const array = await convertAsyncIterableToArray(asyncIterable);
|
|
|
397
273
|
Consumes a `ReadableStream` and collects all chunks into an array.
|
|
398
274
|
|
|
399
275
|
```typescript
|
|
400
|
-
import { convertStreamToArray } from
|
|
276
|
+
import { convertStreamToArray } from "ai-stream-utils";
|
|
401
277
|
|
|
402
278
|
const array = await convertStreamToArray(readableStream);
|
|
403
279
|
```
|
|
@@ -407,7 +283,7 @@ const array = await convertStreamToArray(readableStream);
|
|
|
407
283
|
Converts a UI message stream to an SSE (Server-Sent Events) stream. Useful for sending UI message chunks over HTTP as SSE-formatted text.
|
|
408
284
|
|
|
409
285
|
```typescript
|
|
410
|
-
import { convertUIMessageToSSEStream } from
|
|
286
|
+
import { convertUIMessageToSSEStream } from "ai-stream-utils";
|
|
411
287
|
|
|
412
288
|
const uiStream = result.toUIMessageStream();
|
|
413
289
|
const sseStream = convertUIMessageToSSEStream(uiStream);
|
|
@@ -420,189 +296,94 @@ const sseStream = convertUIMessageToSSEStream(uiStream);
|
|
|
420
296
|
Converts an SSE stream back to a UI message stream. Useful for parsing SSE-formatted responses on the client.
|
|
421
297
|
|
|
422
298
|
```typescript
|
|
423
|
-
import { convertSSEToUIMessageStream } from
|
|
299
|
+
import { convertSSEToUIMessageStream } from "ai-stream-utils";
|
|
424
300
|
|
|
425
|
-
const response = await fetch(
|
|
301
|
+
const response = await fetch("/api/chat");
|
|
426
302
|
const sseStream = response.body.pipeThrough(new TextDecoderStream());
|
|
427
303
|
const uiStream = convertSSEToUIMessageStream(sseStream);
|
|
428
304
|
```
|
|
429
305
|
|
|
430
|
-
##
|
|
306
|
+
## Deprecated Functions
|
|
431
307
|
|
|
432
|
-
|
|
308
|
+
> [!WARNING]
|
|
309
|
+
> These functions are deprecated and will be removed in a future version. Use `pipe()` instead.
|
|
433
310
|
|
|
434
|
-
|
|
311
|
+
### `mapUIMessageStream`
|
|
435
312
|
|
|
436
313
|
```typescript
|
|
437
|
-
import
|
|
314
|
+
import { mapUIMessageStream } from "ai-stream-utils";
|
|
438
315
|
|
|
439
|
-
|
|
440
|
-
type
|
|
441
|
-
|
|
316
|
+
const stream = mapUIMessageStream(result.toUIMessageStream(), ({ chunk }) => {
|
|
317
|
+
if (chunk.type === "text-delta") {
|
|
318
|
+
return { ...chunk, delta: chunk.delta.toUpperCase() };
|
|
319
|
+
}
|
|
320
|
+
return chunk;
|
|
321
|
+
});
|
|
322
|
+
```
|
|
442
323
|
|
|
443
|
-
|
|
444
|
-
MyUIMessageMetadata,
|
|
445
|
-
MyDataPart,
|
|
446
|
-
MyTools
|
|
447
|
-
>;
|
|
324
|
+
### `filterUIMessageStream`
|
|
448
325
|
|
|
449
|
-
|
|
450
|
-
|
|
326
|
+
```typescript
|
|
327
|
+
import { filterUIMessageStream, includeParts } from "ai-stream-utils";
|
|
451
328
|
|
|
452
|
-
// Type-safe filtering with autocomplete
|
|
453
329
|
const stream = filterUIMessageStream(
|
|
454
|
-
|
|
455
|
-
includeParts([
|
|
456
|
-
);
|
|
457
|
-
|
|
458
|
-
// Type-safe chunk mapping
|
|
459
|
-
const stream = mapUIMessageStream(
|
|
460
|
-
uiStream,
|
|
461
|
-
({ chunk, part }) => {
|
|
462
|
-
// part.type is typed based on MyUIMessage
|
|
463
|
-
return chunk;
|
|
464
|
-
}
|
|
330
|
+
result.toUIMessageStream(),
|
|
331
|
+
includeParts(["text", "tool-weather"]),
|
|
465
332
|
);
|
|
466
333
|
```
|
|
467
334
|
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
The transformed stream has the same type as the original UI message stream. You can consume it with [`useChat()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat) or [`readUIMessageStream()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/read-ui-message-stream).
|
|
471
|
-
|
|
472
|
-
Since message parts may be different on the client vs. the server, you may need to reconcile message parts when the client sends messages back to the server.
|
|
473
|
-
|
|
474
|
-
If you save messages to a database and configure `useChat()` to [only send the last message](https://ai-sdk.dev/docs/ai-sdk-ui/chatbot-message-persistence#sending-only-the-last-message), you can read existing messages from the database. This means the model will have access to all message parts, including filtered parts not available on the client.
|
|
475
|
-
|
|
476
|
-
## Part Type Mapping
|
|
477
|
-
|
|
478
|
-
The transformations operate on [UIMessagePart](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#uimessagepart-types) types, which are derived from [UIMessageChunk](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts) types:
|
|
479
|
-
|
|
480
|
-
| Part Type | Chunk Types |
|
|
481
|
-
| ----------------- | ------------------------------------- |
|
|
482
|
-
| [`text`](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#textuipart) | `text-start`, `text-delta`, `text-end` |
|
|
483
|
-
| [`reasoning`](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#reasoninguipart) | `reasoning-start`, `reasoning-delta`, `reasoning-end` |
|
|
484
|
-
| [`tool-{name}`](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#tooluipart) | `tool-input-start`, `tool-input-delta`, `tool-input-available`, `tool-input-error`, `tool-output-available`, `tool-output-error` |
|
|
485
|
-
| [`data-{name}`](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#datauipart) | `data-{name}` |
|
|
486
|
-
| [`step-start`](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#stepstartuipart) | `start-step` |
|
|
487
|
-
| [`file`](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#fileuipart) | `file` |
|
|
488
|
-
| [`source-url`](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#sourceurluipart) | `source-url` |
|
|
489
|
-
| [`source-document`](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#sourcedocumentuipart) | `source-document` |
|
|
490
|
-
|
|
491
|
-
### Control Chunks
|
|
492
|
-
|
|
493
|
-
[Control chunks](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts#L278-L293) always pass through regardless of filter/transform settings:
|
|
494
|
-
|
|
495
|
-
- `start`: Stream start marker
|
|
496
|
-
- `finish`: Stream finish marker
|
|
497
|
-
- `abort`: Stream abort marker
|
|
498
|
-
- `message-metadata`: Message metadata updates
|
|
499
|
-
- `error`: Error messages
|
|
500
|
-
|
|
501
|
-
### Step Boundary Handling
|
|
502
|
-
|
|
503
|
-
Step boundaries are handled automatically:
|
|
504
|
-
|
|
505
|
-
1. `start-step` is buffered until the first content chunk is encountered
|
|
506
|
-
2. If the first content chunk passes through, `start-step` is included
|
|
507
|
-
3. If the first content chunk is filtered out, `start-step` is also filtered out
|
|
508
|
-
4. `finish-step` is only included if the corresponding `start-step` was included
|
|
509
|
-
|
|
510
|
-
## API Reference
|
|
511
|
-
|
|
512
|
-
### `mapUIMessageStream`
|
|
335
|
+
### `flatMapUIMessageStream`
|
|
513
336
|
|
|
514
337
|
```typescript
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
)
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
338
|
+
import { flatMapUIMessageStream, partTypeIs } from "ai-stream-utils";
|
|
339
|
+
|
|
340
|
+
const stream = flatMapUIMessageStream(
|
|
341
|
+
result.toUIMessageStream(),
|
|
342
|
+
partTypeIs("tool-weather"),
|
|
343
|
+
({ part }) => {
|
|
344
|
+
if (part.state === "output-available") {
|
|
345
|
+
return {
|
|
346
|
+
...part,
|
|
347
|
+
output: { ...part.output, temperature: toFahrenheit(part.output.temperature) },
|
|
348
|
+
};
|
|
349
|
+
}
|
|
350
|
+
return part;
|
|
351
|
+
},
|
|
352
|
+
);
|
|
528
353
|
```
|
|
529
354
|
|
|
530
|
-
|
|
355
|
+
## Type Safety
|
|
531
356
|
|
|
532
|
-
|
|
533
|
-
// Without predicate - buffer all parts
|
|
534
|
-
function flatMapUIMessageStream<UI_MESSAGE extends UIMessage>(
|
|
535
|
-
stream: ReadableStream<UIMessageChunk>,
|
|
536
|
-
flatMapFn: FlatMapUIMessageStreamFn<UI_MESSAGE>,
|
|
537
|
-
): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>
|
|
538
|
-
|
|
539
|
-
// With predicate - buffer only matching parts, pass through others
|
|
540
|
-
function flatMapUIMessageStream<UI_MESSAGE extends UIMessage, PART extends InferUIMessagePart<UI_MESSAGE>>(
|
|
541
|
-
stream: ReadableStream<UIMessageChunk>,
|
|
542
|
-
predicate: FlatMapUIMessageStreamPredicate<UI_MESSAGE, PART>,
|
|
543
|
-
flatMapFn: FlatMapUIMessageStreamFn<UI_MESSAGE, PART>,
|
|
544
|
-
): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>
|
|
545
|
-
|
|
546
|
-
type FlatMapUIMessageStreamFn<UI_MESSAGE extends UIMessage, PART = InferUIMessagePart<UI_MESSAGE>> = (
|
|
547
|
-
input: FlatMapInput<UI_MESSAGE, PART>,
|
|
548
|
-
context: FlatMapContext<UI_MESSAGE>,
|
|
549
|
-
) => InferUIMessagePart<UI_MESSAGE> | InferUIMessagePart<UI_MESSAGE>[] | null;
|
|
550
|
-
|
|
551
|
-
type FlatMapInput<UI_MESSAGE extends UIMessage, PART = InferUIMessagePart<UI_MESSAGE>> = {
|
|
552
|
-
part: PART;
|
|
553
|
-
};
|
|
554
|
-
|
|
555
|
-
type FlatMapContext<UI_MESSAGE extends UIMessage> = {
|
|
556
|
-
index: number;
|
|
557
|
-
parts: InferUIMessagePart<UI_MESSAGE>[];
|
|
558
|
-
};
|
|
559
|
-
```
|
|
357
|
+
The [`toUIMessageStream()`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text#to-ui-message-stream) from [`streamText()`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text) returns a generic `ReadableStream<UIMessageChunk>`, which means the part types cannot be inferred automatically.
|
|
560
358
|
|
|
561
|
-
|
|
359
|
+
To enable autocomplete and type-safety, pass your [`UIMessage`](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#creating-your-own-uimessage-type) type as a generic parameter:
|
|
562
360
|
|
|
563
361
|
```typescript
|
|
564
|
-
|
|
565
|
-
type: T | T[],
|
|
566
|
-
): FlatMapUIMessageStreamPredicate<UI_MESSAGE, Extract<InferUIMessagePart<UI_MESSAGE>, { type: T }>>
|
|
362
|
+
import type { UIMessage, InferUITools } from "ai";
|
|
567
363
|
|
|
568
|
-
type
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
### `filterUIMessageStream`
|
|
364
|
+
type MyUIMessageMetadata = {};
|
|
365
|
+
type MyDataPart = {};
|
|
366
|
+
type MyTools = InferUITools<typeof tools>;
|
|
573
367
|
|
|
574
|
-
|
|
575
|
-
function filterUIMessageStream<UI_MESSAGE extends UIMessage>(
|
|
576
|
-
stream: ReadableStream<UIMessageChunk>,
|
|
577
|
-
filterFn: FilterUIMessageStreamPredicate<UI_MESSAGE>,
|
|
578
|
-
): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>
|
|
579
|
-
|
|
580
|
-
type FilterUIMessageStreamPredicate<UI_MESSAGE extends UIMessage> = (
|
|
581
|
-
input: MapInput<UI_MESSAGE>,
|
|
582
|
-
context: MapContext<UI_MESSAGE>,
|
|
583
|
-
) => boolean;
|
|
584
|
-
```
|
|
368
|
+
type MyUIMessage = UIMessage<MyUIMessageMetadata, MyDataPart, MyTools>;
|
|
585
369
|
|
|
586
|
-
|
|
370
|
+
// Use MyUIMessage type when creating the UI message stream
|
|
371
|
+
const uiStream = result.toUIMessageStream<MyUIMessage>();
|
|
587
372
|
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
)
|
|
373
|
+
// Type-safe filtering with autocomplete
|
|
374
|
+
const stream = pipe<MyUIMessage>(uiStream)
|
|
375
|
+
.filter(includeParts(["text", "tool-weather"])) // Autocomplete works!
|
|
376
|
+
.map(({ chunk, part }) => {
|
|
377
|
+
// part.type is typed based on MyUIMessage
|
|
378
|
+
return chunk;
|
|
379
|
+
})
|
|
380
|
+
.toStream();
|
|
592
381
|
```
|
|
593
382
|
|
|
594
|
-
|
|
383
|
+
## Client-Side Usage
|
|
595
384
|
|
|
596
|
-
|
|
597
|
-
function excludeParts<UI_MESSAGE extends UIMessage>(
|
|
598
|
-
partTypes: Array<InferUIMessagePartType<UI_MESSAGE>>,
|
|
599
|
-
): FilterUIMessageStreamPredicate<UI_MESSAGE>
|
|
600
|
-
```
|
|
385
|
+
The transformed stream has the same type as the original UI message stream. You can consume it with [`useChat()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat) or [`readUIMessageStream()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/read-ui-message-stream).
|
|
601
386
|
|
|
602
|
-
|
|
387
|
+
Since message parts may be different on the client vs. the server, you may need to reconcile message parts when the client sends messages back to the server.
|
|
603
388
|
|
|
604
|
-
|
|
605
|
-
async function consumeUIMessageStream<UI_MESSAGE extends UIMessage>(
|
|
606
|
-
stream: ReadableStream<InferUIMessageChunk<UI_MESSAGE>>,
|
|
607
|
-
): Promise<UI_MESSAGE>
|
|
608
|
-
```
|
|
389
|
+
If you save messages to a database and configure `useChat()` to [only send the last message](https://ai-sdk.dev/docs/ai-sdk-ui/chatbot-message-persistence#sending-only-the-last-message), you can read existing messages from the database. This means the model will have access to all message parts, including filtered parts not available on the client.
|