ai-stream-utils 0.0.3 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -30,69 +30,32 @@ npm install ai-stream-utils
30
30
 
31
31
  ## Overview
32
32
 
33
- | Function | Object | Use Case |
34
- |----------|-------------|----------|
35
- | [`mapUIMessageStream`](#mapuimessagestream) | [UIMessageChunk](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts) | Transform chunks while streaming to the client |
36
- | [`flatMapUIMessageStream`](#flatmapuimessagestream) | [UIMessagePart](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#uimessagepart-types) | Buffer chunks until a part is complete, then transform the part |
37
- | [`filterUIMessageStream`](#filteruimessagestream) | [UIMessageChunk](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts) | Filter chunks while streaming to the client |
33
+ | Function | Input | Returns | Use Case |
34
+ |----------|-------------|---------|----------|
35
+ | [`mapUIMessageStream`](#mapuimessagestream) | [UIMessageChunk](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts) | `chunk \| chunk[] \| null` | Transform or filter chunks in real-time (e.g., smooth streaming) |
36
+ | [`flatMapUIMessageStream`](#flatmapuimessagestream) | [UIMessagePart](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#uimessagepart-types) | `part \| part[] \| null` | Buffer until complete, then transform (e.g., redact tool output) |
37
+ | [`filterUIMessageStream`](#filteruimessagestream) | [UIMessageChunk](https://github.com/vercel/ai/blob/main/packages/ai/src/ui-message-stream/ui-message-chunks.ts) | `boolean` | Include/exclude parts by type (e.g., hide reasoning) |
38
38
 
39
39
  ## Usage
40
40
 
41
41
  ### `mapUIMessageStream`
42
42
 
43
- Transform or filter individual chunks as they stream through. The map function receives the chunk and a partial representation of the part it belongs to.
43
+ The `mapUIMessageStream` function operates on chunks and can be used to transform or filter individual chunks as they stream through. It receives the current chunk and the partial part assembled from all chunks processed so far.
44
44
 
45
45
  ```typescript
46
46
  import { mapUIMessageStream } from 'ai-stream-utils';
47
- import { streamText } from 'ai';
48
47
 
49
- const tools = {
50
- weather: tool({
51
- description: 'Get the weather in a location',
52
- inputSchema: z.object({
53
- location: z.string().describe('The location to get the weather for'),
54
- }),
55
- execute: ({ location }) => ({
56
- location,
57
- temperature: 72 + Math.floor(Math.random() * 21) - 10,
58
- unit: "C",
59
- }),
60
- }),
61
- };
62
-
63
- const result = streamText({
64
- model,
65
- prompt: 'What is the weather in Tokyo?',
66
- tools,
67
- });
68
-
69
- // Filter out tool-call chunks by part type
70
- const stream = mapUIMessageStream(
71
- result.toUIMessageStream<MyUIMessage>(),
72
- ({ chunk, part }) => {
73
- if (part.type === "tool-weather") {
74
- return null;
75
- }
76
- return chunk;
77
- }
78
- );
79
-
80
- // Transform text chunks to uppercase
81
48
  const stream = mapUIMessageStream(
82
49
  result.toUIMessageStream<MyUIMessage>(),
83
50
  ({ chunk, part }) => {
51
+ // Transform: modify the chunk
84
52
  if (chunk.type === 'text-delta') {
85
53
  return { ...chunk, delta: chunk.delta.toUpperCase() };
86
54
  }
87
- return chunk;
88
- }
89
- );
90
-
91
- // Access chunk history and index
92
- const stream = mapUIMessageStream(
93
- result.toUIMessageStream<MyUIMessage>(),
94
- ({ chunk }, { index, chunks }) => {
95
- console.log(`Chunk ${index}, total seen: ${chunks.length}`);
55
+ // Filter: return null to exclude chunks
56
+ if (part.type === 'tool-weather') {
57
+ return null;
58
+ }
96
59
  return chunk;
97
60
  }
98
61
  );
@@ -100,49 +63,21 @@ const stream = mapUIMessageStream(
100
63
 
101
64
  ### `flatMapUIMessageStream`
102
65
 
103
- Buffer all chunks for a part until it's complete, then transform the complete part. This is useful when you need access to the full part content before deciding how to transform it.
104
-
105
- When a predicate is provided (e.g., `partTypeIs('text')`), only matching parts are buffered for transformation. Non-matching parts stream through immediately without buffering, preserving real-time streaming behavior.
66
+ The `flatMapUIMessageStream` function operates on parts. It buffers all chunks belonging to a part of a particular type (e.g. text parts) until the part is complete and then transforms or filters the complete part. The optional predicate `partTypeIs()` can be used to selectively buffer only specific parts while streaming others through immediately.
106
67
 
107
68
  ```typescript
108
69
  import { flatMapUIMessageStream, partTypeIs } from 'ai-stream-utils';
109
70
 
110
- // Filter out reasoning parts
111
- const stream = flatMapUIMessageStream(
112
- result.toUIMessageStream<MyUIMessage>(),
113
- ({ part }) => part.type === 'reasoning' ? null : part
114
- );
115
-
116
- // Transform text content
117
71
  const stream = flatMapUIMessageStream(
118
72
  result.toUIMessageStream<MyUIMessage>(),
73
+ // Predicate to only buffer tool-weather parts and pass through other parts
74
+ partTypeIs('tool-weather'),
119
75
  ({ part }) => {
120
- if (part.type === 'text') {
121
- return { ...part, text: part.text.toUpperCase() };
76
+ // Transform: modify the complete part
77
+ if (part.state === 'output-available') {
78
+ return { ...part, output: { ...part.output, temperature: toFahrenheit(part.output.temperature) } };
122
79
  }
123
- return part;
124
- }
125
- );
126
-
127
- // Buffer only specific parts, pass through others immediately
128
- const stream = flatMapUIMessageStream(
129
- result.toUIMessageStream<MyUIMessage>(),
130
- partTypeIs('text'),
131
- ({ part }) => ({ ...part, text: part.text.toUpperCase() })
132
- );
133
-
134
- // Buffer multiple part types
135
- const stream = flatMapUIMessageStream(
136
- result.toUIMessageStream<MyUIMessage>(),
137
- partTypeIs(['text', 'reasoning']),
138
- ({ part }) => part // part is typed as TextUIPart | ReasoningUIPart
139
- );
140
-
141
- // Access part history
142
- const stream = flatMapUIMessageStream(
143
- result.toUIMessageStream<MyUIMessage>(),
144
- ({ part }, { index, parts }) => {
145
- console.log(`Part ${index}, previous parts:`, parts.slice(0, -1));
80
+ // Filter: return null to exclude parts
146
81
  return part;
147
82
  }
148
83
  );
@@ -150,7 +85,7 @@ const stream = flatMapUIMessageStream(
150
85
 
151
86
  ### `filterUIMessageStream`
152
87
 
153
- Filter individual chunks as they stream through. This is a convenience wrapper around `mapUIMessageStream` that provides a simpler API for filtering chunks by part type. Use the `includeParts()` and `excludeParts()` helper functions for common patterns, or provide a custom filter function.
88
+ The `filterUIMessageStream` function is a convenience wrapper around `mapUIMessageStream` with a simpler API to filter chunks by part type. It provides the `includeParts()` and `excludeParts()` predicates for common patterns.
154
89
 
155
90
  ```typescript
156
91
  import { filterUIMessageStream, includeParts, excludeParts } from 'ai-stream-utils';
@@ -170,16 +105,280 @@ const stream = filterUIMessageStream(
170
105
  // Custom filter function
171
106
  const stream = filterUIMessageStream(
172
107
  result.toUIMessageStream<MyUIMessage>(),
173
- ({ part }, { index }) => {
174
- // Include text parts
108
+ ({ part, chunk }) => {
175
109
  if (part.type === 'text') return true;
176
- // Include only first 5 parts
177
- if (index < 5) return true;
110
+ if (chunk.type === 'tool-input-available') return true;
178
111
  return false;
179
112
  }
180
113
  );
181
114
  ```
182
115
 
116
+ ## Examples
117
+
118
+ ### Smooth Streaming
119
+
120
+ Buffers multiple text chunks into a string, splits at word boundaries and re-emits each word as a separate chunk for smoother UI rendering. See [examples/smooth-streaming.ts](./examples/smooth-streaming.ts) for the full implementation.
121
+
122
+ ```typescript
123
+ import { mapUIMessageStream } from 'ai-stream-utils';
124
+
125
+ const WORD_REGEX = /\S+\s+/m;
126
+ let buffer = '';
127
+
128
+ const smoothedStream = mapUIMessageStream(
129
+ result.toUIMessageStream(),
130
+ ({ chunk }) => {
131
+ if (chunk.type !== 'text-delta') {
132
+ // Flush buffer on non-text chunks
133
+ if (buffer.length > 0) {
134
+ const flushed = { type: 'text-delta' as const, id: chunk.id, delta: buffer };
135
+ buffer = '';
136
+ return [flushed, chunk];
137
+ }
138
+ return chunk;
139
+ }
140
+
141
+ // Append the text delta to the buffer
142
+ buffer += chunk.delta;
143
+ const chunks = [];
144
+
145
+ let match;
146
+ while ((match = WORD_REGEX.exec(buffer)) !== null) {
147
+ chunks.push({ type: 'text-delta', id: chunk.id, delta: buffer.slice(0, match.index + match[0].length) });
148
+ buffer = buffer.slice(match.index + match[0].length);
149
+ }
150
+ // Emit the word-by-word chunks
151
+ return chunks;
152
+ }
153
+ );
154
+
155
+ // Output: word-by-word streaming
156
+ // { type: 'text-delta', delta: 'Why ' }
157
+ // { type: 'text-delta', delta: "don't " }
158
+ // { type: 'text-delta', delta: 'scientists ' }
159
+ ```
160
+
161
+ ### Redacting Sensitive Data
162
+
163
+ Buffer tool calls until complete, then redact sensitive fields before streaming to the client. See [examples/order-lookup.ts](./examples/order-lookup.ts) for the full example.
164
+
165
+ ```typescript
166
+ import { flatMapUIMessageStream, partTypeIs } from 'ai-stream-utils';
167
+
168
+ const tools = {
169
+ lookupOrder: tool({
170
+ description: 'Look up order details by order ID',
171
+ inputSchema: z.object({
172
+ orderId: z.string().describe('The order ID to look up'),
173
+ }),
174
+ execute: ({ orderId }) => ({
175
+ orderId,
176
+ status: 'shipped',
177
+ items: ['iPhone 15'],
178
+ total: 1299.99,
179
+ email: 'customer@example.com', // Sensitive
180
+ address: '123 Main St, SF, CA 94102', // Sensitive
181
+ }),
182
+ }),
183
+ };
184
+
185
+ const result = streamText({
186
+ model: openai('gpt-4o'),
187
+ prompt: 'Where is my order #12345?',
188
+ tools,
189
+ });
190
+
191
+ // Buffer tool-lookupOrder parts, stream text parts immediately
192
+ const redactedStream = flatMapUIMessageStream(
193
+ result.toUIMessageStream<MyUIMessage>(),
194
+ partTypeIs('tool-lookupOrder'),
195
+ ({ part }) => {
196
+ if (part.state === 'output-available') {
197
+ return {
198
+ ...part,
199
+ output: {
200
+ ...part.output,
201
+ email: '[REDACTED]',
202
+ address: '[REDACTED]',
203
+ },
204
+ };
205
+ }
206
+ return part;
207
+ },
208
+ );
209
+
210
+ // Text streams immediately, tool output is redacted:
211
+ // { type: 'text-delta', delta: 'Let me look that up...' }
212
+ // { type: 'tool-output-available', output: { orderId: '12345', email: '[REDACTED]', address: '[REDACTED]' } }
213
+ ```
214
+
215
+ ### Conditional Part Injection
216
+
217
+ Inspect previously streamed parts to conditionally inject new parts. This example creates a text part from a tool call message if the model didn't generate one. See [examples/ask-permission.ts](./examples/ask-permission.ts) for the full example.
218
+
219
+ ```typescript
220
+ import { flatMapUIMessageStream, partTypeIs } from 'ai-stream-utils';
221
+
222
+ const tools = {
223
+ askForPermission: tool({
224
+ description: 'Ask for permission to access current location',
225
+ inputSchema: z.object({
226
+ message: z.string().describe('The message to ask for permission'),
227
+ }),
228
+ }),
229
+ };
230
+
231
+ const result = streamText({
232
+ model: openai('gpt-4o'),
233
+ prompt: 'Is it sunny today?',
234
+ tools,
235
+ });
236
+
237
+ // Buffer askForPermission tool calls, check if text was already generated
238
+ const stream = flatMapUIMessageStream(
239
+ result.toUIMessageStream<MyUIMessage>(),
240
+ partTypeIs('tool-askForPermission'),
241
+ (current, context) => {
242
+ if (current.part.state === 'input-available') {
243
+ // Check if a text part was already streamed
244
+ const hasTextPart = context.parts.some((p) => p.type === 'text');
245
+
246
+ if (!hasTextPart) {
247
+ // Inject a text part from the tool call message
248
+ return [
249
+ { type: 'text', text: current.part.input.message },
250
+ current.part,
251
+ ];
252
+ }
253
+ }
254
+ return current.part;
255
+ },
256
+ );
257
+
258
+ // If model only generated tool call, we inject the text:
259
+ // { type: 'text', text: 'May I access your location?' }
260
+ // { type: 'tool-askForPermission', input: { message: 'May I access your location?' } }
261
+ ```
262
+
263
+ ### Transform Tool Output
264
+
265
+ Transform tool outputs on-the-fly, such as converting temperature units. See [examples/weather.ts](./examples/weather.ts) for the full example.
266
+
267
+ ```typescript
268
+ import { flatMapUIMessageStream, partTypeIs } from 'ai-stream-utils';
269
+
270
+ const toFahrenheit = (celsius: number) => (celsius * 9) / 5 + 32;
271
+
272
+ const tools = {
273
+ weather: tool({
274
+ description: 'Get the weather in a location',
275
+ inputSchema: z.object({ location: z.string() }),
276
+ execute: ({ location }) => ({
277
+ location,
278
+ temperature: 22, // Celsius from API
279
+ unit: 'C',
280
+ }),
281
+ }),
282
+ };
283
+
284
+ const result = streamText({
285
+ model: openai('gpt-4o'),
286
+ prompt: 'What is the weather in Tokyo?',
287
+ tools,
288
+ });
289
+
290
+ // Convert Celsius to Fahrenheit before streaming to client
291
+ const stream = flatMapUIMessageStream(
292
+ result.toUIMessageStream<MyUIMessage>(),
293
+ partTypeIs('tool-weather'),
294
+ ({ part }) => {
295
+ if (part.state === 'output-available') {
296
+ return {
297
+ ...part,
298
+ output: {
299
+ ...part.output,
300
+ temperature: toFahrenheit(part.output.temperature),
301
+ unit: 'F',
302
+ },
303
+ };
304
+ }
305
+ return part;
306
+ },
307
+ );
308
+
309
+ // Output is converted:
310
+ // { type: 'tool-output-available', output: { location: 'Tokyo', temperature: 71.6, unit: 'F' } }
311
+ ```
312
+
313
+ ## Stream Utilities
314
+
315
+ Helper functions for converting between streams, arrays, and async iterables.
316
+
317
+ | Function | Converts | To |
318
+ |----------|----------|-----|
319
+ | `createAsyncIterableStream` | `ReadableStream<T>` | `AsyncIterableStream<T>` |
320
+ | `convertArrayToStream` | `Array<T>` | `ReadableStream<T>` |
321
+ | `convertAsyncIterableToStream` | `AsyncIterable<T>` | `ReadableStream<T>` |
322
+ | `convertAsyncIterableToArray` | `AsyncIterable<T>` | `Promise<Array<T>>` |
323
+ | `convertStreamToArray` | `ReadableStream<T>` | `Promise<Array<T>>` |
324
+
325
+ ### `createAsyncIterableStream`
326
+
327
+ Adds async iterator protocol to a `ReadableStream`, enabling `for await...of` loops.
328
+
329
+ ```typescript
330
+ import { createAsyncIterableStream } from 'ai-stream-utils/utils';
331
+
332
+ const asyncStream = createAsyncIterableStream(readableStream);
333
+ for await (const chunk of asyncStream) {
334
+ console.log(chunk);
335
+ }
336
+ ```
337
+
338
+ ### `convertArrayToStream`
339
+
340
+ Converts an array to a `ReadableStream` that emits each element.
341
+
342
+ ```typescript
343
+ import { convertArrayToStream } from 'ai-stream-utils/utils';
344
+
345
+ const stream = convertArrayToStream([1, 2, 3]);
346
+ ```
347
+
348
+ ### `convertAsyncIterableToStream`
349
+
350
+ Converts an async iterable (e.g., async generator) to a `ReadableStream`.
351
+
352
+ ```typescript
353
+ import { convertAsyncIterableToStream } from 'ai-stream-utils/utils';
354
+
355
+ async function* generator() {
356
+ yield 1;
357
+ yield 2;
358
+ }
359
+ const stream = convertAsyncIterableToStream(generator());
360
+ ```
361
+
362
+ ### `convertAsyncIterableToArray`
363
+
364
+ Collects all values from an async iterable into an array.
365
+
366
+ ```typescript
367
+ import { convertAsyncIterableToArray } from 'ai-stream-utils/utils';
368
+
369
+ const array = await convertAsyncIterableToArray(asyncIterable);
370
+ ```
371
+
372
+ ### `convertStreamToArray`
373
+
374
+ Consumes a `ReadableStream` and collects all chunks into an array.
375
+
376
+ ```typescript
377
+ import { convertStreamToArray } from 'ai-stream-utils/utils';
378
+
379
+ const array = await convertStreamToArray(readableStream);
380
+ ```
381
+
183
382
  ## Type Safety
184
383
 
185
384
  The [`toUIMessageStream()`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text#to-ui-message-stream) from [`streamText()`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text) returns a generic `ReadableStream<UIMessageChunk>`, which means the part types cannot be inferred automatically.
@@ -272,17 +471,11 @@ function mapUIMessageStream<UI_MESSAGE extends UIMessage>(
272
471
 
273
472
  type MapUIMessageStreamFn<UI_MESSAGE extends UIMessage> = (
274
473
  input: MapInput<UI_MESSAGE>,
275
- context: MapContext<UI_MESSAGE>,
276
- ) => InferUIMessageChunk<UI_MESSAGE> | null;
474
+ ) => InferUIMessageChunk<UI_MESSAGE> | InferUIMessageChunk<UI_MESSAGE>[] | null;
277
475
 
278
476
  type MapInput<UI_MESSAGE extends UIMessage> = {
279
477
  chunk: InferUIMessageChunk<UI_MESSAGE>;
280
- part: InferPartialUIMessagePart<UI_MESSAGE>;
281
- };
282
-
283
- type MapContext<UI_MESSAGE extends UIMessage> = {
284
- index: number;
285
- chunks: InferUIMessageChunk<UI_MESSAGE>[];
478
+ part: InferUIMessagePart<UI_MESSAGE>;
286
479
  };
287
480
  ```
288
481
 
@@ -305,7 +498,7 @@ function flatMapUIMessageStream<UI_MESSAGE extends UIMessage, PART extends Infer
305
498
  type FlatMapUIMessageStreamFn<UI_MESSAGE extends UIMessage, PART = InferUIMessagePart<UI_MESSAGE>> = (
306
499
  input: FlatMapInput<UI_MESSAGE, PART>,
307
500
  context: FlatMapContext<UI_MESSAGE>,
308
- ) => PART | null;
501
+ ) => InferUIMessagePart<UI_MESSAGE> | InferUIMessagePart<UI_MESSAGE>[] | null;
309
502
 
310
503
  type FlatMapInput<UI_MESSAGE extends UIMessage, PART = InferUIMessagePart<UI_MESSAGE>> = {
311
504
  part: PART;
@@ -325,7 +518,7 @@ function partTypeIs<UI_MESSAGE extends UIMessage, T extends InferUIMessagePartTy
325
518
  ): FlatMapUIMessageStreamPredicate<UI_MESSAGE, Extract<InferUIMessagePart<UI_MESSAGE>, { type: T }>>
326
519
 
327
520
  type FlatMapUIMessageStreamPredicate<UI_MESSAGE extends UIMessage, PART extends InferUIMessagePart<UI_MESSAGE>> =
328
- (part: InferPartialUIMessagePart<UI_MESSAGE>) => boolean;
521
+ (part: InferUIMessagePart<UI_MESSAGE>) => boolean;
329
522
  ```
330
523
 
331
524
  ### `filterUIMessageStream`
@@ -0,0 +1,59 @@
1
+ //#region src/utils/create-async-iterable-stream.ts
2
+ /**
3
+ * Converts a ReadableStream to an AsyncIterableStream.
4
+ * Copied from https://github.com/vercel/ai/blob/main/packages/ai/src/util/async-iterable-stream.ts
5
+ */
6
+ function createAsyncIterableStream(source) {
7
+ /** Pipe through a TransformStream to ensure a fresh, unlocked stream. */
8
+ const stream = source.pipeThrough(new TransformStream());
9
+ /** Implements the async iterator protocol for the stream. */
10
+ return Object.assign(stream, { [Symbol.asyncIterator]() {
11
+ const reader = stream.getReader();
12
+ let finished = false;
13
+ /** Cleans up the reader by cancelling and releasing the lock. */
14
+ async function cleanup(cancelStream) {
15
+ finished = true;
16
+ try {
17
+ if (cancelStream) await reader.cancel?.();
18
+ } finally {
19
+ try {
20
+ reader.releaseLock();
21
+ } catch {}
22
+ }
23
+ }
24
+ return {
25
+ async next() {
26
+ if (finished) return {
27
+ done: true,
28
+ value: void 0
29
+ };
30
+ const { done, value } = await reader.read();
31
+ if (done) {
32
+ await cleanup(true);
33
+ return {
34
+ done: true,
35
+ value: void 0
36
+ };
37
+ }
38
+ return {
39
+ done: false,
40
+ value
41
+ };
42
+ },
43
+ async return() {
44
+ await cleanup(true);
45
+ return {
46
+ done: true,
47
+ value: void 0
48
+ };
49
+ },
50
+ async throw(err) {
51
+ await cleanup(true);
52
+ throw err;
53
+ }
54
+ };
55
+ } });
56
+ }
57
+
58
+ //#endregion
59
+ export { createAsyncIterableStream as t };