@trigger.dev/sdk 0.0.0-prerelease-20260327224514 → 0.0.0-prerelease-20260331093145
This diff shows the contents of publicly available package versions as published to their respective public registries. It is provided for informational purposes only and reflects the changes between those published versions.
- package/dist/commonjs/v3/ai.d.ts +5 -1798
- package/dist/commonjs/v3/ai.js +13 -2882
- package/dist/commonjs/v3/ai.js.map +1 -1
- package/dist/commonjs/v3/index.d.ts +2 -2
- package/dist/commonjs/v3/runs.d.ts +2 -9
- package/dist/commonjs/v3/runs.js +0 -1
- package/dist/commonjs/v3/runs.js.map +1 -1
- package/dist/commonjs/v3/shared.d.ts +2 -24
- package/dist/commonjs/v3/shared.js +1 -160
- package/dist/commonjs/v3/shared.js.map +1 -1
- package/dist/commonjs/v3/streams.js +19 -73
- package/dist/commonjs/v3/streams.js.map +1 -1
- package/dist/commonjs/v3/tasks.d.ts +1 -2
- package/dist/commonjs/v3/tasks.js +0 -1
- package/dist/commonjs/v3/tasks.js.map +1 -1
- package/dist/commonjs/version.js +1 -1
- package/dist/esm/v3/ai.d.ts +5 -1798
- package/dist/esm/v3/ai.js +14 -2883
- package/dist/esm/v3/ai.js.map +1 -1
- package/dist/esm/v3/index.d.ts +2 -2
- package/dist/esm/v3/runs.d.ts +0 -7
- package/dist/esm/v3/runs.js +0 -1
- package/dist/esm/v3/runs.js.map +1 -1
- package/dist/esm/v3/shared.d.ts +2 -24
- package/dist/esm/v3/shared.js +1 -159
- package/dist/esm/v3/shared.js.map +1 -1
- package/dist/esm/v3/streams.js +19 -73
- package/dist/esm/v3/streams.js.map +1 -1
- package/dist/esm/v3/tasks.d.ts +1 -2
- package/dist/esm/v3/tasks.js +1 -2
- package/dist/esm/v3/tasks.js.map +1 -1
- package/dist/esm/version.js +1 -1
- package/package.json +6 -41
- package/dist/commonjs/v3/chat-constants.d.ts +0 -10
- package/dist/commonjs/v3/chat-constants.js +0 -14
- package/dist/commonjs/v3/chat-constants.js.map +0 -1
- package/dist/commonjs/v3/chat-react.d.ts +0 -135
- package/dist/commonjs/v3/chat-react.js +0 -248
- package/dist/commonjs/v3/chat-react.js.map +0 -1
- package/dist/commonjs/v3/chat.d.ts +0 -346
- package/dist/commonjs/v3/chat.js +0 -616
- package/dist/commonjs/v3/chat.js.map +0 -1
- package/dist/commonjs/v3/chat.test.d.ts +0 -1
- package/dist/commonjs/v3/chat.test.js +0 -1855
- package/dist/commonjs/v3/chat.test.js.map +0 -1
- package/dist/esm/v3/chat-constants.d.ts +0 -10
- package/dist/esm/v3/chat-constants.js +0 -11
- package/dist/esm/v3/chat-constants.js.map +0 -1
- package/dist/esm/v3/chat-react.d.ts +0 -135
- package/dist/esm/v3/chat-react.js +0 -244
- package/dist/esm/v3/chat-react.js.map +0 -1
- package/dist/esm/v3/chat.d.ts +0 -346
- package/dist/esm/v3/chat.js +0 -611
- package/dist/esm/v3/chat.js.map +0 -1
- package/dist/esm/v3/chat.test.d.ts +0 -1
- package/dist/esm/v3/chat.test.js +0 -1853
- package/dist/esm/v3/chat.test.js.map +0 -1
package/dist/commonjs/v3/ai.d.ts
CHANGED
@@ -1,27 +1,6 @@
-import {
-import type { ModelMessage, ToolSet, UIMessage, UIMessageChunk, UIMessageStreamOptions, LanguageModelUsage } from "ai";
+import { Task, type inferSchemaIn, type TaskSchema, type TaskWithSchema } from "@trigger.dev/core/v3";
 import { Tool, ToolCallOptions } from "ai";
-
-import type { ResolvedPrompt } from "./prompt.js";
-/** Re-export for typing `ctx` in `chat.task` hooks without importing `@trigger.dev/core`. */
-export type { TaskRunContext } from "@trigger.dev/core/v3";
-import { CHAT_MESSAGES_STREAM_ID, CHAT_STOP_STREAM_ID } from "./chat-constants.js";
-export type ToolCallExecutionOptions = {
-toolCallId: string;
-experimental_context?: unknown;
-/** Chat context — only present when the tool runs inside a chat.task turn. */
-chatId?: string;
-turn?: number;
-continuation?: boolean;
-clientData?: unknown;
-};
-/** Chat context stored in locals during each chat.task turn for auto-detection. */
-type ChatTurnContext<TClientData = unknown> = {
-chatId: string;
-turn: number;
-continuation: boolean;
-clientData?: TClientData;
-};
+export type ToolCallExecutionOptions = Omit<ToolCallOptions, "abortSignal">;
 type ToolResultContent = Array<{
 type: "text";
 text: string;
@@ -33,1783 +12,11 @@ type ToolResultContent = Array<{
 export type ToolOptions<TResult> = {
 experimental_toToolResultContent?: (result: TResult) => ToolResultContent;
 };
-
-
-/**
-* Returns an `execute` function for the AI SDK `tool()` helper (or any compatible tool definition).
-* Preferred API for task-backed tools: the same Trigger wiring as the deprecated `ai.tool()`
-* (`triggerAndSubscribe`, tool-call metadata, chat context, `chat.local` serialization) without
-* building the tool object. You supply `description`, `inputSchema`, and any AI-SDK-only options
-* (e.g. `experimental_toToolResultContent`) on `tool()` yourself.
-*
-* @example
-* ```ts
-* import { tool } from "ai";
-* import { z } from "zod";
-* import { ai } from "@trigger.dev/sdk/ai";
-* import { myTask } from "./trigger/myTask";
-*
-* export const myTool = tool({
-* description: myTask.description ?? "",
-* inputSchema: z.object({ id: z.string() }),
-* execute: ai.toolExecute(myTask),
-* });
-* ```
-*/
-declare function toolExecute<TIdentifier extends string, TInput = void, TOutput = unknown>(task: Task<TIdentifier, TInput, TOutput>): (input: TInput, toolOpts: ToolCallOptions) => Promise<TOutput>;
-declare function toolExecute<TIdentifier extends string, TTaskSchema extends TaskSchema | undefined = undefined, TOutput = unknown>(task: TaskWithSchema<TIdentifier, TTaskSchema, TOutput>): (input: inferSchemaIn<TTaskSchema>, toolOpts: ToolCallOptions) => Promise<TOutput>;
-/**
-* @deprecated Use `tool()` from the `ai` package with `execute: ai.toolExecute(task)` instead.
-* This helper may be removed in a future major release.
-*/
-declare function toolFromTask<TIdentifier extends string, TInput = void, TOutput = unknown>(task: Task<TIdentifier, TInput, TOutput>, options?: ToolOptions<TOutput>): ToolSetCompatible<Tool<TInput, TOutput>>;
-/** @deprecated Use `tool()` from `ai` with `execute: ai.toolExecute(task)`. */
-declare function toolFromTask<TIdentifier extends string, TTaskSchema extends TaskSchema | undefined = undefined, TOutput = unknown>(task: TaskWithSchema<TIdentifier, TTaskSchema, TOutput>, options?: ToolOptions<TOutput>): ToolSetCompatible<Tool<inferSchemaIn<TTaskSchema>, TOutput>>;
+declare function toolFromTask<TIdentifier extends string, TInput = void, TOutput = unknown>(task: Task<TIdentifier, TInput, TOutput>, options?: ToolOptions<TOutput>): Tool<TInput, TOutput>;
+declare function toolFromTask<TIdentifier extends string, TTaskSchema extends TaskSchema | undefined = undefined, TOutput = unknown>(task: TaskWithSchema<TIdentifier, TTaskSchema, TOutput>, options?: ToolOptions<TOutput>): Tool<inferSchemaIn<TTaskSchema>, TOutput>;
 declare function getToolOptionsFromMetadata(): ToolCallExecutionOptions | undefined;
-/**
-* Get the current tool call ID from inside a subtask invoked via `ai.toolExecute()` (or legacy `ai.tool()`).
-* Returns `undefined` if not running as a tool subtask.
-*/
-declare function getToolCallId(): string | undefined;
-/**
-* Get the chat context from inside a subtask invoked via `ai.toolExecute()` (or legacy `ai.tool()`) within a `chat.task`.
-* Pass `typeof yourChatTask` as the type parameter to get typed `clientData`.
-* Returns `undefined` if the parent is not a chat task.
-*
-* @example
-* ```ts
-* const ctx = ai.chatContext<typeof myChat>();
-* // ctx?.clientData is typed based on myChat's clientDataSchema
-* ```
-*/
-declare function getToolChatContext<TChatTask extends AnyTask = AnyTask>(): ChatTurnContext<InferChatClientData<TChatTask>> | undefined;
-/**
-* Get the chat context from inside a subtask, throwing if not in a chat context.
-* Pass `typeof yourChatTask` as the type parameter to get typed `clientData`.
-*
-* @example
-* ```ts
-* const ctx = ai.chatContextOrThrow<typeof myChat>();
-* // ctx.chatId, ctx.clientData are guaranteed non-null
-* ```
-*/
-declare function getToolChatContextOrThrow<TChatTask extends AnyTask = AnyTask>(): ChatTurnContext<InferChatClientData<TChatTask>>;
 export declare const ai: {
-/**
-* @deprecated Use `tool()` from the `ai` package with `execute: ai.toolExecute(task)` instead.
-*/
 tool: typeof toolFromTask;
-/**
-* Preferred: return value for the `execute` field of AI SDK `tool()`. Keeps Trigger subtask and
-* metadata behavior without coupling to a specific `ai` version’s `Tool` / `ToolSet` types.
-*/
-toolExecute: typeof toolExecute;
 currentToolOptions: typeof getToolOptionsFromMetadata;
-/** Get the tool call ID from inside a subtask invoked via `ai.toolExecute()` (or legacy `ai.tool()`). */
-toolCallId: typeof getToolCallId;
-/** Get chat context (chatId, turn, clientData, etc.) from inside a subtask of a `chat.task`. Returns undefined if not in a chat context. */
-chatContext: typeof getToolChatContext;
-/** Get chat context or throw if not in a chat context. Pass `typeof yourChatTask` for typed clientData. */
-chatContextOrThrow: typeof getToolChatContextOrThrow;
-};
-/**
-* Creates a public access token for a chat task.
-*
-* This is a convenience helper that creates a multi-use trigger public token
-* scoped to the given task. Use it in a server action to provide the frontend
-* `TriggerChatTransport` with an `accessToken`.
-*
-* @example
-* ```ts
-* // actions.ts
-* "use server";
-* import { chat } from "@trigger.dev/sdk/ai";
-* import type { myChat } from "@/trigger/chat";
-*
-* export const getChatToken = () => chat.createAccessToken<typeof myChat>("my-chat");
-* ```
-*/
-declare function createChatAccessToken<TTask extends AnyTask>(taskId: TaskIdentifier<TTask>): Promise<string>;
-/**
-* The default stream key used for chat transport communication.
-* Both `TriggerChatTransport` (frontend) and `pipeChat`/`chatTask` (backend)
-* use this key by default.
-*/
-export declare const CHAT_STREAM_KEY = "chat";
-export { CHAT_MESSAGES_STREAM_ID, CHAT_STOP_STREAM_ID };
-/**
-* A stream writer passed to chat lifecycle callbacks (`onPreload`, `onChatStart`,
-* `onTurnStart`, `onTurnComplete`, `onCompacted`).
-*
-* Write custom `UIMessageChunk` parts (e.g. `data-*` parts) directly to the chat
-* stream without the ceremony of `chat.stream.writer({ execute })`.
-*
-* The writer is lazy — no stream overhead if you don't call `write()` or `merge()`.
-*
-* @example
-* ```ts
-* onTurnStart: async ({ writer }) => {
-* writer.write({ type: "data-status", data: { loading: true } });
-* },
-* onTurnComplete: async ({ writer, uiMessages }) => {
-* writer.write({ type: "data-analytics", data: { messageCount: uiMessages.length } });
-* },
-* ```
-*/
-export type ChatWriter = {
-/** Write a single UIMessageChunk to the chat stream. */
-write(part: UIMessageChunk): void;
-/** Merge another stream's chunks into the chat stream. */
-merge(stream: ReadableStream<UIMessageChunk>): void;
-};
-/**
-* The wire payload shape sent by `TriggerChatTransport`.
-* Uses `metadata` to match the AI SDK's `ChatRequestOptions` field name.
-*/
-export type ChatTaskWirePayload<TMessage extends UIMessage = UIMessage, TMetadata = unknown> = {
-messages: TMessage[];
-chatId: string;
-trigger: "submit-message" | "regenerate-message" | "preload";
-messageId?: string;
-metadata?: TMetadata;
-/** Whether this run is continuing an existing chat whose previous run ended. */
-continuation?: boolean;
-/** The run ID of the previous run (only set when `continuation` is true). */
-previousRunId?: string;
-/** Override idle timeout for this run (seconds). Set by transport.preload(). */
-idleTimeoutInSeconds?: number;
-};
-/**
-* The payload shape passed to the `chatTask` run function.
-*
-* - `messages` contains model-ready messages (converted via `convertToModelMessages`) —
-* pass these directly to `streamText`.
-* - `clientData` contains custom data from the frontend (the `metadata` field from `sendMessage()`).
-*
-* The backend accumulates the full conversation history across turns, so the frontend
-* only needs to send new messages after the first turn.
-*/
-export type ChatTaskPayload<TClientData = unknown> = {
-/** Model-ready messages — pass directly to `streamText({ messages })`. */
-messages: ModelMessage[];
-/** The unique identifier for the chat session */
-chatId: string;
-/**
-* The trigger type:
-* - `"submit-message"`: A new user message
-* - `"regenerate-message"`: Regenerate the last assistant response
-* - `"preload"`: Run was preloaded before the first message (only on turn 0)
-*/
-trigger: "submit-message" | "regenerate-message" | "preload";
-/** The ID of the message to regenerate (only for `"regenerate-message"`) */
-messageId?: string;
-/** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */
-clientData?: TClientData;
-/** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */
-continuation: boolean;
-/** The run ID of the previous run (only set when `continuation` is true). */
-previousRunId?: string;
-/** Whether this run was preloaded before the first message. */
-preloaded: boolean;
-};
-/**
-* Abort signals provided to the `chatTask` run function.
-*/
-export type ChatTaskSignals = {
-/** Combined signal — fires on run cancel OR stop generation. Pass to `streamText`. */
-signal: AbortSignal;
-/** Fires only when the run is cancelled, expired, or exceeds maxDuration. */
-cancelSignal: AbortSignal;
-/** Fires only when the frontend stops generation for this turn (per-turn, reset each turn). */
-stopSignal: AbortSignal;
-};
-/**
-* The full payload passed to a `chatTask` run function.
-* Extends `ChatTaskPayload` (the wire payload) with abort signals.
-*/
-export type ChatTaskRunPayload<TClientData = unknown> = ChatTaskPayload<TClientData> & ChatTaskSignals & {
-/**
-* Task run context — same object as the `ctx` passed to a standard `task({ run })` handler’s second argument.
-* Use for tags, metadata, parent run links, or any API that needs the full run record.
-*/
-ctx: TaskRunContext;
-/** Token usage from the previous turn. Undefined on turn 0. */
-previousTurnUsage?: LanguageModelUsage;
-/** Cumulative token usage across all completed turns so far. */
-totalUsage: LanguageModelUsage;
-};
-/** Convenience re-export of the AI SDK's `LanguageModelUsage` type. */
-export type ChatTurnUsage = LanguageModelUsage;
-/**
-* Replace the accumulated conversation messages for the current run.
-*
-* Call from `onTurnStart` to compact before `run()` executes, or from
-* `onTurnComplete` to compact before the next turn. Takes `UIMessage[]`
-* and converts to `ModelMessage[]` internally.
-*/
-declare function setChatMessages<TUIM extends UIMessage = UIMessage>(uiMessages: TUIM[]): void;
-/** State stored in locals during prepareStep compaction. */
-interface CompactionState {
-summary: string;
-baseResponseMessageCount: number;
-}
-/**
-* Event passed to `summarize` callbacks.
-*/
-export type SummarizeEvent = {
-/** The current model messages to summarize. */
-messages: ModelMessage[];
-/** Full usage object from the triggering step/turn. */
-usage?: LanguageModelUsage;
-/** Cumulative token usage across all completed turns. Present in chat.task contexts. */
-totalUsage?: LanguageModelUsage;
-/** The chat session ID (if running inside a chat.task). */
-chatId?: string;
-/** The current turn number (0-indexed, if inside a chat.task). */
-turn?: number;
-/** Custom data from the frontend (if inside a chat.task). */
-clientData?: unknown;
-/**
-* Where compaction is running:
-* - `"inner"` — between tool-call steps (prepareStep)
-* - `"outer"` — between turns
-*/
-source?: "inner" | "outer";
-/** The step number (0-indexed). Only present when `source` is `"inner"`. */
-stepNumber?: number;
-};
-/**
-* Event passed to `compactUIMessages` and `compactModelMessages` callbacks.
-*/
-export type CompactMessagesEvent<TUIM extends UIMessage = UIMessage> = {
-/** The generated summary text. */
-summary: string;
-/** The current UI messages (full conversation). */
-uiMessages: TUIM[];
-/** The current model messages (full conversation). */
-modelMessages: ModelMessage[];
-/** The chat session ID. */
-chatId: string;
-/** The current turn number (0-indexed). */
-turn: number;
-/** Custom data from the frontend. */
-clientData?: unknown;
-/**
-* Where compaction is running:
-* - `"inner"` — between tool-call steps (prepareStep)
-* - `"outer"` — between turns
-*/
-source: "inner" | "outer";
-};
-/**
-* Options for the `compaction` field on `chat.task()`.
-*
-* Handles compaction automatically in both the inner loop (prepareStep, between
-* tool-call steps) and the outer loop (between turns, for single-step responses
-* where prepareStep never fires).
-*/
-export type ChatTaskCompactionOptions<TUIM extends UIMessage = UIMessage> = {
-/** Decide whether to compact. Return true to trigger compaction. */
-shouldCompact: (event: ShouldCompactEvent) => boolean | Promise<boolean>;
-/** Generate a summary from the current messages. Return the summary text. */
-summarize: (event: SummarizeEvent) => Promise<string>;
-/**
-* Transform UI messages after compaction (what gets persisted and displayed).
-* Default: preserve all UI messages unchanged.
-*
-* @example
-* ```ts
-* // Flatten to summary
-* compactUIMessages: ({ summary }) => [{
-* id: generateId(), role: "assistant",
-* parts: [{ type: "text", text: `[Summary]\n\n${summary}` }],
-* }],
-*
-* // Summary + keep last 4 messages
-* compactUIMessages: ({ uiMessages, summary }) => [
-* { id: generateId(), role: "assistant",
-* parts: [{ type: "text", text: `[Summary]\n\n${summary}` }] },
-* ...uiMessages.slice(-4),
-* ],
-* ```
-*/
-compactUIMessages?: (event: CompactMessagesEvent<TUIM>) => TUIM[] | Promise<TUIM[]>;
-/**
-* Transform model messages after compaction (what gets sent to the LLM).
-* Default: replace all with a single summary message.
-*
-* @example
-* ```ts
-* // Summary + keep last 2 model messages
-* compactModelMessages: ({ modelMessages, summary }) => [
-* { role: "user", content: summary },
-* ...modelMessages.slice(-2),
-* ],
-* ```
-*/
-compactModelMessages?: (event: CompactMessagesEvent<TUIM>) => ModelMessage[] | Promise<ModelMessage[]>;
-};
-/**
-* Event passed to `shouldInject` and `prepareMessages` callbacks.
-*/
-export type PendingMessagesBatchEvent<TUIM extends UIMessage = UIMessage> = {
-/** All pending UI messages that arrived during streaming (batch). */
-messages: TUIM[];
-/** Current model messages in the conversation. */
-modelMessages: ModelMessage[];
-/** Completed steps so far. */
-steps: CompactionStep[];
-/** Current step number (0-indexed). */
-stepNumber: number;
-/** Chat session ID. */
-chatId: string;
-/** Current turn number (0-indexed). */
-turn: number;
-/** Custom data from the frontend. */
-clientData?: unknown;
-};
-/**
-* Event passed to `onReceived` callback (per-message, as they arrive).
-*/
-export type PendingMessageReceivedEvent<TUIM extends UIMessage = UIMessage> = {
-/** The UI message that arrived during streaming. */
-message: TUIM;
-/** Chat session ID. */
-chatId: string;
-/** Current turn number (0-indexed). */
-turn: number;
-};
-/**
-* Event passed to `onInjected` callback (batch, after injection).
-*/
-export type PendingMessagesInjectedEvent<TUIM extends UIMessage = UIMessage> = {
-/** All UI messages that were injected. */
-messages: TUIM[];
-/** The model messages that were injected. */
-injectedModelMessages: ModelMessage[];
-/** Chat session ID. */
-chatId: string;
-/** Current turn number (0-indexed). */
-turn: number;
-/** Step number where injection occurred. */
-stepNumber: number;
-};
-/**
-* Options for the `pendingMessages` field on `chat.task()`, `chat.createSession()`,
-* or `ChatMessageAccumulator`.
-*
-* Configures how messages that arrive during streaming are handled. When
-* `shouldInject` is provided and returns `true`, the full batch of pending
-* messages is injected between tool-call steps via `prepareStep`.
-* Otherwise, messages queue for the next turn.
-*/
-export type PendingMessagesOptions<TUIM extends UIMessage = UIMessage> = {
-/**
-* Decide whether to inject pending messages between tool-call steps.
-* Called once per step boundary with the full batch of pending messages.
-* If absent, no injection happens — messages only queue for the next turn.
-*/
-shouldInject?: (event: PendingMessagesBatchEvent<TUIM>) => boolean | Promise<boolean>;
-/**
-* Transform the batch of pending messages before injection.
-* Return the model messages to inject.
-* Default: convert each UI message via `convertToModelMessages`.
-*/
-prepare?: (event: PendingMessagesBatchEvent<TUIM>) => ModelMessage[] | Promise<ModelMessage[]>;
-/** Called when a message arrives during streaming (per-message). */
-onReceived?: (event: PendingMessageReceivedEvent<TUIM>) => void | Promise<void>;
-/** Called after a batch of messages is injected via `prepareStep`. */
-onInjected?: (event: PendingMessagesInjectedEvent<TUIM>) => void | Promise<void>;
-};
-/**
-* The data part type used to signal that pending messages were injected
-* between tool-call steps. The frontend can match on this to render
-* injection points inline in the assistant response.
-*/
-export declare const PENDING_MESSAGE_INJECTED_TYPE: "data-pending-message-injected";
-/**
-* Event passed to the `prepareMessages` hook.
-*/
-export type PrepareMessagesEvent<TClientData = unknown> = {
-/** The messages to transform. Return the transformed array. */
-messages: ModelMessage[];
-/** Why messages are being prepared. */
-reason: "run" | "compaction-rebuild" | "compaction-result";
-/** The chat session ID. */
-chatId: string;
-/** The current turn number (0-indexed). */
-turn: number;
-/** Custom data from the frontend. */
-clientData?: TClientData;
-};
-/**
-* Data shape for `data-compaction` stream chunks emitted during compaction.
-* Use to type the `data` field when rendering compaction parts in the frontend.
-*/
-export type CompactionChunkData = {
-status: "compacting" | "complete";
-totalTokens: number | undefined;
-};
-/**
-* Event passed to the `onCompacted` callback.
-*/
-export type CompactedEvent = {
-/** Task run context — same as `task` lifecycle hooks and `chat.task` `run({ ctx })`. */
-ctx: TaskRunContext;
-/** The generated summary text. */
-summary: string;
-/** The messages that were compacted (pre-compaction). */
-messages: ModelMessage[];
-/** Number of messages before compaction. */
-messageCount: number;
-/** Token usage from the step that triggered compaction. */
-usage: LanguageModelUsage;
-/** Total token count that triggered compaction. */
-totalTokens: number | undefined;
-/** Input token count from the triggering step. */
-inputTokens: number | undefined;
-/** Output token count from the triggering step. */
-outputTokens: number | undefined;
-/** The step number where compaction occurred (0-indexed). */
-stepNumber: number;
-/** The chat session ID (if running inside a chat.task). */
-chatId?: string;
-/** The current turn number (if running inside a chat.task). */
-turn?: number;
-/** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */
-writer: ChatWriter;
-};
-/**
-* Event passed to `shouldCompact` callbacks.
-*/
-export type ShouldCompactEvent = {
-/** The current model messages (full conversation). */
-messages: ModelMessage[];
-/** Total token count from the triggering step/turn. */
-totalTokens: number | undefined;
-/** Input token count from the triggering step/turn. */
-inputTokens: number | undefined;
-/** Output token count from the triggering step/turn. */
-outputTokens: number | undefined;
-/** Full usage object from the triggering step/turn. */
-usage?: LanguageModelUsage;
-/** Cumulative token usage across all completed turns. Present in chat.task contexts. */
-totalUsage?: LanguageModelUsage;
-/** The chat session ID (if running inside a chat.task). */
-chatId?: string;
-/** The current turn number (0-indexed, if inside a chat.task). */
-turn?: number;
-/** Custom data from the frontend (if inside a chat.task). */
-clientData?: unknown;
-/**
-* Where this check is running:
-* - `"inner"` — between tool-call steps (prepareStep)
-* - `"outer"` — between turns (after response, before onBeforeTurnComplete)
-*/
-source?: "inner" | "outer";
-/** The step number (0-indexed). Only present when `source` is `"inner"`. */
-stepNumber?: number;
-/** The steps array from prepareStep. Only present when `source` is `"inner"`. */
-steps?: CompactionStep[];
-};
-/**
-* Options for `chat.compaction()` — the high-level prepareStep factory.
-*/
-export type CompactionOptions = {
-/** Generate a summary from the current messages. Return the summary text. */
-summarize: (messages: ModelMessage[]) => Promise<string>;
-/** Token threshold — compact when totalTokens exceeds this. Ignored if `shouldCompact` is provided. */
-threshold?: number;
-/** Custom compaction trigger. When provided, used instead of `threshold`. */
-shouldCompact?: (event: ShouldCompactEvent) => boolean | Promise<boolean>;
-};
-/** A step object as received in prepareStep's `steps` array. */
-export type CompactionStep = {
-usage: LanguageModelUsage;
-finishReason: string;
-content: Array<{
-type: string;
-toolCallId?: string;
-}>;
-response: {
-messages: Array<any>;
-};
-};
-/**
-* Result of `chat.compact()`. Discriminated union so you can inspect
-* what happened, but also directly compatible with prepareStep's return type.
-*
-* - `"skipped"` — no compaction needed (first step, boundary unsafe, or under threshold). Return `undefined` to prepareStep.
-* - `"rebuilt"` — previous compaction exists, messages rebuilt from summary + new response messages.
-* - `"compacted"` — compaction just happened, includes the generated summary.
-*/
-export type CompactResult = {
-type: "skipped";
-} | {
-type: "rebuilt";
-messages: ModelMessage[];
-} | {
-type: "compacted";
-messages: ModelMessage[];
-summary: string;
-};
-/**
-* Options for `chat.compact()` — the low-level compaction function.
-*/
-export type CompactOptions = {
-/** Generate a summary from the current messages. Return the summary text. */
-summarize: (messages: ModelMessage[]) => Promise<string>;
-/** Token threshold — compact when totalTokens exceeds this. Ignored if `shouldCompact` is provided. */
-threshold?: number;
-/** Custom compaction trigger. When provided, used instead of `threshold`. */
-shouldCompact?: (event: ShouldCompactEvent) => boolean | Promise<boolean>;
-};
-/**
-* Read the current compaction state. Returns the summary and base message count
-* if compaction has occurred in this turn, or `undefined` if not.
-*
-* Use in a custom `prepareStep` to rebuild from a previous compaction:
-* ```ts
-* const state = chat.getCompactionState();
-* if (state) {
-* return { messages: [{ role: "user", content: state.summary }, ...newMsgs] };
-* }
-* ```
-*/
-declare function getCompactionState(): CompactionState | undefined;
-/**
-* Low-level compaction for use inside a custom `prepareStep`.
-*
-* Handles the full decision tree: first step, already-compacted rebuild,
-* boundary safety, threshold check, summarization, stream chunks, state
-* storage, and accumulator update.
-*
-* Returns a `CompactResult` — inspect `result.type` to see what happened,
-* or convert to a prepareStep return with `result.type === "skipped" ? undefined : result`.
-*
-* @example
-* ```ts
-* prepareStep: async ({ messages, steps }) => {
-* // your custom logic here...
-* const result = await chat.compact(messages, steps, {
-* threshold: 80_000,
-* summarize: async (msgs) => generateText({ model, messages: msgs }).then(r => r.text),
-* });
-* if (result.type === "compacted") {
-* logger.info("Compacted!", { summary: result.summary });
-* }
-* return result.type === "skipped" ? undefined : result;
-* },
-* ```
-*/
-declare function chatCompact(messages: ModelMessage[], steps: CompactionStep[], options: CompactOptions): Promise<CompactResult>;
-/**
-* Returns a `prepareStep` function that handles context compaction automatically.
-*
-* Monitors token usage between tool-call steps. When `totalTokens` exceeds
-* the threshold, generates a summary via `summarize()`, replaces the message
-* history, and emits `data-compaction` stream chunks for the frontend.
-*
-* @example
-* ```ts
-* return streamText({
-* ...chat.toStreamTextOptions({ registry }),
-* messages: chat.addCacheBreaks(messages),
-* prepareStep: chat.compactionStep({
-* threshold: 80_000,
-* summarize: async (messages) => {
-* return generateText({ model, messages: [...messages, { role: "user", content: "Summarize." }] })
-* .then((r) => r.text);
-* },
-* }),
-* tools: { ... },
-* });
-* ```
-*/
-declare function chatCompactionStep(options: CompactionOptions): (args: {
-messages: ModelMessage[];
-steps: CompactionStep[];
-}) => Promise<{
-messages: ModelMessage[];
-} | undefined>;
-/**
-* Checks whether it's safe to compact the message history. Returns `false`
-* if any tool calls are in-flight (incomplete tool invocations without results).
-*
-* Call before `chat.setMessages()` to avoid corrupting tool-call state.
-*/
-declare function isCompactionSafe(messages: UIMessage[]): boolean;
-/**
-* A resolved prompt stored via `chat.prompt.set()`. Either a full `ResolvedPrompt`
-* from `prompts.define().resolve()`, or a lightweight wrapper around a plain string.
-*/
-export type ChatPromptValue = ResolvedPrompt | {
-text: string;
-model: undefined;
-config: undefined;
-promptId: string;
-version: number;
-labels: string[];
-toAISDKTelemetry: (additionalMetadata?: Record<string, string>) => {
-experimental_telemetry: {
-isEnabled: true;
-metadata: Record<string, string>;
-};
-};
-};
-/**
-* Store a resolved prompt (or plain string) for the current run.
-* Call from any hook (`onPreload`, `onChatStart`, `onTurnStart`) or `run()`.
-*/
-declare function setChatPrompt(resolved: ResolvedPrompt | string): void;
-/**
-* Read the stored prompt. Throws if `chat.prompt.set()` has not been called.
-*/
-declare function getChatPrompt(): ChatPromptValue;
-/**
-* Options for {@link toStreamTextOptions}.
-*/
-export type ToStreamTextOptionsOptions = {
-/** Additional telemetry metadata merged into `experimental_telemetry.metadata`. */
-telemetry?: Record<string, string>;
-/**
-* An AI SDK provider registry (from `createProviderRegistry`) or any object
-* with a `languageModel(id)` method. When provided and the stored prompt has
-* a `model` string, the resolved `LanguageModel` is included in the returned
-* options so `streamText` uses it directly.
-*
-* The model string should use the `"provider:model-id"` format
-* (e.g. `"openai:gpt-4o"`, `"anthropic:claude-sonnet-4-6"`).
-*/
-registry?: {
-languageModel(modelId: string): unknown;
-};
-};
-/**
-* Returns an options object ready to spread into `streamText()`.
-*
-* Includes `system`, `experimental_telemetry`, and any config fields
-* (temperature, maxTokens, etc.) from the stored prompt.
-*
-* When a `registry` is provided and the prompt has a `model` string,
-* the resolved `LanguageModel` is included as `model`.
-*
-* If no prompt has been set, returns `{}` (no-op spread).
-*/
-declare function toStreamTextOptions(options?: ToStreamTextOptionsOptions): Record<string, unknown>;
-/**
-* Options for `pipeChat`.
-*/
-export type PipeChatOptions = {
-/**
-* Override the stream key. Must match the `streamKey` on `TriggerChatTransport`.
-* @default "chat"
-*/
-streamKey?: string;
-/** An AbortSignal to cancel the stream. */
-signal?: AbortSignal;
-/**
-* The target run ID to pipe to.
-* @default "self" (current run)
-*/
-target?: string;
-/** Override the default span name for this operation. */
-spanName?: string;
-};
-/**
-* Options for customizing the `toUIMessageStream()` call used when piping
-* `streamText` results to the frontend.
-*
-* Set static defaults via `uiMessageStreamOptions` on `chat.task()`, or
-* override per-turn via `chat.setUIMessageStreamOptions()`.
-*
-* `onFinish`, `originalMessages`, and `generateMessageId` are omitted because
-* they are managed internally for response capture and message accumulation.
-* Use `streamText`'s `onFinish` for custom finish handling, or drop down to
-* raw task mode with `chat.pipe()` for full control.
-*/
-export type ChatUIMessageStreamOptions<TUIM extends UIMessage = UIMessage> = Omit<UIMessageStreamOptions<TUIM>, "onFinish" | "originalMessages" | "generateMessageId">;
-/**
-* An object with a `toUIMessageStream()` method (e.g. `StreamTextResult` from `streamText()`).
-*/
-type UIMessageStreamable = {
-toUIMessageStream: (...args: any[]) => AsyncIterable<unknown> | ReadableStream<unknown>;
-};
-/**
-* Pipes a chat stream to the realtime stream, making it available to the
-* `TriggerChatTransport` on the frontend.
-*
-* Accepts:
-* - A `StreamTextResult` from `streamText()` (has `.toUIMessageStream()`)
-* - An `AsyncIterable` of `UIMessageChunk`s
-* - A `ReadableStream` of `UIMessageChunk`s
-*
-* Must be called from inside a Trigger.dev task's `run` function.
-*
-* @example
-* ```ts
-* import { task } from "@trigger.dev/sdk";
-* import { chat, type ChatTaskPayload } from "@trigger.dev/sdk/ai";
-* import { streamText, convertToModelMessages } from "ai";
-*
-* export const myChatTask = task({
-* id: "my-chat-task",
-* run: async (payload: ChatTaskPayload) => {
-* const result = streamText({
-* model: openai("gpt-4o"),
-* messages: payload.messages,
-* });
-*
-* await chat.pipe(result);
-* },
-* });
-* ```
-*
-* @example
-* ```ts
-* // Works from anywhere inside a task — even deep in your agent code
-* async function runAgentLoop(messages: CoreMessage[]) {
-* const result = streamText({ model, messages });
-* await chat.pipe(result);
-* }
-* ```
-*/
-declare function pipeChat(source: UIMessageStreamable | AsyncIterable<unknown> | ReadableStream<unknown>, options?: PipeChatOptions): Promise<void>;
-/**
-* Options for defining a chat task.
-*
-* Extends the standard `TaskOptions` but pre-types the payload as `ChatTaskPayload`
-* and overrides `run` to accept `ChatTaskRunPayload` (with abort signals).
-*
-* **Auto-piping:** If the `run` function returns a value with `.toUIMessageStream()`
-* (like a `StreamTextResult`), the stream is automatically piped to the frontend.
-*
-* **Single-run mode:** By default, the task uses input streams so that the
-* entire conversation lives inside one run. After each AI response, the task
-* emits a control chunk and suspends via `messagesInput.wait()`. The frontend
-* transport resumes the same run by sending the next message via input streams.
-*/
-/**
-* Event passed to the `onPreload` callback.
-*/
-export type PreloadEvent<TClientData = unknown> = {
-/** Task run context — same as `task({ run })` second-argument `ctx`. */
-ctx: TaskRunContext;
-/** The unique identifier for the chat session. */
-chatId: string;
-/** The Trigger.dev run ID for this conversation. */
-runId: string;
-/** A scoped access token for this chat run. */
-chatAccessToken: string;
-/** Custom data from the frontend. */
-clientData?: TClientData;
-/** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */
-writer: ChatWriter;
-};
-/**
-* Event passed to the `onChatStart` callback.
-*/
-export type ChatStartEvent<TClientData = unknown> = {
-/** Task run context — same as `task({ run })` second-argument `ctx`. */
-ctx: TaskRunContext;
-/** The unique identifier for the chat session. */
-chatId: string;
-/** The initial model-ready messages for this conversation. */
-messages: ModelMessage[];
-/** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */
-clientData: TClientData;
-/** The Trigger.dev run ID for this conversation. */
-runId: string;
-/** A scoped access token for this chat run. Persist this for frontend reconnection. */
-chatAccessToken: string;
-/** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */
-continuation: boolean;
-/** The run ID of the previous run (only set when `continuation` is true). */
-previousRunId?: string;
-/** Whether this run was preloaded before the first message. */
-preloaded: boolean;
-/** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */
-writer: ChatWriter;
-};
-/**
-* Event passed to the `onTurnStart` callback.
-*/
-export type TurnStartEvent<TClientData = unknown, TUIM extends UIMessage = UIMessage> = {
-/** Task run context — same as `task({ run })` second-argument `ctx`. */
-ctx: TaskRunContext;
-/** The unique identifier for the chat session. */
-chatId: string;
-/** The accumulated model-ready messages (all turns so far, including new user message). */
-messages: ModelMessage[];
-/** The accumulated UI messages (all turns so far, including new user message). */
-uiMessages: TUIM[];
-/** The turn number (0-indexed). */
-turn: number;
-/** The Trigger.dev run ID for this conversation. */
-runId: string;
-/** A scoped access token for this chat run. */
-chatAccessToken: string;
-/** Custom data from the frontend. */
-clientData?: TClientData;
-/** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */
-continuation: boolean;
-/** The run ID of the previous run (only set when `continuation` is true). */
-previousRunId?: string;
-/** Whether this run was preloaded before the first message. */
-preloaded: boolean;
-/** Token usage from the previous turn. Undefined on turn 0. */
-previousTurnUsage?: LanguageModelUsage;
-/** Cumulative token usage across all completed turns so far. */
-totalUsage: LanguageModelUsage;
-/** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */
-writer: ChatWriter;
-};
-/**
-* Event passed to the `onTurnComplete` callback.
-*/
-export type TurnCompleteEvent<TClientData = unknown, TUIM extends UIMessage = UIMessage> = {
-/** Task run context — same as `task({ run })` second-argument `ctx`. */
-ctx: TaskRunContext;
-/** The unique identifier for the chat session. */
-chatId: string;
-/** The full accumulated conversation in model format (all turns so far). */
-messages: ModelMessage[];
-/**
-* The full accumulated conversation in UI format (all turns so far).
-* This is the format expected by `useChat` — store this for persistence.
-*/
-uiMessages: TUIM[];
-/**
-* Only the new model messages from this turn (user message(s) + assistant response).
-* Useful for appending to an existing conversation record.
-*/
-newMessages: ModelMessage[];
-/**
-* Only the new UI messages from this turn (user message(s) + assistant response).
-* Useful for inserting individual message records instead of overwriting the full history.
-*/
-newUIMessages: TUIM[];
-/** The assistant's response for this turn, with aborted parts cleaned up when `stopped` is true. Undefined if `pipeChat` was used manually. */
-responseMessage: TUIM | undefined;
-/**
-* The raw assistant response before abort cleanup. Includes incomplete tool parts
-* (`input-available`, `partial-call`) and streaming reasoning/text parts.
-* Use this if you need custom cleanup logic. Same as `responseMessage` when not stopped.
-*/
-rawResponseMessage: TUIM | undefined;
-/** The turn number (0-indexed). */
-turn: number;
-/** The Trigger.dev run ID for this conversation. */
-runId: string;
-/** A fresh scoped access token for this chat run (renewed each turn). Persist this for frontend reconnection. */
-chatAccessToken: string;
-/** The last event ID from the stream writer. Use this with `resume: true` to avoid replaying events after refresh. */
-lastEventId?: string;
-/** Custom data from the frontend. */
-clientData?: TClientData;
-/** Whether the user stopped generation during this turn. */
-stopped: boolean;
-/** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */
-continuation: boolean;
-/** The run ID of the previous run (only set when `continuation` is true). */
-previousRunId?: string;
-/** Whether this run was preloaded before the first message. */
-preloaded: boolean;
-/** Token usage for this turn. Undefined if usage couldn't be captured (e.g. manual pipeChat). */
-usage?: LanguageModelUsage;
-/** Cumulative token usage across all turns in this run (including this turn). */
-totalUsage: LanguageModelUsage;
-};
-/**
-* Event passed to the `onBeforeTurnComplete` callback.
-* Same as `TurnCompleteEvent` but includes a `writer` since the stream is still open.
-*/
-export type BeforeTurnCompleteEvent<TClientData = unknown, TUIM extends UIMessage = UIMessage> = TurnCompleteEvent<TClientData, TUIM> & {
-/** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */
-writer: ChatWriter;
-};
-export type ChatTaskOptions<TIdentifier extends string, TClientDataSchema extends TaskSchema | undefined = undefined, TUIMessage extends UIMessage = UIMessage> = Omit<TaskOptions<TIdentifier, ChatTaskWirePayload<TUIMessage, inferSchemaIn<TClientDataSchema>>, unknown>, "run"> & {
-/**
-* Schema for validating `clientData` from the frontend.
-* Accepts Zod, ArkType, Valibot, or any supported schema library.
-* When provided, `clientData` is parsed and typed in all hooks and `run`.
-*
-* @example
-* ```ts
-* import { z } from "zod";
-*
-* chat.task({
-* id: "my-chat",
-* clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }),
-* run: async ({ messages, clientData, ctx, signal }) => {
-* // clientData is typed as { model?: string; userId: string }
-* // ctx is the same TaskRunContext as in task({ run: (payload, { ctx }) => ... })
-* },
-* });
-* ```
-*/
-clientDataSchema?: TClientDataSchema;
-/**
-* The run function for the chat task.
-*
-* Receives a `ChatTaskRunPayload` with the conversation messages, chat session ID,
-* trigger type, task `ctx` (same as `task({ run })`’s second argument), and abort signals
-* (`signal`, `cancelSignal`, `stopSignal`).
-*
-* **Auto-piping:** If this function returns a value with `.toUIMessageStream()`,
-* the stream is automatically piped to the frontend.
-*/
-run: (payload: ChatTaskRunPayload<inferSchemaOut<TClientDataSchema>>) => Promise<unknown>;
-/**
-* Called when a preloaded run starts, before the first message arrives.
-*
-* Use this to initialize state, create DB records, and load context early —
-* so everything is ready when the user's first message comes through.
-*
-* @example
-* ```ts
-* onPreload: async ({ ctx, chatId, clientData }) => {
-* await db.chat.create({ data: { id: chatId } });
-* userContext.init(await loadUser(clientData.userId));
-* }
-* ```
-*/
-onPreload?: (event: PreloadEvent<inferSchemaOut<TClientDataSchema>>) => Promise<void> | void;
-/**
-* Called on the first turn (turn 0) of a new run, before the `run` function executes.
-*
-* Use this to create the chat record in your database when a new conversation starts.
-*
-* @example
-* ```ts
-* onChatStart: async ({ ctx, chatId, messages, clientData }) => {
-* await db.chat.create({ data: { id: chatId, userId: clientData.userId } });
-* }
-* ```
-*/
-onChatStart?: (event: ChatStartEvent<inferSchemaOut<TClientDataSchema>>) => Promise<void> | void;
-/**
-* Called at the start of every turn, after message accumulation and `onChatStart` (turn 0),
-* but before the `run` function executes.
-*
-* Use this to persist messages before streaming begins, so a mid-stream page refresh
-* still shows the user's message.
-*
-* @example
-* ```ts
-* onTurnStart: async ({ ctx, chatId, uiMessages }) => {
-* await db.chat.update({ where: { id: chatId }, data: { messages: uiMessages } });
-* }
-* ```
-*/
-onTurnStart?: (event: TurnStartEvent<inferSchemaOut<TClientDataSchema>, TUIMessage>) => Promise<void> | void;
-/**
-* Called after the response is captured but before the stream closes.
-* The stream is still open, so you can write custom chunks to the frontend
-* (e.g. compaction progress). Use this for compaction, post-processing,
-* or any work where the user should see real-time status updates.
-*
-* @example
-* ```ts
-* onBeforeTurnComplete: async ({ ctx, writer, usage }) => {
-* if (usage?.inputTokens && usage.inputTokens > 5000) {
-* writer.write({ type: "data-compaction", id: generateId(), data: { status: "compacting" } });
-* // ... compact messages ...
-* chat.setMessages(compactedMessages);
-* writer.write({ type: "data-compaction", id: generateId(), data: { status: "complete" } });
-* }
-* }
-* ```
-*/
-onBeforeTurnComplete?: (event: BeforeTurnCompleteEvent<inferSchemaOut<TClientDataSchema>, TUIMessage>) => Promise<void> | void;
-/**
-* Called when conversation compaction occurs (via `chat.compact()` or
-* `chat.compactionStep()`). Use for logging, billing, or persisting the summary.
-*
-* @example
-* ```ts
-* onCompacted: async ({ ctx, summary, totalTokens, chatId }) => {
-* logger.info("Compacted", { totalTokens, chatId });
-* await db.compactionLog.create({ data: { chatId, summary } });
-* }
-* ```
-*/
-onCompacted?: (event: CompactedEvent) => Promise<void> | void;
-/**
-* Automatic context compaction. When provided, compaction runs automatically
-* in both the inner loop (prepareStep, between tool-call steps) and the
-* outer loop (between turns, for single-step responses where prepareStep
-* never fires).
-*
-* The `shouldCompact` callback decides when to compact, and `summarize`
-* generates the summary. The prepareStep is auto-injected into
-* `chat.toStreamTextOptions()` — if you provide your own `prepareStep`
-* after spreading, it overrides the auto-injected one.
-*
-* @example
-* ```ts
-* chat.task({
-* id: "my-chat",
-* compaction: {
-* shouldCompact: ({ totalTokens }) => (totalTokens ?? 0) > 80_000,
-* summarize: async (messages) =>
-* generateText({ model, messages: [...messages, { role: "user", content: "Summarize." }] })
-* .then((r) => r.text),
-* },
-* run: async ({ messages, signal }) => {
-* return streamText({ ...chat.toStreamTextOptions({ registry }), messages });
-* },
-* });
-* ```
-*/
-compaction?: ChatTaskCompactionOptions<TUIMessage>;
-/**
-* Configure how messages that arrive during streaming are handled.
-*
-* By default, messages queue for the next turn. When `shouldInject` is provided
-* and returns `true`, messages are injected between tool-call steps via
-* `prepareStep` — allowing users to steer the agent mid-execution.
-*
-* @example
-* ```ts
-* pendingMessages: {
-* shouldInject: ({ steps }) => steps.length > 0,
-* onReceived: ({ message }) => logger.info("Steering message received"),
-* },
-* ```
-*/
-pendingMessages?: PendingMessagesOptions<TUIMessage>;
-/**
-* Called after each assistant response completes. Use to persist the
-* conversation to your database after each assistant response.
-*
-* @example
-* ```ts
-* onTurnComplete: async ({ ctx, chatId, messages }) => {
-* await db.chat.update({ where: { id: chatId }, data: { messages } });
-* }
-* ```
-*/
-onTurnComplete?: (event: TurnCompleteEvent<inferSchemaOut<TClientDataSchema>, TUIMessage>) => Promise<void> | void;
-/**
-* Maximum number of conversational turns (message round-trips) a single run
-* will handle before ending. After this many turns the run completes
-* normally and the next message will start a fresh run.
-*
-* @default 100
-*/
-maxTurns?: number;
-/**
-* How long to wait for the next message before timing out and ending the run.
-* Accepts any duration string (e.g. `"1h"`, `"30m"`).
-*
-* @default "1h"
-*/
-turnTimeout?: string;
-/**
-* How long (in seconds) the run stays idle (active, using compute) after each
-* turn, waiting for the next message. During this window responses are instant.
-* After this timeout the run suspends (frees compute) and waits via
-* `inputStream.wait()`.
-*
-* Set to `0` to suspend immediately after each turn.
-*
-* @default 30
-*/
-idleTimeoutInSeconds?: number;
-/**
-* How long the `chatAccessToken` (scoped to this run) remains valid.
-* A fresh token is minted after each turn, so this only needs to cover
1123
|
-
* the gap between turns.
|
|
1124
|
-
*
|
|
1125
|
-
* Accepts a duration string (e.g. `"1h"`, `"30m"`, `"2h"`).
|
|
1126
|
-
*
|
|
1127
|
-
* @default "1h"
|
|
1128
|
-
*/
|
|
1129
|
-
chatAccessTokenTTL?: string;
|
|
1130
|
-
/**
|
|
1131
|
-
* How long (in seconds) the run stays idle after `onPreload` fires,
|
|
1132
|
-
* waiting for the first message before suspending.
|
|
1133
|
-
*
|
|
1134
|
-
* Only applies to preloaded runs (triggered via `transport.preload()`).
|
|
1135
|
-
*
|
|
1136
|
-
* @default Same as `idleTimeoutInSeconds`
|
|
1137
|
-
*/
|
|
1138
|
-
preloadIdleTimeoutInSeconds?: number;
|
|
1139
|
-
/**
|
|
1140
|
-
* How long to wait (suspended) for the first message after a preloaded run starts.
|
|
1141
|
-
* If no message arrives within this time, the run ends.
|
|
1142
|
-
*
|
|
1143
|
-
* Only applies to preloaded runs.
|
|
1144
|
-
*
|
|
1145
|
-
* @default Same as `turnTimeout`
|
|
1146
|
-
*/
|
|
1147
|
-
preloadTimeout?: string;
|
|
1148
|
-
/**
|
|
1149
|
-
* Transform model messages before they're used anywhere — in `run()`,
|
|
1150
|
-
* in compaction rebuilds, and in compaction results.
|
|
1151
|
-
*
|
|
1152
|
-
* Define once, applied everywhere. Use for Anthropic cache breaks,
|
|
1153
|
-
* injecting system context, stripping PII, etc.
|
|
1154
|
-
*
|
|
1155
|
-
* @example
|
|
1156
|
-
* ```ts
|
|
1157
|
-
* prepareMessages: async ({ messages, reason }) => {
|
|
1158
|
-
* // Add Anthropic cache breaks to the last message
|
|
1159
|
-
* if (messages.length === 0) return messages;
|
|
1160
|
-
* const last = messages[messages.length - 1];
|
|
1161
|
-
* return [...messages.slice(0, -1), {
|
|
1162
|
-
* ...last,
|
|
1163
|
-
* providerOptions: { ...last.providerOptions, anthropic: { cacheControl: { type: "ephemeral" } } },
|
|
1164
|
-
* }];
|
|
1165
|
-
* }
|
|
1166
|
-
* ```
|
|
1167
|
-
*/
|
|
1168
|
-
prepareMessages?: (event: PrepareMessagesEvent<inferSchemaOut<TClientDataSchema>>) => ModelMessage[] | Promise<ModelMessage[]>;
|
|
1169
|
-
/**
|
|
1170
|
-
* Default options for `toUIMessageStream()` when auto-piping or using
|
|
1171
|
-
* `turn.complete()` / `chat.pipeAndCapture()`.
|
|
1172
|
-
*
|
|
1173
|
-
* Controls how the `StreamTextResult` is converted to a `UIMessageChunk`
|
|
1174
|
-
* stream — error handling, reasoning/source visibility, metadata, etc.
|
|
1175
|
-
*
|
|
1176
|
-
* Can be overridden per-turn by calling `chat.setUIMessageStreamOptions()`
|
|
1177
|
-
* inside `run()` or lifecycle hooks. Per-turn values are merged on top
|
|
1178
|
-
* of these defaults (per-turn wins on conflicts).
|
|
1179
|
-
*
|
|
1180
|
-
* `onFinish`, `originalMessages`, and `generateMessageId` are managed
|
|
1181
|
-
* internally and cannot be overridden here. Use `streamText`'s `onFinish`
|
|
1182
|
-
* for custom finish handling, or drop to raw task mode for full control.
|
|
1183
|
-
*
|
|
1184
|
-
* @example
|
|
1185
|
-
* ```ts
|
|
1186
|
-
* chat.task({
|
|
1187
|
-
* id: "my-chat",
|
|
1188
|
-
* uiMessageStreamOptions: {
|
|
1189
|
-
* sendReasoning: true,
|
|
1190
|
-
* onError: (error) => error instanceof Error ? error.message : "An error occurred.",
|
|
1191
|
-
* },
|
|
1192
|
-
* run: async ({ messages, signal }) => { ... },
|
|
1193
|
-
* });
|
|
1194
|
-
* ```
|
|
1195
|
-
*/
|
|
1196
|
-
uiMessageStreamOptions?: ChatUIMessageStreamOptions<TUIMessage>;
|
|
1197
|
-
};
|
|
1198
|
-
/**
|
|
1199
|
-
* Creates a Trigger.dev task pre-configured for AI SDK chat.
|
|
1200
|
-
*
|
|
1201
|
-
* - **Pre-types the payload** as `ChatTaskRunPayload` — includes abort signals
|
|
1202
|
-
* - **Auto-pipes the stream** if `run` returns a `StreamTextResult`
|
|
1203
|
-
* - **Multi-turn**: keeps the conversation in a single run using input streams
|
|
1204
|
-
* - **Stop support**: frontend can stop generation mid-stream via the stop input stream
|
|
1205
|
-
* - For complex flows, use `pipeChat()` from anywhere inside your task code
|
|
1206
|
-
*
|
|
1207
|
-
* @example
|
|
1208
|
-
* ```ts
|
|
1209
|
-
* import { chat } from "@trigger.dev/sdk/ai";
|
|
1210
|
-
* import { streamText, convertToModelMessages } from "ai";
|
|
1211
|
-
* import { openai } from "@ai-sdk/openai";
|
|
1212
|
-
*
|
|
1213
|
-
* export const myChat = chat.task({
|
|
1214
|
-
* id: "my-chat",
|
|
1215
|
-
* run: async ({ messages, signal }) => {
|
|
1216
|
-
* return streamText({
|
|
1217
|
-
* model: openai("gpt-4o"),
|
|
1218
|
-
* messages, // already converted via convertToModelMessages
|
|
1219
|
-
* abortSignal: signal,
|
|
1220
|
-
* });
|
|
1221
|
-
* },
|
|
1222
|
-
* });
|
|
1223
|
-
* ```
|
|
1224
|
-
*/
|
|
1225
|
-
declare function chatTask<TIdentifier extends string, TClientDataSchema extends TaskSchema | undefined = undefined, TUIMessage extends UIMessage = UIMessage>(options: ChatTaskOptions<TIdentifier, TClientDataSchema, TUIMessage>): Task<TIdentifier, ChatTaskWirePayload<TUIMessage, inferSchemaIn<TClientDataSchema>>, unknown>;
|
|
1226
|
-
/**
|
|
1227
|
-
* Optional config for {@link chat.withUIMessage}. `streamOptions` become default
|
|
1228
|
-
* static `toUIMessageStream()` settings; inner `chat.task({ uiMessageStreamOptions })`
|
|
1229
|
-
* shallow-merges on top (task wins on conflicts).
|
|
1230
|
-
*/
|
|
1231
|
-
export type ChatWithUIMessageConfig<TUIM extends UIMessage = UIMessage> = {
|
|
1232
|
-
streamOptions?: ChatUIMessageStreamOptions<TUIM>;
|
|
1233
|
-
};
|
|
1234
|
-
/**
|
|
1235
|
-
* Fix the UI message type for a chat task (AI SDK `UIMessage` generics) while
|
|
1236
|
-
* keeping `id` and `clientDataSchema` inference on the inner {@link chat.task} call.
|
|
1237
|
-
*
|
|
1238
|
-
* @example
|
|
1239
|
-
* ```ts
|
|
1240
|
-
* type AgentUiMessage = UIMessage<unknown, UIDataTypes, UITools>;
|
|
1241
|
-
*
|
|
1242
|
-
* export const myChat = chat.withUIMessage<AgentUiMessage>({
|
|
1243
|
-
* streamOptions: { sendReasoning: true },
|
|
1244
|
-
* }).task({
|
|
1245
|
-
* id: "my-chat",
|
|
1246
|
-
* run: async ({ messages, signal }) => { ... },
|
|
1247
|
-
* });
|
|
1248
|
-
* ```
|
|
1249
|
-
*/
|
|
1250
|
-
declare function withUIMessage<TUIM extends UIMessage = UIMessage>(config?: ChatWithUIMessageConfig<TUIM>): {
|
|
1251
|
-
task: <TIdentifier extends string, TClientDataSchema extends TaskSchema | undefined = undefined>(options: ChatTaskOptions<TIdentifier, TClientDataSchema, TUIM>) => Task<TIdentifier, ChatTaskWirePayload<TUIM, inferSchemaIn<TClientDataSchema>>, unknown>;
|
|
1252
|
-
};
|
|
1253
|
-
/**
|
|
1254
|
-
* Override the turn timeout for subsequent turns in the current run.
|
|
1255
|
-
*
|
|
1256
|
-
* The turn timeout controls how long the run stays suspended (freeing compute)
|
|
1257
|
-
* waiting for the next user message. When it expires, the run completes
|
|
1258
|
-
* gracefully and the next message starts a fresh run.
|
|
1259
|
-
*
|
|
1260
|
-
* Call from inside a `chatTask` run function to adjust based on context.
|
|
1261
|
-
*
|
|
1262
|
-
* @param duration - A duration string (e.g. `"5m"`, `"1h"`, `"30s"`)
|
|
1263
|
-
*
|
|
1264
|
-
* @example
|
|
1265
|
-
* ```ts
|
|
1266
|
-
* run: async ({ messages, signal }) => {
|
|
1267
|
-
* chat.setTurnTimeout("2h");
|
|
1268
|
-
* return streamText({ model, messages, abortSignal: signal });
|
|
1269
|
-
* }
|
|
1270
|
-
* ```
|
|
1271
|
-
*/
|
|
1272
|
-
declare function setTurnTimeout(duration: string): void;
|
|
1273
|
-
/**
|
|
1274
|
-
* Override the turn timeout in seconds for subsequent turns in the current run.
|
|
1275
|
-
*
|
|
1276
|
-
* @param seconds - Number of seconds to wait for the next message before ending the run
|
|
1277
|
-
*
|
|
1278
|
-
* @example
|
|
1279
|
-
* ```ts
|
|
1280
|
-
* run: async ({ messages, signal }) => {
|
|
1281
|
-
* chat.setTurnTimeoutInSeconds(3600); // 1 hour
|
|
1282
|
-
* return streamText({ model, messages, abortSignal: signal });
|
|
1283
|
-
* }
|
|
1284
|
-
* ```
|
|
1285
|
-
*/
|
|
1286
|
-
declare function setTurnTimeoutInSeconds(seconds: number): void;
|
|
1287
|
-
/**
|
|
1288
|
-
* Override the idle timeout for subsequent turns in the current run.
|
|
1289
|
-
*
|
|
1290
|
-
* The idle timeout controls how long the run stays active (using compute)
|
|
1291
|
-
* after each turn, waiting for the next message. During this window,
|
|
1292
|
-
* responses are instant. After it expires, the run suspends.
|
|
1293
|
-
*
|
|
1294
|
-
* @param seconds - Number of seconds to stay idle (0 to suspend immediately)
|
|
1295
|
-
*
|
|
1296
|
-
* @example
|
|
1297
|
-
* ```ts
|
|
1298
|
-
* run: async ({ messages, signal }) => {
|
|
1299
|
-
* chat.setIdleTimeoutInSeconds(60);
|
|
1300
|
-
* return streamText({ model, messages, abortSignal: signal });
|
|
1301
|
-
* }
|
|
1302
|
-
* ```
|
|
1303
|
-
*/
|
|
1304
|
-
declare function setIdleTimeoutInSeconds(seconds: number): void;
|
|
1305
|
-
/**
|
|
1306
|
-
* Override the `toUIMessageStream()` options for the current turn.
|
|
1307
|
-
*
|
|
1308
|
-
* These options control how the `StreamTextResult` is converted to a
|
|
1309
|
-
* `UIMessageChunk` stream — error handling, reasoning/source visibility,
|
|
1310
|
-
* message metadata, etc.
|
|
1311
|
-
*
|
|
1312
|
-
* Per-turn options are merged on top of the static `uiMessageStreamOptions`
|
|
1313
|
-
* set on `chat.task()`. Per-turn values win on conflicts.
|
|
1314
|
-
*
|
|
1315
|
-
* @example
|
|
1316
|
-
* ```ts
|
|
1317
|
-
* run: async ({ messages, signal }) => {
|
|
1318
|
-
* chat.setUIMessageStreamOptions({
|
|
1319
|
-
* sendReasoning: true,
|
|
1320
|
-
* onError: (error) => error instanceof Error ? error.message : "An error occurred.",
|
|
1321
|
-
* });
|
|
1322
|
-
* return streamText({ model, messages, abortSignal: signal });
|
|
1323
|
-
* }
|
|
1324
|
-
* ```
|
|
1325
|
-
*/
|
|
1326
|
-
declare function setUIMessageStreamOptions(options: ChatUIMessageStreamOptions<UIMessage>): void;
|
|
1327
|
-
/**
|
|
1328
|
-
* Check whether the user stopped generation during the current turn.
|
|
1329
|
-
*
|
|
1330
|
-
* Works from **anywhere** inside a `chat.task` run — including inside
|
|
1331
|
-
* `streamText`'s `onFinish` callback — without needing to thread the
|
|
1332
|
-
* `stopSignal` through closures.
|
|
1333
|
-
*
|
|
1334
|
-
* This is especially useful when the AI SDK's `isAborted` flag is unreliable
|
|
1335
|
-
* (e.g. when using `createUIMessageStream` + `writer.merge()`).
|
|
1336
|
-
*
|
|
1337
|
-
* @example
|
|
1338
|
-
* ```ts
|
|
1339
|
-
* onFinish: ({ isAborted }) => {
|
|
1340
|
-
* const wasStopped = isAborted || chat.isStopped();
|
|
1341
|
-
* if (wasStopped) {
|
|
1342
|
-
* // handle stop
|
|
1343
|
-
* }
|
|
1344
|
-
* }
|
|
1345
|
-
* ```
|
|
1346
|
-
*/
|
|
1347
|
-
declare function isStopped(): boolean;
|
|
1348
|
-
/**
|
|
1349
|
-
* Register a promise that runs in the background during the current turn.
|
|
1350
|
-
*
|
|
1351
|
-
* Use this to move non-blocking work (DB writes, analytics, etc.) out of
|
|
1352
|
-
* the critical path. The promise runs in parallel with streaming and is
|
|
1353
|
-
* awaited (with a 5 s timeout) before `onTurnComplete` fires.
|
|
1354
|
-
*
|
|
1355
|
-
* @example
|
|
1356
|
-
* ```ts
|
|
1357
|
-
* onTurnStart: async ({ chatId, uiMessages }) => {
|
|
1358
|
-
* // Persist messages without blocking the LLM call
|
|
1359
|
-
* chat.defer(db.chat.update({ where: { id: chatId }, data: { messages: uiMessages } }));
|
|
1360
|
-
* },
|
|
1361
|
-
* ```
|
|
1362
|
-
*/
|
|
1363
|
-
declare function chatDefer(promise: Promise<unknown>): void;
|
|
1364
|
-
/**
|
|
1365
|
-
* Queue model messages for injection at the next `prepareStep` boundary.
|
|
1366
|
-
*
|
|
1367
|
-
* Use this to inject context from background work into the agent's conversation.
|
|
1368
|
-
* Messages are appended to the model messages before the next LLM inference call.
|
|
1369
|
-
*
|
|
1370
|
-
* Combine with `chat.defer()` to run background analysis and inject results:
|
|
1371
|
-
*
|
|
1372
|
-
* @example
|
|
1373
|
-
* ```ts
|
|
1374
|
-
* onTurnComplete: async ({ messages }) => {
|
|
1375
|
-
* chat.defer((async () => {
|
|
1376
|
-
* const review = await generateObject({
|
|
1377
|
-
* model: openai("gpt-4o-mini"),
|
|
1378
|
-
* messages: [...messages, { role: "user", content: "Review the last response." }],
|
|
1379
|
-
* schema: z.object({ suggestions: z.array(z.string()) }),
|
|
1380
|
-
* });
|
|
1381
|
-
* if (review.object.suggestions.length > 0) {
|
|
1382
|
-
* chat.inject([{
|
|
1383
|
-
* role: "system",
|
|
1384
|
-
* content: `Improvements for next response:\n${review.object.suggestions.join("\n")}`,
|
|
1385
|
-
* }]);
|
|
1386
|
-
* }
|
|
1387
|
-
* })());
|
|
1388
|
-
* },
|
|
1389
|
-
* ```
|
|
1390
|
-
*/
|
|
1391
|
-
declare function injectBackgroundContext(messages: ModelMessage[]): void;
|
|
1392
|
-
/**
|
|
1393
|
-
* Clean up a UIMessage that was captured during an aborted/stopped turn.
|
|
1394
|
-
*
|
|
1395
|
-
* When generation is stopped mid-stream, the captured message may contain:
|
|
1396
|
-
* - Tool parts stuck in incomplete states (`partial-call`, `input-available`,
|
|
1397
|
-
* `input-streaming`) that cause permanent UI spinners
|
|
1398
|
-
* - Reasoning parts with `state: "streaming"` instead of `"done"`
|
|
1399
|
-
* - Text parts with `state: "streaming"` instead of `"done"`
|
|
1400
|
-
*
|
|
1401
|
-
* This function returns a cleaned copy with:
|
|
1402
|
-
* - Incomplete tool parts removed entirely
|
|
1403
|
-
* - Reasoning and text parts marked as `"done"`
|
|
1404
|
-
*
|
|
1405
|
-
* `chat.task` calls this automatically when stop is detected before passing
|
|
1406
|
-
* the response to `onTurnComplete`. Use this manually when calling `pipeChat`
|
|
1407
|
-
* directly and capturing response messages yourself.
|
|
1408
|
-
*
|
|
1409
|
-
* @example
|
|
1410
|
-
* ```ts
|
|
1411
|
-
* onTurnComplete: async ({ responseMessage, stopped }) => {
|
|
1412
|
-
* // Already cleaned automatically by chat.task — but if you captured
|
|
1413
|
-
* // your own message via pipeChat, clean it manually:
|
|
1414
|
-
* const cleaned = chat.cleanupAbortedParts(myMessage);
|
|
1415
|
-
* await db.messages.save(cleaned);
|
|
1416
|
-
* }
|
|
1417
|
-
* ```
|
|
1418
|
-
*/
|
|
1419
|
-
declare function cleanupAbortedParts<TUIM extends UIMessage>(message: TUIM): TUIM;
|
|
1420
|
-
/**
|
|
1421
|
-
* Create a managed stop signal wired to the chat stop input stream.
|
|
1422
|
-
*
|
|
1423
|
-
* Call once at the start of your run. Use `signal` as the abort signal for
|
|
1424
|
-
* `streamText`. Call `reset()` at the start of each turn to get a fresh
|
|
1425
|
-
* per-turn signal. Call `cleanup()` when the run ends.
|
|
1426
|
-
*
|
|
1427
|
-
* @example
|
|
1428
|
-
* ```ts
|
|
1429
|
-
* const stop = chat.createStopSignal();
|
|
1430
|
-
* for (let turn = 0; turn < 100; turn++) {
|
|
1431
|
-
* stop.reset();
|
|
1432
|
-
* const result = streamText({ model, messages, abortSignal: stop.signal });
|
|
1433
|
-
* await chat.pipe(result);
|
|
1434
|
-
* // ...
|
|
1435
|
-
* }
|
|
1436
|
-
* stop.cleanup();
|
|
1437
|
-
* ```
|
|
1438
|
-
*/
|
|
1439
|
-
declare function createStopSignal(): {
|
|
1440
|
-
readonly signal: AbortSignal;
|
|
1441
|
-
reset: () => void;
|
|
1442
|
-
cleanup: () => void;
|
|
1443
|
-
};
|
|
1444
|
-
/**
|
|
1445
|
-
* Signal the frontend that the current turn is complete.
|
|
1446
|
-
*
|
|
1447
|
-
* The `TriggerChatTransport` intercepts this to close the ReadableStream
|
|
1448
|
-
* for the current turn. Call after piping the response stream.
|
|
1449
|
-
*
|
|
1450
|
-
* @example
|
|
1451
|
-
* ```ts
|
|
1452
|
-
* await chat.pipe(result);
|
|
1453
|
-
* await chat.writeTurnComplete();
|
|
1454
|
-
* ```
|
|
1455
|
-
*/
|
|
1456
|
-
declare function chatWriteTurnComplete(options?: {
|
|
1457
|
-
publicAccessToken?: string;
|
|
1458
|
-
}): Promise<void>;
|
|
1459
|
-
/**
|
|
1460
|
-
* Pipe a `StreamTextResult` (or similar) to the chat stream and capture
|
|
1461
|
-
* the assistant's response message via `onFinish`.
|
|
1462
|
-
*
|
|
1463
|
-
* Combines `toUIMessageStream()` + `onFinish` callback + `chat.pipe()`.
|
|
1464
|
-
* Returns the captured `UIMessage`, or `undefined` if capture failed.
|
|
1465
|
-
*
|
|
1466
|
-
* @example
|
|
1467
|
-
* ```ts
|
|
1468
|
-
* const result = streamText({ model, messages, abortSignal: signal });
|
|
1469
|
-
* const response = await chat.pipeAndCapture(result, { signal });
|
|
1470
|
-
* if (response) conversation.addResponse(response);
|
|
1471
|
-
* ```
|
|
1472
|
-
*/
|
|
1473
|
-
declare function pipeChatAndCapture(source: UIMessageStreamable, options?: {
|
|
1474
|
-
signal?: AbortSignal;
|
|
1475
|
-
spanName?: string;
|
|
1476
|
-
}): Promise<UIMessage | undefined>;
|
|
1477
|
-
/**
|
|
1478
|
-
* Accumulates conversation messages across turns.
|
|
1479
|
-
*
|
|
1480
|
-
* Handles the transport protocol: turn 0 sends full history (replace),
|
|
1481
|
-
* subsequent turns send only new messages (append), regenerate sends
|
|
1482
|
-
* full history minus last assistant message (replace).
|
|
1483
|
-
*
|
|
1484
|
-
* @example
|
|
1485
|
-
* ```ts
|
|
1486
|
-
* const conversation = new chat.MessageAccumulator();
|
|
1487
|
-
* for (let turn = 0; turn < 100; turn++) {
|
|
1488
|
-
* const messages = await conversation.addIncoming(payload.messages, payload.trigger, turn);
|
|
1489
|
-
* const result = streamText({ model, messages });
|
|
1490
|
-
* const response = await chat.pipeAndCapture(result);
|
|
1491
|
-
* if (response) await conversation.addResponse(response);
|
|
1492
|
-
* }
|
|
1493
|
-
* ```
|
|
1494
|
-
*/
|
|
1495
|
-
declare class ChatMessageAccumulator {
|
|
1496
|
-
modelMessages: ModelMessage[];
|
|
1497
|
-
uiMessages: UIMessage[];
|
|
1498
|
-
private _compaction?;
|
|
1499
|
-
private _pendingMessages?;
|
|
1500
|
-
private _steeringQueue;
|
|
1501
|
-
constructor(options?: {
|
|
1502
|
-
compaction?: ChatTaskCompactionOptions;
|
|
1503
|
-
pendingMessages?: PendingMessagesOptions;
|
|
1504
|
-
});
|
|
1505
|
-
/**
|
|
1506
|
-
* Add incoming messages from the transport payload.
|
|
1507
|
-
* Returns the full accumulated model messages for `streamText`.
|
|
1508
|
-
*/
|
|
1509
|
-
addIncoming(messages: UIMessage[], trigger: string, turn: number): Promise<ModelMessage[]>;
|
|
1510
|
-
/**
|
|
1511
|
-
* Add the assistant's response to the accumulator.
|
|
1512
|
-
* Call after `pipeAndCapture` with the captured response.
|
|
1513
|
-
*/
|
|
1514
|
-
/**
|
|
1515
|
-
* Replace all accumulated messages (for compaction).
|
|
1516
|
-
* Converts UIMessages to ModelMessages internally.
|
|
1517
|
-
*/
|
|
1518
|
-
setMessages(uiMessages: UIMessage[]): Promise<void>;
|
|
1519
|
-
addResponse(response: UIMessage): Promise<void>;
|
|
1520
|
-
/**
|
|
1521
|
-
* Queue a message for injection via `prepareStep`. Call from a
|
|
1522
|
-
* `messagesInput.on()` listener when a message arrives during streaming.
|
|
1523
|
-
*/
|
|
1524
|
-
steer(message: UIMessage, modelMessages?: ModelMessage[]): void;
|
|
1525
|
-
/**
|
|
1526
|
-
* Queue a message for injection, converting to model messages automatically.
|
|
1527
|
-
*/
|
|
1528
|
-
steerAsync(message: UIMessage): Promise<void>;
|
|
1529
|
-
/**
|
|
1530
|
-
* Get and clear unconsumed steering messages.
|
|
1531
|
-
*/
|
|
1532
|
-
drainSteering(): UIMessage[];
|
|
1533
|
-
/**
|
|
1534
|
-
* Returns a `prepareStep` function that handles both compaction and
|
|
1535
|
-
* pending message injection. Pass to `streamText({ prepareStep: conversation.prepareStep() })`.
|
|
1536
|
-
*/
|
|
1537
|
-
prepareStep(): ((args: {
|
|
1538
|
-
messages: ModelMessage[];
|
|
1539
|
-
steps: CompactionStep[];
|
|
1540
|
-
}) => Promise<{
|
|
1541
|
-
messages: ModelMessage[];
|
|
1542
|
-
} | undefined>) | undefined;
|
|
1543
|
-
/**
|
|
1544
|
-
* Run outer-loop compaction if needed. Call after adding the response
|
|
1545
|
-
* and capturing usage. Applies `compactModelMessages` and `compactUIMessages`
|
|
1546
|
-
* callbacks if configured.
|
|
1547
|
-
*
|
|
1548
|
-
* @returns `true` if compaction was performed, `false` otherwise.
|
|
1549
|
-
*/
|
|
1550
|
-
compactIfNeeded(usage: LanguageModelUsage | undefined, context?: {
|
|
1551
|
-
chatId?: string;
|
|
1552
|
-
turn?: number;
|
|
1553
|
-
clientData?: unknown;
|
|
1554
|
-
totalUsage?: LanguageModelUsage;
|
|
1555
|
-
}): Promise<boolean>;
|
|
1556
|
-
}
|
|
1557
|
-
export type ChatSessionOptions = {
|
|
1558
|
-
/** Run-level cancel signal (from task context). */
|
|
1559
|
-
signal: AbortSignal;
|
|
1560
|
-
/** Seconds to stay idle between turns before suspending. @default 30 */
|
|
1561
|
-
idleTimeoutInSeconds?: number;
|
|
1562
|
-
/** Duration string for suspend timeout. @default "1h" */
|
|
1563
|
-
timeout?: string;
|
|
1564
|
-
/** Max turns before ending. @default 100 */
|
|
1565
|
-
maxTurns?: number;
|
|
1566
|
-
/** Automatic context compaction — same options as `chat.task({ compaction })`. */
|
|
1567
|
-
compaction?: ChatTaskCompactionOptions;
|
|
1568
|
-
/** Configure mid-execution message injection — same options as `chat.task({ pendingMessages })`. */
|
|
1569
|
-
pendingMessages?: PendingMessagesOptions;
|
|
1570
|
-
};
|
|
1571
|
-
export type ChatTurn = {
|
|
1572
|
-
/** Turn number (0-indexed). */
|
|
1573
|
-
number: number;
|
|
1574
|
-
/** Chat session ID. */
|
|
1575
|
-
chatId: string;
|
|
1576
|
-
/** What triggered this turn. */
|
|
1577
|
-
trigger: string;
|
|
1578
|
-
/** Client data from the transport (`metadata` field on the wire payload). */
|
|
1579
|
-
clientData: unknown;
|
|
1580
|
-
/** Full accumulated model messages — pass directly to `streamText`. */
|
|
1581
|
-
readonly messages: ModelMessage[];
|
|
1582
|
-
/** Full accumulated UI messages — use for persistence. */
|
|
1583
|
-
readonly uiMessages: UIMessage[];
|
|
1584
|
-
/** Combined stop+cancel AbortSignal (fresh each turn). */
|
|
1585
|
-
signal: AbortSignal;
|
|
1586
|
-
/** Whether the user stopped generation this turn. */
|
|
1587
|
-
readonly stopped: boolean;
|
|
1588
|
-
/** Whether this is a continuation run. */
|
|
1589
|
-
continuation: boolean;
|
|
1590
|
-
/** Token usage from the previous turn. Undefined on turn 0. */
|
|
1591
|
-
previousTurnUsage?: LanguageModelUsage;
|
|
1592
|
-
/** Cumulative token usage across all completed turns so far. */
|
|
1593
|
-
totalUsage: LanguageModelUsage;
|
|
1594
|
-
/**
|
|
1595
|
-
* Replace accumulated messages (for compaction). Takes UIMessages and
|
|
1596
|
-
* converts to ModelMessages internally. After calling this, `turn.messages`
|
|
1597
|
-
* reflects the compacted history.
|
|
1598
|
-
*/
|
|
1599
|
-
setMessages(uiMessages: UIMessage[]): Promise<void>;
|
|
1600
|
-
/**
|
|
1601
|
-
* Easy path: pipe stream, capture response, accumulate it,
|
|
1602
|
-
* clean up aborted parts if stopped, and write turn-complete chunk.
|
|
1603
|
-
*/
|
|
1604
|
-
complete(source: UIMessageStreamable): Promise<UIMessage | undefined>;
|
|
1605
|
-
/**
|
|
1606
|
-
* Manual path: just write turn-complete chunk.
|
|
1607
|
-
* Use when you've already piped and accumulated manually.
|
|
1608
|
-
*/
|
|
1609
|
-
done(): Promise<void>;
|
|
1610
|
-
/**
|
|
1611
|
-
* Add the response to the accumulator manually.
|
|
1612
|
-
* Use with `chat.pipeAndCapture` when you need control between pipe and done.
|
|
1613
|
-
*/
|
|
1614
|
-
addResponse(response: UIMessage): Promise<void>;
|
|
1615
|
-
/**
|
|
1616
|
-
* Returns a `prepareStep` function that handles both compaction and
|
|
1617
|
-
* pending message injection. Pass to `streamText({ prepareStep: turn.prepareStep() })`.
|
|
1618
|
-
* Only needed when not using `chat.toStreamTextOptions()` (which auto-injects it).
|
|
1619
|
-
*/
|
|
1620
|
-
prepareStep(): ((args: {
|
|
1621
|
-
messages: ModelMessage[];
|
|
1622
|
-
steps: CompactionStep[];
|
|
1623
|
-
}) => Promise<{
|
|
1624
|
-
messages: ModelMessage[];
|
|
1625
|
-
} | undefined>) | undefined;
|
|
1626
|
-
};
|
|
1627
|
-
/**
|
|
1628
|
-
* Create a chat session that yields turns as an async iterator.
|
|
1629
|
-
*
|
|
1630
|
-
* Handles: preload wait, stop signals, message accumulation, turn-complete
|
|
1631
|
-
* signaling, and idle/suspend between turns. You control: initialization,
|
|
1632
|
-
* model/tool selection, persistence, and any custom per-turn logic.
|
|
1633
|
-
*
|
|
1634
|
-
* @example
|
|
1635
|
-
* ```ts
|
|
1636
|
-
* import { task } from "@trigger.dev/sdk";
|
|
1637
|
-
* import { chat, type ChatTaskWirePayload } from "@trigger.dev/sdk/ai";
|
|
1638
|
-
* import { streamText } from "ai";
|
|
1639
|
-
* import { openai } from "@ai-sdk/openai";
|
|
1640
|
-
*
|
|
1641
|
-
* export const myChat = task({
|
|
1642
|
-
* id: "my-chat",
|
|
1643
|
-
* run: async (payload: ChatTaskWirePayload, { signal }) => {
|
|
1644
|
-
* const session = chat.createSession(payload, { signal });
|
|
1645
|
-
*
|
|
1646
|
-
* for await (const turn of session) {
|
|
1647
|
-
* const result = streamText({
|
|
1648
|
-
* model: openai("gpt-4o"),
|
|
1649
|
-
* messages: turn.messages,
|
|
1650
|
-
* abortSignal: turn.signal,
|
|
1651
|
-
* });
|
|
1652
|
-
* await turn.complete(result);
|
|
1653
|
-
* }
|
|
1654
|
-
* },
|
|
1655
|
-
* });
|
|
1656
|
-
* ```
|
|
1657
|
-
*/
|
|
1658
|
-
declare function createChatSession(payload: ChatTaskWirePayload, options: ChatSessionOptions): AsyncIterable<ChatTurn>;
|
|
1659
|
-
/**
|
|
1660
|
-
* A Proxy-backed, run-scoped data object that appears as `T` to users.
|
|
1661
|
-
* Includes helper methods for initialization, dirty tracking, and serialization.
|
|
1662
|
-
* Internal metadata is stored behind Symbols and invisible to
|
|
1663
|
-
* `Object.keys()`, `JSON.stringify()`, and spread.
|
|
1664
|
-
*/
|
|
1665
|
-
export type ChatLocal<T extends Record<string, unknown>> = T & {
|
|
1666
|
-
/** Initialize the local with a value. Call in `onChatStart` or `run()`. */
|
|
1667
|
-
init(value: T): void;
|
|
1668
|
-
/** Returns `true` if any property was set since the last check. Resets the dirty flag. */
|
|
1669
|
-
hasChanged(): boolean;
|
|
1670
|
-
/** Returns a plain object copy of the current value. Useful for persistence. */
|
|
1671
|
-
get(): T;
|
|
1672
|
-
readonly [CHAT_LOCAL_KEY]: ReturnType<typeof locals.create<T>>;
|
|
1673
|
-
readonly [CHAT_LOCAL_DIRTY_KEY]: ReturnType<typeof locals.create<boolean>>;
|
|
1674
|
-
};
|
|
1675
|
-
/**
|
|
1676
|
-
* Creates a per-run typed data object accessible from anywhere during task execution.
|
|
1677
|
-
*
|
|
1678
|
-
* Declare at module level, then initialize inside a lifecycle hook (e.g. `onChatStart`)
|
|
1679
|
-
* using `chat.initLocal()`. Properties are accessible directly via the Proxy.
|
|
1680
|
-
*
|
|
1681
|
-
* Multiple locals can coexist — each gets its own isolated run-scoped storage.
|
|
1682
|
-
*
|
|
1683
|
-
* The `id` is required and must be unique across all `chat.local()` calls in
|
|
1684
|
-
* your project. It's used to serialize values into subtask metadata so that
|
|
1685
|
-
* `ai.toolExecute()` (or legacy `ai.tool()`) subtasks can auto-hydrate parent locals (read-only).
|
|
1686
|
-
*
|
|
1687
|
-
* @example
|
|
1688
|
-
* ```ts
|
|
1689
|
-
* import { chat } from "@trigger.dev/sdk/ai";
|
|
1690
|
-
*
|
|
1691
|
-
* const userPrefs = chat.local<{ theme: string; language: string }>({ id: "userPrefs" });
|
|
1692
|
-
* const gameState = chat.local<{ score: number; streak: number }>({ id: "gameState" });
|
|
1693
|
-
*
|
|
1694
|
-
* export const myChat = chat.task({
|
|
1695
|
-
* id: "my-chat",
|
|
1696
|
-
* onChatStart: async ({ clientData }) => {
|
|
1697
|
-
* const prefs = await db.prefs.findUnique({ where: { userId: clientData.userId } });
|
|
1698
|
-
* userPrefs.init(prefs ?? { theme: "dark", language: "en" });
|
|
1699
|
-
* gameState.init({ score: 0, streak: 0 });
|
|
1700
|
-
* },
|
|
1701
|
-
* onTurnComplete: async ({ chatId }) => {
|
|
1702
|
-
* if (gameState.hasChanged()) {
|
|
1703
|
-
* await db.save({ where: { chatId }, data: gameState.get() });
|
|
1704
|
-
* }
|
|
1705
|
-
* },
|
|
1706
|
-
* run: async ({ messages }) => {
|
|
1707
|
-
* gameState.score++;
|
|
1708
|
-
* return streamText({
|
|
1709
|
-
* system: `User prefers ${userPrefs.theme} theme. Score: ${gameState.score}`,
|
|
1710
|
-
* messages,
|
|
1711
|
-
* });
|
|
1712
|
-
* },
|
|
1713
|
-
* });
|
|
1714
|
-
* ```
|
|
1715
|
-
*/
|
|
1716
|
-
declare function chatLocal<T extends Record<string, unknown>>(options: {
|
|
1717
|
-
id: string;
|
|
1718
|
-
}): ChatLocal<T>;
|
|
1719
|
-
/**
|
|
1720
|
-
* Extracts the client data (metadata) type from a chat task.
|
|
1721
|
-
* Use this to type the `metadata` option on the transport.
|
|
1722
|
-
*
|
|
1723
|
-
* @example
|
|
1724
|
-
* ```ts
|
|
1725
|
-
* import type { InferChatClientData } from "@trigger.dev/sdk/ai";
|
|
1726
|
-
* import type { myChat } from "@/trigger/chat";
|
|
1727
|
-
*
|
|
1728
|
-
* type MyClientData = InferChatClientData<typeof myChat>;
|
|
1729
|
-
* // { model?: string; userId: string }
|
|
1730
|
-
* ```
|
|
1731
|
-
*/
|
|
1732
|
-
export type InferChatClientData<TTask extends AnyTask> = TTask extends Task<string, ChatTaskWirePayload<any, infer TMetadata>, any> ? TMetadata : unknown;
|
|
1733
|
-
/**
|
|
1734
|
-
* Extracts the UI message type from a chat task (wire payload `messages` items).
|
|
1735
|
-
*
|
|
1736
|
-
* @example
|
|
1737
|
-
* ```ts
|
|
1738
|
-
* import type { InferChatUIMessage } from "@trigger.dev/sdk/ai";
|
|
1739
|
-
* import type { myChat } from "@/trigger/chat";
|
|
1740
|
-
*
|
|
1741
|
-
* type Msg = InferChatUIMessage<typeof myChat>;
|
|
1742
|
-
* ```
|
|
1743
|
-
*/
|
|
1744
|
-
export type InferChatUIMessage<TTask extends AnyTask> = TTask extends Task<string, ChatTaskWirePayload<infer TUIM extends UIMessage, any>, any> ? TUIM : UIMessage;
|
|
1745
|
-
export declare const chat: {
|
|
1746
|
-
/** Create a chat task. See {@link chatTask}. */
|
|
1747
|
-
task: typeof chatTask;
|
|
1748
|
-
/** Create a chat task with a fixed {@link UIMessage} subtype and optional default stream options. See {@link withUIMessage}. */
|
|
1749
|
-
withUIMessage: typeof withUIMessage;
|
|
1750
|
-
/** Pipe a stream to the chat transport. See {@link pipeChat}. */
|
|
1751
|
-
pipe: typeof pipeChat;
|
|
1752
|
-
/** Create a per-run typed local. See {@link chatLocal}. */
|
|
1753
|
-
local: typeof chatLocal;
|
|
1754
|
-
/** Create a public access token for a chat task. See {@link createChatAccessToken}. */
|
|
1755
|
-
createAccessToken: typeof createChatAccessToken;
|
|
1756
|
-
/** Override the turn timeout at runtime (duration string). See {@link setTurnTimeout}. */
|
|
1757
|
-
setTurnTimeout: typeof setTurnTimeout;
|
|
1758
|
-
/** Override the turn timeout at runtime (seconds). See {@link setTurnTimeoutInSeconds}. */
|
|
1759
|
-
setTurnTimeoutInSeconds: typeof setTurnTimeoutInSeconds;
|
|
1760
|
-
/** Override the idle timeout at runtime. See {@link setIdleTimeoutInSeconds}. */
|
|
1761
|
-
setIdleTimeoutInSeconds: typeof setIdleTimeoutInSeconds;
|
|
1762
|
-
/** Override toUIMessageStream() options for the current turn. See {@link setUIMessageStreamOptions}. */
|
|
1763
|
-
setUIMessageStreamOptions: typeof setUIMessageStreamOptions;
|
|
1764
|
-
/** Check if the current turn was stopped by the user. See {@link isStopped}. */
|
|
1765
|
-
isStopped: typeof isStopped;
|
|
1766
|
-
/** Clean up aborted parts from a UIMessage. See {@link cleanupAbortedParts}. */
|
|
1767
|
-
cleanupAbortedParts: typeof cleanupAbortedParts;
|
|
1768
|
-
/** Register background work that runs in parallel with streaming. See {@link chatDefer}. */
|
|
1769
|
-
defer: typeof chatDefer;
|
|
1770
|
-
/** Queue model messages for injection at the next `prepareStep` boundary. See {@link injectBackgroundContext}. */
|
|
1771
|
-
inject: typeof injectBackgroundContext;
|
|
1772
|
-
/** Typed chat output stream for writing custom chunks or piping from subtasks. */
|
|
1773
|
-
stream: import("@trigger.dev/core/v3").RealtimeDefinedStream<UIMessageChunk>;
|
|
1774
|
-
/** Pre-built input stream for receiving messages from the transport. */
|
|
1775
|
-
messages: import("@trigger.dev/core/v3").RealtimeDefinedInputStream<ChatTaskWirePayload<UIMessage<unknown, import("ai").UIDataTypes, import("ai").UITools>, unknown>>;
|
|
1776
|
-
/** Create a managed stop signal wired to the stop input stream. See {@link createStopSignal}. */
|
|
1777
|
-
createStopSignal: typeof createStopSignal;
|
|
1778
|
-
/** Signal the frontend that the current turn is complete. See {@link chatWriteTurnComplete}. */
|
|
1779
|
-
writeTurnComplete: typeof chatWriteTurnComplete;
|
|
1780
|
-
/** Pipe a stream and capture the response message. See {@link pipeChatAndCapture}. */
|
|
1781
|
-
pipeAndCapture: typeof pipeChatAndCapture;
|
|
1782
|
-
/** Message accumulator class for raw task chat. See {@link ChatMessageAccumulator}. */
|
|
1783
|
-
MessageAccumulator: typeof ChatMessageAccumulator;
|
|
1784
|
-
/** Create a chat session (async iterator). See {@link createChatSession}. */
|
|
1785
|
-
createSession: typeof createChatSession;
|
|
1786
|
-
/**
|
|
1787
|
-
* Store and retrieve a resolved prompt for the current run.
|
|
1788
|
-
*
|
|
1789
|
-
* - `chat.prompt.set(resolved)` — store a `ResolvedPrompt` or plain string
|
|
1790
|
-
* - `chat.prompt()` — read the stored prompt (throws if not set)
|
|
1791
|
-
*/
|
|
1792
|
-
prompt: typeof getChatPrompt & {
|
|
1793
|
-
set: typeof setChatPrompt;
|
|
1794
|
-
};
|
|
1795
|
-
/**
|
|
1796
|
-
* Returns an options object ready to spread into `streamText()`.
|
|
1797
|
-
* Reads the stored prompt and returns `{ system, experimental_telemetry, ...config }`.
|
|
1798
|
-
* Returns `{}` if no prompt has been set.
|
|
1799
|
-
*/
|
|
1800
|
-
toStreamTextOptions: typeof toStreamTextOptions;
|
|
1801
|
-
/**
|
|
1802
|
-
* Replace the accumulated conversation messages for compaction.
|
|
1803
|
-
* Call from `onTurnStart` or `onTurnComplete`. Takes `UIMessage[]` and
|
|
1804
|
-
* converts to `ModelMessage[]` internally.
|
|
1805
|
-
*/
|
|
1806
|
-
setMessages: typeof setChatMessages;
|
|
1807
|
-
/** Check if it's safe to compact messages (no in-flight tool calls). */
|
|
1808
|
-
isCompactionSafe: typeof isCompactionSafe;
|
|
1809
|
-
/** Returns a `prepareStep` function that handles context compaction automatically. */
|
|
1810
|
-
compactionStep: typeof chatCompactionStep;
|
|
1811
|
-
/** Low-level compaction for use inside a custom `prepareStep`. */
|
|
1812
|
-
compact: typeof chatCompact;
|
|
1813
|
-
/** Read the current compaction state (summary + base message count). */
|
|
1814
|
-
getCompactionState: typeof getCompactionState;
|
|
1815
21
|
};
|
|
22
|
+
export {};
|
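For orientation, the removed block above is the experimental `chat.task` API that this prerelease drops from `ai.d.ts`. A minimal sketch of the usage pattern documented in its own JSDoc is shown below; it is assembled from those `@example` blocks, only compiles against the previous prerelease, and the `openai("gpt-4o")` model choice is simply the placeholder those examples used.

```ts
import { chat } from "@trigger.dev/sdk/ai";
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch based on the removed JSDoc examples: chat.task pre-types the payload,
// auto-pipes the returned StreamTextResult, and keeps multi-turn state in one run.
export const myChat = chat.task({
  id: "my-chat",
  run: async ({ messages, signal }) => {
    return streamText({
      model: openai("gpt-4o"),      // illustrative model choice from the examples
      messages,                     // already converted via convertToModelMessages
      abortSignal: signal,          // lets the frontend stop generation mid-stream
    });
  },
});
```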