@ai-sdk/langchain 0.0.0-02dba89b-20251009204516 → 0.0.0-17394c74-20260122151521

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/adapter.ts ADDED
@@ -0,0 +1,520 @@
1
+ import {
2
+ SystemMessage,
3
+ BaseMessage,
4
+ AIMessageChunk,
5
+ } from '@langchain/core/messages';
6
+ import {
7
+ type UIMessage,
8
+ type UIMessageChunk,
9
+ convertToModelMessages,
10
+ type ModelMessage,
11
+ } from 'ai';
12
+ import {
13
+ convertToolResultPart,
14
+ convertAssistantContent,
15
+ convertUserContent,
16
+ processModelChunk,
17
+ processLangGraphEvent,
18
+ isToolResultPart,
19
+ extractReasoningFromContentBlocks,
20
+ } from './utils';
21
+ import { type LangGraphEventState } from './types';
22
+ import { type StreamCallbacks } from './stream-callbacks';
23
+
24
+ /**
25
+ * Converts AI SDK UIMessages to LangChain BaseMessage objects.
26
+ *
27
+ * This function transforms the AI SDK's message format into LangChain's message
28
+ * format, enabling seamless integration between the two frameworks.
29
+ *
30
+ * @param messages - Array of AI SDK UIMessage objects to convert.
31
+ * @returns Promise resolving to an array of LangChain BaseMessage objects.
32
+ *
33
+ * @example
34
+ * ```ts
35
+ * import { toBaseMessages } from '@ai-sdk/langchain';
36
+ *
37
+ * const langchainMessages = await toBaseMessages(uiMessages);
38
+ *
39
+ * // Use with LangChain
40
+ * const response = await model.invoke(langchainMessages);
41
+ * ```
42
+ */
43
+ export async function toBaseMessages(
44
+ messages: UIMessage[],
45
+ ): Promise<BaseMessage[]> {
46
+ const modelMessages = await convertToModelMessages(messages);
47
+ return convertModelMessages(modelMessages);
48
+ }
49
+
50
+ /**
51
+ * Converts ModelMessages to LangChain BaseMessage objects.
52
+ *
53
+ * @param modelMessages - Array of ModelMessage objects from convertToModelMessages.
54
+ * @returns Array of LangChain BaseMessage objects.
55
+ */
56
+ export function convertModelMessages(
57
+ modelMessages: ModelMessage[],
58
+ ): BaseMessage[] {
59
+ const result: BaseMessage[] = [];
60
+
61
+ for (const message of modelMessages) {
62
+ switch (message.role) {
63
+ case 'tool': {
64
+ // Tool messages contain an array of tool results
65
+ for (const item of message.content) {
66
+ if (isToolResultPart(item)) {
67
+ result.push(convertToolResultPart(item));
68
+ }
69
+ }
70
+ break;
71
+ }
72
+
73
+ case 'assistant': {
74
+ result.push(convertAssistantContent(message.content));
75
+ break;
76
+ }
77
+
78
+ case 'system': {
79
+ result.push(new SystemMessage({ content: message.content }));
80
+ break;
81
+ }
82
+
83
+ case 'user': {
84
+ result.push(convertUserContent(message.content));
85
+ break;
86
+ }
87
+ }
88
+ }
89
+
90
+ return result;
91
+ }
92
+
93
+ /**
94
+ * Type guard to check if a value is a streamEvents event object.
95
+ * streamEvents produces objects with `event` and `data` properties.
96
+ *
97
+ * @param value - The value to check.
98
+ * @returns True if the value is a streamEvents event object.
99
+ */
100
+ function isStreamEventsEvent(
101
+ value: unknown,
102
+ ): value is { event: string; data: Record<string, unknown> } {
103
+ if (value == null || typeof value !== 'object') return false;
104
+ const obj = value as Record<string, unknown>;
105
+ // Check for event property being a string
106
+ if (!('event' in obj) || typeof obj.event !== 'string') return false;
107
+ // Check for data property being an object (but allow null/undefined)
108
+ if (!('data' in obj)) return false;
109
+ // data can be null in some events, treat as empty object
110
+ return obj.data === null || typeof obj.data === 'object';
111
+ }
112
+
113
/**
 * Processes a single `streamEvents` (v2) event and emits AI SDK UI message
 * chunks onto the controller.
 *
 * Stateful: `state` is mutated across calls to track which message parts
 * (reasoning / text) have been opened so that start/delta/end chunks are
 * emitted with matching ids. Ordering matters — reasoning is closed before
 * the first text delta of the same message.
 *
 * @param event - The streamEvents event to process (`event`/`data`, plus
 *   optional top-level `run_id`/`name` in the v2 format).
 * @param state - The state for tracking stream progress; mutated in place.
 * @param controller - The controller to emit UI message chunks.
 */
function processStreamEventsEvent(
  event: {
    event: string;
    data: Record<string, unknown> | null;
    run_id?: string;
    name?: string;
  },
  state: {
    started: boolean;
    messageId: string;
    reasoningStarted: boolean;
    textStarted: boolean;
    textMessageId: string | null;
    reasoningMessageId: string | null;
  },
  controller: ReadableStreamDefaultController<UIMessageChunk>,
): void {
  /**
   * Capture run_id from event level if available (streamEvents v2 format).
   * Only before the first start chunk — once a part has been opened the
   * message id must stay stable so deltas/end match their start.
   */
  if (event.run_id && !state.started) {
    state.messageId = event.run_id;
  }

  /**
   * Skip events with null/undefined data
   */
  if (!event.data) return;

  switch (event.event) {
    case 'on_chat_model_start': {
      /**
       * Handle model start - capture message metadata if available.
       * run_id is at event level in v2, but check data for backwards compatibility.
       */
      const runId = event.run_id || (event.data.run_id as string | undefined);
      if (runId) {
        state.messageId = runId;
      }
      break;
    }

    case 'on_chat_model_stream': {
      /**
       * Handle streaming token chunks
       */
      const chunk = event.data.chunk;
      if (chunk && typeof chunk === 'object') {
        /**
         * Prefer the chunk's own message id over the run id when present.
         */
        const chunkId = (chunk as { id?: string }).id;
        if (chunkId) {
          state.messageId = chunkId;
        }

        /**
         * Handle reasoning content from contentBlocks
         */
        const reasoning = extractReasoningFromContentBlocks(chunk);
        if (reasoning) {
          if (!state.reasoningStarted) {
            // Track the ID used for reasoning-start to ensure reasoning-end uses the same ID
            state.reasoningMessageId = state.messageId;
            controller.enqueue({
              type: 'reasoning-start',
              id: state.messageId,
            });
            state.reasoningStarted = true;
            state.started = true;
          }
          controller.enqueue({
            type: 'reasoning-delta',
            delta: reasoning,
            id: state.reasoningMessageId ?? state.messageId,
          });
        }

        /**
         * Extract text content from chunk: either a plain string, or the
         * concatenation of all `text`-typed blocks of an array content.
         */
        const content = (chunk as { content?: unknown }).content;
        const text =
          typeof content === 'string'
            ? content
            : Array.isArray(content)
              ? content
                  .filter(
                    (c): c is { type: 'text'; text: string } =>
                      typeof c === 'object' &&
                      c !== null &&
                      'type' in c &&
                      c.type === 'text',
                  )
                  .map(c => c.text)
                  .join('')
              : '';

        if (text) {
          /**
           * If reasoning was streamed before text, close reasoning first
           * so the UI stream never interleaves an open reasoning part
           * with text deltas.
           */
          if (state.reasoningStarted && !state.textStarted) {
            controller.enqueue({
              type: 'reasoning-end',
              id: state.reasoningMessageId ?? state.messageId,
            });
            state.reasoningStarted = false;
          }

          if (!state.textStarted) {
            // Track the ID used for text-start to ensure text-end uses the same ID
            state.textMessageId = state.messageId;
            controller.enqueue({ type: 'text-start', id: state.messageId });
            state.textStarted = true;
            state.started = true;
          }
          controller.enqueue({
            type: 'text-delta',
            delta: text,
            id: state.textMessageId ?? state.messageId,
          });
        }
      }
      break;
    }

    case 'on_tool_start': {
      /**
       * Handle tool call start.
       * run_id and name are at event level in v2, check data for backwards compatibility.
       *
       * NOTE(review): only `tool-input-start` is emitted here — no
       * `tool-input-delta`/`tool-input-available` chunks follow. Presumably
       * streamEvents delivers tool input whole; confirm consumers tolerate
       * a start with no input payload.
       */
      const runId = event.run_id || (event.data.run_id as string | undefined);
      const name = event.name || (event.data.name as string | undefined);

      if (runId && name) {
        controller.enqueue({
          type: 'tool-input-start',
          toolCallId: runId,
          toolName: name,
          dynamic: true,
        });
      }
      break;
    }

    case 'on_tool_end': {
      /**
       * Handle tool call end.
       * run_id is at event level in v2, check data for backwards compatibility.
       */
      const runId = event.run_id || (event.data.run_id as string | undefined);
      const output = event.data.output;

      if (runId) {
        controller.enqueue({
          type: 'tool-output-available',
          toolCallId: runId,
          output,
        });
      }
      break;
    }
  }
}
285
+
286
+ /**
287
+ * Converts a LangChain stream to an AI SDK UIMessageStream.
288
+ *
289
+ * This function automatically detects the stream type and handles:
290
+ * - Direct model streams (AsyncIterable from `model.stream()`)
291
+ * - LangGraph streams (ReadableStream with `streamMode: ['values', 'messages']`)
292
+ * - streamEvents streams (from `agent.streamEvents()` or `model.streamEvents()`)
293
+ *
294
+ * @param stream - A stream from LangChain model.stream(), graph.stream(), or streamEvents().
295
+ * @param callbacks - Optional callbacks for stream lifecycle events.
296
+ * @returns A ReadableStream of UIMessageChunk objects.
297
+ *
298
+ * @example
299
+ * ```ts
300
+ * // With a direct model stream
301
+ * const model = new ChatOpenAI({ model: 'gpt-4o-mini' });
302
+ * const stream = await model.stream(messages);
303
+ * return createUIMessageStreamResponse({
304
+ * stream: toUIMessageStream(stream),
305
+ * });
306
+ *
307
+ * // With a LangGraph stream
308
+ * const graphStream = await graph.stream(
309
+ * { messages },
310
+ * { streamMode: ['values', 'messages'] }
311
+ * );
312
+ * return createUIMessageStreamResponse({
313
+ * stream: toUIMessageStream(graphStream),
314
+ * });
315
+ *
316
+ * // With streamEvents
317
+ * const streamEvents = agent.streamEvents(
318
+ * { messages },
319
+ * { version: "v2" }
320
+ * );
321
+ * return createUIMessageStreamResponse({
322
+ * stream: toUIMessageStream(streamEvents),
323
+ * });
324
+ * ```
325
+ */
326
+ export function toUIMessageStream(
327
+ stream: AsyncIterable<AIMessageChunk> | ReadableStream,
328
+ callbacks?: StreamCallbacks,
329
+ ): ReadableStream<UIMessageChunk> {
330
+ /**
331
+ * Track text chunks for onFinal callback
332
+ */
333
+ const textChunks: string[] = [];
334
+
335
+ /**
336
+ * State for model stream handling
337
+ */
338
+ const modelState = {
339
+ started: false,
340
+ messageId: 'langchain-msg-1',
341
+ reasoningStarted: false,
342
+ textStarted: false,
343
+ /** Track the ID used for text-start to ensure text-end uses the same ID */
344
+ textMessageId: null as string | null,
345
+ /** Track the ID used for reasoning-start to ensure reasoning-end uses the same ID */
346
+ reasoningMessageId: null as string | null,
347
+ };
348
+
349
+ /**
350
+ * State for LangGraph stream handling
351
+ */
352
+ const langGraphState: LangGraphEventState = {
353
+ messageSeen: {} as Record<
354
+ string,
355
+ { text?: boolean; reasoning?: boolean; tool?: Record<string, boolean> }
356
+ >,
357
+ messageConcat: {} as Record<string, AIMessageChunk>,
358
+ emittedToolCalls: new Set<string>(),
359
+ emittedImages: new Set<string>(),
360
+ emittedReasoningIds: new Set<string>(),
361
+ messageReasoningIds: {} as Record<string, string>,
362
+ toolCallInfoByIndex: {} as Record<
363
+ string,
364
+ Record<number, { id: string; name: string }>
365
+ >,
366
+ currentStep: null as number | null,
367
+ emittedToolCallsByKey: new Map<string, string>(),
368
+ };
369
+
370
+ /**
371
+ * Track detected stream type: null = not yet detected
372
+ */
373
+ let streamType: 'model' | 'langgraph' | 'streamEvents' | null = null;
374
+
375
+ /**
376
+ * Get async iterator from the stream (works for both AsyncIterable and ReadableStream)
377
+ */
378
+ const getAsyncIterator = (): AsyncIterator<unknown> => {
379
+ if (Symbol.asyncIterator in stream) {
380
+ return (stream as AsyncIterable<unknown>)[Symbol.asyncIterator]();
381
+ }
382
+ /**
383
+ * For ReadableStream without Symbol.asyncIterator
384
+ */
385
+ const reader = (stream as ReadableStream).getReader();
386
+ return {
387
+ async next() {
388
+ const { done, value } = await reader.read();
389
+ return { done, value };
390
+ },
391
+ };
392
+ };
393
+
394
+ const iterator = getAsyncIterator();
395
+
396
+ /**
397
+ * Create a wrapper around the controller to intercept text chunks for callbacks
398
+ */
399
+ const createCallbackController = (
400
+ originalController: ReadableStreamDefaultController<UIMessageChunk>,
401
+ ): ReadableStreamDefaultController<UIMessageChunk> => {
402
+ return {
403
+ get desiredSize() {
404
+ return originalController.desiredSize;
405
+ },
406
+ close: () => originalController.close(),
407
+ error: (e?: unknown) => originalController.error(e),
408
+ enqueue: (chunk: UIMessageChunk) => {
409
+ /**
410
+ * Intercept text-delta chunks for callbacks
411
+ */
412
+ if (callbacks && chunk.type === 'text-delta' && chunk.delta) {
413
+ textChunks.push(chunk.delta);
414
+ callbacks.onToken?.(chunk.delta);
415
+ callbacks.onText?.(chunk.delta);
416
+ }
417
+ originalController.enqueue(chunk);
418
+ },
419
+ };
420
+ };
421
+
422
+ return new ReadableStream<UIMessageChunk>({
423
+ async start(controller) {
424
+ await callbacks?.onStart?.();
425
+
426
+ const wrappedController = createCallbackController(controller);
427
+ controller.enqueue({ type: 'start' });
428
+
429
+ try {
430
+ while (true) {
431
+ const { done, value } = await iterator.next();
432
+ if (done) break;
433
+
434
+ /**
435
+ * Detect stream type on first value
436
+ */
437
+ if (streamType === null) {
438
+ if (Array.isArray(value)) {
439
+ streamType = 'langgraph';
440
+ } else if (isStreamEventsEvent(value)) {
441
+ streamType = 'streamEvents';
442
+ } else {
443
+ streamType = 'model';
444
+ }
445
+ }
446
+
447
+ /**
448
+ * Process based on detected type
449
+ */
450
+ if (streamType === 'model') {
451
+ processModelChunk(
452
+ value as AIMessageChunk,
453
+ modelState,
454
+ wrappedController,
455
+ );
456
+ } else if (streamType === 'streamEvents') {
457
+ processStreamEventsEvent(
458
+ value as {
459
+ event: string;
460
+ data: Record<string, unknown> | null;
461
+ run_id?: string;
462
+ name?: string;
463
+ },
464
+ modelState,
465
+ wrappedController,
466
+ );
467
+ } else {
468
+ processLangGraphEvent(
469
+ value as unknown[],
470
+ langGraphState,
471
+ wrappedController,
472
+ );
473
+ }
474
+ }
475
+
476
+ /**
477
+ * Finalize based on stream type
478
+ */
479
+ if (streamType === 'model' || streamType === 'streamEvents') {
480
+ if (modelState.reasoningStarted) {
481
+ controller.enqueue({
482
+ type: 'reasoning-end',
483
+ id: modelState.reasoningMessageId ?? modelState.messageId,
484
+ });
485
+ }
486
+ if (modelState.textStarted) {
487
+ /**
488
+ * Use the same ID that was used for text-start
489
+ */
490
+ controller.enqueue({
491
+ type: 'text-end',
492
+ id: modelState.textMessageId ?? modelState.messageId,
493
+ });
494
+ }
495
+ controller.enqueue({ type: 'finish' });
496
+ } else if (streamType === 'langgraph') {
497
+ /**
498
+ * Emit finish-step if a step was started
499
+ */
500
+ if (langGraphState.currentStep !== null) {
501
+ controller.enqueue({ type: 'finish-step' });
502
+ }
503
+ controller.enqueue({ type: 'finish' });
504
+ }
505
+
506
+ /**
507
+ * Call onFinal callback with aggregated text
508
+ */
509
+ await callbacks?.onFinal?.(textChunks.join(''));
510
+ } catch (error) {
511
+ controller.enqueue({
512
+ type: 'error',
513
+ errorText: error instanceof Error ? error.message : 'Unknown error',
514
+ });
515
+ } finally {
516
+ controller.close();
517
+ }
518
+ },
519
+ });
520
+ }
package/src/index.ts ADDED
@@ -0,0 +1,12 @@
1
// Public entry point for @ai-sdk/langchain.

// Message/stream conversion between AI SDK and LangChain formats.
export {
  toBaseMessages,
  toUIMessageStream,
  convertModelMessages,
} from './adapter';

// ChatTransport implementation for LangSmith-deployed LangGraph agents.
export {
  LangSmithDeploymentTransport,
  type LangSmithDeploymentTransportOptions,
} from './transport';

// Lifecycle callback types accepted by toUIMessageStream.
export { type StreamCallbacks } from './stream-callbacks';
@@ -0,0 +1,65 @@
1
/**
 * Configuration options and helper callback methods for stream lifecycle events.
 *
 * Each callback may be synchronous or return a promise; returned promises
 * are awaited by the consumers (e.g. `createCallbacksTransformer`).
 */
export interface StreamCallbacks {
  /** `onStart`: Called once when the stream is initialized. */
  onStart?: () => Promise<void> | void;

  /** `onFinal`: Called once when the stream is closed with the final completion message. */
  onFinal?: (completion: string) => Promise<void> | void;

  /** `onToken`: Called for each tokenized message. */
  onToken?: (token: string) => Promise<void> | void;

  /** `onText`: Called for each text chunk. */
  onText?: (text: string) => Promise<void> | void;
}
17
+
18
+ /**
19
+ * Creates a transform stream that encodes input messages and invokes optional callback functions.
20
+ * The transform stream uses the provided callbacks to execute custom logic at different stages of the stream's lifecycle.
21
+ * - `onStart`: Called once when the stream is initialized.
22
+ * - `onToken`: Called for each tokenized message.
23
+ * - `onFinal`: Called once when the stream is closed with the final completion message.
24
+ *
25
+ * This function is useful when you want to process a stream of messages and perform specific actions during the stream's lifecycle.
26
+ *
27
+ * @param {StreamCallbacks} [callbacks] - An object containing the callback functions.
28
+ * @return {TransformStream<string, string>} A transform stream that allows the execution of custom logic through callbacks.
29
+ *
30
+ * @example
31
+ * const callbacks = {
32
+ * onStart: async () => console.log('Stream started'),
33
+ * onToken: async (token) => console.log(`Token: ${token}`),
34
+ * onFinal: async () => data.close()
35
+ * };
36
+ * const transformer = createCallbacksTransformer(callbacks);
37
+ */
38
+ export function createCallbacksTransformer(
39
+ callbacks: StreamCallbacks | undefined = {},
40
+ ): TransformStream<string, string> {
41
+ let aggregatedResponse = '';
42
+
43
+ return new TransformStream({
44
+ async start(): Promise<void> {
45
+ if (callbacks.onStart) await callbacks.onStart();
46
+ },
47
+
48
+ async transform(message, controller): Promise<void> {
49
+ controller.enqueue(message);
50
+
51
+ aggregatedResponse += message;
52
+
53
+ if (callbacks.onToken) await callbacks.onToken(message);
54
+ if (callbacks.onText && typeof message === 'string') {
55
+ await callbacks.onText(message);
56
+ }
57
+ },
58
+
59
+ async flush(): Promise<void> {
60
+ if (callbacks.onFinal) {
61
+ await callbacks.onFinal(aggregatedResponse);
62
+ }
63
+ },
64
+ });
65
+ }
@@ -0,0 +1,88 @@
1
+ import { AIMessageChunk } from '@langchain/core/messages';
2
+ import {
3
+ type UIMessage,
4
+ type UIMessageChunk,
5
+ type ChatTransport,
6
+ type ChatRequestOptions,
7
+ } from 'ai';
8
+ import {
9
+ RemoteGraph,
10
+ type RemoteGraphParams,
11
+ } from '@langchain/langgraph/remote';
12
+ import { toBaseMessages, toUIMessageStream } from './adapter';
13
+
14
/**
 * Options for configuring a LangSmith deployment transport.
 * Extends RemoteGraphParams but makes graphId optional (defaults to 'agent').
 *
 * All other RemoteGraphParams fields (e.g. `url`, `apiKey` as shown in the
 * transport example) are forwarded unchanged to the RemoteGraph constructor.
 */
export type LangSmithDeploymentTransportOptions = Omit<
  RemoteGraphParams,
  'graphId'
> & {
  /**
   * The ID of the graph to connect to.
   * @default 'agent'
   */
  graphId?: string;
};
28
+
29
+ /**
30
+ * A ChatTransport implementation for LangSmith/LangGraph deployments.
31
+ *
32
+ * This transport enables seamless integration between the AI SDK's useChat hook
33
+ * and LangSmith deployed LangGraph agents.
34
+ *
35
+ * @example
36
+ * ```ts
37
+ * import { LangSmithDeploymentTransport } from '@ai-sdk/langchain';
38
+ *
39
+ * // Use with useChat
40
+ * const { messages, input, handleSubmit } = useChat({
41
+ * transport: new LangSmithDeploymentTransport({
42
+ * url: 'https://your-deployment.us.langgraph.app',
43
+ * apiKey: 'my-api-key',
44
+ * }),
45
+ * });
46
+ * ```
47
+ */
48
+ export class LangSmithDeploymentTransport<UI_MESSAGE extends UIMessage>
49
+ implements ChatTransport<UI_MESSAGE>
50
+ {
51
+ protected graph: RemoteGraph;
52
+
53
+ constructor(options: LangSmithDeploymentTransportOptions) {
54
+ this.graph = new RemoteGraph({
55
+ ...options,
56
+ graphId: options.graphId ?? 'agent',
57
+ });
58
+ }
59
+
60
+ async sendMessages(
61
+ options: {
62
+ trigger: 'submit-message' | 'regenerate-message';
63
+ chatId: string;
64
+ messageId: string | undefined;
65
+ messages: UI_MESSAGE[];
66
+ abortSignal: AbortSignal | undefined;
67
+ } & ChatRequestOptions,
68
+ ): Promise<ReadableStream<UIMessageChunk>> {
69
+ const baseMessages = await toBaseMessages(options.messages);
70
+
71
+ const stream = await this.graph.stream(
72
+ { messages: baseMessages },
73
+ { streamMode: ['values', 'messages'] },
74
+ );
75
+
76
+ return toUIMessageStream(
77
+ stream as AsyncIterable<AIMessageChunk> | ReadableStream,
78
+ );
79
+ }
80
+
81
+ async reconnectToStream(
82
+ _options: {
83
+ chatId: string;
84
+ } & ChatRequestOptions,
85
+ ): Promise<ReadableStream<UIMessageChunk> | null> {
86
+ throw new Error('Method not implemented.');
87
+ }
88
+ }