@ai-sdk/langchain 2.0.19 → 2.0.21

package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/langchain

+ ## 2.0.21
+
+ ### Patch Changes
+
+ - 75da969: fix(langchain): re-enable support for streamEvents in toUIMessageStream
+
+ ## 2.0.20
+
+ ### Patch Changes
+
+ - ai@6.0.19
+
  ## 2.0.19

  ### Patch Changes
package/README.md CHANGED
@@ -56,6 +56,33 @@ return createUIMessageStreamResponse({
  });
  ```

+ ### Streaming with `streamEvents`
+
+ You can also use `toUIMessageStream` with `streamEvents()` for more granular event handling:
+
+ ```ts
+ import { toBaseMessages, toUIMessageStream } from '@ai-sdk/langchain';
+ import { createUIMessageStreamResponse } from 'ai';
+
+ // Using streamEvents with an agent
+ const langchainMessages = await toBaseMessages(uiMessages);
+ const streamEvents = agent.streamEvents(
+   { messages: langchainMessages },
+   { version: 'v2' },
+ );
+
+ // Convert to UI message stream response
+ return createUIMessageStreamResponse({
+   stream: toUIMessageStream(streamEvents),
+ });
+ ```
+
+ The adapter automatically detects the stream type and handles:
+
+ - `on_chat_model_stream` events for text streaming
+ - `on_tool_start` and `on_tool_end` events for tool calls
+ - Reasoning content from contentBlocks
+
  ### Custom Data Streaming

  LangChain tools can emit custom data events using `config.writer()`. The adapter converts these to typed `data-{type}` parts:
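For orientation, a `streamEvents` v2 event is a plain object, and the handlers above key off a handful of its fields. A minimal sketch of that shape (field names follow LangChain's `StreamEvent` type; the `StreamEventLike` interface is illustrative, not exported by the package):

```ts
import type { AIMessageChunk } from '@langchain/core/messages';

// Illustrative shape of the v2 events that toUIMessageStream inspects.
interface StreamEventLike {
  event: string;   // 'on_chat_model_stream', 'on_tool_start', 'on_tool_end', ...
  run_id: string;  // reused as the message id / toolCallId
  name?: string;   // tool name, forwarded as toolName on tool-input-start
  data: {
    chunk?: AIMessageChunk; // token delta; text and reasoning content come from here
    output?: unknown;       // tool result, forwarded on tool-output-available
  } | null;
}
```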
@@ -174,16 +201,28 @@ Converts a LangChain/LangGraph stream to an AI SDK `UIMessageStream`.

  **Parameters:**

- - `stream`: `ReadableStream` - LangGraph stream with `streamMode: ['values', 'messages']`
+ - `stream`: `AsyncIterable | ReadableStream` - A stream from LangChain `model.stream()`, LangGraph `graph.stream()`, or `streamEvents()`

  **Returns:** `ReadableStream<UIMessageChunk>`

- **Supported stream events:**
+ **Supported stream types:**
+
+ - **Model streams** - Direct `AIMessageChunk` streams from `model.stream()`
+ - **LangGraph streams** - Streams with `streamMode: ['values', 'messages']`
+ - **streamEvents** - Event streams from `agent.streamEvents()` or `model.streamEvents()`
+
+ **Supported LangGraph stream events:**

  - `messages` - Streaming message chunks (text, tool calls)
  - `values` - State updates that finalize pending message chunks
  - `custom` - Custom data events (emitted as `data-{type}` chunks)

+ **Supported streamEvents events:**
+
+ - `on_chat_model_stream` - Token streaming from chat models
+ - `on_tool_start` - Tool execution start
+ - `on_tool_end` - Tool execution end with output
+
  ### `LangSmithDeploymentTransport`

  A `ChatTransport` implementation for LangSmith/LangGraph deployments.
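Because the adapter auto-detects the stream type, the same call also accepts a plain model stream. A minimal sketch, assuming a `ChatOpenAI` model (the model choice is an assumption; any LangChain chat model with `.stream()` should work the same way):

```ts
import { ChatOpenAI } from '@langchain/openai';
import { toUIMessageStream } from '@ai-sdk/langchain';
import { createUIMessageStreamResponse } from 'ai';

// model.stream() yields an AsyncIterable<AIMessageChunk>; the adapter
// classifies it as a model stream on the first chunk and converts it.
const model = new ChatOpenAI({ model: 'gpt-4o-mini' }); // assumed model
const modelStream = await model.stream('Summarize streaming in one sentence.');

return createUIMessageStreamResponse({
  stream: toUIMessageStream(modelStream),
});
```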
package/dist/index.d.mts CHANGED
@@ -46,11 +46,12 @@ declare function convertModelMessages(modelMessages: ModelMessage[]): BaseMessag
  /**
   * Converts a LangChain stream to an AI SDK UIMessageStream.
   *
-  * This function automatically detects the stream type and handles both:
+  * This function automatically detects the stream type and handles:
   * - Direct model streams (AsyncIterable from `model.stream()`)
   * - LangGraph streams (ReadableStream with `streamMode: ['values', 'messages']`)
+  * - streamEvents streams (from `agent.streamEvents()` or `model.streamEvents()`)
   *
-  * @param stream - A stream from LangChain model.stream() or LangGraph graph.stream().
+  * @param stream - A stream from LangChain model.stream(), graph.stream(), or streamEvents().
   * @param callbacks - Optional callbacks for stream lifecycle events.
   * @returns A ReadableStream of UIMessageChunk objects.
   *
@@ -71,6 +72,15 @@ declare function convertModelMessages(modelMessages: ModelMessage[]): BaseMessag
   * return createUIMessageStreamResponse({
   *   stream: toUIMessageStream(graphStream),
   * });
+  *
+  * // With streamEvents
+  * const streamEvents = agent.streamEvents(
+  *   { messages },
+  *   { version: "v2" }
+  * );
+  * return createUIMessageStreamResponse({
+  *   stream: toUIMessageStream(streamEvents),
+  * });
   * ```
   */
  declare function toUIMessageStream(stream: AsyncIterable<AIMessageChunk> | ReadableStream, callbacks?: StreamCallbacks): ReadableStream<UIMessageChunk>;
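The compiled source further below exercises two `StreamCallbacks` hooks: `onStart` runs before the first chunk, and `onFinal` receives the concatenated text once the stream closes. A minimal sketch (`modelStream` is assumed from an earlier `model.stream()` call; `StreamCallbacks` may define more hooks than the two visible in this diff):

```ts
import { toUIMessageStream } from '@ai-sdk/langchain';

// onStart fires before any chunk is enqueued; onFinal receives the
// joined text of the stream (textChunks.join("") in the compiled code).
const uiStream = toUIMessageStream(modelStream, {
  onStart: () => console.log('stream opened'),
  onFinal: (completion) => console.log('completion length:', completion.length),
});
```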
package/dist/index.d.ts CHANGED
@@ -46,11 +46,12 @@ declare function convertModelMessages(modelMessages: ModelMessage[]): BaseMessag
  /**
   * Converts a LangChain stream to an AI SDK UIMessageStream.
   *
-  * This function automatically detects the stream type and handles both:
+  * This function automatically detects the stream type and handles:
   * - Direct model streams (AsyncIterable from `model.stream()`)
   * - LangGraph streams (ReadableStream with `streamMode: ['values', 'messages']`)
+  * - streamEvents streams (from `agent.streamEvents()` or `model.streamEvents()`)
   *
-  * @param stream - A stream from LangChain model.stream() or LangGraph graph.stream().
+  * @param stream - A stream from LangChain model.stream(), graph.stream(), or streamEvents().
   * @param callbacks - Optional callbacks for stream lifecycle events.
   * @returns A ReadableStream of UIMessageChunk objects.
   *
@@ -71,6 +72,15 @@ declare function convertModelMessages(modelMessages: ModelMessage[]): BaseMessag
   * return createUIMessageStreamResponse({
   *   stream: toUIMessageStream(graphStream),
   * });
+  *
+  * // With streamEvents
+  * const streamEvents = agent.streamEvents(
+  *   { messages },
+  *   { version: "v2" }
+  * );
+  * return createUIMessageStreamResponse({
+  *   stream: toUIMessageStream(streamEvents),
+  * });
   * ```
   */
  declare function toUIMessageStream(stream: AsyncIterable<AIMessageChunk> | ReadableStream, callbacks?: StreamCallbacks): ReadableStream<UIMessageChunk>;
package/dist/index.js CHANGED
@@ -668,13 +668,116 @@ function convertModelMessages(modelMessages) {
    }
    return result;
  }
+ function isStreamEventsEvent(value) {
+   if (value == null || typeof value !== "object") return false;
+   const obj = value;
+   if (!("event" in obj) || typeof obj.event !== "string") return false;
+   if (!("data" in obj)) return false;
+   return obj.data === null || typeof obj.data === "object";
+ }
+ function processStreamEventsEvent(event, state, controller) {
+   var _a, _b, _c;
+   if (event.run_id && !state.started) {
+     state.messageId = event.run_id;
+   }
+   if (!event.data) return;
+   switch (event.event) {
+     case "on_chat_model_start": {
+       const runId = event.run_id || event.data.run_id;
+       if (runId) {
+         state.messageId = runId;
+       }
+       break;
+     }
+     case "on_chat_model_stream": {
+       const chunk = event.data.chunk;
+       if (chunk && typeof chunk === "object") {
+         const chunkId = chunk.id;
+         if (chunkId) {
+           state.messageId = chunkId;
+         }
+         const reasoning = extractReasoningFromContentBlocks(chunk);
+         if (reasoning) {
+           if (!state.reasoningStarted) {
+             state.reasoningMessageId = state.messageId;
+             controller.enqueue({
+               type: "reasoning-start",
+               id: state.messageId
+             });
+             state.reasoningStarted = true;
+             state.started = true;
+           }
+           controller.enqueue({
+             type: "reasoning-delta",
+             delta: reasoning,
+             id: (_a = state.reasoningMessageId) != null ? _a : state.messageId
+           });
+         }
+         const content = chunk.content;
+         const text = typeof content === "string" ? content : Array.isArray(content) ? content.filter(
+           (c) => typeof c === "object" && c !== null && "type" in c && c.type === "text"
+         ).map((c) => c.text).join("") : "";
+         if (text) {
+           if (state.reasoningStarted && !state.textStarted) {
+             controller.enqueue({
+               type: "reasoning-end",
+               id: (_b = state.reasoningMessageId) != null ? _b : state.messageId
+             });
+             state.reasoningStarted = false;
+           }
+           if (!state.textStarted) {
+             state.textMessageId = state.messageId;
+             controller.enqueue({ type: "text-start", id: state.messageId });
+             state.textStarted = true;
+             state.started = true;
+           }
+           controller.enqueue({
+             type: "text-delta",
+             delta: text,
+             id: (_c = state.textMessageId) != null ? _c : state.messageId
+           });
+         }
+       }
+       break;
+     }
+     case "on_tool_start": {
+       const runId = event.run_id || event.data.run_id;
+       const name = event.name || event.data.name;
+       if (runId && name) {
+         controller.enqueue({
+           type: "tool-input-start",
+           toolCallId: runId,
+           toolName: name,
+           dynamic: true
+         });
+       }
+       break;
+     }
+     case "on_tool_end": {
+       const runId = event.run_id || event.data.run_id;
+       const output = event.data.output;
+       if (runId) {
+         controller.enqueue({
+           type: "tool-output-available",
+           toolCallId: runId,
+           output
+         });
+       }
+       break;
+     }
+   }
+ }
  function toUIMessageStream(stream, callbacks) {
    const textChunks = [];
    const modelState = {
      started: false,
      messageId: "langchain-msg-1",
      reasoningStarted: false,
-     textStarted: false
+     textStarted: false,
+     /** Track the ID used for text-start to ensure text-end uses the same ID */
+     textMessageId: null,
+     /** Track the ID used for reasoning-start to ensure reasoning-end uses the same ID */
+     reasoningMessageId: null
    };
    const langGraphState = {
      messageSeen: {},
@@ -721,7 +824,7 @@ function toUIMessageStream(stream, callbacks) {
    };
    return new ReadableStream({
      async start(controller) {
-       var _a, _b;
+       var _a, _b, _c, _d;
        await ((_a = callbacks == null ? void 0 : callbacks.onStart) == null ? void 0 : _a.call(callbacks));
        const wrappedController = createCallbackController(controller);
        controller.enqueue({ type: "start" });
@@ -732,6 +835,8 @@ function toUIMessageStream(stream, callbacks) {
            if (streamType === null) {
              if (Array.isArray(value)) {
                streamType = "langgraph";
+             } else if (isStreamEventsEvent(value)) {
+               streamType = "streamEvents";
              } else {
                streamType = "model";
              }
@@ -742,6 +847,12 @@ function toUIMessageStream(stream, callbacks) {
                modelState,
                wrappedController
              );
+           } else if (streamType === "streamEvents") {
+             processStreamEventsEvent(
+               value,
+               modelState,
+               wrappedController
+             );
            } else {
              processLangGraphEvent(
                value,
@@ -750,19 +861,22 @@ function toUIMessageStream(stream, callbacks) {
              );
            }
          }
-         if (streamType === "model") {
+         if (streamType === "model" || streamType === "streamEvents") {
            if (modelState.reasoningStarted) {
              controller.enqueue({
                type: "reasoning-end",
-               id: modelState.messageId
+               id: (_b = modelState.reasoningMessageId) != null ? _b : modelState.messageId
              });
            }
            if (modelState.textStarted) {
-             controller.enqueue({ type: "text-end", id: modelState.messageId });
+             controller.enqueue({
+               type: "text-end",
+               id: (_c = modelState.textMessageId) != null ? _c : modelState.messageId
+             });
            }
            controller.enqueue({ type: "finish" });
          }
-         await ((_b = callbacks == null ? void 0 : callbacks.onFinal) == null ? void 0 : _b.call(callbacks, textChunks.join("")));
+         await ((_d = callbacks == null ? void 0 : callbacks.onFinal) == null ? void 0 : _d.call(callbacks, textChunks.join("")));
        } catch (error) {
          controller.enqueue({
            type: "error",