@langchain/core 0.3.61 → 0.3.63

This diff shows the published contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
package/README.md CHANGED
@@ -1,6 +1,6 @@
 # 🦜🍎️ @langchain/core
 
-[![CI](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dm/@langchain/core) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
+[![CI](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dm/@langchain/core) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
 
 `@langchain/core` contains the core abstractions and schemas of LangChain.js, including base classes for language models,
 chat models, vectorstores, retrievers, and runnables.
@@ -13,7 +13,7 @@ $ yarn add @langchain/core
 
 ## 🤔 What is this?
 
-`@langchain/core` contains the base abstractions that power the rest of the LangChain ecosystem.
+`@langchain/core` contains the base abstractions that power the rest of the LangChain ecosystem.
 These abstractions are designed to be as modular and simple as possible.
 Examples of these abstractions include those for language models, document loaders, embedding models, vectorstores, retrievers, and more.
 The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.
@@ -36,6 +36,7 @@ const prompt = ChatPromptTemplate.fromTemplate(
 );
 
 const model = new ChatOpenAI({
+  model: "gpt-4o-mini",
   temperature: 0.8,
 });
 
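The README quickstart now names a model explicitly instead of relying on a provider default. A minimal sketch of the updated snippet in use; the prompt text and the surrounding chain are illustrative, not part of this diff:

```ts
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatOpenAI } from "@langchain/openai";

// Illustrative prompt; the diff itself only adds the explicit `model` field.
const prompt = ChatPromptTemplate.fromTemplate("Tell me a joke about {topic}");

const model = new ChatOpenAI({
  model: "gpt-4o-mini",
  temperature: 0.8,
});

// Pipe prompt -> model -> string parser, then invoke the chain.
const chain = prompt.pipe(model).pipe(new StringOutputParser());
const joke = await chain.invoke({ topic: "bears" });
console.log(joke);
```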
@@ -133,7 +134,7 @@ Because all used packages must share the same version of core, packages should n
 
 This recommendation will change to a caret once a major version (1.x.x) release has occurred.
 
-We suggest making all packages cross-compatible with ESM and CJS using a build step like the one in
+We suggest making all packages cross-compatible with ESM and CJS using a build step like the one in
 [@langchain/anthropic](https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-anthropic), then running `yarn build` before running `npm publish`.
 
 We will be exploring how to make this process easier in the future.
@@ -383,7 +383,7 @@ exports.CallbackManagerForToolRun = CallbackManagerForToolRun;
  *
  * // Example of using LLMChain with OpenAI and a simple prompt
  * const chain = new LLMChain({
- *   llm: new ChatOpenAI({ temperature: 0.9 }),
+ *   llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0.9 }),
  *   prompt,
  * });
  *
@@ -103,7 +103,7 @@ export declare class CallbackManagerForToolRun extends BaseRunManager implements
  *
  * // Example of using LLMChain with OpenAI and a simple prompt
  * const chain = new LLMChain({
- *   llm: new ChatOpenAI({ temperature: 0.9 }),
+ *   llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0.9 }),
  *   prompt,
  * });
  *
@@ -371,7 +371,7 @@ export class CallbackManagerForToolRun extends BaseRunManager {
  *
  * // Example of using LLMChain with OpenAI and a simple prompt
  * const chain = new LLMChain({
- *   llm: new ChatOpenAI({ temperature: 0.9 }),
+ *   llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0.9 }),
  *   prompt,
  * });
  *
@@ -143,29 +143,38 @@ class AIMessageChunk extends base_js_1.BaseMessageChunk {
         };
     }
     else {
+        const groupedToolCallChunk = fields.tool_call_chunks.reduce((acc, chunk) => {
+            if (!chunk.id)
+                return acc;
+            acc[chunk.id] = acc[chunk.id] ?? [];
+            acc[chunk.id].push(chunk);
+            return acc;
+        }, {});
         const toolCalls = [];
         const invalidToolCalls = [];
-        for (const toolCallChunk of fields.tool_call_chunks) {
+        for (const [id, chunks] of Object.entries(groupedToolCallChunk)) {
             let parsedArgs = {};
+            const name = chunks[0]?.name ?? "";
+            const argStr = chunks.map((c) => c.args || "").join("");
             try {
-                parsedArgs = (0, json_js_1.parsePartialJson)(toolCallChunk.args || "{}");
+                parsedArgs = (0, json_js_1.parsePartialJson)(argStr);
                 if (parsedArgs === null ||
                     typeof parsedArgs !== "object" ||
                     Array.isArray(parsedArgs)) {
                     throw new Error("Malformed tool call chunk args.");
                 }
                 toolCalls.push({
-                    name: toolCallChunk.name ?? "",
+                    name,
                     args: parsedArgs,
-                    id: toolCallChunk.id,
+                    id,
                     type: "tool_call",
                 });
             }
             catch (e) {
                 invalidToolCalls.push({
-                    name: toolCallChunk.name,
-                    args: toolCallChunk.args,
-                    id: toolCallChunk.id,
+                    name,
+                    args: argStr,
+                    id,
                     error: "Malformed args.",
                     type: "invalid_tool_call",
                 });
@@ -137,29 +137,38 @@ export class AIMessageChunk extends BaseMessageChunk {
         };
     }
     else {
+        const groupedToolCallChunk = fields.tool_call_chunks.reduce((acc, chunk) => {
+            if (!chunk.id)
+                return acc;
+            acc[chunk.id] = acc[chunk.id] ?? [];
+            acc[chunk.id].push(chunk);
+            return acc;
+        }, {});
         const toolCalls = [];
         const invalidToolCalls = [];
-        for (const toolCallChunk of fields.tool_call_chunks) {
+        for (const [id, chunks] of Object.entries(groupedToolCallChunk)) {
             let parsedArgs = {};
+            const name = chunks[0]?.name ?? "";
+            const argStr = chunks.map((c) => c.args || "").join("");
             try {
-                parsedArgs = parsePartialJson(toolCallChunk.args || "{}");
+                parsedArgs = parsePartialJson(argStr);
                 if (parsedArgs === null ||
                     typeof parsedArgs !== "object" ||
                     Array.isArray(parsedArgs)) {
                     throw new Error("Malformed tool call chunk args.");
                 }
                 toolCalls.push({
-                    name: toolCallChunk.name ?? "",
+                    name,
                     args: parsedArgs,
-                    id: toolCallChunk.id,
+                    id,
                     type: "tool_call",
                 });
             }
             catch (e) {
                 invalidToolCalls.push({
-                    name: toolCallChunk.name,
-                    args: toolCallChunk.args,
-                    id: toolCallChunk.id,
+                    name,
+                    args: argStr,
+                    id,
                     error: "Malformed args.",
                     type: "invalid_tool_call",
                 });
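Both the CJS and ESM builds above carry the same fix: streamed tool call fragments are now grouped by `id` and their `args` strings concatenated before JSON parsing, so a call whose arguments arrive split across several chunks parses as one tool call rather than several malformed ones. A minimal sketch of the effect; the tool name, id, and arguments are illustrative:

```ts
import { AIMessageChunk } from "@langchain/core/messages";

// One tool call whose JSON args were split across two streamed fragments
// sharing the same id.
const chunk = new AIMessageChunk({
  content: "",
  tool_call_chunks: [
    { name: "get_weather", id: "call_0", args: '{"city": "Par', index: 0, type: "tool_call_chunk" },
    { id: "call_0", args: 'is"}', index: 0, type: "tool_call_chunk" },
  ],
});

// With the grouping in place, the fragments merge into a single parsed call:
// [{ name: "get_weather", args: { city: "Paris" }, id: "call_0", type: "tool_call" }]
console.log(chunk.tool_calls);
```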
@@ -191,7 +191,7 @@ class BaseMessage extends serializable_js_1.Serializable {
             writable: true,
             value: void 0
         });
-        /** Response metadata. For example: response headers, logprobs, token counts. */
+        /** Response metadata. For example: response headers, logprobs, token counts, model name. */
         Object.defineProperty(this, "response_metadata", {
             enumerable: true,
             configurable: true,
@@ -6,7 +6,7 @@ export interface StoredMessageData {
     name: string | undefined;
     tool_call_id: string | undefined;
     additional_kwargs?: Record<string, any>;
-    /** Response metadata. For example: response headers, logprobs, token counts. */
+    /** Response metadata. For example: response headers, logprobs, token counts, model name. */
     response_metadata?: Record<string, any>;
     id?: string;
 }
@@ -69,7 +69,7 @@ export type BaseMessageFields = {
         tool_calls?: OpenAIToolCall[];
         [key: string]: unknown;
     };
-    /** Response metadata. For example: response headers, logprobs, token counts. */
+    /** Response metadata. For example: response headers, logprobs, token counts, model name. */
     response_metadata?: Record<string, any>;
     /**
      * An optional unique identifier for the message. This should ideally be
@@ -106,7 +106,7 @@ export declare abstract class BaseMessage extends Serializable implements BaseMe
     name?: string;
     /** Additional keyword arguments */
     additional_kwargs: NonNullable<BaseMessageFields["additional_kwargs"]>;
-    /** Response metadata. For example: response headers, logprobs, token counts. */
+    /** Response metadata. For example: response headers, logprobs, token counts, model name. */
     response_metadata: NonNullable<BaseMessageFields["response_metadata"]>;
     /**
      * An optional unique identifier for the message. This should ideally be
@@ -179,7 +179,7 @@ export class BaseMessage extends Serializable {
             writable: true,
             value: void 0
         });
-        /** Response metadata. For example: response headers, logprobs, token counts. */
+        /** Response metadata. For example: response headers, logprobs, token counts, model name. */
         Object.defineProperty(this, "response_metadata", {
             enumerable: true,
             configurable: true,
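The `response_metadata` doc comments across the builds now also mention the model name. The field remains a free-form record; a sketch of reading it, with illustrative keys (each provider chooses its own):

```ts
import { AIMessage } from "@langchain/core/messages";

// response_metadata is an arbitrary Record<string, any>; the keys below
// (model_name, tokenUsage) are examples of what a provider might attach.
const message = new AIMessage({
  content: "Hello!",
  response_metadata: {
    model_name: "gpt-4o-mini",
    tokenUsage: { promptTokens: 8, completionTokens: 2, totalTokens: 10 },
  },
});

console.log(message.response_metadata.model_name); // "gpt-4o-mini"
```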
@@ -12,7 +12,7 @@ const transform_js_1 = require("./transform.cjs");
  *
  * const chain = RunnableSequence.from([
  *   promptTemplate,
- *   new ChatOpenAI({}),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }),
  *   new StringOutputParser(),
  * ]);
  *
@@ -10,7 +10,7 @@ import { MessageContentComplex, MessageContentImageUrl, MessageContentText } fro
  *
  * const chain = RunnableSequence.from([
  *   promptTemplate,
- *   new ChatOpenAI({}),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }),
  *   new StringOutputParser(),
  * ]);
  *
@@ -9,7 +9,7 @@ import { BaseTransformOutputParser } from "./transform.js";
  *
  * const chain = RunnableSequence.from([
  *   promptTemplate,
- *   new ChatOpenAI({}),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }),
  *   new StringOutputParser(),
  * ]);
  *
@@ -520,8 +520,9 @@ class Runnable extends serializable_js_1.Serializable {
         // add each chunk to the output stream
         const outerThis = this;
         async function consumeRunnableStream() {
+            let signal;
+            let listener = null;
             try {
-                let signal;
                 if (options?.signal) {
                     if ("any" in AbortSignal) {
                         // Use native AbortSignal.any() if available (Node 19+)
@@ -535,9 +536,10 @@
                         // Fallback for Node 18 and below - just use the provided signal
                         signal = options.signal;
                         // Ensure we still abort our controller when the parent signal aborts
-                        options.signal.addEventListener("abort", () => {
+                        listener = () => {
                             abortController.abort();
-                        }, { once: true });
+                        };
+                        options.signal.addEventListener("abort", listener, { once: true });
                     }
                 }
                 else {
@@ -557,6 +559,9 @@
             }
             finally {
                 await eventStreamer.finish();
+                if (signal && listener) {
+                    signal.removeEventListener("abort", listener);
+                }
             }
         }
         const runnableStreamConsumePromise = consumeRunnableStream();
@@ -1232,7 +1237,7 @@ exports.RunnableRetry = RunnableRetry;
  * const promptTemplate = PromptTemplate.fromTemplate(
  *   "Tell me a joke about {topic}",
  * );
- * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({})]);
+ * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({ model: "gpt-4o-mini" })]);
  * const result = await chain.invoke({ topic: "bears" });
  * ```
  */
@@ -582,7 +582,7 @@ export type RunnableSequenceFields<RunInput, RunOutput> = {
  * const promptTemplate = PromptTemplate.fromTemplate(
  *   "Tell me a joke about {topic}",
  * );
- * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({})]);
+ * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({ model: "gpt-4o-mini" })]);
  * const result = await chain.invoke({ topic: "bears" });
  * ```
  */
@@ -511,8 +511,9 @@ export class Runnable extends Serializable {
         // add each chunk to the output stream
         const outerThis = this;
         async function consumeRunnableStream() {
+            let signal;
+            let listener = null;
             try {
-                let signal;
                 if (options?.signal) {
                     if ("any" in AbortSignal) {
                         // Use native AbortSignal.any() if available (Node 19+)
@@ -526,9 +527,10 @@ export class Runnable extends Serializable {
                         // Fallback for Node 18 and below - just use the provided signal
                         signal = options.signal;
                         // Ensure we still abort our controller when the parent signal aborts
-                        options.signal.addEventListener("abort", () => {
+                        listener = () => {
                             abortController.abort();
-                        }, { once: true });
+                        };
+                        options.signal.addEventListener("abort", listener, { once: true });
                     }
                 }
                 else {
@@ -548,6 +550,9 @@ export class Runnable extends Serializable {
             }
             finally {
                 await eventStreamer.finish();
+                if (signal && listener) {
+                    signal.removeEventListener("abort", listener);
+                }
             }
         }
         const runnableStreamConsumePromise = consumeRunnableStream();
@@ -1219,7 +1224,7 @@ export class RunnableRetry extends RunnableBinding {
  * const promptTemplate = PromptTemplate.fromTemplate(
  *   "Tell me a joke about {topic}",
  * );
- * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({})]);
+ * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({ model: "gpt-4o-mini" })]);
  * const result = await chain.invoke({ topic: "bears" });
  * ```
  */
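Both builds of `Runnable` get the same cleanup: on the Node 18 fallback path, the "abort" listener attached to the caller's signal is now held in a variable and removed in `finally`, so repeated stream calls against one long-lived `AbortSignal` no longer accumulate listeners. A standalone sketch of the pattern, with illustrative names:

```ts
// Standalone illustration of the listener-cleanup pattern applied above.
async function consumeWithCleanup(parentSignal: AbortSignal): Promise<void> {
  const abortController = new AbortController();
  // Keep a reference so the listener can be detached later.
  const listener = () => abortController.abort();
  parentSignal.addEventListener("abort", listener, { once: true });
  try {
    // ... consume the stream, honoring abortController.signal ...
  } finally {
    // Without this, each call would leave a listener on the parent signal
    // until it fires, accumulating across many calls with the same signal.
    parentSignal.removeEventListener("abort", listener);
  }
}

// Usage: sequential consumers sharing one signal leave nothing behind.
const parent = new AbortController();
await consumeWithCleanup(parent.signal);
await consumeWithCleanup(parent.signal);
```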
@@ -112,7 +112,7 @@ class RunnablePassthrough extends base_js_1.Runnable {
  *     schema: async () => db.getTableInfo(),
  *   }),
  *   prompt,
- *   new ChatOpenAI({}).withConfig({ stop: ["\nSQLResult:"] }),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }).withConfig({ stop: ["\nSQLResult:"] }),
  *   new StringOutputParser(),
  * ]);
  * const result = await sqlQueryGeneratorChain.invoke({
@@ -57,7 +57,7 @@ export declare class RunnablePassthrough<RunInput = any> extends Runnable<RunInp
  *     schema: async () => db.getTableInfo(),
  *   }),
  *   prompt,
- *   new ChatOpenAI({}).withConfig({ stop: ["\nSQLResult:"] }),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }).withConfig({ stop: ["\nSQLResult:"] }),
  *   new StringOutputParser(),
  * ]);
  * const result = await sqlQueryGeneratorChain.invoke({
@@ -109,7 +109,7 @@ export class RunnablePassthrough extends Runnable {
  *     schema: async () => db.getTableInfo(),
  *   }),
  *   prompt,
- *   new ChatOpenAI({}).withConfig({ stop: ["\nSQLResult:"] }),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }).withConfig({ stop: ["\nSQLResult:"] }),
  *   new StringOutputParser(),
  * ]);
  * const result = await sqlQueryGeneratorChain.invoke({
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/core",
-  "version": "0.3.61",
+  "version": "0.3.63",
   "description": "Core LangChain.js abstractions and schemas",
   "type": "module",
   "engines": {