@langchain/core 0.3.61 → 0.3.62
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- package/README.md +4 -3
- package/dist/callbacks/manager.cjs +1 -1
- package/dist/callbacks/manager.d.ts +1 -1
- package/dist/callbacks/manager.js +1 -1
- package/dist/messages/base.cjs +1 -1
- package/dist/messages/base.d.ts +3 -3
- package/dist/messages/base.js +1 -1
- package/dist/output_parsers/string.cjs +1 -1
- package/dist/output_parsers/string.d.ts +1 -1
- package/dist/output_parsers/string.js +1 -1
- package/dist/runnables/base.cjs +9 -4
- package/dist/runnables/base.d.ts +1 -1
- package/dist/runnables/base.js +9 -4
- package/dist/runnables/passthrough.cjs +1 -1
- package/dist/runnables/passthrough.d.ts +1 -1
- package/dist/runnables/passthrough.js +1 -1
- package/package.json +1 -1
package/README.md
CHANGED

@@ -1,6 +1,6 @@
 # 🦜🍎️ @langchain/core
 
-[](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml)  [](https://opensource.org/licenses/MIT) [](https://twitter.com/langchainai)
+[](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml)  [](https://opensource.org/licenses/MIT) [](https://twitter.com/langchainai)
 
 `@langchain/core` contains the core abstractions and schemas of LangChain.js, including base classes for language models,
 chat models, vectorstores, retrievers, and runnables.
@@ -13,7 +13,7 @@ $ yarn add @langchain/core
 
 ## 🤔 What is this?
 
-`@langchain/core` contains the base abstractions that power the rest of the LangChain ecosystem.
+`@langchain/core` contains the base abstractions that power the rest of the LangChain ecosystem.
 These abstractions are designed to be as modular and simple as possible.
 Examples of these abstractions include those for language models, document loaders, embedding models, vectorstores, retrievers, and more.
 The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.
@@ -36,6 +36,7 @@ const prompt = ChatPromptTemplate.fromTemplate(
 );
 
 const model = new ChatOpenAI({
+  model: "gpt-4o-mini",
   temperature: 0.8,
 });
 
@@ -133,7 +134,7 @@ Because all used packages must share the same version of core, packages should n
 
 This recommendation will change to a caret once a major version (1.x.x) release has occurred.
 
-We suggest making all packages cross-compatible with ESM and CJS using a build step like the one in
+We suggest making all packages cross-compatible with ESM and CJS using a build step like the one in
 [@langchain/anthropic](https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-anthropic), then running `yarn build` before running `npm publish`.
 
 We will be exploring how to make this process easier in the future.
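
For context, the README hunks above touch the package's quick-start example. A minimal sketch of the full chain those lines belong to (assuming `@langchain/openai` is installed and `OPENAI_API_KEY` is set; the surrounding README lines are not part of this diff):

```ts
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";

const prompt = ChatPromptTemplate.fromTemplate("Tell me a joke about {topic}");

// Pinning the model name explicitly (the change in the hunk above) avoids
// depending on the provider package's default model, which can change.
const model = new ChatOpenAI({
  model: "gpt-4o-mini",
  temperature: 0.8,
});

const chain = prompt.pipe(model);
const result = await chain.invoke({ topic: "bears" });
console.log(result.content);
```

Pinning an explicit model name is the same pattern this release applies to every doc example below.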
package/dist/callbacks/manager.cjs
CHANGED

@@ -383,7 +383,7 @@ exports.CallbackManagerForToolRun = CallbackManagerForToolRun;
  *
  * // Example of using LLMChain with OpenAI and a simple prompt
  * const chain = new LLMChain({
- *   llm: new ChatOpenAI({ temperature: 0.9 }),
+ *   llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0.9 }),
  *   prompt,
  * });
  *

package/dist/callbacks/manager.d.ts
CHANGED

@@ -103,7 +103,7 @@ export declare class CallbackManagerForToolRun extends BaseRunManager implements
  *
  * // Example of using LLMChain with OpenAI and a simple prompt
  * const chain = new LLMChain({
- *   llm: new ChatOpenAI({ temperature: 0.9 }),
+ *   llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0.9 }),
  *   prompt,
  * });
  *

package/dist/callbacks/manager.js
CHANGED

@@ -371,7 +371,7 @@ export class CallbackManagerForToolRun extends BaseRunManager {
  *
  * // Example of using LLMChain with OpenAI and a simple prompt
  * const chain = new LLMChain({
- *   llm: new ChatOpenAI({ temperature: 0.9 }),
+ *   llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0.9 }),
  *   prompt,
  * });
  *
package/dist/messages/base.cjs
CHANGED

@@ -191,7 +191,7 @@ class BaseMessage extends serializable_js_1.Serializable {
             writable: true,
             value: void 0
         });
-        /** Response metadata. For example: response headers, logprobs, token counts. */
+        /** Response metadata. For example: response headers, logprobs, token counts, model name. */
         Object.defineProperty(this, "response_metadata", {
             enumerable: true,
             configurable: true,
package/dist/messages/base.d.ts
CHANGED

@@ -6,7 +6,7 @@ export interface StoredMessageData {
     name: string | undefined;
     tool_call_id: string | undefined;
     additional_kwargs?: Record<string, any>;
-    /** Response metadata. For example: response headers, logprobs, token counts. */
+    /** Response metadata. For example: response headers, logprobs, token counts, model name. */
     response_metadata?: Record<string, any>;
     id?: string;
 }
@@ -69,7 +69,7 @@ export type BaseMessageFields = {
         tool_calls?: OpenAIToolCall[];
         [key: string]: unknown;
     };
-    /** Response metadata. For example: response headers, logprobs, token counts. */
+    /** Response metadata. For example: response headers, logprobs, token counts, model name. */
     response_metadata?: Record<string, any>;
     /**
      * An optional unique identifier for the message. This should ideally be
@@ -106,7 +106,7 @@ export declare abstract class BaseMessage extends Serializable implements BaseMe
     name?: string;
     /** Additional keyword arguments */
     additional_kwargs: NonNullable<BaseMessageFields["additional_kwargs"]>;
-    /** Response metadata. For example: response headers, logprobs, token counts. */
+    /** Response metadata. For example: response headers, logprobs, token counts, model name. */
     response_metadata: NonNullable<BaseMessageFields["response_metadata"]>;
     /**
      * An optional unique identifier for the message. This should ideally be
package/dist/messages/base.js
CHANGED

@@ -179,7 +179,7 @@ export class BaseMessage extends Serializable {
             writable: true,
             value: void 0
         });
-        /** Response metadata. For example: response headers, logprobs, token counts. */
+        /** Response metadata. For example: response headers, logprobs, token counts, model name. */
         Object.defineProperty(this, "response_metadata", {
             enumerable: true,
             configurable: true,
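
The three `messages/base.*` hunks only extend the `response_metadata` doc comment to mention the model name. What actually lands in that field depends on the chat-model provider; the shape shown below is an OpenAI-style assumption, not a guarantee made by core:

```ts
import { ChatOpenAI } from "@langchain/openai";

const model = new ChatOpenAI({ model: "gpt-4o-mini" });
const message = await model.invoke("Hello!");

// response_metadata is a provider-populated Record<string, any>.
// Typical entries (provider-dependent): token counts, logprobs,
// response headers, and, per this doc change, the model name.
console.log(message.response_metadata);
```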
package/dist/output_parsers/string.d.ts
CHANGED

@@ -10,7 +10,7 @@ import { MessageContentComplex, MessageContentImageUrl, MessageContentText } from
  *
  * const chain = RunnableSequence.from([
  *   promptTemplate,
- *   new ChatOpenAI({}),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }),
  *   new StringOutputParser(),
  * ]);
  *
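
For reference, the `StringOutputParser` chain in this doc comment converts chat-model output into plain strings, which also applies when streaming; a brief sketch (the prompt and model name are illustrative):

```ts
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";

const promptTemplate = ChatPromptTemplate.fromTemplate("Tell me a joke about {topic}");

const chain = promptTemplate
  .pipe(new ChatOpenAI({ model: "gpt-4o-mini" }))
  .pipe(new StringOutputParser());

// Each chunk is now a plain string rather than an AIMessageChunk.
for await (const chunk of await chain.stream({ topic: "bears" })) {
  process.stdout.write(chunk);
}
```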
package/dist/runnables/base.cjs
CHANGED

@@ -520,8 +520,9 @@ class Runnable extends serializable_js_1.Serializable {
         // add each chunk to the output stream
         const outerThis = this;
         async function consumeRunnableStream() {
+            let signal;
+            let listener = null;
             try {
-                let signal;
                 if (options?.signal) {
                     if ("any" in AbortSignal) {
                         // Use native AbortSignal.any() if available (Node 19+)
@@ -535,9 +536,10 @@ class Runnable extends serializable_js_1.Serializable {
                         // Fallback for Node 18 and below - just use the provided signal
                         signal = options.signal;
                         // Ensure we still abort our controller when the parent signal aborts
-                        options.signal.addEventListener("abort", () => {
+                        listener = () => {
                             abortController.abort();
-                        });
+                        };
+                        options.signal.addEventListener("abort", listener, { once: true });
                     }
                 }
                 else {
@@ -557,6 +559,9 @@ class Runnable extends serializable_js_1.Serializable {
             }
             finally {
                 await eventStreamer.finish();
+                if (signal && listener) {
+                    signal.removeEventListener("abort", listener);
+                }
             }
         }
         const runnableStreamConsumePromise = consumeRunnableStream();
@@ -1232,7 +1237,7 @@ exports.RunnableRetry = RunnableRetry;
  * const promptTemplate = PromptTemplate.fromTemplate(
  *   "Tell me a joke about {topic}",
  * );
- * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({})]);
+ * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({ model: "gpt-4o-mini" })]);
  * const result = await chain.invoke({ topic: "bears" });
  * ```
  */
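
This is the substantive fix in 0.3.62: on the Node 18 fallback path, the stream consumer previously attached an anonymous "abort" listener to the caller's signal and never removed it, so reusing one long-lived `AbortSignal` across many calls accumulated listeners. The hunk names the listener, registers it with `{ once: true }`, and detaches it in `finally`. A standalone sketch of the same pattern (function and parameter names here are illustrative, not the library's internals):

```ts
// Sketch of the listener-cleanup pattern applied in the hunk above.
// `parentSignal` is the caller-supplied AbortSignal; `work` stands in
// for the stream-consuming body.
async function runWithAbort(
  parentSignal: AbortSignal,
  work: (signal: AbortSignal) => Promise<void>
) {
  const abortController = new AbortController();
  let listener: (() => void) | null = null;
  let signal: AbortSignal;
  if ("any" in AbortSignal) {
    // Newer Node: combine signals without manual listener bookkeeping.
    signal = AbortSignal.any([abortController.signal, parentSignal]);
  } else {
    // Node 18 fallback: forward the parent's abort to our controller.
    signal = parentSignal;
    listener = () => abortController.abort();
    parentSignal.addEventListener("abort", listener, { once: true });
  }
  try {
    await work(signal);
  } finally {
    // Without this, a reused parent signal keeps a reference to the
    // listener (and everything it closes over) after the call finishes.
    if (listener) parentSignal.removeEventListener("abort", listener);
  }
}
```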
package/dist/runnables/base.d.ts
CHANGED

@@ -582,7 +582,7 @@ export type RunnableSequenceFields<RunInput, RunOutput> = {
  * const promptTemplate = PromptTemplate.fromTemplate(
  *   "Tell me a joke about {topic}",
  * );
- * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({})]);
+ * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({ model: "gpt-4o-mini" })]);
  * const result = await chain.invoke({ topic: "bears" });
  * ```
  */
package/dist/runnables/base.js
CHANGED

@@ -511,8 +511,9 @@ export class Runnable extends Serializable {
         // add each chunk to the output stream
         const outerThis = this;
         async function consumeRunnableStream() {
+            let signal;
+            let listener = null;
             try {
-                let signal;
                 if (options?.signal) {
                     if ("any" in AbortSignal) {
                         // Use native AbortSignal.any() if available (Node 19+)
@@ -526,9 +527,10 @@ export class Runnable extends Serializable {
                         // Fallback for Node 18 and below - just use the provided signal
                         signal = options.signal;
                         // Ensure we still abort our controller when the parent signal aborts
-                        options.signal.addEventListener("abort", () => {
+                        listener = () => {
                             abortController.abort();
-                        });
+                        };
+                        options.signal.addEventListener("abort", listener, { once: true });
                     }
                 }
                 else {
@@ -548,6 +550,9 @@ export class Runnable extends Serializable {
             }
             finally {
                 await eventStreamer.finish();
+                if (signal && listener) {
+                    signal.removeEventListener("abort", listener);
+                }
             }
         }
         const runnableStreamConsumePromise = consumeRunnableStream();
@@ -1219,7 +1224,7 @@ export class RunnableRetry extends RunnableBinding {
  * const promptTemplate = PromptTemplate.fromTemplate(
  *   "Tell me a joke about {topic}",
  * );
- * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({})]);
+ * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({ model: "gpt-4o-mini" })]);
  * const result = await chain.invoke({ topic: "bears" });
  * ```
  */
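
The ESM build above mirrors the same listener fix. From the caller's side, the affected path is streaming with a `signal` in the call options; a hedged usage sketch (the runnable is a placeholder):

```ts
import { RunnableLambda } from "@langchain/core/runnables";

const runnable = RunnableLambda.from(async (input: string) => `echo: ${input}`);

// A long-lived controller whose signal is reused across many calls is
// the scenario in which the removed-in-finally listener matters on Node 18.
const controller = new AbortController();

for await (const event of runnable.streamEvents("hi", {
  version: "v2",
  signal: controller.signal,
})) {
  console.log(event.event);
}
```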
package/dist/runnables/passthrough.cjs
CHANGED

@@ -112,7 +112,7 @@ class RunnablePassthrough extends base_js_1.Runnable {
  *     schema: async () => db.getTableInfo(),
  *   }),
  *   prompt,
- *   new ChatOpenAI({}).withConfig({ stop: ["\nSQLResult:"] }),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }).withConfig({ stop: ["\nSQLResult:"] }),
  *   new StringOutputParser(),
  * ]);
  * const result = await sqlQueryGeneratorChain.invoke({

package/dist/runnables/passthrough.d.ts
CHANGED

@@ -57,7 +57,7 @@ export declare class RunnablePassthrough<RunInput = any> extends Runnable<RunInput
  *     schema: async () => db.getTableInfo(),
  *   }),
  *   prompt,
- *   new ChatOpenAI({}).withConfig({ stop: ["\nSQLResult:"] }),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }).withConfig({ stop: ["\nSQLResult:"] }),
  *   new StringOutputParser(),
  * ]);
  * const result = await sqlQueryGeneratorChain.invoke({

package/dist/runnables/passthrough.js
CHANGED

@@ -109,7 +109,7 @@ export class RunnablePassthrough extends Runnable {
  *     schema: async () => db.getTableInfo(),
  *   }),
  *   prompt,
- *   new ChatOpenAI({}).withConfig({ stop: ["\nSQLResult:"] }),
+ *   new ChatOpenAI({ model: "gpt-4o-mini" }).withConfig({ stop: ["\nSQLResult:"] }),
  *   new StringOutputParser(),
  * ]);
  * const result = await sqlQueryGeneratorChain.invoke({
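
The `RunnablePassthrough` doc example these hunks touch builds a SQL-query-generation chain. A self-contained sketch, with the example's `db.getTableInfo()` stubbed out (the stub and prompt text are illustrative; `withConfig({ stop })` is the piece the example demonstrates):

```ts
import { RunnablePassthrough, RunnableSequence } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { PromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";

// Stub standing in for the example's `db.getTableInfo()`.
const db = { getTableInfo: async () => "CREATE TABLE users (id INT, name TEXT);" };

const prompt = PromptTemplate.fromTemplate(
  `Based on the table schema below, write a SQL query for the question.
{schema}

Question: {question}
SQLQuery:`
);

const sqlQueryGeneratorChain = RunnableSequence.from([
  // Merge the schema into the incoming input object, passing the rest through.
  RunnablePassthrough.assign({
    schema: async () => db.getTableInfo(),
  }),
  prompt,
  // The stop sequence cuts generation before the model can invent a
  // "SQLResult:" section, so the parser receives only the query.
  new ChatOpenAI({ model: "gpt-4o-mini" }).withConfig({ stop: ["\nSQLResult:"] }),
  new StringOutputParser(),
]);

const result = await sqlQueryGeneratorChain.invoke({
  question: "How many users are there?",
});
console.log(result);
```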