@langchain/core 0.3.55 → 0.3.56
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/runnables/remote.cjs +2 -0
- package/dist/runnables/remote.d.ts +2 -0
- package/dist/runnables/remote.js +2 -0
- package/dist/tracers/tracer_langchain.cjs +4 -1
- package/dist/tracers/tracer_langchain.js +4 -1
- package/dist/utils/testing/helpers.cjs +7 -0
- package/dist/utils/testing/helpers.d.ts +3 -0
- package/dist/utils/testing/helpers.js +3 -0
- package/dist/utils/testing/index.cjs +109 -24
- package/dist/utils/testing/index.d.ts +36 -10
- package/dist/utils/testing/index.js +109 -24
- package/package.json +2 -2
package/dist/runnables/remote.cjs
CHANGED

@@ -239,6 +239,8 @@ function serialize(input) {
  * `.invoke()`, `.stream()`, `.streamEvents()`, etc. methods that
  * other runnables support.
  *
+ * @deprecated LangServe is no longer actively developed - please consider using LangGraph Platform.
+ *
  * @param url - The base URL of the LangServe endpoint.
  * @param options - Optional configuration for the remote runnable, including timeout and headers.
  * @param fetch - Optional custom fetch implementation.

package/dist/runnables/remote.d.ts
CHANGED

@@ -15,6 +15,8 @@ type RemoteRunnableOptions = {
  * `.invoke()`, `.stream()`, `.streamEvents()`, etc. methods that
  * other runnables support.
  *
+ * @deprecated LangServe is no longer actively developed - please consider using LangGraph Platform.
+ *
  * @param url - The base URL of the LangServe endpoint.
  * @param options - Optional configuration for the remote runnable, including timeout and headers.
  * @param fetch - Optional custom fetch implementation.

package/dist/runnables/remote.js
CHANGED

@@ -236,6 +236,8 @@ function serialize(input) {
  * `.invoke()`, `.stream()`, `.streamEvents()`, etc. methods that
  * other runnables support.
  *
+ * @deprecated LangServe is no longer actively developed - please consider using LangGraph Platform.
+ *
  * @param url - The base URL of the LangServe endpoint.
  * @param options - Optional configuration for the remote runnable, including timeout and headers.
  * @param fetch - Optional custom fetch implementation.
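
All three remote runnable artifacts receive the same two-line change: a `@deprecated` tag in the doc comment. Nothing behavioral changes; existing callers keep working. As a reminder of the API being deprecated, a minimal sketch (the URL is a hypothetical placeholder):

import { RemoteRunnable } from "@langchain/core/runnables/remote";

// Still functional in 0.3.56, but editors will now flag this as deprecated
// and the docs point to LangGraph Platform instead.
const chain = new RemoteRunnable({
  url: "https://my-langserve-app.example.com/chain", // hypothetical endpoint
  options: { timeout: 10_000 },
});

const result = await chain.invoke({ topic: "cats" });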

package/dist/tracers/tracer_langchain.cjs
CHANGED

@@ -151,7 +151,10 @@ class LangChainTracer extends base_js_1.BaseTracer {
     }
     static getTraceableRunTree() {
         try {
-            return (
+            return (
+            // The type cast here provides forward compatibility. Old versions of LangSmith will just
+            // ignore the permitAbsentRunTree arg.
+            traceable_1.getCurrentRunTree(true));
         }
         catch {
             return undefined;

package/dist/tracers/tracer_langchain.js
CHANGED

@@ -148,7 +148,10 @@ export class LangChainTracer extends BaseTracer {
     }
     static getTraceableRunTree() {
         try {
-            return
+            return (
+            // The type cast here provides forward compatibility. Old versions of LangSmith will just
+            // ignore the permitAbsentRunTree arg.
+            getCurrentRunTree(true));
         }
         catch {
             return undefined;
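
Both tracer builds now pass `true` (`permitAbsentRunTree`) to LangSmith's `getCurrentRunTree`; the dependency floor bump to `langsmith@^0.3.29` later in this diff is presumably what guarantees the flag exists. A sketch of the intended behavior, assuming a langsmith version that understands the flag (older versions simply ignore the extra argument, as the inline comment notes):

import { getCurrentRunTree } from "langsmith/singletons/traceable";

// With `true`, a flag-aware langsmith is expected to return undefined outside
// a traceable() context rather than throwing; the surrounding try/catch in
// LangChainTracer still covers versions that throw.
const runTree = getCurrentRunTree(true);
if (runTree === undefined) {
  // no active run tree; tracing is a no-op here
}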

package/dist/utils/testing/helpers.cjs
ADDED

@@ -0,0 +1,7 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.charChunks = void 0;
+const index_js_1 = require("../../messages/index.cjs");
+/** Tiny helper to convert string into char chunk. For eg: Turn `"Hi!"` into `[AIMessageChunk("H"), AIMessageChunk("i"), AIMessageChunk("!")] */
+const charChunks = (text) => [...text].map((c) => new index_js_1.AIMessageChunk({ content: c }));
+exports.charChunks = charChunks;

package/dist/utils/testing/helpers.js
ADDED

@@ -0,0 +1,3 @@
+import { AIMessageChunk } from "../../messages/index.js";
+/** Tiny helper to convert string into char chunk. For eg: Turn `"Hi!"` into `[AIMessageChunk("H"), AIMessageChunk("i"), AIMessageChunk("!")] */
+export const charChunks = (text) => [...text].map((c) => new AIMessageChunk({ content: c }));
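
`charChunks` fans a string out into one `AIMessageChunk` per character. It ships as an internal testing helper rather than a public export; an equivalent standalone sketch using only the public messages entrypoint:

import { AIMessageChunk } from "@langchain/core/messages";

// Same behavior as the new helper: one chunk per character.
const charChunks = (text: string): AIMessageChunk[] =>
  [...text].map((c) => new AIMessageChunk({ content: c }));

console.log(charChunks("Hi!").map((c) => c.content)); // ["H", "i", "!"]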

package/dist/utils/testing/index.cjs
CHANGED

@@ -16,6 +16,7 @@ const base_js_2 = require("../../runnables/base.cjs");
 const index_js_3 = require("../../tools/index.cjs");
 const base_js_3 = require("../../tracers/base.cjs");
 const embeddings_js_1 = require("../../embeddings.cjs");
+const json_schema_js_1 = require("../json_schema.cjs");
 const vectorstores_js_1 = require("../../vectorstores.cjs");
 const similarities_js_1 = require("../ml-distance/similarities.cjs");
 /**

@@ -186,8 +187,8 @@ class FakeChatModel extends chat_models_js_1.BaseChatModel {
 }
 exports.FakeChatModel = FakeChatModel;
 class FakeStreamingChatModel extends chat_models_js_1.BaseChatModel {
-    constructor(
-    super(
+    constructor({ sleep = 50, responses = [], chunks = [], toolStyle = "openai", thrownErrorString, ...rest }) {
+        super(rest);
         Object.defineProperty(this, "sleep", {
             enumerable: true,
             configurable: true,

@@ -198,7 +199,19 @@ class FakeStreamingChatModel extends chat_models_js_1.BaseChatModel {
             enumerable: true,
             configurable: true,
             writable: true,
-            value:
+            value: []
+        });
+        Object.defineProperty(this, "chunks", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        Object.defineProperty(this, "toolStyle", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "openai"
         });
         Object.defineProperty(this, "thrownErrorString", {
             enumerable: true,

@@ -206,54 +219,120 @@ class FakeStreamingChatModel extends chat_models_js_1.BaseChatModel {
             writable: true,
             value: void 0
         });
-        this
-
-
+        Object.defineProperty(this, "tools", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        this.sleep = sleep;
+        this.responses = responses;
+        this.chunks = chunks;
+        this.toolStyle = toolStyle;
+        this.thrownErrorString = thrownErrorString;
     }
     _llmType() {
         return "fake";
     }
+    bindTools(tools) {
+        const merged = [...this.tools, ...tools];
+        const toolDicts = merged.map((t) => {
+            switch (this.toolStyle) {
+                case "openai":
+                    return {
+                        type: "function",
+                        function: {
+                            name: t.name,
+                            description: t.description,
+                            parameters: (0, json_schema_js_1.toJsonSchema)(t.schema),
+                        },
+                    };
+                case "anthropic":
+                    return {
+                        name: t.name,
+                        description: t.description,
+                        input_schema: (0, json_schema_js_1.toJsonSchema)(t.schema),
+                    };
+                case "bedrock":
+                    return {
+                        toolSpec: {
+                            name: t.name,
+                            description: t.description,
+                            inputSchema: (0, json_schema_js_1.toJsonSchema)(t.schema),
+                        },
+                    };
+                case "google":
+                    return {
+                        name: t.name,
+                        description: t.description,
+                        parameters: (0, json_schema_js_1.toJsonSchema)(t.schema),
+                    };
+                default:
+                    throw new Error(`Unsupported tool style: ${this.toolStyle}`);
+            }
+        });
+        const wrapped = this.toolStyle === "google"
+            ? [{ functionDeclarations: toolDicts }]
+            : toolDicts;
+        /* creating a *new* instance – mirrors LangChain .bind semantics for type-safety and avoiding noise */
+        const next = new FakeStreamingChatModel({
+            sleep: this.sleep,
+            responses: this.responses,
+            chunks: this.chunks,
+            toolStyle: this.toolStyle,
+            thrownErrorString: this.thrownErrorString,
+        });
+        next.tools = merged;
+        return next.bind({ tools: wrapped });
+    }
     async _generate(messages, _options, _runManager) {
         if (this.thrownErrorString) {
             throw new Error(this.thrownErrorString);
         }
-        const content = this.responses?.[0]
+        const content = this.responses?.[0]?.content ?? messages[0].content ?? "";
         const generation = {
             generations: [
                 {
                     text: "",
                     message: new index_js_1.AIMessage({
                         content,
+                        tool_calls: this.chunks?.[0]?.tool_calls,
                     }),
                 },
             ],
         };
         return generation;
     }
-    async *_streamResponseChunks(
+    async *_streamResponseChunks(_messages, _options, runManager) {
         if (this.thrownErrorString) {
             throw new Error(this.thrownErrorString);
         }
-
-
-
-                yield new outputs_js_1.ChatGenerationChunk({
-                    text: "",
+        if (this.chunks?.length) {
+            for (const msgChunk of this.chunks) {
+                const cg = new outputs_js_1.ChatGenerationChunk({
                     message: new index_js_1.AIMessageChunk({
-                        content,
+                        content: msgChunk.content,
+                        tool_calls: msgChunk.tool_calls,
+                        additional_kwargs: msgChunk.additional_kwargs ?? {},
                     }),
+                    text: msgChunk.content?.toString() ?? "",
                 });
+                yield cg;
+                await runManager?.handleLLMNewToken(msgChunk.content, undefined, undefined, undefined, undefined, { chunk: cg });
             }
+            return;
         }
-
-
-
-
-
-
-
-
-            }
+        const fallback = this.responses?.[0] ??
+            new index_js_1.AIMessage(typeof _messages[0].content === "string" ? _messages[0].content : "");
+        const text = typeof fallback.content === "string" ? fallback.content : "";
+        for (const ch of text) {
+            await new Promise((r) => setTimeout(r, this.sleep));
+            const cg = new outputs_js_1.ChatGenerationChunk({
+                message: new index_js_1.AIMessageChunk({ content: ch }),
+                text: ch,
+            });
+            yield cg;
+            await runManager?.handleLLMNewToken(ch, undefined, undefined, undefined, undefined, { chunk: cg });
         }
     }
 }
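
Taken together, `FakeStreamingChatModel` now replays an explicit `chunks` array verbatim (tool-call deltas included) and otherwise falls back to streaming the first response character by character, pausing `sleep` milliseconds between characters. A usage sketch against the public testing entrypoint:

import { AIMessage, AIMessageChunk } from "@langchain/core/messages";
import { FakeStreamingChatModel } from "@langchain/core/utils/testing";

// Scripted mode: the provided chunks are emitted as-is.
const scripted = new FakeStreamingChatModel({
  chunks: [
    new AIMessageChunk({ content: "Hello" }),
    new AIMessageChunk({ content: " world" }),
  ],
});
for await (const chunk of await scripted.stream("input is ignored here")) {
  console.log(chunk.content); // "Hello", then " world"
}

// Fallback mode: no chunks, so "Hi!" streams as "H", "i", "!"
// with a 10 ms pause between characters.
const fallback = new FakeStreamingChatModel({
  sleep: 10,
  responses: [new AIMessage("Hi!")],
});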

@@ -430,7 +509,13 @@ class FakeListChatModel extends chat_models_js_1.BaseChatModel {
     withStructuredOutput(_params, _config) {
         return base_js_2.RunnableLambda.from(async (input) => {
             const message = await this.invoke(input);
-
+            if (message.tool_calls?.[0]?.args) {
+                return message.tool_calls[0].args;
+            }
+            if (typeof message.content === "string") {
+                return JSON.parse(message.content);
+            }
+            throw new Error("No structured output found");
         });
     }
 }
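
`FakeListChatModel.withStructuredOutput` now prefers the first tool call's `args`, falls back to `JSON.parse` on string content, and throws otherwise. A sketch assuming the canned responses are JSON strings (the fake ignores the schema argument):

import { FakeListChatModel } from "@langchain/core/utils/testing";

const model = new FakeListChatModel({
  responses: ['{"answer": 42}'],
});

// No tool calls on the message, so the string content is JSON-parsed.
const structured = model.withStructuredOutput({});
const out = await structured.invoke("What is the answer?");
console.log(out); // { answer: 42 }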

package/dist/utils/testing/index.d.ts
CHANGED

@@ -4,7 +4,7 @@ import { BaseChatMessageHistory, BaseListChatMessageHistory } from "../../chat_h
 import { Document } from "../../documents/document.js";
 import { BaseChatModel, BaseChatModelCallOptions, BaseChatModelParams } from "../../language_models/chat_models.js";
 import { BaseLLMParams, LLM } from "../../language_models/llms.js";
-import { BaseMessage, AIMessage } from "../../messages/index.js";
+import { BaseMessage, AIMessage, AIMessageChunk } from "../../messages/index.js";
 import { BaseOutputParser } from "../../output_parsers/base.js";
 import { GenerationChunk, type ChatResult, ChatGenerationChunk } from "../../outputs.js";
 import { BaseRetriever } from "../../retrievers/index.js";

@@ -60,18 +60,18 @@ export declare class FakeChatModel extends BaseChatModel {
     _llmType(): string;
     _generate(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
 }
-export declare class FakeStreamingChatModel extends BaseChatModel {
-    sleep
-    responses
+export declare class FakeStreamingChatModel extends BaseChatModel<FakeStreamingChatModelCallOptions> {
+    sleep: number;
+    responses: BaseMessage[];
+    chunks: AIMessageChunk[];
+    toolStyle: "openai" | "anthropic" | "bedrock" | "google";
     thrownErrorString?: string;
-
-
-    responses?: BaseMessage[];
-    thrownErrorString?: string;
-    } & BaseLLMParams);
+    private tools;
+    constructor({ sleep, responses, chunks, toolStyle, thrownErrorString, ...rest }: FakeStreamingChatModelFields & BaseLLMParams);
     _llmType(): string;
+    bindTools(tools: (StructuredTool | ToolSpec)[]): Runnable<BaseLanguageModelInput, AIMessageChunk, FakeStreamingChatModelCallOptions>;
     _generate(messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
-    _streamResponseChunks(
+    _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
 export declare class FakeRetriever extends BaseRetriever {
     lc_namespace: string[];

@@ -81,6 +81,32 @@ export declare class FakeRetriever extends BaseRetriever {
     });
     _getRelevantDocuments(_query: string): Promise<Document<Record<string, any>>[]>;
 }
+/** Minimal shape actually needed by `bindTools` */
+export interface ToolSpec {
+    name: string;
+    description?: string;
+    schema: z.ZodTypeAny | Record<string, unknown>;
+}
+/**
+ * Interface specific to the Fake Streaming Chat model.
+ */
+export interface FakeStreamingChatModelCallOptions extends BaseChatModelCallOptions {
+}
+/**
+ * Interface for the Constructor-field specific to the Fake Streaming Chat model (all optional because we fill in defaults).
+ */
+export interface FakeStreamingChatModelFields extends BaseChatModelParams {
+    /** Milliseconds to pause between fallback char-by-char chunks */
+    sleep?: number;
+    /** Full AI messages to fall back to when no `chunks` supplied */
+    responses?: BaseMessage[];
+    /** Exact chunks to emit (can include tool-call deltas) */
+    chunks?: AIMessageChunk[];
+    /** How tool specs are formatted in `bindTools` */
+    toolStyle?: "openai" | "anthropic" | "bedrock" | "google";
+    /** Throw this error instead of streaming (useful in tests) */
+    thrownErrorString?: string;
+}
 /**
  * Interface for the input parameters specific to the Fake List Chat model.
  */
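
The new `ToolSpec` interface means tests can bind plain `{ name, description, schema }` objects instead of constructing full `StructuredTool` instances, with `toolStyle` selecting the wire format (`function`/`parameters` for OpenAI, `input_schema` for Anthropic, `toolSpec` for Bedrock, `functionDeclarations` for Google). A sketch assuming zod for the schema:

import { z } from "zod";
import { FakeStreamingChatModel } from "@langchain/core/utils/testing";

// toolStyle controls how bindTools serializes the specs; "anthropic"
// produces { name, description, input_schema } payloads.
const model = new FakeStreamingChatModel({ toolStyle: "anthropic" });

const withTools = model.bindTools([
  {
    name: "get_weather",
    description: "Look up the weather for a city",
    schema: z.object({ city: z.string() }),
  },
]);

const reply = await withTools.invoke("What's the weather in Paris?");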

package/dist/utils/testing/index.js
CHANGED

@@ -13,6 +13,7 @@ import { Runnable, RunnableLambda } from "../../runnables/base.js";
 import { StructuredTool } from "../../tools/index.js";
 import { BaseTracer } from "../../tracers/base.js";
 import { Embeddings, } from "../../embeddings.js";
+import { toJsonSchema } from "../json_schema.js";
 import { VectorStore } from "../../vectorstores.js";
 import { cosine } from "../ml-distance/similarities.js";
 /**

@@ -178,8 +179,8 @@ export class FakeChatModel extends BaseChatModel {
     }
 }
 export class FakeStreamingChatModel extends BaseChatModel {
-    constructor(
-    super(
+    constructor({ sleep = 50, responses = [], chunks = [], toolStyle = "openai", thrownErrorString, ...rest }) {
+        super(rest);
         Object.defineProperty(this, "sleep", {
             enumerable: true,
             configurable: true,

@@ -190,7 +191,19 @@ export class FakeStreamingChatModel extends BaseChatModel {
             enumerable: true,
             configurable: true,
             writable: true,
-            value:
+            value: []
+        });
+        Object.defineProperty(this, "chunks", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        Object.defineProperty(this, "toolStyle", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "openai"
         });
         Object.defineProperty(this, "thrownErrorString", {
             enumerable: true,

@@ -198,54 +211,120 @@ export class FakeStreamingChatModel extends BaseChatModel {
             writable: true,
             value: void 0
         });
-        this
-
-
+        Object.defineProperty(this, "tools", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        this.sleep = sleep;
+        this.responses = responses;
+        this.chunks = chunks;
+        this.toolStyle = toolStyle;
+        this.thrownErrorString = thrownErrorString;
     }
     _llmType() {
         return "fake";
     }
+    bindTools(tools) {
+        const merged = [...this.tools, ...tools];
+        const toolDicts = merged.map((t) => {
+            switch (this.toolStyle) {
+                case "openai":
+                    return {
+                        type: "function",
+                        function: {
+                            name: t.name,
+                            description: t.description,
+                            parameters: toJsonSchema(t.schema),
+                        },
+                    };
+                case "anthropic":
+                    return {
+                        name: t.name,
+                        description: t.description,
+                        input_schema: toJsonSchema(t.schema),
+                    };
+                case "bedrock":
+                    return {
+                        toolSpec: {
+                            name: t.name,
+                            description: t.description,
+                            inputSchema: toJsonSchema(t.schema),
+                        },
+                    };
+                case "google":
+                    return {
+                        name: t.name,
+                        description: t.description,
+                        parameters: toJsonSchema(t.schema),
+                    };
+                default:
+                    throw new Error(`Unsupported tool style: ${this.toolStyle}`);
+            }
+        });
+        const wrapped = this.toolStyle === "google"
+            ? [{ functionDeclarations: toolDicts }]
+            : toolDicts;
+        /* creating a *new* instance – mirrors LangChain .bind semantics for type-safety and avoiding noise */
+        const next = new FakeStreamingChatModel({
+            sleep: this.sleep,
+            responses: this.responses,
+            chunks: this.chunks,
+            toolStyle: this.toolStyle,
+            thrownErrorString: this.thrownErrorString,
+        });
+        next.tools = merged;
+        return next.bind({ tools: wrapped });
+    }
     async _generate(messages, _options, _runManager) {
         if (this.thrownErrorString) {
             throw new Error(this.thrownErrorString);
         }
-        const content = this.responses?.[0]
+        const content = this.responses?.[0]?.content ?? messages[0].content ?? "";
         const generation = {
             generations: [
                 {
                     text: "",
                     message: new AIMessage({
                         content,
+                        tool_calls: this.chunks?.[0]?.tool_calls,
                     }),
                 },
             ],
         };
         return generation;
     }
-    async *_streamResponseChunks(
+    async *_streamResponseChunks(_messages, _options, runManager) {
         if (this.thrownErrorString) {
             throw new Error(this.thrownErrorString);
         }
-
-
-
-                yield new ChatGenerationChunk({
-                    text: "",
+        if (this.chunks?.length) {
+            for (const msgChunk of this.chunks) {
+                const cg = new ChatGenerationChunk({
                     message: new AIMessageChunk({
-                        content,
+                        content: msgChunk.content,
+                        tool_calls: msgChunk.tool_calls,
+                        additional_kwargs: msgChunk.additional_kwargs ?? {},
                     }),
+                    text: msgChunk.content?.toString() ?? "",
                 });
+                yield cg;
+                await runManager?.handleLLMNewToken(msgChunk.content, undefined, undefined, undefined, undefined, { chunk: cg });
             }
+            return;
         }
-
-
-
-
-
-
-
-
-            }
+        const fallback = this.responses?.[0] ??
+            new AIMessage(typeof _messages[0].content === "string" ? _messages[0].content : "");
+        const text = typeof fallback.content === "string" ? fallback.content : "";
+        for (const ch of text) {
+            await new Promise((r) => setTimeout(r, this.sleep));
+            const cg = new ChatGenerationChunk({
+                message: new AIMessageChunk({ content: ch }),
+                text: ch,
+            });
+            yield cg;
+            await runManager?.handleLLMNewToken(ch, undefined, undefined, undefined, undefined, { chunk: cg });
         }
     }
 }

@@ -420,7 +499,13 @@ export class FakeListChatModel extends BaseChatModel {
     withStructuredOutput(_params, _config) {
         return RunnableLambda.from(async (input) => {
             const message = await this.invoke(input);
-
+            if (message.tool_calls?.[0]?.args) {
+                return message.tool_calls[0].args;
+            }
+            if (typeof message.content === "string") {
+                return JSON.parse(message.content);
+            }
+            throw new Error("No structured output found");
         });
     }
 }

package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@langchain/core",
-  "version": "0.3.55",
+  "version": "0.3.56",
   "description": "Core LangChain.js abstractions and schemas",
   "type": "module",
   "engines": {
@@ -38,7 +38,7 @@
   "camelcase": "6",
   "decamelize": "1.2.0",
   "js-tiktoken": "^1.0.12",
-  "langsmith": "^0.3.
+  "langsmith": "^0.3.29",
   "mustache": "^4.2.0",
   "p-queue": "^6.6.2",
   "p-retry": "4",