@langchain/core 1.0.0-alpha.4 → 1.0.0-alpha.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents.d.ts.map +1 -1
- package/dist/caches/base.d.ts.map +1 -1
- package/dist/callbacks/base.d.ts.map +1 -1
- package/dist/language_models/chat_models.d.ts.map +1 -1
- package/dist/load/import_map.cjs +1 -1
- package/dist/load/import_map.js +1 -1
- package/dist/messages/ai.cjs +15 -10
- package/dist/messages/ai.cjs.map +1 -1
- package/dist/messages/ai.js +15 -10
- package/dist/messages/ai.js.map +1 -1
- package/dist/messages/base.cjs +8 -1
- package/dist/messages/base.cjs.map +1 -1
- package/dist/messages/base.js +8 -1
- package/dist/messages/base.js.map +1 -1
- package/dist/utils/testing/chat_models.cjs +245 -0
- package/dist/utils/testing/chat_models.cjs.map +1 -0
- package/dist/utils/testing/chat_models.d.cts +134 -0
- package/dist/utils/testing/chat_models.d.cts.map +1 -0
- package/dist/utils/testing/chat_models.d.ts +134 -0
- package/dist/utils/testing/chat_models.d.ts.map +1 -0
- package/dist/utils/testing/chat_models.js +243 -0
- package/dist/utils/testing/chat_models.js.map +1 -0
- package/dist/utils/testing/embeddings.cjs +90 -0
- package/dist/utils/testing/embeddings.cjs.map +1 -0
- package/dist/utils/testing/embeddings.d.cts +58 -0
- package/dist/utils/testing/embeddings.d.cts.map +1 -0
- package/dist/utils/testing/embeddings.d.ts +58 -0
- package/dist/utils/testing/embeddings.d.ts.map +1 -0
- package/dist/utils/testing/embeddings.js +89 -0
- package/dist/utils/testing/embeddings.js.map +1 -0
- package/dist/utils/testing/index.cjs +42 -655
- package/dist/utils/testing/index.cjs.map +1 -1
- package/dist/utils/testing/index.d.cts +11 -379
- package/dist/utils/testing/index.d.ts +11 -379
- package/dist/utils/testing/index.js +10 -623
- package/dist/utils/testing/index.js.map +1 -1
- package/dist/utils/testing/llms.cjs +59 -0
- package/dist/utils/testing/llms.cjs.map +1 -0
- package/dist/utils/testing/llms.d.cts +31 -0
- package/dist/utils/testing/llms.d.cts.map +1 -0
- package/dist/utils/testing/llms.d.ts +31 -0
- package/dist/utils/testing/llms.d.ts.map +1 -0
- package/dist/utils/testing/llms.js +58 -0
- package/dist/utils/testing/llms.js.map +1 -0
- package/dist/utils/testing/message_history.cjs +67 -0
- package/dist/utils/testing/message_history.cjs.map +1 -0
- package/dist/utils/testing/message_history.d.cts +31 -0
- package/dist/utils/testing/message_history.d.cts.map +1 -0
- package/dist/utils/testing/message_history.d.ts +31 -0
- package/dist/utils/testing/message_history.d.ts.map +1 -0
- package/dist/utils/testing/message_history.js +65 -0
- package/dist/utils/testing/message_history.js.map +1 -0
- package/dist/utils/testing/output_parsers.cjs +20 -0
- package/dist/utils/testing/output_parsers.cjs.map +1 -0
- package/dist/utils/testing/output_parsers.d.cts +16 -0
- package/dist/utils/testing/output_parsers.d.cts.map +1 -0
- package/dist/utils/testing/output_parsers.d.ts +16 -0
- package/dist/utils/testing/output_parsers.d.ts.map +1 -0
- package/dist/utils/testing/output_parsers.js +20 -0
- package/dist/utils/testing/output_parsers.js.map +1 -0
- package/dist/utils/testing/retrievers.cjs +19 -0
- package/dist/utils/testing/retrievers.cjs.map +1 -0
- package/dist/utils/testing/retrievers.d.cts +17 -0
- package/dist/utils/testing/retrievers.d.cts.map +1 -0
- package/dist/utils/testing/retrievers.d.ts +17 -0
- package/dist/utils/testing/retrievers.d.ts.map +1 -0
- package/dist/utils/testing/retrievers.js +19 -0
- package/dist/utils/testing/retrievers.js.map +1 -0
- package/dist/utils/testing/runnables.cjs +19 -0
- package/dist/utils/testing/runnables.cjs.map +1 -0
- package/dist/utils/testing/runnables.d.cts +15 -0
- package/dist/utils/testing/runnables.d.cts.map +1 -0
- package/dist/utils/testing/runnables.d.ts +15 -0
- package/dist/utils/testing/runnables.d.ts.map +1 -0
- package/dist/utils/testing/runnables.js +19 -0
- package/dist/utils/testing/runnables.js.map +1 -0
- package/dist/utils/testing/tools.cjs +21 -0
- package/dist/utils/testing/tools.cjs.map +1 -0
- package/dist/utils/testing/tools.d.cts +21 -0
- package/dist/utils/testing/tools.d.cts.map +1 -0
- package/dist/utils/testing/tools.d.ts +21 -0
- package/dist/utils/testing/tools.d.ts.map +1 -0
- package/dist/utils/testing/tools.js +21 -0
- package/dist/utils/testing/tools.js.map +1 -0
- package/dist/utils/testing/tracers.cjs +25 -0
- package/dist/utils/testing/tracers.cjs.map +1 -0
- package/dist/utils/testing/tracers.d.cts +15 -0
- package/dist/utils/testing/tracers.d.cts.map +1 -0
- package/dist/utils/testing/tracers.d.ts +15 -0
- package/dist/utils/testing/tracers.d.ts.map +1 -0
- package/dist/utils/testing/tracers.js +25 -0
- package/dist/utils/testing/tracers.js.map +1 -0
- package/dist/utils/testing/vectorstores.cjs +129 -0
- package/dist/utils/testing/vectorstores.cjs.map +1 -0
- package/dist/utils/testing/vectorstores.d.cts +101 -0
- package/dist/utils/testing/vectorstores.d.cts.map +1 -0
- package/dist/utils/testing/vectorstores.d.ts +101 -0
- package/dist/utils/testing/vectorstores.d.ts.map +1 -0
- package/dist/utils/testing/vectorstores.js +129 -0
- package/dist/utils/testing/vectorstores.js.map +1 -0
- package/package.json +1 -1
- package/dist/utils/testing/index.d.cts.map +0 -1
- package/dist/utils/testing/index.d.ts.map +0 -1
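
Most of this release is a restructuring of the `utils/testing` entrypoint: the previous single `index` bundle (note the large deletions there) is split into per-concern modules (`chat_models`, `embeddings`, `llms`, `message_history`, `output_parsers`, `retrievers`, `runnables`, `tools`, `tracers`, and `vectorstores`), with `index` shrinking to what appear to be re-exports. As a minimal sketch of what these helpers are for, assuming the public `@langchain/core/utils/testing` entrypoint still re-exports the fakes (the test below is illustrative and not part of this diff):

```typescript
import { FakeListChatModel } from "@langchain/core/utils/testing";

const chat = new FakeListChatModel({
  responses: ["first canned reply", "second canned reply"],
  sleep: 10, // milliseconds to pause between responses
});

// `invoke` comes from the Runnable base class; each call returns the next
// canned response as an AIMessage, cycling back to the start when exhausted.
const first = await chat.invoke("hello");
const second = await chat.invoke("hello again");
console.log(first.content); // "first canned reply"
console.log(second.content); // "second canned reply"
```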
package/dist/utils/testing/chat_models.d.cts
@@ -0,0 +1,134 @@
+import { BaseMessage } from "../../messages/base.cjs";
+import { AIMessage, AIMessageChunk } from "../../messages/ai.cjs";
+import { MessageStructure } from "../../messages/message.cjs";
+import { ChatGenerationChunk, ChatResult } from "../../outputs.cjs";
+import { InteropZodType } from "../types/zod.cjs";
+import { CallbackManagerForLLMRun } from "../../callbacks/manager.cjs";
+import { Runnable } from "../../runnables/base.cjs";
+import { BaseLanguageModelInput, StructuredOutputMethodOptions, StructuredOutputMethodParams } from "../../language_models/base.cjs";
+import { StructuredTool } from "../../tools/index.cjs";
+import { BaseChatModel, BaseChatModelCallOptions, BaseChatModelParams } from "../../language_models/chat_models.cjs";
+import { BaseLLMParams } from "../../language_models/llms.cjs";
+
+//#region src/utils/testing/chat_models.d.ts
+/** Minimal shape actually needed by `bindTools` */
+interface ToolSpec {
+  name: string;
+  description?: string;
+  schema: InteropZodType | Record<string, unknown>; // Either a Zod schema *or* a plain JSON-Schema object
+}
+/**
+ * Interface specific to the Fake Streaming Chat model.
+ */
+interface FakeStreamingChatModelCallOptions extends BaseChatModelCallOptions {}
+/**
+ * Interface for the Constructor-field specific to the Fake Streaming Chat model (all optional because we fill in defaults).
+ */
+interface FakeStreamingChatModelFields extends BaseChatModelParams {
+  /** Milliseconds to pause between fallback char-by-char chunks */
+  sleep?: number;
+  /** Full AI messages to fall back to when no `chunks` supplied */
+  responses?: BaseMessage[];
+  /** Exact chunks to emit (can include tool-call deltas) */
+  chunks?: AIMessageChunk[];
+  /** How tool specs are formatted in `bindTools` */
+  toolStyle?: "openai" | "anthropic" | "bedrock" | "google";
+  /** Throw this error instead of streaming (useful in tests) */
+  thrownErrorString?: string;
+}
+declare class FakeChatModel extends BaseChatModel {
+  _combineLLMOutput(): never[];
+  _llmType(): string;
+  _generate(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+}
+declare class FakeStreamingChatModel extends BaseChatModel<FakeStreamingChatModelCallOptions> {
+  sleep: number;
+  responses: BaseMessage[];
+  chunks: AIMessageChunk[];
+  toolStyle: "openai" | "anthropic" | "bedrock" | "google";
+  thrownErrorString?: string;
+  private tools;
+  constructor({
+    sleep,
+    responses,
+    chunks,
+    toolStyle,
+    thrownErrorString,
+    ...rest
+  }: FakeStreamingChatModelFields & BaseLLMParams);
+  _llmType(): string;
+  bindTools(tools: (StructuredTool | ToolSpec)[]): Runnable<BaseLanguageModelInput, AIMessageChunk<MessageStructure>, FakeStreamingChatModelCallOptions>;
+  _generate(messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+  _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+}
+/**
+ * Interface for the input parameters specific to the Fake List Chat model.
+ */
+interface FakeChatInput extends BaseChatModelParams {
+  /** Responses to return */
+  responses: string[];
+  /** Time to sleep in milliseconds between responses */
+  sleep?: number;
+  emitCustomEvent?: boolean;
+}
+interface FakeListChatModelCallOptions extends BaseChatModelCallOptions {
+  thrownErrorString?: string;
+}
+/**
+ * A fake Chat Model that returns a predefined list of responses. It can be used
+ * for testing purposes.
+ * @example
+ * ```typescript
+ * const chat = new FakeListChatModel({
+ *   responses: ["I'll callback later.", "You 'console' them!"]
+ * });
+ *
+ * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
+ * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
+ *
+ * // Call the chat model with a message and log the response
+ * const firstResponse = await chat.call([firstMessage]);
+ * console.log({ firstResponse });
+ *
+ * const secondResponse = await chat.call([secondMessage]);
+ * console.log({ secondResponse });
+ * ```
+ */
+declare class FakeListChatModel extends BaseChatModel<FakeListChatModelCallOptions> {
+  static lc_name(): string;
+  lc_serializable: boolean;
+  responses: string[];
+  i: number;
+  sleep?: number;
+  emitCustomEvent: boolean;
+  constructor(params: FakeChatInput);
+  _combineLLMOutput(): never[];
+  _llmType(): string;
+  _generate(_messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+  _formatGeneration(text: string): {
+    message: AIMessage<MessageStructure>;
+    text: string;
+  };
+  _streamResponseChunks(_messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+  _sleepIfRequested(): Promise<void>;
+  _sleep(): Promise<void>;
+  _createResponseChunk(text: string): ChatGenerationChunk;
+  _currentResponse(): string;
+  _incrementResponse(): void;
+  withStructuredOutput<
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  RunOutput extends Record<string, any> = Record<string, any>>(_params: StructuredOutputMethodParams<RunOutput, false> | InteropZodType<RunOutput>
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
+  withStructuredOutput<
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  RunOutput extends Record<string, any> = Record<string, any>>(_params: StructuredOutputMethodParams<RunOutput, true> | InteropZodType<RunOutput>
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
+    raw: BaseMessage;
+    parsed: RunOutput;
+  }>;
+}
+//#endregion
+export { FakeChatInput, FakeChatModel, FakeListChatModel, FakeListChatModelCallOptions, FakeStreamingChatModel, FakeStreamingChatModelCallOptions, FakeStreamingChatModelFields, ToolSpec };
+//# sourceMappingURL=chat_models.d.cts.map
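Per the declarations above, `FakeStreamingChatModel` either replays its configured `chunks` exactly (tool-call deltas included) or falls back to streaming `responses[0]` character by character, pausing `sleep` milliseconds between characters. A minimal sketch, assuming the standard `@langchain/core/messages` and `@langchain/core/utils/testing` entrypoints:

```typescript
import { AIMessageChunk } from "@langchain/core/messages";
import { FakeStreamingChatModel } from "@langchain/core/utils/testing";

// Replay two exact chunks instead of falling back to char-by-char streaming.
const model = new FakeStreamingChatModel({
  chunks: [
    new AIMessageChunk({ content: "Hello, " }),
    new AIMessageChunk({ content: "world!" }),
  ],
});

// `stream` is inherited from Runnable; the prompt is ignored here because
// the model emits its canned chunks regardless of input. (Node runtime assumed.)
for await (const chunk of await model.stream("hi")) {
  process.stdout.write(String(chunk.content)); // "Hello, " then "world!"
}
```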
package/dist/utils/testing/chat_models.d.cts.map
@@ -0,0 +1 @@
+{"version":3,"file":"chat_models.d.cts","names":["CallbackManagerForLLMRun","BaseChatModel","BaseChatModelCallOptions","BaseChatModelParams","BaseLLMParams","BaseMessage","AIMessage","AIMessageChunk","ChatResult","ChatGenerationChunk","Runnable","StructuredTool","StructuredOutputMethodParams","BaseLanguageModelInput","StructuredOutputMethodOptions","InteropZodType","ToolSpec","Record","FakeStreamingChatModelCallOptions","FakeStreamingChatModelFields","FakeChatModel","Promise","FakeStreamingChatModel","sleep","responses","chunks","toolStyle","thrownErrorString","______messages_message_js0","MessageStructure","AsyncGenerator","FakeChatInput","FakeListChatModelCallOptions","FakeListChatModel","RunOutput"],"sources":["../../../src/utils/testing/chat_models.d.ts"],"sourcesContent":["import { CallbackManagerForLLMRun } from \"../../callbacks/manager.js\";\nimport { BaseChatModel, BaseChatModelCallOptions, BaseChatModelParams } from \"../../language_models/chat_models.js\";\nimport { BaseLLMParams } from \"../../language_models/llms.js\";\nimport { BaseMessage, AIMessage, AIMessageChunk } from \"../../messages/index.js\";\nimport { type ChatResult, ChatGenerationChunk } from \"../../outputs.js\";\nimport { Runnable } from \"../../runnables/base.js\";\nimport { StructuredTool } from \"../../tools/index.js\";\nimport { StructuredOutputMethodParams, BaseLanguageModelInput, StructuredOutputMethodOptions } from \"../../language_models/base.js\";\nimport { InteropZodType } from \"../types/zod.js\";\n/** Minimal shape actually needed by `bindTools` */\nexport interface ToolSpec {\n name: string;\n description?: string;\n schema: InteropZodType | Record<string, unknown>; // Either a Zod schema *or* a plain JSON-Schema object\n}\n/**\n * Interface specific to the Fake Streaming Chat model.\n */\nexport interface FakeStreamingChatModelCallOptions extends BaseChatModelCallOptions {\n}\n/**\n * Interface for the Constructor-field specific to the Fake Streaming Chat model (all optional because we fill in defaults).\n */\nexport interface FakeStreamingChatModelFields extends BaseChatModelParams {\n /** Milliseconds to pause between fallback char-by-char chunks */\n sleep?: number;\n /** Full AI messages to fall back to when no `chunks` supplied */\n responses?: BaseMessage[];\n /** Exact chunks to emit (can include tool-call deltas) */\n chunks?: AIMessageChunk[];\n /** How tool specs are formatted in `bindTools` */\n toolStyle?: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n /** Throw this error instead of streaming (useful in tests) */\n thrownErrorString?: string;\n}\nexport declare class FakeChatModel extends BaseChatModel {\n _combineLLMOutput(): never[];\n _llmType(): string;\n _generate(messages: BaseMessage[], options?: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n}\nexport declare class FakeStreamingChatModel extends BaseChatModel<FakeStreamingChatModelCallOptions> {\n sleep: number;\n responses: BaseMessage[];\n chunks: AIMessageChunk[];\n toolStyle: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n thrownErrorString?: string;\n private tools;\n constructor({ sleep, responses, chunks, toolStyle, thrownErrorString, ...rest }: FakeStreamingChatModelFields & BaseLLMParams);\n _llmType(): string;\n bindTools(tools: (StructuredTool | ToolSpec)[]): Runnable<BaseLanguageModelInput, AIMessageChunk<import(\"../../messages/message.js\").MessageStructure>, FakeStreamingChatModelCallOptions>;\n _generate(messages: BaseMessage[], _options: this[\"ParsedCallOptions\"], _runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n _streamResponseChunks(_messages: BaseMessage[], _options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n}\n/**\n * Interface for the input parameters specific to the Fake List Chat model.\n */\nexport interface FakeChatInput extends BaseChatModelParams {\n /** Responses to return */\n responses: string[];\n /** Time to sleep in milliseconds between responses */\n sleep?: number;\n emitCustomEvent?: boolean;\n}\nexport interface FakeListChatModelCallOptions extends BaseChatModelCallOptions {\n thrownErrorString?: string;\n}\n/**\n * A fake Chat Model that returns a predefined list of responses. It can be used\n * for testing purposes.\n * @example\n * ```typescript\n * const chat = new FakeListChatModel({\n * responses: [\"I'll callback later.\", \"You 'console' them!\"]\n * });\n *\n * const firstMessage = new HumanMessage(\"You want to hear a JavaScript joke?\");\n * const secondMessage = new HumanMessage(\"How do you cheer up a JavaScript developer?\");\n *\n * // Call the chat model with a message and log the response\n * const firstResponse = await chat.call([firstMessage]);\n * console.log({ firstResponse });\n *\n * const secondResponse = await chat.call([secondMessage]);\n * console.log({ secondResponse });\n * ```\n */\nexport declare class FakeListChatModel extends BaseChatModel<FakeListChatModelCallOptions> {\n static lc_name(): string;\n lc_serializable: boolean;\n responses: string[];\n i: number;\n sleep?: number;\n emitCustomEvent: boolean;\n constructor(params: FakeChatInput);\n _combineLLMOutput(): never[];\n _llmType(): string;\n _generate(_messages: BaseMessage[], options?: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n _formatGeneration(text: string): {\n message: AIMessage<import(\"../../messages/message.js\").MessageStructure>;\n text: string;\n };\n _streamResponseChunks(_messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n _sleepIfRequested(): Promise<void>;\n _sleep(): Promise<void>;\n _createResponseChunk(text: string): ChatGenerationChunk;\n _currentResponse(): string;\n _incrementResponse(): void;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(_params: StructuredOutputMethodParams<RunOutput, false> | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(_params: StructuredOutputMethodParams<RunOutput, true> | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n"],"mappings":";;;;;;;;;;;;;;UAUiBgB,QAAAA;;;EAAAA,MAAAA,EAGLD,cAHa,GAGIE,MAHJ,CAAA,MAAA,EAAA,OAAA,CAAA,CAAA,CAAA;;;;AAGU;AAKlBC,UAAAA,iCAAAA,SAA0ChB,wBAAwB,CAAA,CAKnF;;;;AAAsDC,UAArCgB,4BAAAA,SAAqChB,mBAAAA,CAAAA;EAAmB;EAYpDiB,KAAAA,CAAAA,EAAAA,MAAAA;EAAa;EAAA,SAGVf,CAAAA,EAXRA,WAWQA,EAAAA;EAAW;EAA8E,MAAWG,CAAAA,EAT/GD,cAS+GC,EAAAA;EAAU;EAAX,SAHhFP,CAAAA,EAAAA,QAAAA,GAAAA,WAAAA,GAAAA,SAAAA,GAAAA,QAAAA;EAAa;EAKnCqB,iBAAAA,CAAAA,EAAAA,MAAsB;;AAAuBJ,cAL7CE,aAAAA,SAAsBnB,aAAAA,CAKuBiB;EAAiC,iBAEpFb,CAAAA,CAAAA,EAAAA,KAAAA,EAAAA;EAAW,QACdE,CAAAA,CAAAA,EAAAA,MAAAA;EAAc,SAIRgB,CAAAA,QAAAA,EATMlB,WASNkB,EAAAA,EAAAA,OAAAA,CAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EATuEvB,wBASvEuB,CAAAA,EATkGF,OASlGE,CAT0Gf,UAS1Ge,CAAAA;;AAAkBE,cAPfH,sBAAAA,SAA+BrB,aAOhBwB,CAP8BP,iCAO9BO,CAAAA,CAAAA;EAAM,KAAEC,EAAAA,MAAAA;EAAS,SAAEC,EALxCtB,WAKwCsB,EAAAA;EAAiB,MAAaR,EAJzEZ,cAIyEY,EAAAA;EAA4B,SAAGf,EAAAA,QAAAA,GAAAA,WAAAA,GAAAA,SAAAA,GAAAA,QAAAA;EAAa,iBAE3GO,CAAAA,EAAAA,MAAAA;EAAc,QAAGK,KAAAA;EAAQ,WAAeH,CAAAA;IAAAA,KAAAA;IAAAA,SAAAA;IAAAA,MAAAA;IAAAA,SAAAA;IAAAA,iBAAAA;IAAAA,GAAAA;EAAAA,CAAAA,EAFuBM,4BAEvBN,GAFsDT,aAEtDS;EAAsB,QAAA,CAAA,CAAA,EAAA,MAAA;EAAqE,SAAnEN,CAAAA,KAAAA,EAAAA,CAAhEI,cAAgEJ,GAA/CS,QAA+CT,CAAAA,EAAAA,CAAAA,EAAjCG,QAAiCH,CAAxBM,sBAAwBN,EAAAA,cAAAA,CAAF,gBAAA,CAAEA,EAAsEW,iCAAtEX,CAAAA;EAAc,SAAwDW,CAAAA,QAAAA,EACpIb,WADoIa,EAAAA,EAAAA,QAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,WAAAA,CAAAA,EAClElB,wBADkEkB,CAAAA,EACvCG,OADuCH,CAC/BV,UAD+BU,CAAAA;EAAiC,qBAAxIR,CAAAA,SAAAA,EAEhBL,WAFgBK,EAAAA,EAAAA,QAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EAEiDV,wBAFjDU,CAAAA,EAE4EoB,cAF5EpB,CAE2FD,mBAF3FC,CAAAA;;;;;AAEhBL,UAKpB0B,aAAAA,SAAsB5B,mBALFE,CAAAA;EAAW;EAA8E,SAAkBI,EAAAA,MAAAA,EAAAA;EAAmB;EAApB,KAX3FR,CAAAA,EAAAA,MAAAA;EAAa,eAAA,CAAA,EAAA,OAAA;AAgBjE;AAOiB+B,UAAAA,4BAAAA,SAAqC9B,wBAAwB,CAAA;EAuBzD+B,iBAAAA,CAAAA,EAAiB,MAAA;;;;;;;;;;;;;;;;;;;;;;AAuBqFlB,cAvBtGkB,iBAAAA,SAA0BhC,aAuB4Ec,CAvB9DiB,4BAuB8DjB,CAAAA,CAAAA;EAAc,OAElIE,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAM,eAAwBH,EAAAA,OAAAA;EAA6B,SAAmBD,EAAAA,MAAAA,EAAAA;EAAsB,CAAA,EAAEqB,MAAAA;EAAS,KAA1CxB,CAAAA,EAAAA,MAAAA;EAAQ,eAG9DO,EAAAA,OAAAA;EAAM,WAAgBA,CAAAA,MAAAA,EArBpBc,aAqBoBd;EAAM,iBAAqDiB,CAAAA,CAAAA,EAAAA,KAAAA,EAAAA;EAAS,QAAtCtB,CAAAA,CAAAA,EAAAA,MAAAA;EAA4B,SAAmCsB,CAAAA,SAAAA,EAlBhH7B,WAkBgH6B,EAAAA,EAAAA,OAAAA,CAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EAlB/ClC,wBAkB+CkC,CAAAA,EAlBpBb,OAkBoBa,CAlBZ1B,UAkBY0B,CAAAA;EAAS,iBAAxBnB,CAAAA,IAAAA,EAAAA,MAAAA,CAAAA,EAAAA;IAEnHE,OAAAA,EAlBUX,SAkBVW,CApBqH,gBAAA,CAoBrHA;IAA8BH,IAAAA,EAAAA,MAAAA;EAA6B,CAAA;EAAwC,qBAC7FT,CAAAA,SAAAA,EAhBwBA,WAgBxBA,EAAAA,EAAAA,OAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EAhBwFL,wBAgBxFK,CAAAA,EAhBmHyB,cAgBnHzB,CAhBkII,mBAgBlIJ,CAAAA;EAAW,iBACR6B,CAAAA,CAAAA,EAhBSb,OAgBTa,CAAAA,IAAAA,CAAAA;EAAS,MAFkDxB,CAAAA,CAAAA,EAb7DW,OAa6DX,CAAAA,IAAAA,CAAAA;EAAQ,oBA9BpCT,CAAAA,IAAAA,EAAAA,MAAAA,CAAAA,EAkBPQ,mBAlBOR;EAAa,gBAAA,CAAA,CAAA,EAAA,MAAA;;;;oBAuBtCgB,sBAAsBA,8BAA8BL,6BAA6BsB,oBAAoBnB,eAAemB;;IAEnIjB,8BAA8BH,uCAAuCJ,SAASG,wBAAwBqB;;;oBAGvFjB,sBAAsBA,8BAA8BL,6BAA6BsB,mBAAmBnB,eAAemB;;IAElIjB,8BAA8BH,sCAAsCJ,SAASG;SACvER;YACG6B"}
package/dist/utils/testing/chat_models.d.ts
@@ -0,0 +1,134 @@
+import { BaseMessage } from "../../messages/base.js";
+import { AIMessage, AIMessageChunk } from "../../messages/ai.js";
+import { MessageStructure } from "../../messages/message.js";
+import { ChatGenerationChunk, ChatResult } from "../../outputs.js";
+import { InteropZodType } from "../types/zod.js";
+import { CallbackManagerForLLMRun } from "../../callbacks/manager.js";
+import { Runnable } from "../../runnables/base.js";
+import { BaseLanguageModelInput, StructuredOutputMethodOptions, StructuredOutputMethodParams } from "../../language_models/base.js";
+import { StructuredTool } from "../../tools/index.js";
+import { BaseChatModel, BaseChatModelCallOptions, BaseChatModelParams } from "../../language_models/chat_models.js";
+import { BaseLLMParams } from "../../language_models/llms.js";
+
+//#region src/utils/testing/chat_models.d.ts
+/** Minimal shape actually needed by `bindTools` */
+interface ToolSpec {
+  name: string;
+  description?: string;
+  schema: InteropZodType | Record<string, unknown>; // Either a Zod schema *or* a plain JSON-Schema object
+}
+/**
+ * Interface specific to the Fake Streaming Chat model.
+ */
+interface FakeStreamingChatModelCallOptions extends BaseChatModelCallOptions {}
+/**
+ * Interface for the Constructor-field specific to the Fake Streaming Chat model (all optional because we fill in defaults).
+ */
+interface FakeStreamingChatModelFields extends BaseChatModelParams {
+  /** Milliseconds to pause between fallback char-by-char chunks */
+  sleep?: number;
+  /** Full AI messages to fall back to when no `chunks` supplied */
+  responses?: BaseMessage[];
+  /** Exact chunks to emit (can include tool-call deltas) */
+  chunks?: AIMessageChunk[];
+  /** How tool specs are formatted in `bindTools` */
+  toolStyle?: "openai" | "anthropic" | "bedrock" | "google";
+  /** Throw this error instead of streaming (useful in tests) */
+  thrownErrorString?: string;
+}
+declare class FakeChatModel extends BaseChatModel {
+  _combineLLMOutput(): never[];
+  _llmType(): string;
+  _generate(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+}
+declare class FakeStreamingChatModel extends BaseChatModel<FakeStreamingChatModelCallOptions> {
+  sleep: number;
+  responses: BaseMessage[];
+  chunks: AIMessageChunk[];
+  toolStyle: "openai" | "anthropic" | "bedrock" | "google";
+  thrownErrorString?: string;
+  private tools;
+  constructor({
+    sleep,
+    responses,
+    chunks,
+    toolStyle,
+    thrownErrorString,
+    ...rest
+  }: FakeStreamingChatModelFields & BaseLLMParams);
+  _llmType(): string;
+  bindTools(tools: (StructuredTool | ToolSpec)[]): Runnable<BaseLanguageModelInput, AIMessageChunk<MessageStructure>, FakeStreamingChatModelCallOptions>;
+  _generate(messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+  _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+}
+/**
+ * Interface for the input parameters specific to the Fake List Chat model.
+ */
+interface FakeChatInput extends BaseChatModelParams {
+  /** Responses to return */
+  responses: string[];
+  /** Time to sleep in milliseconds between responses */
+  sleep?: number;
+  emitCustomEvent?: boolean;
+}
+interface FakeListChatModelCallOptions extends BaseChatModelCallOptions {
+  thrownErrorString?: string;
+}
+/**
+ * A fake Chat Model that returns a predefined list of responses. It can be used
+ * for testing purposes.
+ * @example
+ * ```typescript
+ * const chat = new FakeListChatModel({
+ *   responses: ["I'll callback later.", "You 'console' them!"]
+ * });
+ *
+ * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
+ * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
+ *
+ * // Call the chat model with a message and log the response
+ * const firstResponse = await chat.call([firstMessage]);
+ * console.log({ firstResponse });
+ *
+ * const secondResponse = await chat.call([secondMessage]);
+ * console.log({ secondResponse });
+ * ```
+ */
+declare class FakeListChatModel extends BaseChatModel<FakeListChatModelCallOptions> {
+  static lc_name(): string;
+  lc_serializable: boolean;
+  responses: string[];
+  i: number;
+  sleep?: number;
+  emitCustomEvent: boolean;
+  constructor(params: FakeChatInput);
+  _combineLLMOutput(): never[];
+  _llmType(): string;
+  _generate(_messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+  _formatGeneration(text: string): {
+    message: AIMessage<MessageStructure>;
+    text: string;
+  };
+  _streamResponseChunks(_messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+  _sleepIfRequested(): Promise<void>;
+  _sleep(): Promise<void>;
+  _createResponseChunk(text: string): ChatGenerationChunk;
+  _currentResponse(): string;
+  _incrementResponse(): void;
+  withStructuredOutput<
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  RunOutput extends Record<string, any> = Record<string, any>>(_params: StructuredOutputMethodParams<RunOutput, false> | InteropZodType<RunOutput>
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
+  withStructuredOutput<
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  RunOutput extends Record<string, any> = Record<string, any>>(_params: StructuredOutputMethodParams<RunOutput, true> | InteropZodType<RunOutput>
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
+    raw: BaseMessage;
+    parsed: RunOutput;
+  }>;
+}
+//#endregion
+export { FakeChatInput, FakeChatModel, FakeListChatModel, FakeListChatModelCallOptions, FakeStreamingChatModel, FakeStreamingChatModelCallOptions, FakeStreamingChatModelFields, ToolSpec };
+//# sourceMappingURL=chat_models.d.ts.map
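The `.d.ts` above mirrors the `.d.cts` for the ESM build, differing only in its `.js` import specifiers. One detail worth calling out is `bindTools`: it accepts either `StructuredTool` instances or the bare `ToolSpec` shape, and `toolStyle` picks the provider wire format the specs are rendered into. A sketch using a plain JSON-Schema `ToolSpec` (the tool itself is a made-up example):

```typescript
import { FakeStreamingChatModel } from "@langchain/core/utils/testing";

// A bare ToolSpec: `schema` may be a Zod schema or, as here, plain JSON Schema.
const getWeather = {
  name: "get_weather",
  description: "Look up the weather for a city",
  schema: {
    type: "object",
    properties: { city: { type: "string" } },
    required: ["city"],
  },
};

// toolStyle controls the rendered shape: "anthropic" produces
// { name, description, input_schema } entries (see the implementation below).
const model = new FakeStreamingChatModel({ toolStyle: "anthropic" });
const withTools = model.bindTools([getWeather]);
```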
package/dist/utils/testing/chat_models.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"chat_models.d.ts","names":["CallbackManagerForLLMRun","BaseChatModel","BaseChatModelCallOptions","BaseChatModelParams","BaseLLMParams","BaseMessage","AIMessage","AIMessageChunk","ChatResult","ChatGenerationChunk","Runnable","StructuredTool","StructuredOutputMethodParams","BaseLanguageModelInput","StructuredOutputMethodOptions","InteropZodType","ToolSpec","Record","FakeStreamingChatModelCallOptions","FakeStreamingChatModelFields","FakeChatModel","Promise","FakeStreamingChatModel","sleep","responses","chunks","toolStyle","thrownErrorString","______messages_message_js0","MessageStructure","AsyncGenerator","FakeChatInput","FakeListChatModelCallOptions","FakeListChatModel","RunOutput"],"sources":["../../../src/utils/testing/chat_models.d.ts"],"sourcesContent":["import { CallbackManagerForLLMRun } from \"../../callbacks/manager.js\";\nimport { BaseChatModel, BaseChatModelCallOptions, BaseChatModelParams } from \"../../language_models/chat_models.js\";\nimport { BaseLLMParams } from \"../../language_models/llms.js\";\nimport { BaseMessage, AIMessage, AIMessageChunk } from \"../../messages/index.js\";\nimport { type ChatResult, ChatGenerationChunk } from \"../../outputs.js\";\nimport { Runnable } from \"../../runnables/base.js\";\nimport { StructuredTool } from \"../../tools/index.js\";\nimport { StructuredOutputMethodParams, BaseLanguageModelInput, StructuredOutputMethodOptions } from \"../../language_models/base.js\";\nimport { InteropZodType } from \"../types/zod.js\";\n/** Minimal shape actually needed by `bindTools` */\nexport interface ToolSpec {\n name: string;\n description?: string;\n schema: InteropZodType | Record<string, unknown>; // Either a Zod schema *or* a plain JSON-Schema object\n}\n/**\n * Interface specific to the Fake Streaming Chat model.\n */\nexport interface FakeStreamingChatModelCallOptions extends BaseChatModelCallOptions {\n}\n/**\n * Interface for the Constructor-field specific to the Fake Streaming Chat model (all optional because we fill in defaults).\n */\nexport interface FakeStreamingChatModelFields extends BaseChatModelParams {\n /** Milliseconds to pause between fallback char-by-char chunks */\n sleep?: number;\n /** Full AI messages to fall back to when no `chunks` supplied */\n responses?: BaseMessage[];\n /** Exact chunks to emit (can include tool-call deltas) */\n chunks?: AIMessageChunk[];\n /** How tool specs are formatted in `bindTools` */\n toolStyle?: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n /** Throw this error instead of streaming (useful in tests) */\n thrownErrorString?: string;\n}\nexport declare class FakeChatModel extends BaseChatModel {\n _combineLLMOutput(): never[];\n _llmType(): string;\n _generate(messages: BaseMessage[], options?: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n}\nexport declare class FakeStreamingChatModel extends BaseChatModel<FakeStreamingChatModelCallOptions> {\n sleep: number;\n responses: BaseMessage[];\n chunks: AIMessageChunk[];\n toolStyle: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n thrownErrorString?: string;\n private tools;\n constructor({ sleep, responses, chunks, toolStyle, thrownErrorString, ...rest }: FakeStreamingChatModelFields & BaseLLMParams);\n _llmType(): string;\n bindTools(tools: (StructuredTool | ToolSpec)[]): Runnable<BaseLanguageModelInput, AIMessageChunk<import(\"../../messages/message.js\").MessageStructure>, FakeStreamingChatModelCallOptions>;\n _generate(messages: BaseMessage[], _options: this[\"ParsedCallOptions\"], _runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n _streamResponseChunks(_messages: BaseMessage[], _options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n}\n/**\n * Interface for the input parameters specific to the Fake List Chat model.\n */\nexport interface FakeChatInput extends BaseChatModelParams {\n /** Responses to return */\n responses: string[];\n /** Time to sleep in milliseconds between responses */\n sleep?: number;\n emitCustomEvent?: boolean;\n}\nexport interface FakeListChatModelCallOptions extends BaseChatModelCallOptions {\n thrownErrorString?: string;\n}\n/**\n * A fake Chat Model that returns a predefined list of responses. It can be used\n * for testing purposes.\n * @example\n * ```typescript\n * const chat = new FakeListChatModel({\n * responses: [\"I'll callback later.\", \"You 'console' them!\"]\n * });\n *\n * const firstMessage = new HumanMessage(\"You want to hear a JavaScript joke?\");\n * const secondMessage = new HumanMessage(\"How do you cheer up a JavaScript developer?\");\n *\n * // Call the chat model with a message and log the response\n * const firstResponse = await chat.call([firstMessage]);\n * console.log({ firstResponse });\n *\n * const secondResponse = await chat.call([secondMessage]);\n * console.log({ secondResponse });\n * ```\n */\nexport declare class FakeListChatModel extends BaseChatModel<FakeListChatModelCallOptions> {\n static lc_name(): string;\n lc_serializable: boolean;\n responses: string[];\n i: number;\n sleep?: number;\n emitCustomEvent: boolean;\n constructor(params: FakeChatInput);\n _combineLLMOutput(): never[];\n _llmType(): string;\n _generate(_messages: BaseMessage[], options?: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n _formatGeneration(text: string): {\n message: AIMessage<import(\"../../messages/message.js\").MessageStructure>;\n text: string;\n };\n _streamResponseChunks(_messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n _sleepIfRequested(): Promise<void>;\n _sleep(): Promise<void>;\n _createResponseChunk(text: string): ChatGenerationChunk;\n _currentResponse(): string;\n _incrementResponse(): void;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(_params: StructuredOutputMethodParams<RunOutput, false> | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(_params: StructuredOutputMethodParams<RunOutput, true> | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n"],"mappings":";;;;;;;;;;;;;;UAUiBgB,QAAAA;;;EAAAA,MAAAA,EAGLD,cAHa,GAGIE,MAHJ,CAAA,MAAA,EAAA,OAAA,CAAA,CAAA,CAAA;;;;AAGU;AAKlBC,UAAAA,iCAAAA,SAA0ChB,wBAAwB,CAAA,CAKnF;;;;AAAsDC,UAArCgB,4BAAAA,SAAqChB,mBAAAA,CAAAA;EAAmB;EAYpDiB,KAAAA,CAAAA,EAAAA,MAAAA;EAAa;EAAA,SAGVf,CAAAA,EAXRA,WAWQA,EAAAA;EAAW;EAA8E,MAAWG,CAAAA,EAT/GD,cAS+GC,EAAAA;EAAU;EAAX,SAHhFP,CAAAA,EAAAA,QAAAA,GAAAA,WAAAA,GAAAA,SAAAA,GAAAA,QAAAA;EAAa;EAKnCqB,iBAAAA,CAAAA,EAAAA,MAAsB;;AAAuBJ,cAL7CE,aAAAA,SAAsBnB,aAAAA,CAKuBiB;EAAiC,iBAEpFb,CAAAA,CAAAA,EAAAA,KAAAA,EAAAA;EAAW,QACdE,CAAAA,CAAAA,EAAAA,MAAAA;EAAc,SAIRgB,CAAAA,QAAAA,EATMlB,WASNkB,EAAAA,EAAAA,OAAAA,CAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EATuEvB,wBASvEuB,CAAAA,EATkGF,OASlGE,CAT0Gf,UAS1Ge,CAAAA;;AAAkBE,cAPfH,sBAAAA,SAA+BrB,aAOhBwB,CAP8BP,iCAO9BO,CAAAA,CAAAA;EAAM,KAAEC,EAAAA,MAAAA;EAAS,SAAEC,EALxCtB,WAKwCsB,EAAAA;EAAiB,MAAaR,EAJzEZ,cAIyEY,EAAAA;EAA4B,SAAGf,EAAAA,QAAAA,GAAAA,WAAAA,GAAAA,SAAAA,GAAAA,QAAAA;EAAa,iBAE3GO,CAAAA,EAAAA,MAAAA;EAAc,QAAGK,KAAAA;EAAQ,WAAeH,CAAAA;IAAAA,KAAAA;IAAAA,SAAAA;IAAAA,MAAAA;IAAAA,SAAAA;IAAAA,iBAAAA;IAAAA,GAAAA;EAAAA,CAAAA,EAFuBM,4BAEvBN,GAFsDT,aAEtDS;EAAsB,QAAA,CAAA,CAAA,EAAA,MAAA;EAAqE,SAAnEN,CAAAA,KAAAA,EAAAA,CAAhEI,cAAgEJ,GAA/CS,QAA+CT,CAAAA,EAAAA,CAAAA,EAAjCG,QAAiCH,CAAxBM,sBAAwBN,EAAAA,cAAAA,CAAF,gBAAA,CAAEA,EAAsEW,iCAAtEX,CAAAA;EAAc,SAAwDW,CAAAA,QAAAA,EACpIb,WADoIa,EAAAA,EAAAA,QAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,WAAAA,CAAAA,EAClElB,wBADkEkB,CAAAA,EACvCG,OADuCH,CAC/BV,UAD+BU,CAAAA;EAAiC,qBAAxIR,CAAAA,SAAAA,EAEhBL,WAFgBK,EAAAA,EAAAA,QAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EAEiDV,wBAFjDU,CAAAA,EAE4EoB,cAF5EpB,CAE2FD,mBAF3FC,CAAAA;;;;;AAEhBL,UAKpB0B,aAAAA,SAAsB5B,mBALFE,CAAAA;EAAW;EAA8E,SAAkBI,EAAAA,MAAAA,EAAAA;EAAmB;EAApB,KAX3FR,CAAAA,EAAAA,MAAAA;EAAa,eAAA,CAAA,EAAA,OAAA;AAgBjE;AAOiB+B,UAAAA,4BAAAA,SAAqC9B,wBAAwB,CAAA;EAuBzD+B,iBAAAA,CAAAA,EAAiB,MAAA;;;;;;;;;;;;;;;;;;;;;;AAuBqFlB,cAvBtGkB,iBAAAA,SAA0BhC,aAuB4Ec,CAvB9DiB,4BAuB8DjB,CAAAA,CAAAA;EAAc,OAElIE,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAM,eAAwBH,EAAAA,OAAAA;EAA6B,SAAmBD,EAAAA,MAAAA,EAAAA;EAAsB,CAAA,EAAEqB,MAAAA;EAAS,KAA1CxB,CAAAA,EAAAA,MAAAA;EAAQ,eAG9DO,EAAAA,OAAAA;EAAM,WAAgBA,CAAAA,MAAAA,EArBpBc,aAqBoBd;EAAM,iBAAqDiB,CAAAA,CAAAA,EAAAA,KAAAA,EAAAA;EAAS,QAAtCtB,CAAAA,CAAAA,EAAAA,MAAAA;EAA4B,SAAmCsB,CAAAA,SAAAA,EAlBhH7B,WAkBgH6B,EAAAA,EAAAA,OAAAA,CAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EAlB/ClC,wBAkB+CkC,CAAAA,EAlBpBb,OAkBoBa,CAlBZ1B,UAkBY0B,CAAAA;EAAS,iBAAxBnB,CAAAA,IAAAA,EAAAA,MAAAA,CAAAA,EAAAA;IAEnHE,OAAAA,EAlBUX,SAkBVW,CApBqH,gBAAA,CAoBrHA;IAA8BH,IAAAA,EAAAA,MAAAA;EAA6B,CAAA;EAAwC,qBAC7FT,CAAAA,SAAAA,EAhBwBA,WAgBxBA,EAAAA,EAAAA,OAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EAhBwFL,wBAgBxFK,CAAAA,EAhBmHyB,cAgBnHzB,CAhBkII,mBAgBlIJ,CAAAA;EAAW,iBACR6B,CAAAA,CAAAA,EAhBSb,OAgBTa,CAAAA,IAAAA,CAAAA;EAAS,MAFkDxB,CAAAA,CAAAA,EAb7DW,OAa6DX,CAAAA,IAAAA,CAAAA;EAAQ,oBA9BpCT,CAAAA,IAAAA,EAAAA,MAAAA,CAAAA,EAkBPQ,mBAlBOR;EAAa,gBAAA,CAAA,CAAA,EAAA,MAAA;;;;oBAuBtCgB,sBAAsBA,8BAA8BL,6BAA6BsB,oBAAoBnB,eAAemB;;IAEnIjB,8BAA8BH,uCAAuCJ,SAASG,wBAAwBqB;;;oBAGvFjB,sBAAsBA,8BAA8BL,6BAA6BsB,mBAAmBnB,eAAemB;;IAElIjB,8BAA8BH,sCAAsCJ,SAASG;SACvER;YACG6B"}
package/dist/utils/testing/chat_models.js
@@ -0,0 +1,243 @@
+import { AIMessage, AIMessageChunk } from "../../messages/ai.js";
+import { ChatGenerationChunk } from "../../outputs.js";
+import { toJsonSchema } from "../json_schema.js";
+import { RunnableLambda } from "../../runnables/base.js";
+import "../../messages/index.js";
+import { BaseChatModel } from "../../language_models/chat_models.js";
+
+//#region src/utils/testing/chat_models.ts
+var FakeChatModel = class extends BaseChatModel {
+  _combineLLMOutput() {
+    return [];
+  }
+  _llmType() {
+    return "fake";
+  }
+  async _generate(messages, options, runManager) {
+    if (options?.stop?.length) return { generations: [{
+      message: new AIMessage(options.stop[0]),
+      text: options.stop[0]
+    }] };
+    const text = messages.map((m) => {
+      if (typeof m.content === "string") return m.content;
+      return JSON.stringify(m.content, null, 2);
+    }).join("\n");
+    await runManager?.handleLLMNewToken(text);
+    return {
+      generations: [{
+        message: new AIMessage(text),
+        text
+      }],
+      llmOutput: {}
+    };
+  }
+};
+var FakeStreamingChatModel = class FakeStreamingChatModel extends BaseChatModel {
+  sleep = 50;
+  responses = [];
+  chunks = [];
+  toolStyle = "openai";
+  thrownErrorString;
+  tools = [];
+  constructor({ sleep = 50, responses = [], chunks = [], toolStyle = "openai", thrownErrorString,...rest }) {
+    super(rest);
+    this.sleep = sleep;
+    this.responses = responses;
+    this.chunks = chunks;
+    this.toolStyle = toolStyle;
+    this.thrownErrorString = thrownErrorString;
+  }
+  _llmType() {
+    return "fake";
+  }
+  bindTools(tools) {
+    const merged = [...this.tools, ...tools];
+    const toolDicts = merged.map((t) => {
+      switch (this.toolStyle) {
+        case "openai": return {
+          type: "function",
+          function: {
+            name: t.name,
+            description: t.description,
+            parameters: toJsonSchema(t.schema)
+          }
+        };
+        case "anthropic": return {
+          name: t.name,
+          description: t.description,
+          input_schema: toJsonSchema(t.schema)
+        };
+        case "bedrock": return { toolSpec: {
+          name: t.name,
+          description: t.description,
+          inputSchema: toJsonSchema(t.schema)
+        } };
+        case "google": return {
+          name: t.name,
+          description: t.description,
+          parameters: toJsonSchema(t.schema)
+        };
+        default: throw new Error(`Unsupported tool style: ${this.toolStyle}`);
+      }
+    });
+    const wrapped = this.toolStyle === "google" ? [{ functionDeclarations: toolDicts }] : toolDicts;
+    const next = new FakeStreamingChatModel({
+      sleep: this.sleep,
+      responses: this.responses,
+      chunks: this.chunks,
+      toolStyle: this.toolStyle,
+      thrownErrorString: this.thrownErrorString
+    });
+    next.tools = merged;
+    return next.withConfig({ tools: wrapped });
+  }
+  async _generate(messages, _options, _runManager) {
+    if (this.thrownErrorString) throw new Error(this.thrownErrorString);
+    const content = this.responses?.[0]?.content ?? messages[0].content ?? "";
+    const generation = { generations: [{
+      text: "",
+      message: new AIMessage({
+        content,
+        tool_calls: this.chunks?.[0]?.tool_calls
+      })
+    }] };
+    return generation;
+  }
+  async *_streamResponseChunks(_messages, _options, runManager) {
+    if (this.thrownErrorString) throw new Error(this.thrownErrorString);
+    if (this.chunks?.length) {
+      for (const msgChunk of this.chunks) {
+        const cg = new ChatGenerationChunk({
+          message: new AIMessageChunk({
+            content: msgChunk.content,
+            tool_calls: msgChunk.tool_calls,
+            additional_kwargs: msgChunk.additional_kwargs ?? {}
+          }),
+          text: msgChunk.content?.toString() ?? ""
+        });
+        yield cg;
+        await runManager?.handleLLMNewToken(msgChunk.content, void 0, void 0, void 0, void 0, { chunk: cg });
+      }
+      return;
+    }
+    const fallback = this.responses?.[0] ?? new AIMessage(typeof _messages[0].content === "string" ? _messages[0].content : "");
+    const text = typeof fallback.content === "string" ? fallback.content : "";
+    for (const ch of text) {
+      await new Promise((r) => setTimeout(r, this.sleep));
+      const cg = new ChatGenerationChunk({
+        message: new AIMessageChunk({ content: ch }),
+        text: ch
+      });
+      yield cg;
+      await runManager?.handleLLMNewToken(ch, void 0, void 0, void 0, void 0, { chunk: cg });
+    }
+  }
+};
+/**
+ * A fake Chat Model that returns a predefined list of responses. It can be used
+ * for testing purposes.
+ * @example
+ * ```typescript
+ * const chat = new FakeListChatModel({
+ *   responses: ["I'll callback later.", "You 'console' them!"]
+ * });
+ *
+ * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
+ * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
+ *
+ * // Call the chat model with a message and log the response
+ * const firstResponse = await chat.call([firstMessage]);
+ * console.log({ firstResponse });
+ *
+ * const secondResponse = await chat.call([secondMessage]);
+ * console.log({ secondResponse });
+ * ```
+ */
+var FakeListChatModel = class extends BaseChatModel {
+  static lc_name() {
+    return "FakeListChatModel";
+  }
+  lc_serializable = true;
+  responses;
+  i = 0;
+  sleep;
+  emitCustomEvent = false;
+  constructor(params) {
+    super(params);
+    const { responses, sleep, emitCustomEvent } = params;
+    this.responses = responses;
+    this.sleep = sleep;
+    this.emitCustomEvent = emitCustomEvent ?? this.emitCustomEvent;
+  }
+  _combineLLMOutput() {
+    return [];
+  }
+  _llmType() {
+    return "fake-list";
+  }
+  async _generate(_messages, options, runManager) {
+    await this._sleepIfRequested();
+    if (options?.thrownErrorString) throw new Error(options.thrownErrorString);
+    if (this.emitCustomEvent) await runManager?.handleCustomEvent("some_test_event", { someval: true });
+    if (options?.stop?.length) return { generations: [this._formatGeneration(options.stop[0])] };
+    else {
+      const response = this._currentResponse();
+      this._incrementResponse();
+      return {
+        generations: [this._formatGeneration(response)],
+        llmOutput: {}
+      };
+    }
+  }
+  _formatGeneration(text) {
+    return {
+      message: new AIMessage(text),
+      text
+    };
+  }
+  async *_streamResponseChunks(_messages, options, runManager) {
+    const response = this._currentResponse();
+    this._incrementResponse();
+    if (this.emitCustomEvent) await runManager?.handleCustomEvent("some_test_event", { someval: true });
+    for await (const text of response) {
+      await this._sleepIfRequested();
+      if (options?.thrownErrorString) throw new Error(options.thrownErrorString);
+      const chunk = this._createResponseChunk(text);
+      yield chunk;
+      runManager?.handleLLMNewToken(text);
+    }
+  }
+  async _sleepIfRequested() {
+    if (this.sleep !== void 0) await this._sleep();
+  }
+  async _sleep() {
+    return new Promise((resolve) => {
+      setTimeout(() => resolve(), this.sleep);
+    });
+  }
+  _createResponseChunk(text) {
+    return new ChatGenerationChunk({
+      message: new AIMessageChunk({ content: text }),
+      text
+    });
+  }
+  _currentResponse() {
+    return this.responses[this.i];
+  }
+  _incrementResponse() {
+    if (this.i < this.responses.length - 1) this.i += 1;
+    else this.i = 0;
+  }
+  withStructuredOutput(_params, _config) {
+    return RunnableLambda.from(async (input) => {
+      const message = await this.invoke(input);
+      if (message.tool_calls?.[0]?.args) return message.tool_calls[0].args;
+      if (typeof message.content === "string") return JSON.parse(message.content);
+      throw new Error("No structured output found");
+    });
+  }
+};
+
+//#endregion
+export { FakeChatModel, FakeListChatModel, FakeStreamingChatModel };
+//# sourceMappingURL=chat_models.js.map
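As the implementation above shows, `FakeListChatModel.withStructuredOutput` ignores the supplied schema entirely: it returns the first tool call's `args` if present, otherwise `JSON.parse`s a string response, otherwise throws. That makes canned JSON strings the easiest way to fake structured output; a sketch (the schema argument is only there to satisfy the overloads):

```typescript
import { FakeListChatModel } from "@langchain/core/utils/testing";

const model = new FakeListChatModel({
  responses: ['{"answer": 42}'],
});

// A plain JSON-Schema object is accepted by the `Record<string, any>` overload;
// per the code above it is never actually validated against the output.
const structured = model.withStructuredOutput({
  type: "object",
  properties: { answer: { type: "number" } },
});

const result = await structured.invoke("What is the answer?");
console.log(result); // { answer: 42 }
```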
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"chat_models.js","names":["messages: BaseMessage[]","options?: this[\"ParsedCallOptions\"]","runManager?: CallbackManagerForLLMRun","tools: (StructuredTool | ToolSpec)[]","_options: this[\"ParsedCallOptions\"]","_runManager?: CallbackManagerForLLMRun","generation: ChatResult","_messages: BaseMessage[]","params: FakeChatInput","text: string","options: this[\"ParsedCallOptions\"]","_params:\n | StructuredOutputMethodParams<RunOutput, boolean>\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>","_config?: StructuredOutputMethodOptions<boolean>"],"sources":["../../../src/utils/testing/chat_models.ts"],"sourcesContent":["import { CallbackManagerForLLMRun } from \"../../callbacks/manager.js\";\nimport {\n BaseChatModel,\n BaseChatModelCallOptions,\n BaseChatModelParams,\n} from \"../../language_models/chat_models.js\";\nimport { BaseLLMParams } from \"../../language_models/llms.js\";\nimport {\n BaseMessage,\n AIMessage,\n AIMessageChunk,\n} from \"../../messages/index.js\";\nimport { type ChatResult, ChatGenerationChunk } from \"../../outputs.js\";\nimport { Runnable, RunnableLambda } from \"../../runnables/base.js\";\nimport { StructuredTool } from \"../../tools/index.js\";\nimport {\n StructuredOutputMethodParams,\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"../../language_models/base.js\";\n\nimport { toJsonSchema } from \"../json_schema.js\";\nimport { InteropZodType } from \"../types/zod.js\";\n\n/** Minimal shape actually needed by `bindTools` */\nexport interface ToolSpec {\n name: string;\n description?: string;\n schema: InteropZodType | Record<string, unknown>; // Either a Zod schema *or* a plain JSON-Schema object\n}\n\n/**\n * Interface specific to the Fake Streaming Chat model.\n */\nexport interface FakeStreamingChatModelCallOptions\n extends BaseChatModelCallOptions {}\n/**\n * Interface for the Constructor-field specific to the Fake Streaming Chat model (all optional because we fill in defaults).\n */\nexport interface FakeStreamingChatModelFields extends BaseChatModelParams {\n /** Milliseconds to pause between fallback char-by-char chunks */\n sleep?: number;\n\n /** Full AI messages to fall back to when no `chunks` supplied */\n responses?: BaseMessage[];\n\n /** Exact chunks to emit (can include tool-call deltas) */\n chunks?: AIMessageChunk[];\n\n /** How tool specs are formatted in `bindTools` */\n toolStyle?: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n\n /** Throw this error instead of streaming (useful in tests) */\n thrownErrorString?: string;\n}\n\nexport class FakeChatModel extends BaseChatModel {\n _combineLLMOutput() {\n return [];\n }\n\n _llmType(): string {\n return \"fake\";\n }\n\n async _generate(\n messages: BaseMessage[],\n options?: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n if (options?.stop?.length) {\n return {\n generations: [\n {\n message: new AIMessage(options.stop[0]),\n text: options.stop[0],\n },\n ],\n };\n }\n const text = messages\n .map((m) => {\n if (typeof m.content === \"string\") {\n return m.content;\n }\n return JSON.stringify(m.content, null, 2);\n })\n .join(\"\\n\");\n await runManager?.handleLLMNewToken(text);\n return {\n generations: [\n {\n message: new AIMessage(text),\n text,\n },\n ],\n llmOutput: {},\n };\n }\n}\n\nexport class FakeStreamingChatModel extends BaseChatModel<FakeStreamingChatModelCallOptions> {\n sleep = 50;\n\n responses: BaseMessage[] 
= [];\n\n chunks: AIMessageChunk[] = [];\n\n toolStyle: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\" = \"openai\";\n\n thrownErrorString?: string;\n\n private tools: (StructuredTool | ToolSpec)[] = [];\n\n constructor({\n sleep = 50,\n responses = [],\n chunks = [],\n toolStyle = \"openai\",\n thrownErrorString,\n ...rest\n }: FakeStreamingChatModelFields & BaseLLMParams) {\n super(rest);\n this.sleep = sleep;\n this.responses = responses;\n this.chunks = chunks;\n this.toolStyle = toolStyle;\n this.thrownErrorString = thrownErrorString;\n }\n\n _llmType() {\n return \"fake\";\n }\n\n bindTools(tools: (StructuredTool | ToolSpec)[]) {\n const merged = [...this.tools, ...tools];\n\n const toolDicts = merged.map((t) => {\n switch (this.toolStyle) {\n case \"openai\":\n return {\n type: \"function\",\n function: {\n name: t.name,\n description: t.description,\n parameters: toJsonSchema(t.schema),\n },\n };\n case \"anthropic\":\n return {\n name: t.name,\n description: t.description,\n input_schema: toJsonSchema(t.schema),\n };\n case \"bedrock\":\n return {\n toolSpec: {\n name: t.name,\n description: t.description,\n inputSchema: toJsonSchema(t.schema),\n },\n };\n case \"google\":\n return {\n name: t.name,\n description: t.description,\n parameters: toJsonSchema(t.schema),\n };\n default:\n throw new Error(`Unsupported tool style: ${this.toolStyle}`);\n }\n });\n\n const wrapped =\n this.toolStyle === \"google\"\n ? [{ functionDeclarations: toolDicts }]\n : toolDicts;\n\n /* creating a *new* instance – mirrors LangChain .bind semantics for type-safety and avoiding noise */\n const next = new FakeStreamingChatModel({\n sleep: this.sleep,\n responses: this.responses,\n chunks: this.chunks,\n toolStyle: this.toolStyle,\n thrownErrorString: this.thrownErrorString,\n });\n next.tools = merged;\n\n return next.withConfig({ tools: wrapped } as BaseChatModelCallOptions);\n }\n\n async _generate(\n messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n if (this.thrownErrorString) {\n throw new Error(this.thrownErrorString);\n }\n\n const content = this.responses?.[0]?.content ?? messages[0].content ?? \"\";\n\n const generation: ChatResult = {\n generations: [\n {\n text: \"\",\n message: new AIMessage({\n content,\n tool_calls: this.chunks?.[0]?.tool_calls,\n }),\n },\n ],\n };\n\n return generation;\n }\n\n async *_streamResponseChunks(\n _messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n if (this.thrownErrorString) {\n throw new Error(this.thrownErrorString);\n }\n if (this.chunks?.length) {\n for (const msgChunk of this.chunks) {\n const cg = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: msgChunk.content,\n tool_calls: msgChunk.tool_calls,\n additional_kwargs: msgChunk.additional_kwargs ?? {},\n }),\n text: msgChunk.content?.toString() ?? \"\",\n });\n\n yield cg;\n await runManager?.handleLLMNewToken(\n msgChunk.content as string,\n undefined,\n undefined,\n undefined,\n undefined,\n { chunk: cg }\n );\n }\n return;\n }\n\n const fallback =\n this.responses?.[0] ??\n new AIMessage(\n typeof _messages[0].content === \"string\" ? _messages[0].content : \"\"\n );\n const text = typeof fallback.content === \"string\" ? 
fallback.content : \"\";\n\n for (const ch of text) {\n await new Promise((r) => setTimeout(r, this.sleep));\n const cg = new ChatGenerationChunk({\n message: new AIMessageChunk({ content: ch }),\n text: ch,\n });\n yield cg;\n await runManager?.handleLLMNewToken(\n ch,\n undefined,\n undefined,\n undefined,\n undefined,\n { chunk: cg }\n );\n }\n }\n}\n\n/**\n * Interface for the input parameters specific to the Fake List Chat model.\n */\nexport interface FakeChatInput extends BaseChatModelParams {\n /** Responses to return */\n responses: string[];\n\n /** Time to sleep in milliseconds between responses */\n sleep?: number;\n\n emitCustomEvent?: boolean;\n}\n\nexport interface FakeListChatModelCallOptions extends BaseChatModelCallOptions {\n thrownErrorString?: string;\n}\n\n/**\n * A fake Chat Model that returns a predefined list of responses. It can be used\n * for testing purposes.\n * @example\n * ```typescript\n * const chat = new FakeListChatModel({\n * responses: [\"I'll callback later.\", \"You 'console' them!\"]\n * });\n *\n * const firstMessage = new HumanMessage(\"You want to hear a JavaScript joke?\");\n * const secondMessage = new HumanMessage(\"How do you cheer up a JavaScript developer?\");\n *\n * // Call the chat model with a message and log the response\n * const firstResponse = await chat.call([firstMessage]);\n * console.log({ firstResponse });\n *\n * const secondResponse = await chat.call([secondMessage]);\n * console.log({ secondResponse });\n * ```\n */\nexport class FakeListChatModel extends BaseChatModel<FakeListChatModelCallOptions> {\n static lc_name() {\n return \"FakeListChatModel\";\n }\n\n lc_serializable = true;\n\n responses: string[];\n\n i = 0;\n\n sleep?: number;\n\n emitCustomEvent = false;\n\n constructor(params: FakeChatInput) {\n super(params);\n const { responses, sleep, emitCustomEvent } = params;\n this.responses = responses;\n this.sleep = sleep;\n this.emitCustomEvent = emitCustomEvent ?? 
  _formatGeneration(text: string) {
    return {
      message: new AIMessage(text),
      text,
    };
  }

  async *_streamResponseChunks(
    _messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    const response = this._currentResponse();
    this._incrementResponse();
    if (this.emitCustomEvent) {
      await runManager?.handleCustomEvent("some_test_event", {
        someval: true,
      });
    }

    for await (const text of response) {
      await this._sleepIfRequested();
      if (options?.thrownErrorString) {
        throw new Error(options.thrownErrorString);
      }
      const chunk = this._createResponseChunk(text);
      yield chunk;
      // eslint-disable-next-line no-void
      void runManager?.handleLLMNewToken(text);
    }
  }

  async _sleepIfRequested() {
    if (this.sleep !== undefined) {
      await this._sleep();
    }
  }

  async _sleep() {
    return new Promise<void>((resolve) => {
      setTimeout(() => resolve(), this.sleep);
    });
  }

  _createResponseChunk(text: string): ChatGenerationChunk {
    return new ChatGenerationChunk({
      message: new AIMessageChunk({ content: text }),
      text,
    });
  }

  _currentResponse() {
    return this.responses[this.i];
  }

  _incrementResponse() {
    if (this.i < this.responses.length - 1) {
      this.i += 1;
    } else {
      this.i = 0;
    }
  }

  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    _params:
      | StructuredOutputMethodParams<RunOutput, false>
      | InteropZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<false>
  ): Runnable<BaseLanguageModelInput, RunOutput>;

  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    _params:
      | StructuredOutputMethodParams<RunOutput, true>
      | InteropZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<true>
  ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;

  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    _params:
      | StructuredOutputMethodParams<RunOutput, boolean>
      | InteropZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    _config?: StructuredOutputMethodOptions<boolean>
  ):
    | Runnable<BaseLanguageModelInput, RunOutput>
    | Runnable<
        BaseLanguageModelInput,
        { raw: BaseMessage; parsed: RunOutput }
      > {
    return RunnableLambda.from(async (input) => {
      const message = await this.invoke(input);
      if (message.tool_calls?.[0]?.args) {
        return message.tool_calls[0].args as RunOutput;
      }
      if (typeof message.content === "string") {
        return JSON.parse(message.content);
      }
      throw new Error("No structured output found");
    }) as Runnable;
  }
}
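/*
 * Editor's usage sketch (illustrative; not part of the package source).
 * Responses are served round-robin, and `withStructuredOutput` ignores its
 * schema argument: it returns the first tool call's args if present, else
 * JSON-parses the string content. Assumes the package's public entry point:
 *
 *   import { FakeListChatModel } from "@langchain/core/utils/testing";
 *
 *   const chat = new FakeListChatModel({
 *     responses: ['{"answer":42}', "second"],
 *   });
 *   const structured = chat.withStructuredOutput<{ answer: number }>({});
 *   await structured.invoke("q"); // { answer: 42 } (consumes response 0)
 *   await chat.invoke("q");       // AIMessage "second" (index advanced)
 */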
(End of inlined "sourcesContent". The remaining "mappings" field is base64-VLQ position data with no human-readable content.)