@librechat/agents 2.4.43 → 2.4.45

package/dist/esm/llm/vertexai/index.mjs ADDED
@@ -0,0 +1,328 @@
+ import { ChatGoogle } from '@langchain/google-gauth';
+ import { ChatConnection } from '@langchain/google-common';
+
+ class CustomChatConnection extends ChatConnection {
+     async formatData(input, parameters) {
+         const formattedData = (await super.formatData(input, parameters));
+         if (formattedData.generationConfig?.thinkingConfig?.thinkingBudget === -1 &&
+             formattedData.generationConfig.thinkingConfig.includeThoughts === false) {
+             formattedData.generationConfig.thinkingConfig.includeThoughts = true;
+         }
+         return formattedData;
+     }
+ }
+ /**
+ * Integration with Google Vertex AI chat models.
+ *
+ * Setup:
+ * Install `@langchain/google-vertexai` and set your stringified
+ * Vertex AI credentials as an environment variable named `GOOGLE_APPLICATION_CREDENTIALS`.
+ *
+ * ```bash
+ * npm install @langchain/google-vertexai
+ * export GOOGLE_APPLICATION_CREDENTIALS="path/to/credentials"
+ * ```
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_google_vertexai.index.ChatVertexAI.html#constructor.new_ChatVertexAI)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
+ * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, as shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.withConfig`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.withConfig({
+ *   stop: ["\n"],
+ *   tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ *   [...],
+ *   {
+ *     tool_choice: "auto",
+ *   }
+ * );
+ * ```
+ *
+ * ## Examples
+ *
+ * <details open>
+ * <summary><strong>Instantiate</strong></summary>
+ *
+ * ```typescript
+ * import { ChatVertexAI } from '@langchain/google-vertexai';
+ *
+ * const llm = new ChatVertexAI({
+ *   model: "gemini-1.5-pro",
+ *   temperature: 0,
+ *   // other params...
+ * });
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Invoking</strong></summary>
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n\nHere's why this is the best translation:\n\n* **J'adore** means \"I love\" and conveys a strong passion.\n* **Programmer** is the French verb for \"to program.\"\n\nThis translation is natural and idiomatic in French. \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 63,
+ *     "total_tokens": 72
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Streaming Chunks</strong></summary>
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ *   console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ *   full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Bind tools</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ *   name: "GetWeather",
+ *   description: "Get the current weather in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const GetPopulation = {
+ *   name: "GetPopulation",
+ *   description: "Get the current population in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ *   "Which city is hotter today and which is bigger: LA or NY?"
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ *   {
+ *     name: 'GetPopulation',
+ *     args: { location: 'New York City, NY' },
+ *     id: '33c1c1f47e2f492799c77d2800a43912',
+ *     type: 'tool_call'
+ *   }
+ * ]
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Structured Output</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ *   setup: z.string().describe("The setup of the joke"),
+ *   punchline: z.string().describe("The punchline to the joke"),
+ *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
+ *
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
+ * ```
+ *
+ * ```txt
+ * {
+ *   setup: 'What do you call a cat that loves to bowl?',
+ *   punchline: 'An alley cat!'
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Stream Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const streamForMetadata = await llm.stream(
+ *   input,
+ *   {
+ *     streamUsage: true
+ *   }
+ * );
+ * let fullForMetadata: AIMessageChunk | undefined;
+ * for await (const chunk of streamForMetadata) {
+ *   fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
+ * }
+ * console.log(fullForMetadata?.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ */
+ class ChatVertexAI extends ChatGoogle {
+     lc_namespace = ['langchain', 'chat_models', 'vertexai'];
+     dynamicThinkingBudget = false;
+     static lc_name() {
+         return 'ChatVertexAI';
+     }
+     constructor(fields) {
+         let dynamicThinkingBudget = false;
+         if (fields?.thinkingBudget === -1) {
+             dynamicThinkingBudget = true;
+             fields.thinkingBudget = 1;
+         }
+         super({
+             ...fields,
+             platformType: 'gcp',
+         });
+         this.dynamicThinkingBudget = dynamicThinkingBudget;
+     }
+     invocationParams(options) {
+         const params = super.invocationParams(options);
+         if (this.dynamicThinkingBudget) {
+             params.maxReasoningTokens = -1;
+         }
+         return params;
+     }
+     buildConnection(fields, client) {
+         this.connection = new CustomChatConnection({ ...fields, ...this }, this.caller, client, false);
+         this.streamedConnection = new CustomChatConnection({ ...fields, ...this }, this.caller, client, true);
+     }
+ }
+
+ export { ChatVertexAI };
+ //# sourceMappingURL=index.mjs.map
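Net effect of the new module: Gemini's dynamic thinking budget (`thinkingBudget: -1`) is not passed straight through at construction. The `ChatVertexAI` constructor records the flag and substitutes `1`, `invocationParams` restores the dynamic budget as `maxReasoningTokens: -1` per request, and `CustomChatConnection.formatData` forces `includeThoughts` on so thought summaries come back with the response. A minimal consumer-side sketch of that path; the import specifier and model name are assumptions, since the diff only shows the compiled module itself:

```typescript
// Sketch only: how ChatVertexAI is re-exported from @librechat/agents is not
// shown in this diff, and the model name below is illustrative.
import { ChatVertexAI } from '@librechat/agents';

const llm = new ChatVertexAI({
  model: 'gemini-2.5-flash', // illustrative
  thinkingBudget: -1,        // request a dynamic thinking budget
});

// The constructor sent thinkingBudget: 1 to super() and kept the -1 flag;
// invocationParams() re-applies it as maxReasoningTokens: -1, and the custom
// connection flips includeThoughts to true before the request is sent.
const result = await llm.invoke('Why is the sky blue?');
console.log(result.content);
```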
package/dist/esm/llm/vertexai/index.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.mjs","sources":["../../../../src/llm/vertexai/index.ts"], … } (generated sourcemap omitted; its inline sourcesContent duplicates the module shown above)
package/dist/esm/main.mjs CHANGED
@@ -1,4 +1,4 @@
- export { Run } from './run.mjs';
+ export { Run, defaultOmitOptions } from './run.mjs';
  export { ChatModelStreamHandler, createContentAggregator } from './stream.mjs';
  export { SEPARATORS, SplitStreamHandler } from './splitStream.mjs';
  export { HandlerRegistry, LLMStreamHandler, ModelEndHandler, TestChatStreamHandler, TestLLMStreamHandler, ToolEndHandler, createMetadataAggregator } from './events.mjs';
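Re-exporting `defaultOmitOptions` from the entry module lets callers start from the package's defaults rather than re-declaring the set. A hedged consumer-side sketch, assuming the published entry point resolves to `main.mjs`:

```typescript
// Assumption: '@librechat/agents' resolves to dist/esm/main.mjs as published.
import { defaultOmitOptions } from '@librechat/agents';

// Derive a wider omit set without mutating the shared default Set
const omitOptions = new Set([...defaultOmitOptions, 'topK']); // 'topK' is illustrative
console.log(omitOptions.has('thinkingBudget')); // true: included in the new defaults
```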
package/dist/esm/run.mjs CHANGED
@@ -12,6 +12,18 @@ import { HandlerRegistry } from './events.mjs';
  import { isOpenAILike } from './utils/llm.mjs';
 
  // src/run.ts
+ const defaultOmitOptions = new Set([
+     'stream',
+     'thinking',
+     'streaming',
+     'maxTokens',
+     'clientOptions',
+     'thinkingConfig',
+     'thinkingBudget',
+     'includeThoughts',
+     'maxOutputTokens',
+     'additionalModelRequestFields',
+ ]);
  class Run {
      graphRunnable;
      // private collab!: CollabGraph;
@@ -158,7 +170,7 @@ class Run {
              [Callback.TOOL_END]: this.createSystemCallback(clientCallbacks, Callback.TOOL_END),
          };
      }
-     async generateTitle({ provider, inputText, contentParts, titlePrompt, clientOptions, chainOptions, skipLanguage, }) {
+     async generateTitle({ provider, inputText, contentParts, titlePrompt, clientOptions, chainOptions, skipLanguage, omitOptions = defaultOmitOptions, }) {
          const convoTemplate = PromptTemplate.fromTemplate('User: {input}\nAI: {output}');
          const response = contentParts
              .map((part) => {
@@ -170,16 +182,8 @@ class Run {
          const convo = (await convoTemplate.invoke({ input: inputText, output: response })).value;
          const model = this.Graph?.getNewModel({
              provider,
+             omitOptions,
              clientOptions,
-             omitOptions: new Set([
-                 'clientOptions',
-                 'streaming',
-                 'stream',
-                 'thinking',
-                 'maxTokens',
-                 'maxOutputTokens',
-                 'additionalModelRequestFields',
-             ]),
          });
          if (!model) {
              return { language: '', title: '' };
@@ -200,5 +204,5 @@ class Run {
      }
  }
 
- export { Run };
+ export { Run, defaultOmitOptions };
  //# sourceMappingURL=run.mjs.map
package/dist/esm/run.mjs.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"run.mjs","sources":["../../src/run.ts"], … } (previous generated sourcemap omitted)
+ {"version":3,"file":"run.mjs","sources":["../../src/run.ts"], … } (regenerated sourcemap omitted; its mappings and embedded sourcesContent reflect the run.mjs changes shown above)
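The `generateTitle` change is the caller-facing half of the same refactor: the omit set that was hard-coded inside the method is now an `omitOptions` parameter defaulting to the exported `defaultOmitOptions`, so title generation can strip different client options per call. A sketch under assumptions; the `Run` instance, provider value, and content parts below are illustrative, not from the diff:

```typescript
// Sketch only: import path and option values are assumptions.
import { Run, defaultOmitOptions } from '@librechat/agents';

async function titleConvo(run: Run<any>, inputText: string) {
  return run.generateTitle({
    provider: 'openAI' as any, // illustrative; use the package's Providers value
    inputText,
    contentParts: [{ type: 'text', text: 'Bonjour !' }],
    clientOptions: { temperature: 0 },
    // Omitting this field now falls back to defaultOmitOptions; overriding it
    // strips extra keys before the title model is constructed:
    omitOptions: new Set([...defaultOmitOptions, 'streamUsage']), // extra key illustrative
  });
}
```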