@mcp-use/inspector 0.9.0-canary.4 → 0.9.0-canary.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client/assets/__vite-browser-external-DFygW7-s.js +1 -0
- package/dist/client/assets/browser-DHySQ_a8.js +408 -0
- package/dist/client/assets/display-A5IEINAP-Diuf_-in.js +17 -0
- package/dist/client/assets/embeddings-Duw58exF.js +1 -0
- package/dist/client/assets/index-8BCSxqXJ.js +20 -0
- package/dist/client/assets/index-BDY1EpHw.js +25 -0
- package/dist/client/assets/index-C9XlRV8D.js +152 -0
- package/dist/client/assets/index-CHOOqKAT.js +1626 -0
- package/dist/client/assets/index-Cf47sZoD.css +1 -0
- package/dist/client/assets/index-Crh4icJt.js +2 -0
- package/dist/client/assets/index-D7lutfXP.js +8 -0
- package/dist/client/assets/index-DLT0Msg0.js +7 -0
- package/dist/client/assets/index-DRz5BQNA.js +1 -0
- package/dist/client/assets/index-JiKw9JSs.js +5 -0
- package/dist/client/assets/index-YgPHcJHh.js +33 -0
- package/dist/client/assets/path-C9FudP8b.js +1 -0
- package/dist/client/assets/transport-wrapper-browser-BI4dDu3b.js +1 -0
- package/dist/client/assets/util-t-trnxQI.js +1 -0
- package/dist/client/assets/winston-B2ZRVknI.js +37 -0
- package/dist/client/index.html +3 -3
- package/package.json +2 -2
- package/dist/client/assets/__vite-browser-external-CHS79mP1.js +0 -8
- package/dist/client/assets/browser-CwknRi82.js +0 -17967
- package/dist/client/assets/display-A5IEINAP-DNDlZQ8k.js +0 -480
- package/dist/client/assets/embeddings-DSM58WFd.js +0 -26
- package/dist/client/assets/index-BMuvzxLw.js +0 -400
- package/dist/client/assets/index-BWKKRrN4.js +0 -43965
- package/dist/client/assets/index-Br7cLVs6.css +0 -5812
- package/dist/client/assets/index-BumNh2YP.js +0 -725
- package/dist/client/assets/index-D-pAR_RA.js +0 -26733
- package/dist/client/assets/index-DD0wWmuA.js +0 -9991
- package/dist/client/assets/index-DEA0KU_h.js +0 -105533
- package/dist/client/assets/index-DX0TIfSM.js +0 -102
- package/dist/client/assets/index-DctLeaKS.js +0 -2817
- package/dist/client/assets/index-DgPlosep.js +0 -1812
- package/dist/client/assets/index-DiEpOjl1.js +0 -5370
- package/dist/client/assets/path-QsnVvLoj.js +0 -62
- package/dist/client/assets/transport-wrapper-browser-ChPHVnHg.js +0 -165
- package/dist/client/assets/util-D59LNlyU.js +0 -23
- package/dist/client/assets/winston-B_texDHP.js +0 -12307
package/dist/client/assets/index-BumNh2YP.js
@@ -1,725 +0,0 @@
-import { i as interopSafeParseAsync, O as OutputParserException, B as BaseCumulativeTransformOutputParser, a as isAIMessage, p as parsePartialJson, _ as __export, b as BaseLanguageModel, C as CallbackManager, G as GenerationChunk, c as callbackHandlerPrefersStreaming, d as concat, R as RUN_KEY, e as parseCallbackConfigArg, f as BaseLangChain, g as ensureConfig, m as mergeConfigs, h as _isToolCall, j as isInteropZodSchema, k as interopParseAsync, T as ToolInputParsingException, v as validate, l as _configHasToolCallId, n as isDirectToolOutput, o as ToolMessage, q as isStructuredToolParams, r as isStructuredTool, s as isRunnableToolLike, t as isLangChainTool, u as objectType, w as stringType, x as isSimpleStringZodSchema, y as validatesOnlyStrings, z as patchConfig, D as AsyncLocalStorageProviderSingleton, E as pickRunnableConfigKeys, F as getAbortSignalError } from "./index-D-pAR_RA.js";
-import { p as prettifyError } from "./index-DEA0KU_h.js";
-function parseToolCall(rawToolCall, options) {
-if (rawToolCall.function === void 0) return void 0;
-let functionArgs;
-if (options?.partial) try {
-functionArgs = parsePartialJson(rawToolCall.function.arguments ?? "{}");
-} catch {
-return void 0;
-}
-else try {
-functionArgs = JSON.parse(rawToolCall.function.arguments);
-} catch (e) {
-throw new OutputParserException([
-`Function "${rawToolCall.function.name}" arguments:`,
-``,
-rawToolCall.function.arguments,
-``,
-`are not valid JSON.`,
-`Error: ${e.message}`
-].join("\n"));
-}
-const parsedToolCall = {
-name: rawToolCall.function.name,
-args: functionArgs,
-type: "tool_call"
-};
-if (options?.returnId) parsedToolCall.id = rawToolCall.id;
-return parsedToolCall;
-}
-function convertLangChainToolCallToOpenAI(toolCall) {
-if (toolCall.id === void 0) throw new Error(`All OpenAI tool calls must have an "id" field.`);
-return {
-id: toolCall.id,
-type: "function",
-function: {
-name: toolCall.name,
-arguments: JSON.stringify(toolCall.args)
-}
-};
-}
-function makeInvalidToolCall(rawToolCall, errorMsg) {
-return {
-name: rawToolCall.function?.name,
-args: rawToolCall.function?.arguments,
-id: rawToolCall.id,
-error: errorMsg,
-type: "invalid_tool_call"
-};
-}
-var JsonOutputToolsParser = class extends BaseCumulativeTransformOutputParser {
-static lc_name() {
-return "JsonOutputToolsParser";
-}
-returnId = false;
-lc_namespace = [
-"langchain",
-"output_parsers",
-"openai_tools"
-];
-lc_serializable = true;
-constructor(fields) {
-super(fields);
-this.returnId = fields?.returnId ?? this.returnId;
-}
-_diff() {
-throw new Error("Not supported.");
-}
-async parse() {
-throw new Error("Not implemented.");
-}
-async parseResult(generations) {
-const result = await this.parsePartialResult(generations, false);
-return result;
-}
-/**
-* Parses the output and returns a JSON object. If `argsOnly` is true,
-* only the arguments of the function call are returned.
-* @param generations The output of the LLM to parse.
-* @returns A JSON object representation of the function call or its arguments.
-*/
-async parsePartialResult(generations, partial = true) {
-const message = generations[0].message;
-let toolCalls;
-if (isAIMessage(message) && message.tool_calls?.length) toolCalls = message.tool_calls.map((toolCall) => {
-const { id, ...rest } = toolCall;
-if (!this.returnId) return rest;
-return {
-id,
-...rest
-};
-});
-else if (message.additional_kwargs.tool_calls !== void 0) {
-const rawToolCalls = JSON.parse(JSON.stringify(message.additional_kwargs.tool_calls));
-toolCalls = rawToolCalls.map((rawToolCall) => {
-return parseToolCall(rawToolCall, {
-returnId: this.returnId,
-partial
-});
-});
-}
-if (!toolCalls) return [];
-const parsedToolCalls = [];
-for (const toolCall of toolCalls) if (toolCall !== void 0) {
-const backwardsCompatibleToolCall = {
-type: toolCall.name,
-args: toolCall.args,
-id: toolCall.id
-};
-parsedToolCalls.push(backwardsCompatibleToolCall);
-}
-return parsedToolCalls;
-}
-};
-var JsonOutputKeyToolsParser = class extends JsonOutputToolsParser {
-static lc_name() {
-return "JsonOutputKeyToolsParser";
-}
-lc_namespace = [
-"langchain",
-"output_parsers",
-"openai_tools"
-];
-lc_serializable = true;
-returnId = false;
-/** The type of tool calls to return. */
-keyName;
-/** Whether to return only the first tool call. */
-returnSingle = false;
-zodSchema;
-constructor(params) {
-super(params);
-this.keyName = params.keyName;
-this.returnSingle = params.returnSingle ?? this.returnSingle;
-this.zodSchema = params.zodSchema;
-}
-async _validateResult(result) {
-if (this.zodSchema === void 0) return result;
-const zodParsedResult = await interopSafeParseAsync(this.zodSchema, result);
-if (zodParsedResult.success) return zodParsedResult.data;
-else throw new OutputParserException(`Failed to parse. Text: "${JSON.stringify(result, null, 2)}". Error: ${JSON.stringify(zodParsedResult.error?.issues)}`, JSON.stringify(result, null, 2));
-}
-async parsePartialResult(generations) {
-const results = await super.parsePartialResult(generations);
-const matchingResults = results.filter((result) => result.type === this.keyName);
-let returnedValues = matchingResults;
-if (!matchingResults.length) return void 0;
-if (!this.returnId) returnedValues = matchingResults.map((result) => result.args);
-if (this.returnSingle) return returnedValues[0];
-return returnedValues;
-}
-async parseResult(generations) {
-const results = await super.parsePartialResult(generations, false);
-const matchingResults = results.filter((result) => result.type === this.keyName);
-let returnedValues = matchingResults;
-if (!matchingResults.length) return void 0;
-if (!this.returnId) returnedValues = matchingResults.map((result) => result.args);
-if (this.returnSingle) return this._validateResult(returnedValues[0]);
-const toolCallResults = await Promise.all(returnedValues.map((value) => this._validateResult(value)));
-return toolCallResults;
-}
-};
-var openai_tools_exports = {};
-__export(openai_tools_exports, {
-JsonOutputKeyToolsParser: () => JsonOutputKeyToolsParser,
-JsonOutputToolsParser: () => JsonOutputToolsParser,
-convertLangChainToolCallToOpenAI: () => convertLangChainToolCallToOpenAI,
-makeInvalidToolCall: () => makeInvalidToolCall,
-parseToolCall: () => parseToolCall
-});
-var llms_exports = {};
-__export(llms_exports, {
-BaseLLM: () => BaseLLM,
-LLM: () => LLM
-});
-var BaseLLM = class BaseLLM2 extends BaseLanguageModel {
-lc_namespace = [
-"langchain",
-"llms",
-this._llmType()
-];
-/**
-* This method takes an input and options, and returns a string. It
-* converts the input to a prompt value and generates a result based on
-* the prompt.
-* @param input Input for the LLM.
-* @param options Options for the LLM call.
-* @returns A string result based on the prompt.
-*/
-async invoke(input, options) {
-const promptValue = BaseLLM2._convertInputToPromptValue(input);
-const result = await this.generatePrompt([promptValue], options, options?.callbacks);
-return result.generations[0][0].text;
-}
-async *_streamResponseChunks(_input, _options, _runManager) {
-throw new Error("Not implemented.");
-}
-_separateRunnableConfigFromCallOptionsCompat(options) {
-const [runnableConfig, callOptions] = super._separateRunnableConfigFromCallOptions(options);
-callOptions.signal = runnableConfig.signal;
-return [runnableConfig, callOptions];
-}
-async *_streamIterator(input, options) {
-if (this._streamResponseChunks === BaseLLM2.prototype._streamResponseChunks) yield this.invoke(input, options);
-else {
-const prompt = BaseLLM2._convertInputToPromptValue(input);
-const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptionsCompat(options);
-const callbackManager_ = await CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
-const extra = {
-options: callOptions,
-invocation_params: this?.invocationParams(callOptions),
-batch_size: 1
-};
-const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], runnableConfig.runId, void 0, extra, void 0, void 0, runnableConfig.runName);
-let generation = new GenerationChunk({ text: "" });
-try {
-for await (const chunk of this._streamResponseChunks(prompt.toString(), callOptions, runManagers?.[0])) {
-if (!generation) generation = chunk;
-else generation = generation.concat(chunk);
-if (typeof chunk.text === "string") yield chunk.text;
-}
-} catch (err) {
-await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-throw err;
-}
-await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({ generations: [[generation]] })));
-}
-}
-/**
-* This method takes prompt values, options, and callbacks, and generates
-* a result based on the prompts.
-* @param promptValues Prompt values for the LLM.
-* @param options Options for the LLM call.
-* @param callbacks Callbacks for the LLM call.
-* @returns An LLMResult based on the prompts.
-*/
-async generatePrompt(promptValues, options, callbacks) {
-const prompts = promptValues.map((promptValue) => promptValue.toString());
-return this.generate(prompts, options, callbacks);
-}
-/**
-* Get the parameters used to invoke the model
-*/
-invocationParams(_options) {
-return {};
-}
-_flattenLLMResult(llmResult) {
-const llmResults = [];
-for (let i = 0; i < llmResult.generations.length; i += 1) {
-const genList = llmResult.generations[i];
-if (i === 0) llmResults.push({
-generations: [genList],
-llmOutput: llmResult.llmOutput
-});
-else {
-const llmOutput = llmResult.llmOutput ? {
-...llmResult.llmOutput,
-tokenUsage: {}
-} : void 0;
-llmResults.push({
-generations: [genList],
-llmOutput
-});
-}
-}
-return llmResults;
-}
-/** @ignore */
-async _generateUncached(prompts, parsedOptions, handledOptions, startedRunManagers) {
-let runManagers;
-if (startedRunManagers !== void 0 && startedRunManagers.length === prompts.length) runManagers = startedRunManagers;
-else {
-const callbackManager_ = await CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
-const extra = {
-options: parsedOptions,
-invocation_params: this?.invocationParams(parsedOptions),
-batch_size: prompts.length
-};
-runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, handledOptions.runId, void 0, extra, void 0, void 0, handledOptions?.runName);
-}
-const hasStreamingHandler = !!runManagers?.[0].handlers.find(callbackHandlerPrefersStreaming);
-let output;
-if (hasStreamingHandler && prompts.length === 1 && this._streamResponseChunks !== BaseLLM2.prototype._streamResponseChunks) try {
-const stream = await this._streamResponseChunks(prompts[0], parsedOptions, runManagers?.[0]);
-let aggregated;
-for await (const chunk of stream) if (aggregated === void 0) aggregated = chunk;
-else aggregated = concat(aggregated, chunk);
-if (aggregated === void 0) throw new Error("Received empty response from chat model call.");
-output = {
-generations: [[aggregated]],
-llmOutput: {}
-};
-await runManagers?.[0].handleLLMEnd(output);
-} catch (e) {
-await runManagers?.[0].handleLLMError(e);
-throw e;
-}
-else {
-try {
-output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
-} catch (err) {
-await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-throw err;
-}
-const flattenedOutputs = this._flattenLLMResult(output);
-await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
-}
-const runIds = runManagers?.map((manager) => manager.runId) || void 0;
-Object.defineProperty(output, RUN_KEY, {
-value: runIds ? { runIds } : void 0,
-configurable: true
-});
-return output;
-}
-async _generateCached({ prompts, cache, llmStringKey, parsedOptions, handledOptions, runId }) {
-const callbackManager_ = await CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
-const extra = {
-options: parsedOptions,
-invocation_params: this?.invocationParams(parsedOptions),
-batch_size: prompts.length
-};
-const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, runId, void 0, extra, void 0, void 0, handledOptions?.runName);
-const missingPromptIndices = [];
-const results = await Promise.allSettled(prompts.map(async (prompt, index) => {
-const result = await cache.lookup(prompt, llmStringKey);
-if (result == null) missingPromptIndices.push(index);
-return result;
-}));
-const cachedResults = results.map((result, index) => ({
-result,
-runManager: runManagers?.[index]
-})).filter(({ result }) => result.status === "fulfilled" && result.value != null || result.status === "rejected");
-const generations = [];
-await Promise.all(cachedResults.map(async ({ result: promiseResult, runManager }, i) => {
-if (promiseResult.status === "fulfilled") {
-const result = promiseResult.value;
-generations[i] = result.map((result$1) => {
-result$1.generationInfo = {
-...result$1.generationInfo,
-tokenUsage: {}
-};
-return result$1;
-});
-if (result.length) await runManager?.handleLLMNewToken(result[0].text);
-return runManager?.handleLLMEnd({ generations: [result] }, void 0, void 0, void 0, { cached: true });
-} else {
-await runManager?.handleLLMError(promiseResult.reason, void 0, void 0, void 0, { cached: true });
-return Promise.reject(promiseResult.reason);
-}
-}));
-const output = {
-generations,
-missingPromptIndices,
-startedRunManagers: runManagers
-};
-Object.defineProperty(output, RUN_KEY, {
-value: runManagers ? { runIds: runManagers?.map((manager) => manager.runId) } : void 0,
-configurable: true
-});
-return output;
-}
-/**
-* Run the LLM on the given prompts and input, handling caching.
-*/
-async generate(prompts, options, callbacks) {
-if (!Array.isArray(prompts)) throw new Error("Argument 'prompts' is expected to be a string[]");
-let parsedOptions;
-if (Array.isArray(options)) parsedOptions = { stop: options };
-else parsedOptions = options;
-const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptionsCompat(parsedOptions);
-runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
-if (!this.cache) return this._generateUncached(prompts, callOptions, runnableConfig);
-const { cache } = this;
-const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
-const { generations, missingPromptIndices, startedRunManagers } = await this._generateCached({
-prompts,
-cache,
-llmStringKey,
-parsedOptions: callOptions,
-handledOptions: runnableConfig,
-runId: runnableConfig.runId
-});
-let llmOutput = {};
-if (missingPromptIndices.length > 0) {
-const results = await this._generateUncached(missingPromptIndices.map((i) => prompts[i]), callOptions, runnableConfig, startedRunManagers !== void 0 ? missingPromptIndices.map((i) => startedRunManagers?.[i]) : void 0);
-await Promise.all(results.generations.map(async (generation, index) => {
-const promptIndex = missingPromptIndices[index];
-generations[promptIndex] = generation;
-return cache.update(prompts[promptIndex], llmStringKey, generation);
-}));
-llmOutput = results.llmOutput ?? {};
-}
-return {
-generations,
-llmOutput
-};
-}
-/**
-* Get the identifying parameters of the LLM.
-*/
-_identifyingParams() {
-return {};
-}
-_modelType() {
-return "base_llm";
-}
-};
-var LLM = class extends BaseLLM {
-async _generate(prompts, options, runManager) {
-const generations = await Promise.all(prompts.map((prompt, promptIndex) => this._call(prompt, {
-...options,
-promptIndex
-}, runManager).then((text) => [{ text }])));
-return { generations };
-}
-};
-var tools_exports = {};
-__export(tools_exports, {
-BaseToolkit: () => BaseToolkit,
-DynamicStructuredTool: () => DynamicStructuredTool,
-DynamicTool: () => DynamicTool,
-StructuredTool: () => StructuredTool,
-Tool: () => Tool,
-ToolInputParsingException: () => ToolInputParsingException,
-isLangChainTool: () => isLangChainTool,
-isRunnableToolLike: () => isRunnableToolLike,
-isStructuredTool: () => isStructuredTool,
-isStructuredToolParams: () => isStructuredToolParams,
-tool: () => tool
-});
-var StructuredTool = class extends BaseLangChain {
-/**
-* Whether to return the tool's output directly.
-*
-* Setting this to true means that after the tool is called,
-* an agent should stop looping.
-*/
-returnDirect = false;
-verboseParsingErrors = false;
-get lc_namespace() {
-return ["langchain", "tools"];
-}
-/**
-* The tool response format.
-*
-* If "content" then the output of the tool is interpreted as the contents of a
-* ToolMessage. If "content_and_artifact" then the output is expected to be a
-* two-tuple corresponding to the (content, artifact) of a ToolMessage.
-*
-* @default "content"
-*/
-responseFormat = "content";
-/**
-* Default config object for the tool runnable.
-*/
-defaultConfig;
-constructor(fields) {
-super(fields ?? {});
-this.verboseParsingErrors = fields?.verboseParsingErrors ?? this.verboseParsingErrors;
-this.responseFormat = fields?.responseFormat ?? this.responseFormat;
-this.defaultConfig = fields?.defaultConfig ?? this.defaultConfig;
-this.metadata = fields?.metadata ?? this.metadata;
-}
-/**
-* Invokes the tool with the provided input and configuration.
-* @param input The input for the tool.
-* @param config Optional configuration for the tool.
-* @returns A Promise that resolves with the tool's output.
-*/
-async invoke(input, config) {
-let toolInput;
-let enrichedConfig = ensureConfig(mergeConfigs(this.defaultConfig, config));
-if (_isToolCall(input)) {
-toolInput = input.args;
-enrichedConfig = {
-...enrichedConfig,
-toolCall: input
-};
-} else toolInput = input;
-return this.call(toolInput, enrichedConfig);
-}
-/**
-* @deprecated Use .invoke() instead. Will be removed in 0.3.0.
-*
-* Calls the tool with the provided argument, configuration, and tags. It
-* parses the input according to the schema, handles any errors, and
-* manages callbacks.
-* @param arg The input argument for the tool.
-* @param configArg Optional configuration or callbacks for the tool.
-* @param tags Optional tags for the tool.
-* @returns A Promise that resolves with a string.
-*/
-async call(arg, configArg, tags) {
-const inputForValidation = _isToolCall(arg) ? arg.args : arg;
-let parsed;
-if (isInteropZodSchema(this.schema)) try {
-parsed = await interopParseAsync(this.schema, inputForValidation);
-} catch (e) {
-let message = `Received tool input did not match expected schema`;
-if (this.verboseParsingErrors) message = `${message}
-Details: ${e.message}`;
-if (e instanceof Error && e.constructor.name === "ZodError") message = `${message}
-
-${prettifyError(e)}`;
-throw new ToolInputParsingException(message, JSON.stringify(arg));
-}
-else {
-const result$1 = validate(inputForValidation, this.schema);
-if (!result$1.valid) {
-let message = `Received tool input did not match expected schema`;
-if (this.verboseParsingErrors) message = `${message}
-Details: ${result$1.errors.map((e) => `${e.keywordLocation}: ${e.error}`).join("\n")}`;
-throw new ToolInputParsingException(message, JSON.stringify(arg));
-}
-parsed = inputForValidation;
-}
-const config = parseCallbackConfigArg(configArg);
-const callbackManager_ = CallbackManager.configure(config.callbacks, this.callbacks, config.tags || tags, this.tags, config.metadata, this.metadata, { verbose: this.verbose });
-const runManager = await callbackManager_?.handleToolStart(this.toJSON(), typeof arg === "string" ? arg : JSON.stringify(arg), config.runId, void 0, void 0, void 0, config.runName);
-delete config.runId;
-let result;
-try {
-result = await this._call(parsed, runManager, config);
-} catch (e) {
-await runManager?.handleToolError(e);
-throw e;
-}
-let content;
-let artifact;
-if (this.responseFormat === "content_and_artifact") if (Array.isArray(result) && result.length === 2) [content, artifact] = result;
-else throw new Error(`Tool response format is "content_and_artifact" but the output was not a two-tuple.
-Result: ${JSON.stringify(result)}`);
-else content = result;
-let toolCallId;
-if (_isToolCall(arg)) toolCallId = arg.id;
-if (!toolCallId && _configHasToolCallId(config)) toolCallId = config.toolCall.id;
-const formattedOutput = _formatToolOutput({
-content,
-artifact,
-toolCallId,
-name: this.name,
-metadata: this.metadata
-});
-await runManager?.handleToolEnd(formattedOutput);
-return formattedOutput;
-}
-};
-var Tool = class extends StructuredTool {
-schema = objectType({ input: stringType().optional() }).transform((obj) => obj.input);
-constructor(fields) {
-super(fields);
-}
-/**
-* @deprecated Use .invoke() instead. Will be removed in 0.3.0.
-*
-* Calls the tool with the provided argument and callbacks. It handles
-* string inputs specifically.
-* @param arg The input argument for the tool, which can be a string, undefined, or an input of the tool's schema.
-* @param callbacks Optional callbacks for the tool.
-* @returns A Promise that resolves with a string.
-*/
-call(arg, callbacks) {
-const structuredArg = typeof arg === "string" || arg == null ? { input: arg } : arg;
-return super.call(structuredArg, callbacks);
-}
-};
-var DynamicTool = class extends Tool {
-static lc_name() {
-return "DynamicTool";
-}
-name;
-description;
-func;
-constructor(fields) {
-super(fields);
-this.name = fields.name;
-this.description = fields.description;
-this.func = fields.func;
-this.returnDirect = fields.returnDirect ?? this.returnDirect;
-}
-/**
-* @deprecated Use .invoke() instead. Will be removed in 0.3.0.
-*/
-async call(arg, configArg) {
-const config = parseCallbackConfigArg(configArg);
-if (config.runName === void 0) config.runName = this.name;
-return super.call(arg, config);
-}
-/** @ignore */
-async _call(input, runManager, parentConfig) {
-return this.func(input, runManager, parentConfig);
-}
-};
-var DynamicStructuredTool = class extends StructuredTool {
-static lc_name() {
-return "DynamicStructuredTool";
-}
-name;
-description;
-func;
-schema;
-constructor(fields) {
-super(fields);
-this.name = fields.name;
-this.description = fields.description;
-this.func = fields.func;
-this.returnDirect = fields.returnDirect ?? this.returnDirect;
-this.schema = fields.schema;
-}
-/**
-* @deprecated Use .invoke() instead. Will be removed in 0.3.0.
-*/
-async call(arg, configArg, tags) {
-const config = parseCallbackConfigArg(configArg);
-if (config.runName === void 0) config.runName = this.name;
-return super.call(arg, config, tags);
-}
-_call(arg, runManager, parentConfig) {
-return this.func(arg, runManager, parentConfig);
-}
-};
-var BaseToolkit = class {
-getTools() {
-return this.tools;
-}
-};
-function tool(func, fields) {
-const isSimpleStringSchema = isSimpleStringZodSchema(fields.schema);
-const isStringJSONSchema = validatesOnlyStrings(fields.schema);
-if (!fields.schema || isSimpleStringSchema || isStringJSONSchema) return new DynamicTool({
-...fields,
-description: fields.description ?? fields.schema?.description ?? `${fields.name} tool`,
-func: async (input, runManager, config) => {
-return new Promise((resolve, reject) => {
-const childConfig = patchConfig(config, { callbacks: runManager?.getChild() });
-AsyncLocalStorageProviderSingleton.runWithConfig(pickRunnableConfigKeys(childConfig), async () => {
-try {
-resolve(func(input, childConfig));
-} catch (e) {
-reject(e);
-}
-});
-});
-}
-});
-const schema = fields.schema;
-const description = fields.description ?? fields.schema.description ?? `${fields.name} tool`;
-return new DynamicStructuredTool({
-...fields,
-description,
-schema,
-func: async (input, runManager, config) => {
-return new Promise((resolve, reject) => {
-let listener;
-const cleanup = () => {
-if (config?.signal && listener) config.signal.removeEventListener("abort", listener);
-};
-if (config?.signal) {
-listener = () => {
-cleanup();
-reject(getAbortSignalError(config.signal));
-};
-config.signal.addEventListener("abort", listener);
-}
-const childConfig = patchConfig(config, { callbacks: runManager?.getChild() });
-AsyncLocalStorageProviderSingleton.runWithConfig(pickRunnableConfigKeys(childConfig), async () => {
-try {
-const result = await func(input, childConfig);
-if (config?.signal?.aborted) {
-cleanup();
-return;
-}
-cleanup();
-resolve(result);
-} catch (e) {
-cleanup();
-reject(e);
-}
-});
-});
-}
-});
-}
-function _formatToolOutput(params) {
-const { content, artifact, toolCallId, metadata } = params;
-if (toolCallId && !isDirectToolOutput(content)) if (typeof content === "string" || Array.isArray(content) && content.every((item) => typeof item === "object")) return new ToolMessage({
-status: "success",
-content,
-artifact,
-tool_call_id: toolCallId,
-name: params.name,
-metadata
-});
-else return new ToolMessage({
-status: "success",
-content: _stringify(content),
-artifact,
-tool_call_id: toolCallId,
-name: params.name,
-metadata
-});
-else return content;
-}
-function _stringify(content) {
-try {
-return JSON.stringify(content, null, 2) ?? "";
-} catch (_noOp) {
-return `${content}`;
-}
-}
-export {
-DynamicTool as D,
-JsonOutputKeyToolsParser as J,
-LLM as L,
-StructuredTool as S,
-Tool as T,
-tool as a,
-DynamicStructuredTool as b,
-convertLangChainToolCallToOpenAI as c,
-llms_exports as l,
-makeInvalidToolCall as m,
-openai_tools_exports as o,
-parseToolCall as p,
-tools_exports as t
-};