@mcp-use/inspector 0.6.1 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. package/dist/cli.js +295 -19
  2. package/dist/client/assets/__vite-browser-external-CHS79mP1.js +8 -0
  3. package/dist/client/assets/browser-BdCJ_qyB.js +38211 -0
  4. package/dist/client/assets/chunk-VL2OQCWN-CxD8xDNw.js +6475 -0
  5. package/dist/client/assets/display-YIYC6WJE-B6ZSv77R.js +44742 -0
  6. package/dist/client/assets/embeddings-DlCMB9po.js +21 -0
  7. package/dist/client/assets/index-B0NYybvW.js +5371 -0
  8. package/dist/client/assets/index-CKXUnlZB.js +94617 -0
  9. package/dist/client/assets/index-Cb09SlUY.js +17535 -0
  10. package/dist/client/assets/index-CsP5AdwX.js +10699 -0
  11. package/dist/client/assets/index-DX0TIfSM.js +102 -0
  12. package/dist/client/assets/index-Q_pqNaNk.js +1780 -0
  13. package/dist/client/assets/index-kVFYovMy.css +5752 -0
  14. package/dist/client/assets/path-QsnVvLoj.js +62 -0
  15. package/dist/client/assets/transport-wrapper-browser-ChPHVnHg.js +165 -0
  16. package/dist/client/assets/winston-qgF6niUt.js +12326 -0
  17. package/dist/client/index.html +3 -3
  18. package/dist/server/{chunk-37X7HLUV.js → chunk-3T2VCYG6.js} +87 -13
  19. package/dist/server/chunk-CVECQ7BJ.js +78 -0
  20. package/dist/server/{chunk-555LGZ3I.js → chunk-FS77NTZN.js} +133 -10
  21. package/dist/server/chunk-PKBMQBKP.js +7 -0
  22. package/dist/server/{chunk-WYBXXYSP.js → chunk-RRPLH7DL.js} +1 -1
  23. package/dist/server/{chunk-PYGYQT2G.js → chunk-S7NOZBMG.js} +3 -3
  24. package/dist/server/{chunk-DGUMOD7P.js → chunk-ZONLXYBO.js} +92 -10
  25. package/dist/server/cli.js +6 -4
  26. package/dist/server/index.js +7 -5
  27. package/dist/server/middleware.js +7 -5
  28. package/dist/server/rpc-log-bus.d.ts +17 -0
  29. package/dist/server/rpc-log-bus.d.ts.map +1 -0
  30. package/dist/server/rpc-log-bus.js +7 -0
  31. package/dist/server/server.js +6 -4
  32. package/dist/server/shared-routes.d.ts.map +1 -1
  33. package/dist/server/shared-routes.js +4 -2
  34. package/dist/server/shared-static.js +3 -2
  35. package/dist/server/shared-utils-browser.d.ts +2 -1
  36. package/dist/server/shared-utils-browser.d.ts.map +1 -1
  37. package/dist/server/shared-utils-browser.js +2 -1
  38. package/dist/server/shared-utils.d.ts +4 -1
  39. package/dist/server/shared-utils.d.ts.map +1 -1
  40. package/dist/server/shared-utils.js +2 -1
  41. package/dist/server/transport-wrapper.d.ts +6 -0
  42. package/dist/server/transport-wrapper.d.ts.map +1 -0
  43. package/dist/server/transport-wrapper.js +68 -0
  44. package/dist/server/utils.js +1 -0
  45. package/package.json +12 -3
  46. package/dist/client/assets/__vite-browser-external-DFygW7-s.js +0 -1
  47. package/dist/client/assets/chunk-VL2OQCWN-iRVOQjqe.js +0 -8
  48. package/dist/client/assets/display-LIYVTGEU-D2dm8q2H.js +0 -30
  49. package/dist/client/assets/embeddings-Dcyp0Vlp.js +0 -1
  50. package/dist/client/assets/index-6zrNEwtM.js +0 -4
  51. package/dist/client/assets/index-CAnbiFOL.css +0 -1
  52. package/dist/client/assets/index-CB1s6Wr6.js +0 -146
  53. package/dist/client/assets/index-CoMldIFv.js +0 -2
  54. package/dist/client/assets/index-DRz5BQNA.js +0 -1
  55. package/dist/client/assets/index-DUf1336L.js +0 -1559
  56. package/dist/client/assets/index-DmIKR5St.js +0 -1
  57. package/dist/client/assets/index-DpUpZFq2.js +0 -25
  58. package/dist/client/assets/index-DzegZXPW.js +0 -40
  59. package/dist/client/assets/langfuse-C4HKZ3NL-vCtAvQQV.js +0 -564
  60. package/dist/client/assets/path-C9FudP8b.js +0 -1
  61. package/dist/client/assets/winston-BVJ8PyEn.js +0 -37
@@ -0,0 +1,1780 @@
1
+ import { i as isInteropZodSchema, t as toJsonSchema, a as isBaseMessage, b as AIMessage, C as ChatGenerationChunk, c as AIMessageChunk, d as ChatMessage, e as isToolMessage, f as isAIMessage, g as isLangChainTool, h as isOpenAITool, j as isDataContentBlock, k as convertToProviderContentBlock, p as parseBase64DataUrl, B as BaseLLMOutputParser, l as interopSafeParseAsync, O as OutputParserException, m as BaseChatModel, n as getEnvironmentVariable, J as JsonOutputParser, R as RunnablePassthrough, o as RunnableSequence } from "./index-Cb09SlUY.js";
2
+ import { v as v4 } from "./index-CKXUnlZB.js";
3
+ import "./embeddings-DlCMB9po.js";
4
+ import "./chunk-VL2OQCWN-CxD8xDNw.js";
5
+ import "./index-DX0TIfSM.js";
6
/**
 * Recursively strips JSON-Schema keys the Gemini API rejects
 * (`additionalProperties`, `$schema`, `strict`) from a schema object.
 * Produces a per-level copy; non-object inputs are returned unchanged.
 */
function removeAdditionalProperties(obj) {
  if (typeof obj !== "object" || obj === null) return obj;
  const cleaned = { ...obj };
  delete cleaned.additionalProperties;
  delete cleaned.$schema;
  delete cleaned.strict;
  for (const [key, value] of Object.entries(cleaned)) {
    if (Array.isArray(value)) {
      cleaned[key] = value.map(removeAdditionalProperties);
    } else if (typeof value === "object" && value !== null) {
      cleaned[key] = removeAdditionalProperties(value);
    }
  }
  return cleaned;
}
20
/**
 * Converts a tool schema (interop-Zod or plain JSON Schema) into Gemini
 * function parameters: resolves Zod to JSON Schema, strips unsupported
 * keys, and drops any remaining top-level `$schema`.
 */
function schemaToGenerativeAIParameters(schema) {
  const resolved = isInteropZodSchema(schema) ? toJsonSchema(schema) : schema;
  const { $schema, ...parameters } = removeAdditionalProperties(resolved);
  return parameters;
}
25
/**
 * Converts a plain JSON Schema into Gemini function parameters by
 * stripping unsupported keys and the top-level `$schema`.
 */
function jsonSchemaToGeminiParameters(schema) {
  const { $schema, ...parameters } = removeAdditionalProperties(schema);
  return parameters;
}
30
/**
 * Resolves the "author" label for a LangChain message: the explicit role
 * for ChatMessage instances, the literal type for tool messages, and
 * otherwise the message name falling back to its type.
 */
function getMessageAuthor(message) {
  const type = message._getType();
  if (ChatMessage.isInstance(message)) return message.role;
  if (type === "tool") return type;
  return message.name ?? type;
}
36
/**
 * Maps a LangChain author label onto one of Gemini's content roles
 * ("model" | "system" | "user" | "function"). Throws for any author
 * outside the known set.
 */
function convertAuthorToRole(author) {
  const roleByAuthor = new Map([
    ["supervisor", "model"],
    ["ai", "model"],
    ["model", "model"],
    ["system", "system"],
    ["human", "user"],
    ["tool", "function"],
    ["function", "function"],
  ]);
  const role = roleByAuthor.get(author);
  if (role === undefined) {
    throw new Error(`Unknown / unsupported author: ${author}`);
  }
  return role;
}
53
/**
 * Wraps a "media" content block as a Gemini part: inline bytes when `data`
 * is present, a file reference when `fileUri` is present (checked in that
 * order). Throws for anything else.
 */
function messageContentMedia(content) {
  const hasMime = "mimeType" in content;
  if (hasMime && "data" in content) {
    return { inlineData: { mimeType: content.mimeType, data: content.data } };
  }
  if (hasMime && "fileUri" in content) {
    return { fileData: { mimeType: content.mimeType, fileUri: content.fileUri } };
  }
  throw new Error("Invalid media content");
}
64
/**
 * Scans earlier messages for the AI tool call whose id matches this tool
 * message's `tool_call_id` and returns that call's name (or undefined).
 */
function inferToolNameFromPreviousMessages(message, previousMessages) {
  const priorToolCalls = previousMessages.flatMap((msg) =>
    isAIMessage(msg) ? msg.tool_calls ?? [] : []
  );
  const match = priorToolCalls.find((toolCall) => toolCall.id === message.tool_call_id);
  return match?.name;
}
72
/**
 * Builds the standard-content-block converter for Gemini. Image, audio,
 * and file blocks share one media-conversion path:
 *   - "url" source: inline it when the URL is a base64 data URL,
 *     otherwise reference it via fileData;
 *   - "base64" source: always inline.
 * Non-multimodal models reject image/audio/file blocks outright.
 */
function _getStandardContentBlockConverter(isMultimodalModel) {
  const toMediaPart = (block) => {
    if (block.source_type === "url") {
      const parsed = parseBase64DataUrl({ dataUrl: block.url });
      if (parsed) {
        return { inlineData: { mimeType: parsed.mime_type, data: parsed.data } };
      }
      return { fileData: { mimeType: block.mime_type ?? "", fileUri: block.url } };
    }
    if (block.source_type === "base64") {
      return { inlineData: { mimeType: block.mime_type ?? "", data: block.data } };
    }
    throw new Error(`Unsupported source type: ${block.source_type}`);
  };
  return {
    providerName: "Google Gemini",
    fromStandardTextBlock(block) {
      return { text: block.text };
    },
    fromStandardImageBlock(block) {
      if (!isMultimodalModel) throw new Error("This model does not support images");
      return toMediaPart(block);
    },
    fromStandardAudioBlock(block) {
      if (!isMultimodalModel) throw new Error("This model does not support audio");
      return toMediaPart(block);
    },
    fromStandardFileBlock(block) {
      if (!isMultimodalModel) throw new Error("This model does not support files");
      // A file block may carry raw text directly.
      if (block.source_type === "text") return { text: block.text };
      return toMediaPart(block);
    },
  };
}
139
/**
 * Converts one LangChain content block into a Gemini part.
 * Returns undefined for bare `functionCall` blocks (handled elsewhere);
 * throws for unknown block types and for media on non-multimodal models.
 */
function _convertLangChainContentToPart(content, isMultimodalModel) {
  if (isDataContentBlock(content)) {
    return convertToProviderContentBlock(
      content,
      _getStandardContentBlockConverter(isMultimodalModel)
    );
  }
  switch (content.type) {
    case "text":
      return { text: content.text };
    case "executableCode":
      return { executableCode: content.executableCode };
    case "codeExecutionResult":
      return { codeExecutionResult: content.codeExecutionResult };
    case "image_url": {
      if (!isMultimodalModel) throw new Error(`This model does not support images`);
      let source;
      if (typeof content.image_url === "string") {
        source = content.image_url;
      } else if (typeof content.image_url === "object" && "url" in content.image_url) {
        source = content.image_url.url;
      } else {
        throw new Error("Please provide image as base64 encoded data URL");
      }
      // Expect "data:<mime>;base64,<payload>".
      const [header, data] = source.split(",");
      if (!header.startsWith("data:")) {
        throw new Error("Please provide image as base64 encoded data URL");
      }
      const [mimeType, encoding] = header.replace(/^data:/, "").split(";");
      if (encoding !== "base64") {
        throw new Error("Please provide image as base64 encoded data URL");
      }
      return { inlineData: { data, mimeType } };
    }
    case "media":
      return messageContentMedia(content);
    case "tool_use":
      return { functionCall: { name: content.name, args: content.input } };
    default:
      break;
  }
  // "<mime>/<subtype>"-typed block with string data: treat as inline media.
  if (
    content.type?.includes("/") &&
    content.type.split("/").length === 2 &&
    "data" in content &&
    typeof content.data === "string"
  ) {
    return { inlineData: { mimeType: content.type, data: content.data } };
  }
  if ("functionCall" in content) return void 0;
  if ("type" in content) throw new Error(`Unknown content type ${content.type}`);
  throw new Error(`Unknown content ${JSON.stringify(content)}`);
}
171
/**
 * Converts a LangChain message into Gemini parts. Tool messages become a
 * single functionResponse part (name required — inferred from prior AI
 * tool calls when missing); other messages become text/content parts plus
 * one functionCall part per AI tool call.
 */
function convertMessageContentToParts(message, isMultimodalModel, previousMessages) {
  if (isToolMessage(message)) {
    const messageName =
      message.name ?? inferToolNameFromPreviousMessages(message, previousMessages);
    if (messageName === void 0) {
      throw new Error(`Google requires a tool name for each tool call response, and we could not infer a called tool name for ToolMessage "${message.id}" from your passed messages. Please populate a "name" field on that ToolMessage explicitly.`);
    }
    const result = Array.isArray(message.content)
      ? message.content
          .map((c) => _convertLangChainContentToPart(c, isMultimodalModel))
          .filter((p) => p !== void 0)
      : message.content;
    const response =
      message.status === "error" ? { error: { details: result } } : { result };
    return [{ functionResponse: { name: messageName, response } }];
  }
  const parts = [];
  // Non-empty plain-string content becomes a single text part.
  if (typeof message.content === "string" && message.content) {
    parts.push({ text: message.content });
  }
  if (Array.isArray(message.content)) {
    parts.push(
      ...message.content
        .map((c) => _convertLangChainContentToPart(c, isMultimodalModel))
        .filter((p) => p !== void 0)
    );
  }
  let callParts = [];
  if (isAIMessage(message) && message.tool_calls?.length) {
    callParts = message.tool_calls.map((tc) => ({
      functionCall: { name: tc.name, args: tc.args },
    }));
  }
  return [...parts, ...callParts];
}
197
/**
 * Converts LangChain messages into Gemini `Content` entries. A leading
 * system message is either merged into the following message (default) or
 * emitted as its own "system" content when
 * `convertSystemMessageToHumanContent` is false and merging applies.
 */
function convertBaseMessagesToContent(messages, isMultimodalModel, convertSystemMessageToHumanContent = false) {
  const state = { content: [], mergeWithPreviousContent: false };
  messages.forEach((message, index) => {
    if (!isBaseMessage(message)) throw new Error("Unsupported message input");
    const author = getMessageAuthor(message);
    if (author === "system" && index !== 0) {
      throw new Error("System message should be the first one");
    }
    const role = convertAuthorToRole(author);
    // NOTE(review): `content[content.length]` is always undefined, so this
    // alternation check can never fire — looks like an off-by-one
    // (`length - 1` was presumably intended). Preserved as-is because
    // enabling it would start throwing on consecutive same-role messages;
    // confirm upstream intent before changing.
    const prevContent = state.content[state.content.length];
    if (!state.mergeWithPreviousContent && prevContent && prevContent.role === role) {
      throw new Error("Google Generative AI requires alternate messages between authors");
    }
    const parts = convertMessageContentToParts(message, isMultimodalModel, messages.slice(0, index));
    if (state.mergeWithPreviousContent) {
      // Fold this message's parts into the pending (system) content entry.
      const lastContent = state.content[state.content.length - 1];
      if (!lastContent) {
        throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
      }
      lastContent.parts.push(...parts);
      state.mergeWithPreviousContent = false;
      return;
    }
    let actualRole = role;
    if (actualRole === "function" || (actualRole === "system" && !convertSystemMessageToHumanContent)) {
      actualRole = "user";
    }
    state.content.push({ role: actualRole, parts });
    state.mergeWithPreviousContent = author === "system" && !convertSystemMessageToHumanContent;
  });
  return state.content;
}
230
/**
 * Maps a Gemini generate-content response to a LangChain ChatResult.
 * A response without candidates yields empty generations plus the prompt
 * feedback; otherwise the first candidate's parts become the message
 * content (a bare string when there is exactly one non-empty text part).
 */
function mapGenerateContentResultToChatResult(response, extra) {
  if (!response.candidates || response.candidates.length === 0 || !response.candidates[0]) {
    return {
      generations: [],
      llmOutput: { filters: response.promptFeedback },
    };
  }
  // Checked in the same order the original if/else chain used.
  const PART_KEYS = [
    "text",
    "inlineData",
    "functionCall",
    "functionResponse",
    "fileData",
    "executableCode",
    "codeExecutionResult",
  ];
  const toTypedBlock = (part) => {
    const key = PART_KEYS.find((k) => k in part);
    return key ? { type: key, [key]: part[key] } : part;
  };
  const functionCalls = response.functionCalls();
  const [candidate] = response.candidates;
  const { content: candidateContent, ...generationInfo } = candidate;
  const parts = candidateContent?.parts;
  let content;
  if (Array.isArray(parts) && parts.length === 1 && parts[0].text) {
    // Single non-empty text part collapses to a plain string.
    content = parts[0].text;
  } else if (Array.isArray(parts) && parts.length > 0) {
    content = parts.map(toTypedBlock);
  } else {
    content = [];
  }
  let text = "";
  if (typeof content === "string") {
    text = content;
  } else if (Array.isArray(content) && content.length > 0) {
    text = content.find((b) => "text" in b)?.text ?? text;
  }
  const message = new AIMessage({
    content: content ?? "",
    tool_calls: functionCalls?.map((fc) => ({
      ...fc,
      type: "tool_call",
      // Keep the provider id when present; otherwise mint a UUID.
      id: "id" in fc && typeof fc.id === "string" ? fc.id : v4(),
    })),
    additional_kwargs: { ...generationInfo },
    usage_metadata: extra?.usageMetadata,
  });
  return {
    generations: [{ text, message, generationInfo }],
    llmOutput: {
      tokenUsage: {
        promptTokens: extra?.usageMetadata?.input_tokens,
        completionTokens: extra?.usageMetadata?.output_tokens,
        totalTokens: extra?.usageMetadata?.total_tokens,
      },
    },
  };
}
303
/**
 * Converts one streamed Gemini response into a ChatGenerationChunk, or
 * null when the response has no candidates. All-text parts are joined
 * into one string; mixed parts become typed content blocks; function
 * calls become tool_call_chunks carrying the stream index.
 */
function convertResponseContentToChatGenerationChunk(response, extra) {
  if (!response.candidates || response.candidates.length === 0) return null;
  // Checked in the same order the original if/else chain used.
  const PART_KEYS = [
    "text",
    "inlineData",
    "functionCall",
    "functionResponse",
    "fileData",
    "executableCode",
    "codeExecutionResult",
  ];
  const toTypedBlock = (part) => {
    const key = PART_KEYS.find((k) => k in part);
    return key ? { type: key, [key]: part[key] } : part;
  };
  const functionCalls = response.functionCalls();
  const [candidate] = response.candidates;
  const { content: candidateContent, ...generationInfo } = candidate;
  const parts = candidateContent?.parts;
  let content;
  if (Array.isArray(parts) && parts.every((p) => "text" in p)) {
    content = parts.map((p) => p.text).join("");
  } else if (Array.isArray(parts)) {
    content = parts.map(toTypedBlock);
  } else {
    content = [];
  }
  let text = "";
  if (content && typeof content === "string") {
    text = content;
  } else if (Array.isArray(content)) {
    text = content.find((b) => "text" in b)?.text ?? "";
  }
  const toolCallChunks = (functionCalls ?? []).map((fc) => ({
    ...fc,
    args: JSON.stringify(fc.args),
    index: extra.index,
    type: "tool_call_chunk",
    // Keep the provider id when present; otherwise mint a UUID.
    id: "id" in fc && typeof fc.id === "string" ? fc.id : v4(),
  }));
  return new ChatGenerationChunk({
    text,
    message: new AIMessageChunk({
      content: content || "",
      name: candidateContent ? candidateContent.role : void 0,
      tool_call_chunks: toolCallChunks,
      additional_kwargs: {},
      response_metadata: { model_provider: "google-genai" },
      usage_metadata: extra.usageMetadata,
    }),
    generationInfo,
  });
}
369
/**
 * Normalizes a mixed tool list into Gemini's `functionDeclarations` shape.
 * Lists that are already all Gemini tools pass through untouched; LangChain
 * and OpenAI tools are converted; anything else is forwarded as-is.
 */
function convertToGenerativeAITools(tools) {
  const alreadyGeminiTools = tools.every(
    (tool) => "functionDeclarations" in tool && Array.isArray(tool.functionDeclarations)
  );
  if (alreadyGeminiTools) return tools;
  const toDeclaration = (tool) => {
    if (isLangChainTool(tool)) {
      const parameters = schemaToGenerativeAIParameters(tool.schema);
      const isEmptyObjectSchema =
        parameters.type === "object" &&
        "properties" in parameters &&
        Object.keys(parameters.properties).length === 0;
      // Gemini rejects an empty-object parameters schema; omit it instead.
      if (isEmptyObjectSchema) {
        return { name: tool.name, description: tool.description };
      }
      return { name: tool.name, description: tool.description, parameters };
    }
    if (isOpenAITool(tool)) {
      return {
        name: tool.function.name,
        description: tool.function.description ?? `A function available to call.`,
        parameters: jsonSchemaToGeminiParameters(tool.function.parameters),
      };
    }
    return tool;
  };
  return [{ functionDeclarations: tools.map(toDeclaration) }];
}
392
/**
 * Output parser that extracts the first tool call from generations and
 * optionally validates its arguments against a Zod schema.
 */
var GoogleGenerativeAIToolsOutputParser = class extends BaseLLMOutputParser {
  static lc_name() {
    return "GoogleGenerativeAIToolsOutputParser";
  }
  lc_namespace = [
    "langchain",
    "google_genai",
    "output_parsers"
  ];
  returnId = false;
  /** The type of tool calls to return. */
  keyName;
  /** Whether to return only the first tool call. */
  returnSingle = false;
  zodSchema;
  constructor(params) {
    super(params);
    this.keyName = params.keyName;
    this.returnSingle = params.returnSingle ?? this.returnSingle;
    this.zodSchema = params.zodSchema;
  }
  /** Validates `result` against `zodSchema` when one was provided; throws OutputParserException on failure. */
  async _validateResult(result) {
    if (this.zodSchema === void 0) return result;
    const parsed = await interopSafeParseAsync(this.zodSchema, result);
    if (!parsed.success) {
      throw new OutputParserException(`Failed to parse. Text: "${JSON.stringify(result, null, 2)}". Error: ${JSON.stringify(parsed.error.issues)}`, JSON.stringify(result, null, 2));
    }
    return parsed.data;
  }
  /** Returns the validated args of the first tool call found in `generations`. */
  async parseResult(generations) {
    const toolCalls = generations.flatMap(({ message }) => {
      if (!("tool_calls" in message) || !Array.isArray(message.tool_calls)) return [];
      return message.tool_calls;
    });
    const [firstToolCall] = toolCalls;
    if (firstToolCall === void 0) {
      throw new Error("No parseable tool calls provided to GoogleGenerativeAIToolsOutputParser.");
    }
    return this._validateResult(firstToolCall.args);
  }
};
431
// String-valued lookup tables (originally TypeScript enums compiled to
// IIFEs; string enums have no reverse mapping, so plain objects are
// behaviorally equivalent).
var SchemaType = {
  STRING: "string",
  NUMBER: "number",
  INTEGER: "integer",
  BOOLEAN: "boolean",
  ARRAY: "array",
  OBJECT: "object",
};
var ExecutableCodeLanguage = {
  LANGUAGE_UNSPECIFIED: "language_unspecified",
  PYTHON: "python",
};
var Outcome = {
  OUTCOME_UNSPECIFIED: "outcome_unspecified",
  OUTCOME_OK: "outcome_ok",
  OUTCOME_FAILED: "outcome_failed",
  OUTCOME_DEADLINE_EXCEEDED: "outcome_deadline_exceeded",
};
// Content roles accepted by the Gemini API.
const POSSIBLE_ROLES = ["user", "model", "function", "system"];
var HarmCategory = {
  HARM_CATEGORY_UNSPECIFIED: "HARM_CATEGORY_UNSPECIFIED",
  HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH",
  HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
  HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT",
  HARM_CATEGORY_DANGEROUS_CONTENT: "HARM_CATEGORY_DANGEROUS_CONTENT",
  HARM_CATEGORY_CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY",
};
var HarmBlockThreshold = {
  HARM_BLOCK_THRESHOLD_UNSPECIFIED: "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
  BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE",
  BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE",
  BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH",
  BLOCK_NONE: "BLOCK_NONE",
};
var HarmProbability = {
  HARM_PROBABILITY_UNSPECIFIED: "HARM_PROBABILITY_UNSPECIFIED",
  NEGLIGIBLE: "NEGLIGIBLE",
  LOW: "LOW",
  MEDIUM: "MEDIUM",
  HIGH: "HIGH",
};
var BlockReason = {
  BLOCKED_REASON_UNSPECIFIED: "BLOCKED_REASON_UNSPECIFIED",
  SAFETY: "SAFETY",
  OTHER: "OTHER",
};
var FinishReason = {
  FINISH_REASON_UNSPECIFIED: "FINISH_REASON_UNSPECIFIED",
  STOP: "STOP",
  MAX_TOKENS: "MAX_TOKENS",
  SAFETY: "SAFETY",
  RECITATION: "RECITATION",
  LANGUAGE: "LANGUAGE",
  BLOCKLIST: "BLOCKLIST",
  PROHIBITED_CONTENT: "PROHIBITED_CONTENT",
  SPII: "SPII",
  MALFORMED_FUNCTION_CALL: "MALFORMED_FUNCTION_CALL",
  OTHER: "OTHER",
};
var TaskType = {
  TASK_TYPE_UNSPECIFIED: "TASK_TYPE_UNSPECIFIED",
  RETRIEVAL_QUERY: "RETRIEVAL_QUERY",
  RETRIEVAL_DOCUMENT: "RETRIEVAL_DOCUMENT",
  SEMANTIC_SIMILARITY: "SEMANTIC_SIMILARITY",
  CLASSIFICATION: "CLASSIFICATION",
  CLUSTERING: "CLUSTERING",
};
var FunctionCallingMode = {
  MODE_UNSPECIFIED: "MODE_UNSPECIFIED",
  AUTO: "AUTO",
  ANY: "ANY",
  NONE: "NONE",
};
var DynamicRetrievalMode = {
  MODE_UNSPECIFIED: "MODE_UNSPECIFIED",
  MODE_DYNAMIC: "MODE_DYNAMIC",
};
519
/** Base error for Google Generative AI client failures; prefixes the message. */
class GoogleGenerativeAIError extends Error {
  constructor(message) {
    super(`[GoogleGenerativeAI Error]: ${message}`);
  }
}
/** Error that carries the offending API response object. */
class GoogleGenerativeAIResponseError extends GoogleGenerativeAIError {
  constructor(message, response) {
    super(message);
    this.response = response;
  }
}
/** HTTP-level failure; carries status, statusText, and server error details. */
class GoogleGenerativeAIFetchError extends GoogleGenerativeAIError {
  constructor(message, status, statusText, errorDetails) {
    super(message);
    this.status = status;
    this.statusText = statusText;
    this.errorDetails = errorDetails;
  }
}
/** Raised for invalid caller-supplied request input (e.g. reserved headers). */
class GoogleGenerativeAIRequestInputError extends GoogleGenerativeAIError {
}
/** Raised when a request is aborted (AbortSignal or timeout). */
class GoogleGenerativeAIAbortError extends GoogleGenerativeAIError {
}
542
// Default Generative Language API endpoint and client identification.
const DEFAULT_BASE_URL = "https://generativelanguage.googleapis.com";
const DEFAULT_API_VERSION = "v1beta";
const PACKAGE_VERSION = "0.24.1";
const PACKAGE_LOG_HEADER = "genai-js";
// RPC task names appended to the model path (originally a TS enum IIFE;
// string enums have no reverse mapping, so a plain object is equivalent).
var Task = {
  GENERATE_CONTENT: "generateContent",
  STREAM_GENERATE_CONTENT: "streamGenerateContent",
  COUNT_TOKENS: "countTokens",
  EMBED_CONTENT: "embedContent",
  BATCH_EMBED_CONTENTS: "batchEmbedContents",
};
554
/** Builds the request URL for one model RPC. */
class RequestUrl {
  constructor(model, task, apiKey, stream, requestOptions) {
    this.model = model;
    this.task = task;
    this.apiKey = apiKey;
    this.stream = stream;
    this.requestOptions = requestOptions;
  }
  /** `{base}/{version}/{model}:{task}`, with `?alt=sse` appended when streaming. */
  toString() {
    const apiVersion = this.requestOptions?.apiVersion || DEFAULT_API_VERSION;
    const baseUrl = this.requestOptions?.baseUrl || DEFAULT_BASE_URL;
    const streamSuffix = this.stream ? "?alt=sse" : "";
    return `${baseUrl}/${apiVersion}/${this.model}:${this.task}${streamSuffix}`;
  }
}
573
/**
 * Builds the `x-goog-api-client` header value: the optional caller-supplied
 * apiClient token followed by this package's own identifier.
 */
function getClientHeaders(requestOptions) {
  const tokens = [];
  if (requestOptions?.apiClient) {
    tokens.push(requestOptions.apiClient);
  }
  tokens.push(`${PACKAGE_LOG_HEADER}/${PACKAGE_VERSION}`);
  return tokens.join(" ");
}
581
/**
 * Assembles request headers: content type, client identification, API key,
 * then any caller-provided custom headers. Reserved header names throw.
 */
async function getHeaders(url) {
  const headers = new Headers();
  headers.append("Content-Type", "application/json");
  headers.append("x-goog-api-client", getClientHeaders(url.requestOptions));
  headers.append("x-goog-api-key", url.apiKey);
  let customHeaders = url.requestOptions?.customHeaders;
  if (!customHeaders) return headers;
  if (!(customHeaders instanceof Headers)) {
    try {
      customHeaders = new Headers(customHeaders);
    } catch (e) {
      throw new GoogleGenerativeAIRequestInputError(`unable to convert customHeaders value ${JSON.stringify(customHeaders)} to Headers: ${e.message}`);
    }
  }
  for (const [headerName, headerValue] of customHeaders.entries()) {
    if (headerName === "x-goog-api-key") {
      throw new GoogleGenerativeAIRequestInputError(`Cannot set reserved header name ${headerName}`);
    }
    if (headerName === "x-goog-api-client") {
      throw new GoogleGenerativeAIRequestInputError(`Header name ${headerName} can only be set using the apiClient field`);
    }
    headers.append(headerName, headerValue);
  }
  return headers;
}
607
/**
 * Builds the URL string and fetch options (POST + headers + body, plus any
 * abort/timeout options) for one model request.
 */
async function constructModelRequest(model, task, apiKey, stream, body, requestOptions) {
  const url = new RequestUrl(model, task, apiKey, stream, requestOptions);
  const fetchOptions = {
    ...buildFetchOptions(requestOptions),
    method: "POST",
    headers: await getHeaders(url),
    body,
  };
  return { url: url.toString(), fetchOptions };
}
614
/**
 * Convenience wrapper: constructs a model request and executes it with the
 * given fetch implementation (defaults to the global `fetch`).
 */
async function makeModelRequest(model, task, apiKey, stream, body, requestOptions = {}, fetchFn = fetch) {
  const request = await constructModelRequest(model, task, apiKey, stream, body, requestOptions);
  return makeRequest(request.url, request.fetchOptions, fetchFn);
}
618
/**
 * Executes a fetch and normalizes failures: network/abort errors are
 * rethrown as GoogleGenerativeAI errors, non-OK HTTP responses become
 * GoogleGenerativeAIFetchError. Returns the raw Response on success.
 */
async function makeRequest(url, fetchOptions, fetchFn = fetch) {
  let response;
  try {
    response = await fetchFn(url, fetchOptions);
  } catch (e) {
    // handleResponseError always throws, so `response` is defined below.
    handleResponseError(e, url);
  }
  if (!response.ok) {
    await handleResponseNotOk(response, url);
  }
  return response;
}
630
/**
 * Normalizes a fetch-time error and rethrows it. Aborts become
 * GoogleGenerativeAIAbortError; already-typed errors pass through;
 * anything else is wrapped as GoogleGenerativeAIError. The original
 * stack is preserved on wrapped errors. Always throws.
 */
function handleResponseError(e, url) {
  if (e.name === "AbortError") {
    const abortErr = new GoogleGenerativeAIAbortError(`Request aborted when fetching ${url.toString()}: ${e.message}`);
    abortErr.stack = e.stack;
    throw abortErr;
  }
  if (e instanceof GoogleGenerativeAIFetchError || e instanceof GoogleGenerativeAIRequestInputError) {
    throw e;
  }
  const wrapped = new GoogleGenerativeAIError(`Error fetching from ${url.toString()}: ${e.message}`);
  wrapped.stack = e.stack;
  throw wrapped;
}
641
/**
 * Turns a non-OK HTTP response into a GoogleGenerativeAIFetchError,
 * extracting the server's error message/details from the JSON body when
 * possible. Always throws.
 */
async function handleResponseNotOk(response, url) {
  let message = "";
  let errorDetails;
  try {
    const json = await response.json();
    message = json.error.message;
    if (json.error.details) {
      message += ` ${JSON.stringify(json.error.details)}`;
      errorDetails = json.error.details;
    }
  } catch (e) {
    // Body was not JSON (or lacked an error field); fall through with an
    // empty message — intentional best-effort extraction.
  }
  throw new GoogleGenerativeAIFetchError(`Error fetching from ${url.toString()}: [${response.status} ${response.statusText}] ${message}`, response.status, response.statusText, errorDetails);
}
655
+ function buildFetchOptions(requestOptions) {
656
+ const fetchOptions = {};
657
+ if ((requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.signal) !== void 0 || (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeout) >= 0) {
658
+ const controller = new AbortController();
659
+ if ((requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeout) >= 0) {
660
+ setTimeout(() => controller.abort(), requestOptions.timeout);
661
+ }
662
+ if (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.signal) {
663
+ requestOptions.signal.addEventListener("abort", () => {
664
+ controller.abort();
665
+ });
666
+ }
667
+ fetchOptions.signal = controller.signal;
668
+ }
669
+ return fetchOptions;
670
+ }
671
+ function addHelpers(response) {
672
+ response.text = () => {
673
+ if (response.candidates && response.candidates.length > 0) {
674
+ if (response.candidates.length > 1) {
675
+ console.warn(`This response had ${response.candidates.length} candidates. Returning text from the first candidate only. Access response.candidates directly to use the other candidates.`);
676
+ }
677
+ if (hadBadFinishReason(response.candidates[0])) {
678
+ throw new GoogleGenerativeAIResponseError(`${formatBlockErrorMessage(response)}`, response);
679
+ }
680
+ return getText(response);
681
+ } else if (response.promptFeedback) {
682
+ throw new GoogleGenerativeAIResponseError(`Text not available. ${formatBlockErrorMessage(response)}`, response);
683
+ }
684
+ return "";
685
+ };
686
+ response.functionCall = () => {
687
+ if (response.candidates && response.candidates.length > 0) {
688
+ if (response.candidates.length > 1) {
689
+ console.warn(`This response had ${response.candidates.length} candidates. Returning function calls from the first candidate only. Access response.candidates directly to use the other candidates.`);
690
+ }
691
+ if (hadBadFinishReason(response.candidates[0])) {
692
+ throw new GoogleGenerativeAIResponseError(`${formatBlockErrorMessage(response)}`, response);
693
+ }
694
+ console.warn(`response.functionCall() is deprecated. Use response.functionCalls() instead.`);
695
+ return getFunctionCalls(response)[0];
696
+ } else if (response.promptFeedback) {
697
+ throw new GoogleGenerativeAIResponseError(`Function call not available. ${formatBlockErrorMessage(response)}`, response);
698
+ }
699
+ return void 0;
700
+ };
701
+ response.functionCalls = () => {
702
+ if (response.candidates && response.candidates.length > 0) {
703
+ if (response.candidates.length > 1) {
704
+ console.warn(`This response had ${response.candidates.length} candidates. Returning function calls from the first candidate only. Access response.candidates directly to use the other candidates.`);
705
+ }
706
+ if (hadBadFinishReason(response.candidates[0])) {
707
+ throw new GoogleGenerativeAIResponseError(`${formatBlockErrorMessage(response)}`, response);
708
+ }
709
+ return getFunctionCalls(response);
710
+ } else if (response.promptFeedback) {
711
+ throw new GoogleGenerativeAIResponseError(`Function call not available. ${formatBlockErrorMessage(response)}`, response);
712
+ }
713
+ return void 0;
714
+ };
715
+ return response;
716
+ }
717
+ function getText(response) {
718
+ var _a, _b, _c, _d;
719
+ const textStrings = [];
720
+ if ((_b = (_a = response.candidates) === null || _a === void 0 ? void 0 : _a[0].content) === null || _b === void 0 ? void 0 : _b.parts) {
721
+ for (const part of (_d = (_c = response.candidates) === null || _c === void 0 ? void 0 : _c[0].content) === null || _d === void 0 ? void 0 : _d.parts) {
722
+ if (part.text) {
723
+ textStrings.push(part.text);
724
+ }
725
+ if (part.executableCode) {
726
+ textStrings.push("\n```" + part.executableCode.language + "\n" + part.executableCode.code + "\n```\n");
727
+ }
728
+ if (part.codeExecutionResult) {
729
+ textStrings.push("\n```\n" + part.codeExecutionResult.output + "\n```\n");
730
+ }
731
+ }
732
+ }
733
+ if (textStrings.length > 0) {
734
+ return textStrings.join("");
735
+ } else {
736
+ return "";
737
+ }
738
+ }
739
+ function getFunctionCalls(response) {
740
+ var _a, _b, _c, _d;
741
+ const functionCalls = [];
742
+ if ((_b = (_a = response.candidates) === null || _a === void 0 ? void 0 : _a[0].content) === null || _b === void 0 ? void 0 : _b.parts) {
743
+ for (const part of (_d = (_c = response.candidates) === null || _c === void 0 ? void 0 : _c[0].content) === null || _d === void 0 ? void 0 : _d.parts) {
744
+ if (part.functionCall) {
745
+ functionCalls.push(part.functionCall);
746
+ }
747
+ }
748
+ }
749
+ if (functionCalls.length > 0) {
750
+ return functionCalls;
751
+ } else {
752
+ return void 0;
753
+ }
754
+ }
755
+ const badFinishReasons = [
756
+ FinishReason.RECITATION,
757
+ FinishReason.SAFETY,
758
+ FinishReason.LANGUAGE
759
+ ];
760
+ function hadBadFinishReason(candidate) {
761
+ return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason);
762
+ }
763
+ function formatBlockErrorMessage(response) {
764
+ var _a, _b, _c;
765
+ let message = "";
766
+ if ((!response.candidates || response.candidates.length === 0) && response.promptFeedback) {
767
+ message += "Response was blocked";
768
+ if ((_a = response.promptFeedback) === null || _a === void 0 ? void 0 : _a.blockReason) {
769
+ message += ` due to ${response.promptFeedback.blockReason}`;
770
+ }
771
+ if ((_b = response.promptFeedback) === null || _b === void 0 ? void 0 : _b.blockReasonMessage) {
772
+ message += `: ${response.promptFeedback.blockReasonMessage}`;
773
+ }
774
+ } else if ((_c = response.candidates) === null || _c === void 0 ? void 0 : _c[0]) {
775
+ const firstCandidate = response.candidates[0];
776
+ if (hadBadFinishReason(firstCandidate)) {
777
+ message += `Candidate was blocked due to ${firstCandidate.finishReason}`;
778
+ if (firstCandidate.finishMessage) {
779
+ message += `: ${firstCandidate.finishMessage}`;
780
+ }
781
+ }
782
+ }
783
+ return message;
784
+ }
785
+ function __await(v) {
786
+ return this instanceof __await ? (this.v = v, this) : new __await(v);
787
+ }
788
+ function __asyncGenerator(thisArg, _arguments, generator) {
789
+ if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
790
+ var g = generator.apply(thisArg, _arguments || []), i, q = [];
791
+ return i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function() {
792
+ return this;
793
+ }, i;
794
+ function verb(n) {
795
+ if (g[n]) i[n] = function(v) {
796
+ return new Promise(function(a, b) {
797
+ q.push([n, v, a, b]) > 1 || resume(n, v);
798
+ });
799
+ };
800
+ }
801
+ function resume(n, v) {
802
+ try {
803
+ step(g[n](v));
804
+ } catch (e) {
805
+ settle(q[0][3], e);
806
+ }
807
+ }
808
+ function step(r) {
809
+ r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r);
810
+ }
811
+ function fulfill(value) {
812
+ resume("next", value);
813
+ }
814
+ function reject(value) {
815
+ resume("throw", value);
816
+ }
817
+ function settle(f, v) {
818
+ if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]);
819
+ }
820
+ }
821
+ typeof SuppressedError === "function" ? SuppressedError : function(error, suppressed, message) {
822
+ var e = new Error(message);
823
+ return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
824
+ };
825
+ const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/;
826
+ function processStream(response) {
827
+ const inputStream = response.body.pipeThrough(new TextDecoderStream("utf8", { fatal: true }));
828
+ const responseStream = getResponseStream(inputStream);
829
+ const [stream1, stream2] = responseStream.tee();
830
+ return {
831
+ stream: generateResponseSequence(stream1),
832
+ response: getResponsePromise(stream2)
833
+ };
834
+ }
835
+ async function getResponsePromise(stream) {
836
+ const allResponses = [];
837
+ const reader = stream.getReader();
838
+ while (true) {
839
+ const { done, value } = await reader.read();
840
+ if (done) {
841
+ return addHelpers(aggregateResponses(allResponses));
842
+ }
843
+ allResponses.push(value);
844
+ }
845
+ }
846
+ function generateResponseSequence(stream) {
847
+ return __asyncGenerator(this, arguments, function* generateResponseSequence_1() {
848
+ const reader = stream.getReader();
849
+ while (true) {
850
+ const { value, done } = yield __await(reader.read());
851
+ if (done) {
852
+ break;
853
+ }
854
+ yield yield __await(addHelpers(value));
855
+ }
856
+ });
857
+ }
858
+ function getResponseStream(inputStream) {
859
+ const reader = inputStream.getReader();
860
+ const stream = new ReadableStream({
861
+ start(controller) {
862
+ let currentText = "";
863
+ return pump();
864
+ function pump() {
865
+ return reader.read().then(({ value, done }) => {
866
+ if (done) {
867
+ if (currentText.trim()) {
868
+ controller.error(new GoogleGenerativeAIError("Failed to parse stream"));
869
+ return;
870
+ }
871
+ controller.close();
872
+ return;
873
+ }
874
+ currentText += value;
875
+ let match = currentText.match(responseLineRE);
876
+ let parsedResponse;
877
+ while (match) {
878
+ try {
879
+ parsedResponse = JSON.parse(match[1]);
880
+ } catch (e) {
881
+ controller.error(new GoogleGenerativeAIError(`Error parsing JSON response: "${match[1]}"`));
882
+ return;
883
+ }
884
+ controller.enqueue(parsedResponse);
885
+ currentText = currentText.substring(match[0].length);
886
+ match = currentText.match(responseLineRE);
887
+ }
888
+ return pump();
889
+ }).catch((e) => {
890
+ let err = e;
891
+ err.stack = e.stack;
892
+ if (err.name === "AbortError") {
893
+ err = new GoogleGenerativeAIAbortError("Request aborted when reading from the stream");
894
+ } else {
895
+ err = new GoogleGenerativeAIError("Error reading from the stream");
896
+ }
897
+ throw err;
898
+ });
899
+ }
900
+ }
901
+ });
902
+ return stream;
903
+ }
904
+ function aggregateResponses(responses) {
905
+ const lastResponse = responses[responses.length - 1];
906
+ const aggregatedResponse = {
907
+ promptFeedback: lastResponse === null || lastResponse === void 0 ? void 0 : lastResponse.promptFeedback
908
+ };
909
+ for (const response of responses) {
910
+ if (response.candidates) {
911
+ let candidateIndex = 0;
912
+ for (const candidate of response.candidates) {
913
+ if (!aggregatedResponse.candidates) {
914
+ aggregatedResponse.candidates = [];
915
+ }
916
+ if (!aggregatedResponse.candidates[candidateIndex]) {
917
+ aggregatedResponse.candidates[candidateIndex] = {
918
+ index: candidateIndex
919
+ };
920
+ }
921
+ aggregatedResponse.candidates[candidateIndex].citationMetadata = candidate.citationMetadata;
922
+ aggregatedResponse.candidates[candidateIndex].groundingMetadata = candidate.groundingMetadata;
923
+ aggregatedResponse.candidates[candidateIndex].finishReason = candidate.finishReason;
924
+ aggregatedResponse.candidates[candidateIndex].finishMessage = candidate.finishMessage;
925
+ aggregatedResponse.candidates[candidateIndex].safetyRatings = candidate.safetyRatings;
926
+ if (candidate.content && candidate.content.parts) {
927
+ if (!aggregatedResponse.candidates[candidateIndex].content) {
928
+ aggregatedResponse.candidates[candidateIndex].content = {
929
+ role: candidate.content.role || "user",
930
+ parts: []
931
+ };
932
+ }
933
+ const newPart = {};
934
+ for (const part of candidate.content.parts) {
935
+ if (part.text) {
936
+ newPart.text = part.text;
937
+ }
938
+ if (part.functionCall) {
939
+ newPart.functionCall = part.functionCall;
940
+ }
941
+ if (part.executableCode) {
942
+ newPart.executableCode = part.executableCode;
943
+ }
944
+ if (part.codeExecutionResult) {
945
+ newPart.codeExecutionResult = part.codeExecutionResult;
946
+ }
947
+ if (Object.keys(newPart).length === 0) {
948
+ newPart.text = "";
949
+ }
950
+ aggregatedResponse.candidates[candidateIndex].content.parts.push(newPart);
951
+ }
952
+ }
953
+ }
954
+ candidateIndex++;
955
+ }
956
+ if (response.usageMetadata) {
957
+ aggregatedResponse.usageMetadata = response.usageMetadata;
958
+ }
959
+ }
960
+ return aggregatedResponse;
961
+ }
962
+ async function generateContentStream(apiKey, model, params, requestOptions) {
963
+ const response = await makeModelRequest(
964
+ model,
965
+ Task.STREAM_GENERATE_CONTENT,
966
+ apiKey,
967
+ /* stream */
968
+ true,
969
+ JSON.stringify(params),
970
+ requestOptions
971
+ );
972
+ return processStream(response);
973
+ }
974
+ async function generateContent(apiKey, model, params, requestOptions) {
975
+ const response = await makeModelRequest(
976
+ model,
977
+ Task.GENERATE_CONTENT,
978
+ apiKey,
979
+ /* stream */
980
+ false,
981
+ JSON.stringify(params),
982
+ requestOptions
983
+ );
984
+ const responseJson = await response.json();
985
+ const enhancedResponse = addHelpers(responseJson);
986
+ return {
987
+ response: enhancedResponse
988
+ };
989
+ }
990
+ function formatSystemInstruction(input) {
991
+ if (input == null) {
992
+ return void 0;
993
+ } else if (typeof input === "string") {
994
+ return { role: "system", parts: [{ text: input }] };
995
+ } else if (input.text) {
996
+ return { role: "system", parts: [input] };
997
+ } else if (input.parts) {
998
+ if (!input.role) {
999
+ return { role: "system", parts: input.parts };
1000
+ } else {
1001
+ return input;
1002
+ }
1003
+ }
1004
+ }
1005
+ function formatNewContent(request) {
1006
+ let newParts = [];
1007
+ if (typeof request === "string") {
1008
+ newParts = [{ text: request }];
1009
+ } else {
1010
+ for (const partOrString of request) {
1011
+ if (typeof partOrString === "string") {
1012
+ newParts.push({ text: partOrString });
1013
+ } else {
1014
+ newParts.push(partOrString);
1015
+ }
1016
+ }
1017
+ }
1018
+ return assignRoleToPartsAndValidateSendMessageRequest(newParts);
1019
+ }
1020
+ function assignRoleToPartsAndValidateSendMessageRequest(parts) {
1021
+ const userContent = { role: "user", parts: [] };
1022
+ const functionContent = { role: "function", parts: [] };
1023
+ let hasUserContent = false;
1024
+ let hasFunctionContent = false;
1025
+ for (const part of parts) {
1026
+ if ("functionResponse" in part) {
1027
+ functionContent.parts.push(part);
1028
+ hasFunctionContent = true;
1029
+ } else {
1030
+ userContent.parts.push(part);
1031
+ hasUserContent = true;
1032
+ }
1033
+ }
1034
+ if (hasUserContent && hasFunctionContent) {
1035
+ throw new GoogleGenerativeAIError("Within a single message, FunctionResponse cannot be mixed with other type of part in the request for sending chat message.");
1036
+ }
1037
+ if (!hasUserContent && !hasFunctionContent) {
1038
+ throw new GoogleGenerativeAIError("No content is provided for sending chat message.");
1039
+ }
1040
+ if (hasUserContent) {
1041
+ return userContent;
1042
+ }
1043
+ return functionContent;
1044
+ }
1045
+ function formatCountTokensInput(params, modelParams) {
1046
+ var _a;
1047
+ let formattedGenerateContentRequest = {
1048
+ model: modelParams === null || modelParams === void 0 ? void 0 : modelParams.model,
1049
+ generationConfig: modelParams === null || modelParams === void 0 ? void 0 : modelParams.generationConfig,
1050
+ safetySettings: modelParams === null || modelParams === void 0 ? void 0 : modelParams.safetySettings,
1051
+ tools: modelParams === null || modelParams === void 0 ? void 0 : modelParams.tools,
1052
+ toolConfig: modelParams === null || modelParams === void 0 ? void 0 : modelParams.toolConfig,
1053
+ systemInstruction: modelParams === null || modelParams === void 0 ? void 0 : modelParams.systemInstruction,
1054
+ cachedContent: (_a = modelParams === null || modelParams === void 0 ? void 0 : modelParams.cachedContent) === null || _a === void 0 ? void 0 : _a.name,
1055
+ contents: []
1056
+ };
1057
+ const containsGenerateContentRequest = params.generateContentRequest != null;
1058
+ if (params.contents) {
1059
+ if (containsGenerateContentRequest) {
1060
+ throw new GoogleGenerativeAIRequestInputError("CountTokensRequest must have one of contents or generateContentRequest, not both.");
1061
+ }
1062
+ formattedGenerateContentRequest.contents = params.contents;
1063
+ } else if (containsGenerateContentRequest) {
1064
+ formattedGenerateContentRequest = Object.assign(Object.assign({}, formattedGenerateContentRequest), params.generateContentRequest);
1065
+ } else {
1066
+ const content = formatNewContent(params);
1067
+ formattedGenerateContentRequest.contents = [content];
1068
+ }
1069
+ return { generateContentRequest: formattedGenerateContentRequest };
1070
+ }
1071
+ function formatGenerateContentInput(params) {
1072
+ let formattedRequest;
1073
+ if (params.contents) {
1074
+ formattedRequest = params;
1075
+ } else {
1076
+ const content = formatNewContent(params);
1077
+ formattedRequest = { contents: [content] };
1078
+ }
1079
+ if (params.systemInstruction) {
1080
+ formattedRequest.systemInstruction = formatSystemInstruction(params.systemInstruction);
1081
+ }
1082
+ return formattedRequest;
1083
+ }
1084
+ function formatEmbedContentInput(params) {
1085
+ if (typeof params === "string" || Array.isArray(params)) {
1086
+ const content = formatNewContent(params);
1087
+ return { content };
1088
+ }
1089
+ return params;
1090
+ }
1091
+ const VALID_PART_FIELDS = [
1092
+ "text",
1093
+ "inlineData",
1094
+ "functionCall",
1095
+ "functionResponse",
1096
+ "executableCode",
1097
+ "codeExecutionResult"
1098
+ ];
1099
+ const VALID_PARTS_PER_ROLE = {
1100
+ user: ["text", "inlineData"],
1101
+ function: ["functionResponse"],
1102
+ model: ["text", "functionCall", "executableCode", "codeExecutionResult"],
1103
+ // System instructions shouldn't be in history anyway.
1104
+ system: ["text"]
1105
+ };
1106
+ function validateChatHistory(history) {
1107
+ let prevContent = false;
1108
+ for (const currContent of history) {
1109
+ const { role, parts } = currContent;
1110
+ if (!prevContent && role !== "user") {
1111
+ throw new GoogleGenerativeAIError(`First content should be with role 'user', got ${role}`);
1112
+ }
1113
+ if (!POSSIBLE_ROLES.includes(role)) {
1114
+ throw new GoogleGenerativeAIError(`Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`);
1115
+ }
1116
+ if (!Array.isArray(parts)) {
1117
+ throw new GoogleGenerativeAIError("Content should have 'parts' property with an array of Parts");
1118
+ }
1119
+ if (parts.length === 0) {
1120
+ throw new GoogleGenerativeAIError("Each Content should have at least one part");
1121
+ }
1122
+ const countFields = {
1123
+ text: 0,
1124
+ inlineData: 0,
1125
+ functionCall: 0,
1126
+ functionResponse: 0,
1127
+ fileData: 0,
1128
+ executableCode: 0,
1129
+ codeExecutionResult: 0
1130
+ };
1131
+ for (const part of parts) {
1132
+ for (const key of VALID_PART_FIELDS) {
1133
+ if (key in part) {
1134
+ countFields[key] += 1;
1135
+ }
1136
+ }
1137
+ }
1138
+ const validParts = VALID_PARTS_PER_ROLE[role];
1139
+ for (const key of VALID_PART_FIELDS) {
1140
+ if (!validParts.includes(key) && countFields[key] > 0) {
1141
+ throw new GoogleGenerativeAIError(`Content with role '${role}' can't contain '${key}' part`);
1142
+ }
1143
+ }
1144
+ prevContent = true;
1145
+ }
1146
+ }
1147
+ function isValidResponse(response) {
1148
+ var _a;
1149
+ if (response.candidates === void 0 || response.candidates.length === 0) {
1150
+ return false;
1151
+ }
1152
+ const content = (_a = response.candidates[0]) === null || _a === void 0 ? void 0 : _a.content;
1153
+ if (content === void 0) {
1154
+ return false;
1155
+ }
1156
+ if (content.parts === void 0 || content.parts.length === 0) {
1157
+ return false;
1158
+ }
1159
+ for (const part of content.parts) {
1160
+ if (part === void 0 || Object.keys(part).length === 0) {
1161
+ return false;
1162
+ }
1163
+ if (part.text !== void 0 && part.text === "") {
1164
+ return false;
1165
+ }
1166
+ }
1167
+ return true;
1168
+ }
1169
+ const SILENT_ERROR = "SILENT_ERROR";
1170
+ class ChatSession {
1171
+ constructor(apiKey, model, params, _requestOptions = {}) {
1172
+ this.model = model;
1173
+ this.params = params;
1174
+ this._requestOptions = _requestOptions;
1175
+ this._history = [];
1176
+ this._sendPromise = Promise.resolve();
1177
+ this._apiKey = apiKey;
1178
+ if (params === null || params === void 0 ? void 0 : params.history) {
1179
+ validateChatHistory(params.history);
1180
+ this._history = params.history;
1181
+ }
1182
+ }
1183
+ /**
1184
+ * Gets the chat history so far. Blocked prompts are not added to history.
1185
+ * Blocked candidates are not added to history, nor are the prompts that
1186
+ * generated them.
1187
+ */
1188
+ async getHistory() {
1189
+ await this._sendPromise;
1190
+ return this._history;
1191
+ }
1192
+ /**
1193
+ * Sends a chat message and receives a non-streaming
1194
+ * {@link GenerateContentResult}.
1195
+ *
1196
+ * Fields set in the optional {@link SingleRequestOptions} parameter will
1197
+ * take precedence over the {@link RequestOptions} values provided to
1198
+ * {@link GoogleGenerativeAI.getGenerativeModel }.
1199
+ */
1200
+ async sendMessage(request, requestOptions = {}) {
1201
+ var _a, _b, _c, _d, _e, _f;
1202
+ await this._sendPromise;
1203
+ const newContent = formatNewContent(request);
1204
+ const generateContentRequest = {
1205
+ safetySettings: (_a = this.params) === null || _a === void 0 ? void 0 : _a.safetySettings,
1206
+ generationConfig: (_b = this.params) === null || _b === void 0 ? void 0 : _b.generationConfig,
1207
+ tools: (_c = this.params) === null || _c === void 0 ? void 0 : _c.tools,
1208
+ toolConfig: (_d = this.params) === null || _d === void 0 ? void 0 : _d.toolConfig,
1209
+ systemInstruction: (_e = this.params) === null || _e === void 0 ? void 0 : _e.systemInstruction,
1210
+ cachedContent: (_f = this.params) === null || _f === void 0 ? void 0 : _f.cachedContent,
1211
+ contents: [...this._history, newContent]
1212
+ };
1213
+ const chatSessionRequestOptions = Object.assign(Object.assign({}, this._requestOptions), requestOptions);
1214
+ let finalResult;
1215
+ this._sendPromise = this._sendPromise.then(() => generateContent(this._apiKey, this.model, generateContentRequest, chatSessionRequestOptions)).then((result) => {
1216
+ var _a2;
1217
+ if (isValidResponse(result.response)) {
1218
+ this._history.push(newContent);
1219
+ const responseContent = Object.assign({
1220
+ parts: [],
1221
+ // Response seems to come back without a role set.
1222
+ role: "model"
1223
+ }, (_a2 = result.response.candidates) === null || _a2 === void 0 ? void 0 : _a2[0].content);
1224
+ this._history.push(responseContent);
1225
+ } else {
1226
+ const blockErrorMessage = formatBlockErrorMessage(result.response);
1227
+ if (blockErrorMessage) {
1228
+ console.warn(`sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`);
1229
+ }
1230
+ }
1231
+ finalResult = result;
1232
+ }).catch((e) => {
1233
+ this._sendPromise = Promise.resolve();
1234
+ throw e;
1235
+ });
1236
+ await this._sendPromise;
1237
+ return finalResult;
1238
+ }
1239
+ /**
1240
+ * Sends a chat message and receives the response as a
1241
+ * {@link GenerateContentStreamResult} containing an iterable stream
1242
+ * and a response promise.
1243
+ *
1244
+ * Fields set in the optional {@link SingleRequestOptions} parameter will
1245
+ * take precedence over the {@link RequestOptions} values provided to
1246
+ * {@link GoogleGenerativeAI.getGenerativeModel }.
1247
+ */
1248
+ async sendMessageStream(request, requestOptions = {}) {
1249
+ var _a, _b, _c, _d, _e, _f;
1250
+ await this._sendPromise;
1251
+ const newContent = formatNewContent(request);
1252
+ const generateContentRequest = {
1253
+ safetySettings: (_a = this.params) === null || _a === void 0 ? void 0 : _a.safetySettings,
1254
+ generationConfig: (_b = this.params) === null || _b === void 0 ? void 0 : _b.generationConfig,
1255
+ tools: (_c = this.params) === null || _c === void 0 ? void 0 : _c.tools,
1256
+ toolConfig: (_d = this.params) === null || _d === void 0 ? void 0 : _d.toolConfig,
1257
+ systemInstruction: (_e = this.params) === null || _e === void 0 ? void 0 : _e.systemInstruction,
1258
+ cachedContent: (_f = this.params) === null || _f === void 0 ? void 0 : _f.cachedContent,
1259
+ contents: [...this._history, newContent]
1260
+ };
1261
+ const chatSessionRequestOptions = Object.assign(Object.assign({}, this._requestOptions), requestOptions);
1262
+ const streamPromise = generateContentStream(this._apiKey, this.model, generateContentRequest, chatSessionRequestOptions);
1263
+ this._sendPromise = this._sendPromise.then(() => streamPromise).catch((_ignored) => {
1264
+ throw new Error(SILENT_ERROR);
1265
+ }).then((streamResult) => streamResult.response).then((response) => {
1266
+ if (isValidResponse(response)) {
1267
+ this._history.push(newContent);
1268
+ const responseContent = Object.assign({}, response.candidates[0].content);
1269
+ if (!responseContent.role) {
1270
+ responseContent.role = "model";
1271
+ }
1272
+ this._history.push(responseContent);
1273
+ } else {
1274
+ const blockErrorMessage = formatBlockErrorMessage(response);
1275
+ if (blockErrorMessage) {
1276
+ console.warn(`sendMessageStream() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`);
1277
+ }
1278
+ }
1279
+ }).catch((e) => {
1280
+ if (e.message !== SILENT_ERROR) {
1281
+ console.error(e);
1282
+ }
1283
+ });
1284
+ return streamPromise;
1285
+ }
1286
+ }
1287
+ async function countTokens(apiKey, model, params, singleRequestOptions) {
1288
+ const response = await makeModelRequest(model, Task.COUNT_TOKENS, apiKey, false, JSON.stringify(params), singleRequestOptions);
1289
+ return response.json();
1290
+ }
1291
+ async function embedContent(apiKey, model, params, requestOptions) {
1292
+ const response = await makeModelRequest(model, Task.EMBED_CONTENT, apiKey, false, JSON.stringify(params), requestOptions);
1293
+ return response.json();
1294
+ }
1295
+ async function batchEmbedContents(apiKey, model, params, requestOptions) {
1296
+ const requestsWithModel = params.requests.map((request) => {
1297
+ return Object.assign(Object.assign({}, request), { model });
1298
+ });
1299
+ const response = await makeModelRequest(model, Task.BATCH_EMBED_CONTENTS, apiKey, false, JSON.stringify({ requests: requestsWithModel }), requestOptions);
1300
+ return response.json();
1301
+ }
1302
+ class GenerativeModel {
1303
+ constructor(apiKey, modelParams, _requestOptions = {}) {
1304
+ this.apiKey = apiKey;
1305
+ this._requestOptions = _requestOptions;
1306
+ if (modelParams.model.includes("/")) {
1307
+ this.model = modelParams.model;
1308
+ } else {
1309
+ this.model = `models/${modelParams.model}`;
1310
+ }
1311
+ this.generationConfig = modelParams.generationConfig || {};
1312
+ this.safetySettings = modelParams.safetySettings || [];
1313
+ this.tools = modelParams.tools;
1314
+ this.toolConfig = modelParams.toolConfig;
1315
+ this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction);
1316
+ this.cachedContent = modelParams.cachedContent;
1317
+ }
1318
+ /**
1319
+ * Makes a single non-streaming call to the model
1320
+ * and returns an object containing a single {@link GenerateContentResponse}.
1321
+ *
1322
+ * Fields set in the optional {@link SingleRequestOptions} parameter will
1323
+ * take precedence over the {@link RequestOptions} values provided to
1324
+ * {@link GoogleGenerativeAI.getGenerativeModel }.
1325
+ */
1326
+ async generateContent(request, requestOptions = {}) {
1327
+ var _a;
1328
+ const formattedParams = formatGenerateContentInput(request);
1329
+ const generativeModelRequestOptions = Object.assign(Object.assign({}, this._requestOptions), requestOptions);
1330
+ return generateContent(this.apiKey, this.model, Object.assign({ generationConfig: this.generationConfig, safetySettings: this.safetySettings, tools: this.tools, toolConfig: this.toolConfig, systemInstruction: this.systemInstruction, cachedContent: (_a = this.cachedContent) === null || _a === void 0 ? void 0 : _a.name }, formattedParams), generativeModelRequestOptions);
1331
+ }
1332
+ /**
1333
+ * Makes a single streaming call to the model and returns an object
1334
+ * containing an iterable stream that iterates over all chunks in the
1335
+ * streaming response as well as a promise that returns the final
1336
+ * aggregated response.
1337
+ *
1338
+ * Fields set in the optional {@link SingleRequestOptions} parameter will
1339
+ * take precedence over the {@link RequestOptions} values provided to
1340
+ * {@link GoogleGenerativeAI.getGenerativeModel }.
1341
+ */
1342
+ async generateContentStream(request, requestOptions = {}) {
1343
+ var _a;
1344
+ const formattedParams = formatGenerateContentInput(request);
1345
+ const generativeModelRequestOptions = Object.assign(Object.assign({}, this._requestOptions), requestOptions);
1346
+ return generateContentStream(this.apiKey, this.model, Object.assign({ generationConfig: this.generationConfig, safetySettings: this.safetySettings, tools: this.tools, toolConfig: this.toolConfig, systemInstruction: this.systemInstruction, cachedContent: (_a = this.cachedContent) === null || _a === void 0 ? void 0 : _a.name }, formattedParams), generativeModelRequestOptions);
1347
+ }
1348
+ /**
1349
+ * Gets a new {@link ChatSession} instance which can be used for
1350
+ * multi-turn chats.
1351
+ */
1352
+ startChat(startChatParams) {
1353
+ var _a;
1354
+ return new ChatSession(this.apiKey, this.model, Object.assign({ generationConfig: this.generationConfig, safetySettings: this.safetySettings, tools: this.tools, toolConfig: this.toolConfig, systemInstruction: this.systemInstruction, cachedContent: (_a = this.cachedContent) === null || _a === void 0 ? void 0 : _a.name }, startChatParams), this._requestOptions);
1355
+ }
1356
+ /**
1357
+ * Counts the tokens in the provided request.
1358
+ *
1359
+ * Fields set in the optional {@link SingleRequestOptions} parameter will
1360
+ * take precedence over the {@link RequestOptions} values provided to
1361
+ * {@link GoogleGenerativeAI.getGenerativeModel }.
1362
+ */
1363
+ async countTokens(request, requestOptions = {}) {
1364
+ const formattedParams = formatCountTokensInput(request, {
1365
+ model: this.model,
1366
+ generationConfig: this.generationConfig,
1367
+ safetySettings: this.safetySettings,
1368
+ tools: this.tools,
1369
+ toolConfig: this.toolConfig,
1370
+ systemInstruction: this.systemInstruction,
1371
+ cachedContent: this.cachedContent
1372
+ });
1373
+ const generativeModelRequestOptions = Object.assign(Object.assign({}, this._requestOptions), requestOptions);
1374
+ return countTokens(this.apiKey, this.model, formattedParams, generativeModelRequestOptions);
1375
+ }
1376
+ /**
1377
+ * Embeds the provided content.
1378
+ *
1379
+ * Fields set in the optional {@link SingleRequestOptions} parameter will
1380
+ * take precedence over the {@link RequestOptions} values provided to
1381
+ * {@link GoogleGenerativeAI.getGenerativeModel }.
1382
+ */
1383
+ async embedContent(request, requestOptions = {}) {
1384
+ const formattedParams = formatEmbedContentInput(request);
1385
+ const generativeModelRequestOptions = Object.assign(Object.assign({}, this._requestOptions), requestOptions);
1386
+ return embedContent(this.apiKey, this.model, formattedParams, generativeModelRequestOptions);
1387
+ }
1388
+ /**
1389
+ * Embeds an array of {@link EmbedContentRequest}s.
1390
+ *
1391
+ * Fields set in the optional {@link SingleRequestOptions} parameter will
1392
+ * take precedence over the {@link RequestOptions} values provided to
1393
+ * {@link GoogleGenerativeAI.getGenerativeModel }.
1394
+ */
1395
+ async batchEmbedContents(batchEmbedContentRequest, requestOptions = {}) {
1396
+ const generativeModelRequestOptions = Object.assign(Object.assign({}, this._requestOptions), requestOptions);
1397
+ return batchEmbedContents(this.apiKey, this.model, batchEmbedContentRequest, generativeModelRequestOptions);
1398
+ }
1399
+ }
1400
+ class GoogleGenerativeAI {
1401
+ constructor(apiKey) {
1402
+ this.apiKey = apiKey;
1403
+ }
1404
+ /**
1405
+ * Gets a {@link GenerativeModel} instance for the provided model name.
1406
+ */
1407
+ getGenerativeModel(modelParams, requestOptions) {
1408
+ if (!modelParams.model) {
1409
+ throw new GoogleGenerativeAIError(`Must provide a model name. Example: genai.getGenerativeModel({ model: 'my-model-name' })`);
1410
+ }
1411
+ return new GenerativeModel(this.apiKey, modelParams, requestOptions);
1412
+ }
1413
+ /**
1414
+ * Creates a {@link GenerativeModel} instance from provided content cache.
1415
+ */
1416
+ getGenerativeModelFromCachedContent(cachedContent, modelParams, requestOptions) {
1417
+ if (!cachedContent.name) {
1418
+ throw new GoogleGenerativeAIRequestInputError("Cached content must contain a `name` field.");
1419
+ }
1420
+ if (!cachedContent.model) {
1421
+ throw new GoogleGenerativeAIRequestInputError("Cached content must contain a `model` field.");
1422
+ }
1423
+ const disallowedDuplicates = ["model", "systemInstruction"];
1424
+ for (const key of disallowedDuplicates) {
1425
+ if ((modelParams === null || modelParams === void 0 ? void 0 : modelParams[key]) && cachedContent[key] && (modelParams === null || modelParams === void 0 ? void 0 : modelParams[key]) !== cachedContent[key]) {
1426
+ if (key === "model") {
1427
+ const modelParamsComp = modelParams.model.startsWith("models/") ? modelParams.model.replace("models/", "") : modelParams.model;
1428
+ const cachedContentComp = cachedContent.model.startsWith("models/") ? cachedContent.model.replace("models/", "") : cachedContent.model;
1429
+ if (modelParamsComp === cachedContentComp) {
1430
+ continue;
1431
+ }
1432
+ }
1433
+ throw new GoogleGenerativeAIRequestInputError(`Different value for "${key}" specified in modelParams (${modelParams[key]}) and cachedContent (${cachedContent[key]})`);
1434
+ }
1435
+ }
1436
+ const modelParamsFromCache = Object.assign(Object.assign({}, modelParams), { model: cachedContent.model, tools: cachedContent.tools, toolConfig: cachedContent.toolConfig, systemInstruction: cachedContent.systemInstruction, cachedContent });
1437
+ return new GenerativeModel(this.apiKey, modelParamsFromCache, requestOptions);
1438
+ }
1439
+ }
1440
+ function convertToolsToGenAI(tools, extra) {
1441
+ const genAITools = processTools(tools);
1442
+ const toolConfig = createToolConfig(genAITools, extra);
1443
+ return {
1444
+ tools: genAITools,
1445
+ toolConfig
1446
+ };
1447
+ }
1448
+ function processTools(tools) {
1449
+ let functionDeclarationTools = [];
1450
+ const genAITools = [];
1451
+ tools.forEach((tool) => {
1452
+ if (isLangChainTool(tool)) {
1453
+ const [convertedTool] = convertToGenerativeAITools([tool]);
1454
+ if (convertedTool.functionDeclarations) functionDeclarationTools.push(...convertedTool.functionDeclarations);
1455
+ } else if (isOpenAITool(tool)) {
1456
+ const { functionDeclarations } = convertOpenAIToolToGenAI(tool);
1457
+ if (functionDeclarations) functionDeclarationTools.push(...functionDeclarations);
1458
+ else throw new Error("Failed to convert OpenAI structured tool to GenerativeAI tool");
1459
+ } else genAITools.push(tool);
1460
+ });
1461
+ const genAIFunctionDeclaration = genAITools.find((t) => "functionDeclarations" in t);
1462
+ if (genAIFunctionDeclaration) return genAITools.map((tool) => {
1463
+ if (functionDeclarationTools?.length > 0 && "functionDeclarations" in tool) {
1464
+ const newTool = { functionDeclarations: [...tool.functionDeclarations || [], ...functionDeclarationTools] };
1465
+ functionDeclarationTools = [];
1466
+ return newTool;
1467
+ }
1468
+ return tool;
1469
+ });
1470
+ return [...genAITools, ...functionDeclarationTools.length > 0 ? [{ functionDeclarations: functionDeclarationTools }] : []];
1471
+ }
1472
+ function convertOpenAIToolToGenAI(tool) {
1473
+ return { functionDeclarations: [{
1474
+ name: tool.function.name,
1475
+ description: tool.function.description,
1476
+ parameters: removeAdditionalProperties(tool.function.parameters)
1477
+ }] };
1478
+ }
1479
+ function createToolConfig(genAITools, extra) {
1480
+ if (!genAITools.length || !extra) return void 0;
1481
+ const { toolChoice, allowedFunctionNames } = extra;
1482
+ const modeMap = {
1483
+ any: FunctionCallingMode.ANY,
1484
+ auto: FunctionCallingMode.AUTO,
1485
+ none: FunctionCallingMode.NONE
1486
+ };
1487
+ if (toolChoice && [
1488
+ "any",
1489
+ "auto",
1490
+ "none"
1491
+ ].includes(toolChoice)) return { functionCallingConfig: {
1492
+ mode: modeMap[toolChoice] ?? "MODE_UNSPECIFIED",
1493
+ allowedFunctionNames
1494
+ } };
1495
+ if (typeof toolChoice === "string" || allowedFunctionNames) return { functionCallingConfig: {
1496
+ mode: FunctionCallingMode.ANY,
1497
+ allowedFunctionNames: [...allowedFunctionNames ?? [], ...toolChoice && typeof toolChoice === "string" ? [toolChoice] : []]
1498
+ } };
1499
+ return void 0;
1500
+ }
1501
+ var ChatGoogleGenerativeAI = class extends BaseChatModel {
1502
+ static lc_name() {
1503
+ return "ChatGoogleGenerativeAI";
1504
+ }
1505
+ lc_serializable = true;
1506
+ get lc_secrets() {
1507
+ return { apiKey: "GOOGLE_API_KEY" };
1508
+ }
1509
+ lc_namespace = [
1510
+ "langchain",
1511
+ "chat_models",
1512
+ "google_genai"
1513
+ ];
1514
+ get lc_aliases() {
1515
+ return { apiKey: "google_api_key" };
1516
+ }
1517
+ model;
1518
+ temperature;
1519
+ maxOutputTokens;
1520
+ topP;
1521
+ topK;
1522
+ stopSequences = [];
1523
+ safetySettings;
1524
+ apiKey;
1525
+ streaming = false;
1526
+ json;
1527
+ streamUsage = true;
1528
+ convertSystemMessageToHumanContent;
1529
+ client;
1530
+ get _isMultimodalModel() {
1531
+ return this.model.includes("vision") || this.model.startsWith("gemini-1.5") || this.model.startsWith("gemini-2") || this.model.startsWith("gemma-3-") && !this.model.startsWith("gemma-3-1b");
1532
+ }
1533
+ constructor(fields) {
1534
+ super(fields);
1535
+ this.model = fields.model.replace(/^models\//, "");
1536
+ this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;
1537
+ if (this.maxOutputTokens && this.maxOutputTokens < 0) throw new Error("`maxOutputTokens` must be a positive integer");
1538
+ this.temperature = fields.temperature ?? this.temperature;
1539
+ if (this.temperature && (this.temperature < 0 || this.temperature > 2)) throw new Error("`temperature` must be in the range of [0.0,2.0]");
1540
+ this.topP = fields.topP ?? this.topP;
1541
+ if (this.topP && this.topP < 0) throw new Error("`topP` must be a positive integer");
1542
+ if (this.topP && this.topP > 1) throw new Error("`topP` must be below 1.");
1543
+ this.topK = fields.topK ?? this.topK;
1544
+ if (this.topK && this.topK < 0) throw new Error("`topK` must be a positive integer");
1545
+ this.stopSequences = fields.stopSequences ?? this.stopSequences;
1546
+ this.apiKey = fields.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
1547
+ if (!this.apiKey) throw new Error("Please set an API key for Google GenerativeAI in the environment variable GOOGLE_API_KEY or in the `apiKey` field of the ChatGoogleGenerativeAI constructor");
1548
+ this.safetySettings = fields.safetySettings ?? this.safetySettings;
1549
+ if (this.safetySettings && this.safetySettings.length > 0) {
1550
+ const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category));
1551
+ if (safetySettingsSet.size !== this.safetySettings.length) throw new Error("The categories in `safetySettings` array must be unique");
1552
+ }
1553
+ this.streaming = fields.streaming ?? this.streaming;
1554
+ this.json = fields.json;
1555
+ this.client = new GoogleGenerativeAI(this.apiKey).getGenerativeModel({
1556
+ model: this.model,
1557
+ safetySettings: this.safetySettings,
1558
+ generationConfig: {
1559
+ stopSequences: this.stopSequences,
1560
+ maxOutputTokens: this.maxOutputTokens,
1561
+ temperature: this.temperature,
1562
+ topP: this.topP,
1563
+ topK: this.topK,
1564
+ ...this.json ? { responseMimeType: "application/json" } : {}
1565
+ }
1566
+ }, {
1567
+ apiVersion: fields.apiVersion,
1568
+ baseUrl: fields.baseUrl
1569
+ });
1570
+ this.streamUsage = fields.streamUsage ?? this.streamUsage;
1571
+ }
1572
+ useCachedContent(cachedContent, modelParams, requestOptions) {
1573
+ if (!this.apiKey) return;
1574
+ this.client = new GoogleGenerativeAI(this.apiKey).getGenerativeModelFromCachedContent(cachedContent, modelParams, requestOptions);
1575
+ }
1576
+ get useSystemInstruction() {
1577
+ return typeof this.convertSystemMessageToHumanContent === "boolean" ? !this.convertSystemMessageToHumanContent : this.computeUseSystemInstruction;
1578
+ }
1579
+ get computeUseSystemInstruction() {
1580
+ if (this.model === "gemini-1.0-pro-001") return false;
1581
+ else if (this.model.startsWith("gemini-pro-vision")) return false;
1582
+ else if (this.model.startsWith("gemini-1.0-pro-vision")) return false;
1583
+ else if (this.model === "gemini-pro") return false;
1584
+ return true;
1585
+ }
1586
+ getLsParams(options) {
1587
+ return {
1588
+ ls_provider: "google_genai",
1589
+ ls_model_name: this.model,
1590
+ ls_model_type: "chat",
1591
+ ls_temperature: this.client.generationConfig.temperature,
1592
+ ls_max_tokens: this.client.generationConfig.maxOutputTokens,
1593
+ ls_stop: options.stop
1594
+ };
1595
+ }
1596
+ _combineLLMOutput() {
1597
+ return [];
1598
+ }
1599
+ _llmType() {
1600
+ return "googlegenerativeai";
1601
+ }
1602
+ bindTools(tools, kwargs) {
1603
+ return this.withConfig({
1604
+ tools: convertToolsToGenAI(tools)?.tools,
1605
+ ...kwargs
1606
+ });
1607
+ }
1608
+ invocationParams(options) {
1609
+ const toolsAndConfig = options?.tools?.length ? convertToolsToGenAI(options.tools, {
1610
+ toolChoice: options.tool_choice,
1611
+ allowedFunctionNames: options.allowedFunctionNames
1612
+ }) : void 0;
1613
+ if (options?.responseSchema) {
1614
+ this.client.generationConfig.responseSchema = options.responseSchema;
1615
+ this.client.generationConfig.responseMimeType = "application/json";
1616
+ } else {
1617
+ this.client.generationConfig.responseSchema = void 0;
1618
+ this.client.generationConfig.responseMimeType = this.json ? "application/json" : void 0;
1619
+ }
1620
+ return {
1621
+ ...toolsAndConfig?.tools ? { tools: toolsAndConfig.tools } : {},
1622
+ ...toolsAndConfig?.toolConfig ? { toolConfig: toolsAndConfig.toolConfig } : {}
1623
+ };
1624
+ }
1625
+ async _generate(messages, options, runManager) {
1626
+ const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel, this.useSystemInstruction);
1627
+ let actualPrompt = prompt;
1628
+ if (prompt[0].role === "system") {
1629
+ const [systemInstruction] = prompt;
1630
+ this.client.systemInstruction = systemInstruction;
1631
+ actualPrompt = prompt.slice(1);
1632
+ }
1633
+ const parameters = this.invocationParams(options);
1634
+ if (this.streaming) {
1635
+ const tokenUsage = {};
1636
+ const stream = this._streamResponseChunks(messages, options, runManager);
1637
+ const finalChunks = {};
1638
+ for await (const chunk of stream) {
1639
+ const index = chunk.generationInfo?.completion ?? 0;
1640
+ if (finalChunks[index] === void 0) finalChunks[index] = chunk;
1641
+ else finalChunks[index] = finalChunks[index].concat(chunk);
1642
+ }
1643
+ const generations = Object.entries(finalChunks).sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10)).map(([_, value]) => value);
1644
+ return {
1645
+ generations,
1646
+ llmOutput: { estimatedTokenUsage: tokenUsage }
1647
+ };
1648
+ }
1649
+ const res = await this.completionWithRetry({
1650
+ ...parameters,
1651
+ contents: actualPrompt
1652
+ });
1653
+ let usageMetadata;
1654
+ if ("usageMetadata" in res.response) {
1655
+ const genAIUsageMetadata = res.response.usageMetadata;
1656
+ usageMetadata = {
1657
+ input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
1658
+ output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
1659
+ total_tokens: genAIUsageMetadata.totalTokenCount ?? 0
1660
+ };
1661
+ }
1662
+ const generationResult = mapGenerateContentResultToChatResult(res.response, { usageMetadata });
1663
+ if (generationResult.generations?.length > 0) await runManager?.handleLLMNewToken(generationResult.generations[0]?.text ?? "");
1664
+ return generationResult;
1665
+ }
1666
+ async *_streamResponseChunks(messages, options, runManager) {
1667
+ const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel, this.useSystemInstruction);
1668
+ let actualPrompt = prompt;
1669
+ if (prompt[0].role === "system") {
1670
+ const [systemInstruction] = prompt;
1671
+ this.client.systemInstruction = systemInstruction;
1672
+ actualPrompt = prompt.slice(1);
1673
+ }
1674
+ const parameters = this.invocationParams(options);
1675
+ const request = {
1676
+ ...parameters,
1677
+ contents: actualPrompt
1678
+ };
1679
+ const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
1680
+ const { stream: stream$1 } = await this.client.generateContentStream(request);
1681
+ return stream$1;
1682
+ });
1683
+ let usageMetadata;
1684
+ let prevPromptTokenCount = 0;
1685
+ let prevCandidatesTokenCount = 0;
1686
+ let prevTotalTokenCount = 0;
1687
+ let index = 0;
1688
+ for await (const response of stream) {
1689
+ if ("usageMetadata" in response && response.usageMetadata !== void 0 && this.streamUsage !== false && options.streamUsage !== false) {
1690
+ usageMetadata = {
1691
+ input_tokens: response.usageMetadata.promptTokenCount ?? 0,
1692
+ output_tokens: response.usageMetadata.candidatesTokenCount ?? 0,
1693
+ total_tokens: response.usageMetadata.totalTokenCount ?? 0
1694
+ };
1695
+ const newPromptTokenCount = response.usageMetadata.promptTokenCount ?? 0;
1696
+ usageMetadata.input_tokens = Math.max(0, newPromptTokenCount - prevPromptTokenCount);
1697
+ prevPromptTokenCount = newPromptTokenCount;
1698
+ const newCandidatesTokenCount = response.usageMetadata.candidatesTokenCount ?? 0;
1699
+ usageMetadata.output_tokens = Math.max(0, newCandidatesTokenCount - prevCandidatesTokenCount);
1700
+ prevCandidatesTokenCount = newCandidatesTokenCount;
1701
+ const newTotalTokenCount = response.usageMetadata.totalTokenCount ?? 0;
1702
+ usageMetadata.total_tokens = Math.max(0, newTotalTokenCount - prevTotalTokenCount);
1703
+ prevTotalTokenCount = newTotalTokenCount;
1704
+ }
1705
+ const chunk = convertResponseContentToChatGenerationChunk(response, {
1706
+ usageMetadata,
1707
+ index
1708
+ });
1709
+ index += 1;
1710
+ if (!chunk) continue;
1711
+ yield chunk;
1712
+ await runManager?.handleLLMNewToken(chunk.text ?? "");
1713
+ }
1714
+ }
1715
+ async completionWithRetry(request, options) {
1716
+ return this.caller.callWithOptions({ signal: options?.signal }, async () => {
1717
+ try {
1718
+ return await this.client.generateContent(request);
1719
+ } catch (e) {
1720
+ if (e.message?.includes("400 Bad Request")) e.status = 400;
1721
+ throw e;
1722
+ }
1723
+ });
1724
+ }
1725
+ withStructuredOutput(outputSchema, config) {
1726
+ const schema = outputSchema;
1727
+ const name = config?.name;
1728
+ const method = config?.method;
1729
+ const includeRaw = config?.includeRaw;
1730
+ if (method === "jsonMode") throw new Error(`ChatGoogleGenerativeAI only supports "jsonSchema" or "functionCalling" as a method.`);
1731
+ let llm;
1732
+ let outputParser;
1733
+ if (method === "functionCalling") {
1734
+ let functionName = name ?? "extract";
1735
+ let tools;
1736
+ if (isInteropZodSchema(schema)) {
1737
+ const jsonSchema = schemaToGenerativeAIParameters(schema);
1738
+ tools = [{ functionDeclarations: [{
1739
+ name: functionName,
1740
+ description: jsonSchema.description ?? "A function available to call.",
1741
+ parameters: jsonSchema
1742
+ }] }];
1743
+ outputParser = new GoogleGenerativeAIToolsOutputParser({
1744
+ returnSingle: true,
1745
+ keyName: functionName,
1746
+ zodSchema: schema
1747
+ });
1748
+ } else {
1749
+ let geminiFunctionDefinition;
1750
+ if (typeof schema.name === "string" && typeof schema.parameters === "object" && schema.parameters != null) {
1751
+ geminiFunctionDefinition = schema;
1752
+ geminiFunctionDefinition.parameters = removeAdditionalProperties(schema.parameters);
1753
+ functionName = schema.name;
1754
+ } else geminiFunctionDefinition = {
1755
+ name: functionName,
1756
+ description: schema.description ?? "",
1757
+ parameters: removeAdditionalProperties(schema)
1758
+ };
1759
+ tools = [{ functionDeclarations: [geminiFunctionDefinition] }];
1760
+ outputParser = new GoogleGenerativeAIToolsOutputParser({
1761
+ returnSingle: true,
1762
+ keyName: functionName
1763
+ });
1764
+ }
1765
+ llm = this.bindTools(tools).withConfig({ allowedFunctionNames: [functionName] });
1766
+ } else {
1767
+ const jsonSchema = schemaToGenerativeAIParameters(schema);
1768
+ llm = this.withConfig({ responseSchema: jsonSchema });
1769
+ outputParser = new JsonOutputParser();
1770
+ }
1771
+ if (!includeRaw) return llm.pipe(outputParser).withConfig({ runName: "ChatGoogleGenerativeAIStructuredOutput" });
1772
+ const parserAssign = RunnablePassthrough.assign({ parsed: (input, config$1) => outputParser.invoke(input.raw, config$1) });
1773
+ const parserNone = RunnablePassthrough.assign({ parsed: () => null });
1774
+ const parsedWithFallback = parserAssign.withFallbacks({ fallbacks: [parserNone] });
1775
+ return RunnableSequence.from([{ raw: llm }, parsedWithFallback]).withConfig({ runName: "StructuredOutputRunnable" });
1776
+ }
1777
+ };
1778
+ export {
1779
+ ChatGoogleGenerativeAI
1780
+ };