@langchain/google-common 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/types.d.ts CHANGED
@@ -118,6 +118,34 @@ export interface GoogleAIModelParams {
118
118
  * among the 3 most probable tokens (using temperature).
119
119
  */
120
120
  topK?: number;
121
+ /**
122
+ * Presence penalty applied to the next token's logprobs
123
+ * if the token has already been seen in the response.
124
+ * This penalty is binary on/off and not dependent on the
125
+ * number of times the token is used (after the first).
126
+ * Use frequencyPenalty for a penalty that increases with each use.
127
+ * A positive penalty will discourage the use of tokens that have
128
+ * already been used in the response, increasing the vocabulary.
129
+ * A negative penalty will encourage the use of tokens that have
130
+ * already been used in the response, decreasing the vocabulary.
131
+ */
132
+ presencePenalty?: number;
133
+ /**
134
+ * Frequency penalty applied to the next token's logprobs,
135
+ * multiplied by the number of times each token has been seen
136
+ * in the response so far.
137
+ * A positive penalty will discourage the use of tokens that
138
+ * have already been used, proportional to the number of times
139
+ * the token has been used:
140
+ * The more a token is used, the more difficult it is for the model
141
+ * to use that token again, increasing the vocabulary of responses.
142
+ * Caution: A _negative_ penalty will encourage the model to reuse
143
+ * tokens proportional to the number of times the token has been used.
144
+ * Small negative values will reduce the vocabulary of a response.
145
+ * Larger negative values will cause the model to start repeating
146
+ * a common token until it hits the maxOutputTokens limit.
147
+ */
148
+ frequencyPenalty?: number;
121
149
  stopSequences?: string[];
122
150
  safetySettings?: GoogleAISafetySetting[];
123
151
  convertSystemMessageToHumanContent?: boolean;
@@ -136,6 +164,19 @@ export interface GoogleAIModelParams {
136
164
  * @default false
137
165
  */
138
166
  streaming?: boolean;
167
+ /**
168
+ * Whether to return log probabilities of the output tokens or not.
169
+ * If true, returns the log probabilities of each output token
170
+ * returned in the content of message.
171
+ */
172
+ logprobs?: boolean;
173
+ /**
174
+ * An integer between 0 and 5 specifying the number of
175
+ * most likely tokens to return at each token position,
176
+ * each with an associated log probability.
177
+ * logprobs must be set to true if this parameter is used.
178
+ */
179
+ topLogprobs?: number;
139
180
  }
140
181
  export type GoogleAIToolType = BindToolsInput | GeminiTool;
141
182
  /**
@@ -219,6 +260,72 @@ export type GeminiSafetyRating = {
219
260
  category: string;
220
261
  probability: string;
221
262
  } & Record<string, unknown>;
263
+ export interface GeminiCitationMetadata {
264
+ citations: GeminiCitation[];
265
+ }
266
+ export interface GeminiCitation {
267
+ startIndex: number;
268
+ endIndex: number;
269
+ uri: string;
270
+ title: string;
271
+ license: string;
272
+ publicationDate: GoogleTypeDate;
273
+ }
274
+ export interface GoogleTypeDate {
275
+ year: number;
276
+ month: number;
277
+ day: number;
278
+ }
279
+ export interface GeminiGroundingMetadata {
280
+ webSearchQueries?: string[];
281
+ searchEntryPoint?: GeminiSearchEntryPoint;
282
+ groundingChunks: GeminiGroundingChunk[];
283
+ groundingSupports?: GeminiGroundingSupport[];
284
+ retrievalMetadata?: GeminiRetrievalMetadata;
285
+ }
286
+ export interface GeminiSearchEntryPoint {
287
+ renderedContent?: string;
288
+ sdkBlob?: string;
289
+ }
290
+ export interface GeminiGroundingChunk {
291
+ web: GeminiGroundingChunkWeb;
292
+ retrievedContext: GeminiGroundingChunkRetrievedContext;
293
+ }
294
+ export interface GeminiGroundingChunkWeb {
295
+ uri: string;
296
+ title: string;
297
+ }
298
+ export interface GeminiGroundingChunkRetrievedContext {
299
+ uri: string;
300
+ title: string;
301
+ text: string;
302
+ }
303
+ export interface GeminiGroundingSupport {
304
+ segment: GeminiSegment;
305
+ groundingChunkIndices: number[];
306
+ confidenceScores: number[];
307
+ }
308
+ export interface GeminiSegment {
309
+ partIndex: number;
310
+ startIndex: number;
311
+ endIndex: number;
312
+ text: string;
313
+ }
314
+ export interface GeminiRetrievalMetadata {
315
+ googleSearchDynamicRetrievalScore: number;
316
+ }
317
+ export interface GeminiLogprobsResult {
318
+ topCandidates: GeminiLogprobsTopCandidate[];
319
+ chosenCandidates: GeminiLogprobsResultCandidate[];
320
+ }
321
+ export interface GeminiLogprobsTopCandidate {
322
+ candidates: GeminiLogprobsResultCandidate[];
323
+ }
324
+ export interface GeminiLogprobsResultCandidate {
325
+ token: string;
326
+ tokenId: number;
327
+ logProbability: number;
328
+ }
222
329
  export type GeminiRole = "system" | "user" | "model" | "function";
223
330
  export interface GeminiContent {
224
331
  parts: GeminiPart[];
@@ -227,14 +334,20 @@ export interface GeminiContent {
227
334
  export interface GeminiTool {
228
335
  functionDeclarations?: GeminiFunctionDeclaration[];
229
336
  googleSearchRetrieval?: GoogleSearchRetrieval;
337
+ googleSearch?: GoogleSearch;
230
338
  retrieval?: VertexAIRetrieval;
231
339
  }
340
+ export type GoogleSearchToolSetting = boolean | "googleSearchRetrieval" | "googleSearch" | string;
341
+ export declare const GeminiSearchToolAttributes: string[];
342
+ export declare const GeminiToolAttributes: string[];
232
343
  export interface GoogleSearchRetrieval {
233
344
  dynamicRetrievalConfig?: {
234
345
  mode?: string;
235
346
  dynamicThreshold?: number;
236
347
  };
237
348
  }
349
+ export interface GoogleSearch {
350
+ }
238
351
  export interface VertexAIRetrieval {
239
352
  vertexAiSearch: {
240
353
  datastore: string;
@@ -264,7 +377,11 @@ export interface GeminiGenerationConfig {
264
377
  temperature?: number;
265
378
  topP?: number;
266
379
  topK?: number;
380
+ presencePenalty?: number;
381
+ frequencyPenalty?: number;
267
382
  responseMimeType?: GoogleAIResponseMimeType;
383
+ responseLogprobs?: boolean;
384
+ logprobs?: number;
268
385
  }
269
386
  export interface GeminiRequest {
270
387
  contents?: GeminiContent[];
@@ -279,7 +396,7 @@ export interface GeminiRequest {
279
396
  safetySettings?: GeminiSafetySetting[];
280
397
  generationConfig?: GeminiGenerationConfig;
281
398
  }
282
- interface GeminiResponseCandidate {
399
+ export interface GeminiResponseCandidate {
283
400
  content: {
284
401
  parts: GeminiPart[];
285
402
  role: string;
@@ -288,6 +405,10 @@ interface GeminiResponseCandidate {
288
405
  index: number;
289
406
  tokenCount?: number;
290
407
  safetyRatings: GeminiSafetyRating[];
408
+ citationMetadata?: GeminiCitationMetadata;
409
+ groundingMetadata?: GeminiGroundingMetadata;
410
+ avgLogprobs?: number;
411
+ logprobsResult: GeminiLogprobsResult;
291
412
  }
292
413
  interface GeminiResponsePromptFeedback {
293
414
  blockReason?: string;
@@ -339,6 +460,17 @@ export interface GeminiAPIConfig {
339
460
  safetyHandler?: GoogleAISafetyHandler;
340
461
  mediaManager?: MediaManager;
341
462
  useSystemInstruction?: boolean;
463
+ /**
464
+ * How to handle the Google Search tool, since the name (and format)
465
+ * of the tool changes between Gemini 1.5 and Gemini 2.0.
466
+ * true - Change based on the model version. (Default)
467
+ * false - Do not change the tool name provided
468
+ * string value - Use this as the attribute name for the search
469
+ * tool, adapting any tool attributes if possible.
470
+ * When the model is created, a "true" or default setting
471
+ * will be changed to a string based on the model.
472
+ */
473
+ googleSearchToolAdjustment?: GoogleSearchToolSetting;
342
474
  }
343
475
  export type GoogleAIAPIConfig = GeminiAPIConfig | AnthropicAPIConfig;
344
476
  export interface GoogleAIAPIParams {
package/dist/types.js CHANGED
@@ -37,3 +37,12 @@ export const GoogleAISafetyMethod = {
37
37
  Severity: "SEVERITY",
38
38
  Probability: "PROBABILITY",
39
39
  };
40
+ export const GeminiSearchToolAttributes = [
41
+ "googleSearchRetrieval",
42
+ "googleSearch",
43
+ ];
44
+ export const GeminiToolAttributes = [
45
+ "functionDeclaration",
46
+ "retrieval",
47
+ ...GeminiSearchToolAttributes,
48
+ ];
@@ -4,6 +4,7 @@ exports.copyAndValidateModelParamsInto = exports.validateModelParams = exports.m
4
4
  const base_1 = require("@langchain/core/language_models/base");
5
5
  const function_calling_1 = require("@langchain/core/utils/function_calling");
6
6
  const gemini_js_1 = require("./gemini.cjs");
7
+ const types_js_1 = require("../types.cjs");
7
8
  const zod_to_gemini_parameters_js_1 = require("./zod_to_gemini_parameters.cjs");
8
9
  const anthropic_js_1 = require("./anthropic.cjs");
9
10
  function copyAIModelParams(params, options) {
@@ -37,11 +38,22 @@ function processToolChoice(toolChoice, allowedFunctionNames) {
37
38
  }
38
39
  throw new Error("Object inputs for tool_choice not supported.");
39
40
  }
41
+ function isGeminiTool(tool) {
42
+ for (const toolAttribute of types_js_1.GeminiToolAttributes) {
43
+ if (toolAttribute in tool) {
44
+ return true;
45
+ }
46
+ }
47
+ return false;
48
+ }
49
+ function isGeminiNonFunctionTool(tool) {
50
+ return isGeminiTool(tool) && !("functionDeclaration" in tool);
51
+ }
40
52
  function convertToGeminiTools(tools) {
41
53
  const geminiTools = [];
42
54
  let functionDeclarationsIndex = -1;
43
55
  tools.forEach((tool) => {
44
- if ("googleSearchRetrieval" in tool || "retrieval" in tool) {
56
+ if (isGeminiNonFunctionTool(tool)) {
45
57
  geminiTools.push(tool);
46
58
  }
47
59
  else {
@@ -93,10 +105,21 @@ function copyAIModelParamsInto(params, options, target) {
93
105
  target.maxOutputTokens;
94
106
  ret.topP = options?.topP ?? params?.topP ?? target.topP;
95
107
  ret.topK = options?.topK ?? params?.topK ?? target.topK;
108
+ ret.presencePenalty =
109
+ options?.presencePenalty ??
110
+ params?.presencePenalty ??
111
+ target.presencePenalty;
112
+ ret.frequencyPenalty =
113
+ options?.frequencyPenalty ??
114
+ params?.frequencyPenalty ??
115
+ target.frequencyPenalty;
96
116
  ret.stopSequences =
97
117
  options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
98
118
  ret.safetySettings =
99
119
  options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
120
+ ret.logprobs = options?.logprobs ?? params?.logprobs ?? target.logprobs;
121
+ ret.topLogprobs =
122
+ options?.topLogprobs ?? params?.topLogprobs ?? target.topLogprobs;
100
123
  ret.convertSystemMessageToHumanContent =
101
124
  options?.convertSystemMessageToHumanContent ??
102
125
  params?.convertSystemMessageToHumanContent ??
@@ -1,4 +1,4 @@
1
- import type { GeminiTool, GoogleAIBaseLanguageModelCallOptions, GoogleAIModelParams, GoogleAIModelRequestParams, GoogleAIToolType, VertexModelFamily } from "../types.js";
1
+ import { GeminiTool, GoogleAIBaseLanguageModelCallOptions, GoogleAIModelParams, GoogleAIModelRequestParams, GoogleAIToolType, VertexModelFamily } from "../types.js";
2
2
  export declare function copyAIModelParams(params: GoogleAIModelParams | undefined, options: GoogleAIBaseLanguageModelCallOptions | undefined): GoogleAIModelRequestParams;
3
3
  export declare function convertToGeminiTools(tools: GoogleAIToolType[]): GeminiTool[];
4
4
  export declare function copyAIModelParamsInto(params: GoogleAIModelParams | undefined, options: GoogleAIBaseLanguageModelCallOptions | undefined, target: GoogleAIModelParams): GoogleAIModelRequestParams;
@@ -1,6 +1,7 @@
1
1
  import { isOpenAITool } from "@langchain/core/language_models/base";
2
2
  import { isLangChainTool } from "@langchain/core/utils/function_calling";
3
3
  import { isModelGemini, validateGeminiParams } from "./gemini.js";
4
+ import { GeminiToolAttributes, } from "../types.js";
4
5
  import { jsonSchemaToGeminiParameters, zodToGeminiParameters, } from "./zod_to_gemini_parameters.js";
5
6
  import { isModelClaude, validateClaudeParams } from "./anthropic.js";
6
7
  export function copyAIModelParams(params, options) {
@@ -33,11 +34,22 @@ function processToolChoice(toolChoice, allowedFunctionNames) {
33
34
  }
34
35
  throw new Error("Object inputs for tool_choice not supported.");
35
36
  }
37
+ function isGeminiTool(tool) {
38
+ for (const toolAttribute of GeminiToolAttributes) {
39
+ if (toolAttribute in tool) {
40
+ return true;
41
+ }
42
+ }
43
+ return false;
44
+ }
45
+ function isGeminiNonFunctionTool(tool) {
46
+ return isGeminiTool(tool) && !("functionDeclaration" in tool);
47
+ }
36
48
  export function convertToGeminiTools(tools) {
37
49
  const geminiTools = [];
38
50
  let functionDeclarationsIndex = -1;
39
51
  tools.forEach((tool) => {
40
- if ("googleSearchRetrieval" in tool || "retrieval" in tool) {
52
+ if (isGeminiNonFunctionTool(tool)) {
41
53
  geminiTools.push(tool);
42
54
  }
43
55
  else {
@@ -88,10 +100,21 @@ export function copyAIModelParamsInto(params, options, target) {
88
100
  target.maxOutputTokens;
89
101
  ret.topP = options?.topP ?? params?.topP ?? target.topP;
90
102
  ret.topK = options?.topK ?? params?.topK ?? target.topK;
103
+ ret.presencePenalty =
104
+ options?.presencePenalty ??
105
+ params?.presencePenalty ??
106
+ target.presencePenalty;
107
+ ret.frequencyPenalty =
108
+ options?.frequencyPenalty ??
109
+ params?.frequencyPenalty ??
110
+ target.frequencyPenalty;
91
111
  ret.stopSequences =
92
112
  options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
93
113
  ret.safetySettings =
94
114
  options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
115
+ ret.logprobs = options?.logprobs ?? params?.logprobs ?? target.logprobs;
116
+ ret.topLogprobs =
117
+ options?.topLogprobs ?? params?.topLogprobs ?? target.topLogprobs;
95
118
  ret.convertSystemMessageToHumanContent =
96
119
  options?.convertSystemMessageToHumanContent ??
97
120
  params?.convertSystemMessageToHumanContent ??
@@ -6,6 +6,7 @@ const messages_1 = require("@langchain/core/messages");
6
6
  const outputs_1 = require("@langchain/core/outputs");
7
7
  const function_calling_1 = require("@langchain/core/utils/function_calling");
8
8
  const safety_js_1 = require("./safety.cjs");
9
+ const types_js_1 = require("../types.cjs");
9
10
  const zod_to_gemini_parameters_js_1 = require("./zod_to_gemini_parameters.cjs");
10
11
  class DefaultGeminiSafetyHandler {
11
12
  constructor(settings) {
@@ -505,6 +506,33 @@ function getGeminiAPI(config) {
505
506
  function safeResponseToString(response) {
506
507
  return safeResponseTo(response, responseToString);
507
508
  }
509
+ function logprobResultToLogprob(result) {
510
+ const token = result?.token;
511
+ const logprob = result?.logProbability;
512
+ const encoder = new TextEncoder();
513
+ const bytes = Array.from(encoder.encode(token));
514
+ return {
515
+ token,
516
+ logprob,
517
+ bytes,
518
+ };
519
+ }
520
+ function candidateToLogprobs(candidate) {
521
+ const logprobs = candidate?.logprobsResult;
522
+ const chosenTokens = logprobs?.chosenCandidates ?? [];
523
+ const topTokens = logprobs?.topCandidates ?? [];
524
+ const content = [];
525
+ for (let co = 0; co < chosenTokens.length; co += 1) {
526
+ const chosen = chosenTokens[co];
527
+ const top = topTokens[co]?.candidates ?? [];
528
+ const logprob = logprobResultToLogprob(chosen);
529
+ logprob.top_logprobs = top.map((l) => logprobResultToLogprob(l));
530
+ content.push(logprob);
531
+ }
532
+ return {
533
+ content,
534
+ };
535
+ }
508
536
  function responseToGenerationInfo(response) {
509
537
  if (!Array.isArray(response.data)) {
510
538
  return {};
@@ -523,7 +551,11 @@ function getGeminiAPI(config) {
523
551
  severity: rating.severity,
524
552
  severity_score: rating.severityScore,
525
553
  })),
554
+ citation_metadata: data.candidates[0]?.citationMetadata,
555
+ grounding_metadata: data.candidates[0]?.groundingMetadata,
526
556
  finish_reason: data.candidates[0]?.finishReason,
557
+ avgLogprobs: data.candidates[0]?.avgLogprobs,
558
+ logprobs: candidateToLogprobs(data.candidates[0]),
527
559
  };
528
560
  }
529
561
  function responseToChatGeneration(response) {
@@ -577,12 +609,59 @@ function getGeminiAPI(config) {
577
609
  message,
578
610
  });
579
611
  }
580
- function responseToChatGenerations(response) {
612
+ function groundingSupportByPart(groundingSupports) {
613
+ const ret = [];
614
+ if (!groundingSupports || groundingSupports.length === 0) {
615
+ return [];
616
+ }
617
+ groundingSupports?.forEach((groundingSupport) => {
618
+ const segment = groundingSupport?.segment;
619
+ const partIndex = segment?.partIndex ?? 0;
620
+ if (ret[partIndex]) {
621
+ ret[partIndex].push(groundingSupport);
622
+ }
623
+ else {
624
+ ret[partIndex] = [groundingSupport];
625
+ }
626
+ });
627
+ return ret;
628
+ }
629
+ function responseToGroundedChatGenerations(response) {
581
630
  const parts = responseToParts(response);
582
631
  if (parts.length === 0) {
583
632
  return [];
584
633
  }
585
- let ret = parts.map((part) => partToChatGeneration(part));
634
+ // Citation and grounding information connected to each part / ChatGeneration
635
+ // to make sure they are available in downstream filters.
636
+ const candidate = response?.data
637
+ ?.candidates?.[0];
638
+ const groundingMetadata = candidate?.groundingMetadata;
639
+ const citationMetadata = candidate?.citationMetadata;
640
+ const groundingParts = groundingSupportByPart(groundingMetadata?.groundingSupports);
641
+ const ret = parts.map((part, index) => {
642
+ const gen = partToChatGeneration(part);
643
+ if (!gen.generationInfo) {
644
+ gen.generationInfo = {};
645
+ }
646
+ if (groundingMetadata) {
647
+ gen.generationInfo.groundingMetadata = groundingMetadata;
648
+ const groundingPart = groundingParts[index];
649
+ if (groundingPart) {
650
+ gen.generationInfo.groundingSupport = groundingPart;
651
+ }
652
+ }
653
+ if (citationMetadata) {
654
+ gen.generationInfo.citationMetadata = citationMetadata;
655
+ }
656
+ return gen;
657
+ });
658
+ return ret;
659
+ }
660
+ function responseToChatGenerations(response) {
661
+ let ret = responseToGroundedChatGenerations(response);
662
+ if (ret.length === 0) {
663
+ return [];
664
+ }
586
665
  if (ret.every((item) => typeof item.message.content === "string")) {
587
666
  const combinedContent = ret.map((item) => item.message.content).join("");
588
667
  const combinedText = ret.map((item) => item.text).join("");
@@ -615,6 +694,18 @@ function getGeminiAPI(config) {
615
694
  }),
616
695
  ];
617
696
  }
697
+ // Add logprobs information to the message
698
+ const candidate = response?.data
699
+ ?.candidates?.[0];
700
+ const avgLogprobs = candidate?.avgLogprobs;
701
+ const logprobs = candidateToLogprobs(candidate);
702
+ if (logprobs) {
703
+ ret[0].message.response_metadata = {
704
+ ...ret[0].message.response_metadata,
705
+ logprobs,
706
+ avgLogprobs,
707
+ };
708
+ }
618
709
  return ret;
619
710
  }
620
711
  function responseToBaseMessageFields(response) {
@@ -742,9 +833,13 @@ function getGeminiAPI(config) {
742
833
  temperature: parameters.temperature,
743
834
  topK: parameters.topK,
744
835
  topP: parameters.topP,
836
+ presencePenalty: parameters.presencePenalty,
837
+ frequencyPenalty: parameters.frequencyPenalty,
745
838
  maxOutputTokens: parameters.maxOutputTokens,
746
839
  stopSequences: parameters.stopSequences,
747
840
  responseMimeType: parameters.responseMimeType,
841
+ responseLogprobs: parameters.logprobs,
842
+ logprobs: parameters.topLogprobs,
748
843
  };
749
844
  }
750
845
  function formatSafetySettings(parameters) {
@@ -788,14 +883,43 @@ function getGeminiAPI(config) {
788
883
  parameters: jsonSchema,
789
884
  };
790
885
  }
886
+ function searchToolName(tool) {
887
+ for (const name of types_js_1.GeminiSearchToolAttributes) {
888
+ if (name in tool) {
889
+ return name;
890
+ }
891
+ }
892
+ return undefined;
893
+ }
894
+ function cleanGeminiTool(tool) {
895
+ const orig = searchToolName(tool);
896
+ const adj = config?.googleSearchToolAdjustment;
897
+ if (orig && adj && adj !== orig) {
898
+ return {
899
+ [adj]: {},
900
+ };
901
+ }
902
+ else {
903
+ return tool;
904
+ }
905
+ }
791
906
  function formatTools(parameters) {
792
907
  const tools = parameters?.tools;
793
908
  if (!tools || tools.length === 0) {
794
909
  return [];
795
910
  }
796
- // Group all LangChain tools into a single functionDeclarations array
797
- const langChainTools = tools.filter(function_calling_1.isLangChainTool);
798
- const otherTools = tools.filter((tool) => !(0, function_calling_1.isLangChainTool)(tool));
911
+ // Group all LangChain tools into a single functionDeclarations array.
912
+ // Gemini Tools may be normalized to different tool names
913
+ const langChainTools = [];
914
+ const otherTools = [];
915
+ tools.forEach((tool) => {
916
+ if ((0, function_calling_1.isLangChainTool)(tool)) {
917
+ langChainTools.push(tool);
918
+ }
919
+ else {
920
+ otherTools.push(cleanGeminiTool(tool));
921
+ }
922
+ });
799
923
  const result = [...otherTools];
800
924
  if (langChainTools.length > 0) {
801
925
  result.push({