@langchain/google-common 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -151,12 +151,36 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
             writable: true,
             value: 40
         });
+        Object.defineProperty(this, "presencePenalty", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "frequencyPenalty", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "stopSequences", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: []
         });
+        Object.defineProperty(this, "logprobs", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "topLogprobs", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0
+        });
         Object.defineProperty(this, "safetySettings", {
             enumerable: true,
             configurable: true,
@@ -41,7 +41,11 @@ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<
     maxOutputTokens: number;
     topP: number;
     topK: number;
+    presencePenalty: number;
+    frequencyPenalty: number;
     stopSequences: string[];
+    logprobs: boolean;
+    topLogprobs: number;
     safetySettings: GoogleAISafetySetting[];
     convertSystemMessageToHumanContent: boolean | undefined;
     safetyHandler: GoogleAISafetyHandler;
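
The declaration hunk above (together with the mirrored compiled-constructor hunks) surfaces four new generation parameters: presencePenalty, frequencyPenalty, logprobs, and topLogprobs. A minimal usage sketch, assuming a concrete subclass such as ChatVertexAI from @langchain/google-vertexai (this package only ships the abstract ChatGoogleBase):

import { ChatVertexAI } from "@langchain/google-vertexai";

const model = new ChatVertexAI({
  model: "gemini-1.5-pro",
  presencePenalty: 0.5,  // one-time penalty for any token already in the response
  frequencyPenalty: 0.3, // penalty that grows with each repeated use of a token
  logprobs: true,        // ask for per-token log probabilities
  topLogprobs: 3,        // and the 3 most likely alternatives at each position
});

const res = await model.invoke("Name three uncommon synonyms for 'happy'.");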
@@ -147,12 +147,36 @@ export class ChatGoogleBase extends BaseChatModel {
             writable: true,
             value: 40
         });
+        Object.defineProperty(this, "presencePenalty", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "frequencyPenalty", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "stopSequences", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: []
         });
+        Object.defineProperty(this, "logprobs", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "topLogprobs", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0
+        });
         Object.defineProperty(this, "safetySettings", {
             enumerable: true,
             configurable: true,
@@ -15,8 +15,8 @@ class BaseGoogleSearchOutputParser extends output_parsers_1.BaseLLMOutputParser
     generationToGroundingInfo(generation) {
         if ("message" in generation) {
            const responseMetadata = generation?.message?.response_metadata;
-            const metadata = responseMetadata.groundingMetadata;
-            const supports = responseMetadata.groundingSupport ?? metadata.groundingSupports ?? [];
+            const metadata = responseMetadata?.groundingMetadata;
+            const supports = responseMetadata?.groundingSupport ?? metadata?.groundingSupports ?? [];
             if (metadata) {
                 return {
                     metadata,
@@ -90,7 +90,7 @@ class BaseGoogleSearchOutputParser extends output_parsers_1.BaseLLMOutputParser
      * @param grounding
      */
     searchSuggestion(grounding) {
-        return grounding.metadata.searchEntryPoint?.renderedContent ?? "";
+        return grounding?.metadata?.searchEntryPoint?.renderedContent ?? "";
     }
     annotateText(text, grounding) {
         const prefix = this.textPrefix(text, grounding) ?? "";
@@ -125,7 +125,7 @@ class SimpleGoogleSearchOutputParser extends BaseGoogleSearchOutputParser {
     }
     textSuffix(_text, grounding) {
         let ret = "\n";
-        const chunks = grounding.metadata.groundingChunks;
+        const chunks = grounding?.metadata?.groundingChunks ?? [];
         chunks.forEach((chunk, index) => {
             ret = `${ret}${this.chunkToString(chunk, index)}\n`;
         });
@@ -12,8 +12,8 @@ export class BaseGoogleSearchOutputParser extends BaseLLMOutputParser {
     generationToGroundingInfo(generation) {
         if ("message" in generation) {
            const responseMetadata = generation?.message?.response_metadata;
-            const metadata = responseMetadata.groundingMetadata;
-            const supports = responseMetadata.groundingSupport ?? metadata.groundingSupports ?? [];
+            const metadata = responseMetadata?.groundingMetadata;
+            const supports = responseMetadata?.groundingSupport ?? metadata?.groundingSupports ?? [];
             if (metadata) {
                 return {
                     metadata,
@@ -87,7 +87,7 @@ export class BaseGoogleSearchOutputParser extends BaseLLMOutputParser {
      * @param grounding
      */
     searchSuggestion(grounding) {
-        return grounding.metadata.searchEntryPoint?.renderedContent ?? "";
+        return grounding?.metadata?.searchEntryPoint?.renderedContent ?? "";
     }
     annotateText(text, grounding) {
         const prefix = this.textPrefix(text, grounding) ?? "";
@@ -121,7 +121,7 @@ export class SimpleGoogleSearchOutputParser extends BaseGoogleSearchOutputParser
     }
     textSuffix(_text, grounding) {
         let ret = "\n";
-        const chunks = grounding.metadata.groundingChunks;
+        const chunks = grounding?.metadata?.groundingChunks ?? [];
         chunks.forEach((chunk, index) => {
             ret = `${ret}${this.chunkToString(chunk, index)}\n`;
         });
package/dist/types.d.ts CHANGED
@@ -118,6 +118,34 @@ export interface GoogleAIModelParams {
      * among the 3 most probable tokens (using temperature).
      */
     topK?: number;
+    /**
+     * Presence penalty applied to the next token's logprobs
+     * if the token has already been seen in the response.
+     * This penalty is binary on/off and not dependent on the
+     * number of times the token is used (after the first).
+     * Use frequencyPenalty for a penalty that increases with each use.
+     * A positive penalty will discourage the use of tokens that have
+     * already been used in the response, increasing the vocabulary.
+     * A negative penalty will encourage the use of tokens that have
+     * already been used in the response, decreasing the vocabulary.
+     */
+    presencePenalty?: number;
+    /**
+     * Frequency penalty applied to the next token's logprobs,
+     * multiplied by the number of times each token has been seen
+     * in the response so far.
+     * A positive penalty will discourage the use of tokens that
+     * have already been used, proportional to the number of times
+     * the token has been used:
+     * the more a token is used, the more difficult it is for the model
+     * to use that token again, increasing the vocabulary of responses.
+     * Caution: A _negative_ penalty will encourage the model to reuse
+     * tokens proportional to the number of times the token has been used.
+     * Small negative values will reduce the vocabulary of a response.
+     * Larger negative values will cause the model to start repeating
+     * a common token until it hits the maxOutputTokens limit.
+     */
+    frequencyPenalty?: number;
     stopSequences?: string[];
     safetySettings?: GoogleAISafetySetting[];
     convertSystemMessageToHumanContent?: boolean;
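
Conceptually, both penalties subtract from a candidate token's log-probability before sampling. The sketch below is the usual textbook formulation and an assumption on my part, not code from this package or a statement about Google's server-side implementation:

// Illustrative only: how presence/frequency penalties are typically applied.
function penalizedLogprob(
  logprob: number,
  timesSeen: number, // occurrences of this token in the response so far
  presencePenalty: number,
  frequencyPenalty: number
): number {
  const presence = timesSeen > 0 ? presencePenalty : 0; // binary: on once seen
  const frequency = frequencyPenalty * timesSeen; // scales with repetition
  return logprob - presence - frequency;
}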
@@ -136,6 +164,19 @@ export interface GoogleAIModelParams {
      * @default false
      */
     streaming?: boolean;
+    /**
+     * Whether to return log probabilities of the output tokens or not.
+     * If true, returns the log probabilities of each output token
+     * in the content of the message.
+     */
+    logprobs?: boolean;
+    /**
+     * An integer between 0 and 5 specifying the number of
+     * most likely tokens to return at each token position,
+     * each with an associated log probability.
+     * logprobs must be set to true if this parameter is used.
+     */
+    topLogprobs?: number;
 }
 export type GoogleAIToolType = BindToolsInput | GeminiTool;
 /**
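
These two options mirror OpenAI's logprobs/top_logprobs knobs. Because copyAIModelParamsInto (further down) also reads them from call options, they can plausibly be toggled per request rather than per model instance; a hedged sketch, with the response_metadata field layout taken from the responseToGeneration hunk below:

// Assumes the call-options path accepts the same params as the constructor.
const hiRes = await model.invoke("Hi", { logprobs: true, topLogprobs: 2 });
for (const tok of hiRes.response_metadata.logprobs?.content ?? []) {
  console.log(tok.token, tok.logprob, tok.top_logprobs);
}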
@@ -273,6 +314,18 @@ export interface GeminiSegment {
 export interface GeminiRetrievalMetadata {
     googleSearchDynamicRetrievalScore: number;
 }
+export interface GeminiLogprobsResult {
+    topCandidates: GeminiLogprobsTopCandidate[];
+    chosenCandidates: GeminiLogprobsResultCandidate[];
+}
+export interface GeminiLogprobsTopCandidate {
+    candidates: GeminiLogprobsResultCandidate[];
+}
+export interface GeminiLogprobsResultCandidate {
+    token: string;
+    tokenId: number;
+    logProbability: number;
+}
 export type GeminiRole = "system" | "user" | "model" | "function";
 export interface GeminiContent {
     parts: GeminiPart[];
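
For orientation, an invented payload shaped like these interfaces. Note that topCandidates[i] lists the alternatives considered at the same position as chosenCandidates[i], which is how candidateToLogprobs (below) zips them together:

const example: GeminiLogprobsResult = {
  chosenCandidates: [{ token: "Hello", tokenId: 9259, logProbability: -0.12 }],
  topCandidates: [
    {
      candidates: [
        { token: "Hello", tokenId: 9259, logProbability: -0.12 },
        { token: "Hi", tokenId: 2134, logProbability: -2.31 },
      ],
    },
  ],
};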
@@ -324,7 +377,11 @@ export interface GeminiGenerationConfig {
     temperature?: number;
     topP?: number;
     topK?: number;
+    presencePenalty?: number;
+    frequencyPenalty?: number;
     responseMimeType?: GoogleAIResponseMimeType;
+    responseLogprobs?: boolean;
+    logprobs?: number;
 }
 export interface GeminiRequest {
     contents?: GeminiContent[];
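
Note the naming flip at the wire level: the user-facing boolean logprobs becomes responseLogprobs in the request's generationConfig, while the wire field literally named logprobs carries the user's topLogprobs count. The formatGenerationConfig hunks below implement exactly this mapping:

// User-facing params                 →  Gemini generationConfig
// { logprobs: true, topLogprobs: 3 }  →  { responseLogprobs: true, logprobs: 3 }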
@@ -339,7 +396,7 @@ export interface GeminiRequest {
     safetySettings?: GeminiSafetySetting[];
     generationConfig?: GeminiGenerationConfig;
 }
-interface GeminiResponseCandidate {
+export interface GeminiResponseCandidate {
     content: {
         parts: GeminiPart[];
         role: string;
@@ -350,6 +407,8 @@ interface GeminiResponseCandidate {
     safetyRatings: GeminiSafetyRating[];
     citationMetadata?: GeminiCitationMetadata;
     groundingMetadata?: GeminiGroundingMetadata;
+    avgLogprobs?: number;
+    logprobsResult: GeminiLogprobsResult;
 }
 interface GeminiResponsePromptFeedback {
     blockReason?: string;
@@ -105,10 +105,21 @@ function copyAIModelParamsInto(params, options, target) {
         target.maxOutputTokens;
     ret.topP = options?.topP ?? params?.topP ?? target.topP;
     ret.topK = options?.topK ?? params?.topK ?? target.topK;
+    ret.presencePenalty =
+        options?.presencePenalty ??
+            params?.presencePenalty ??
+            target.presencePenalty;
+    ret.frequencyPenalty =
+        options?.frequencyPenalty ??
+            params?.frequencyPenalty ??
+            target.frequencyPenalty;
     ret.stopSequences =
         options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
     ret.safetySettings =
         options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
+    ret.logprobs = options?.logprobs ?? params?.logprobs ?? target.logprobs;
+    ret.topLogprobs =
+        options?.topLogprobs ?? params?.topLogprobs ?? target.topLogprobs;
     ret.convertSystemMessageToHumanContent =
         options?.convertSystemMessageToHumanContent ??
             params?.convertSystemMessageToHumanContent ??
@@ -100,10 +100,21 @@ export function copyAIModelParamsInto(params, options, target) {
         target.maxOutputTokens;
     ret.topP = options?.topP ?? params?.topP ?? target.topP;
     ret.topK = options?.topK ?? params?.topK ?? target.topK;
+    ret.presencePenalty =
+        options?.presencePenalty ??
+            params?.presencePenalty ??
+            target.presencePenalty;
+    ret.frequencyPenalty =
+        options?.frequencyPenalty ??
+            params?.frequencyPenalty ??
+            target.frequencyPenalty;
     ret.stopSequences =
         options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
     ret.safetySettings =
         options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
+    ret.logprobs = options?.logprobs ?? params?.logprobs ?? target.logprobs;
+    ret.topLogprobs =
+        options?.topLogprobs ?? params?.topLogprobs ?? target.topLogprobs;
     ret.convertSystemMessageToHumanContent =
         options?.convertSystemMessageToHumanContent ??
             params?.convertSystemMessageToHumanContent ??
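
Each new parameter joins the same precedence chain used by the existing ones: a value in the per-call options wins, then the invocation params, then the instance default set in the constructor. The pattern, extracted as a sketch (resolve is a hypothetical helper, not part of the package):

function resolve<T>(option: T | undefined, param: T | undefined, fallback: T): T {
  // ?? takes the first non-nullish value, so an undefined option
  // falls through to the param, and an undefined param to the default.
  return option ?? param ?? fallback;
}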
@@ -19,12 +19,12 @@ function failedAttemptHandler(error) {
     if (status === 0) {
         // What is this?
         console.error("failedAttemptHandler", error);
+        throw error;
     }
     // What errors shouldn't be retried?
     if (STATUS_NO_RETRY.includes(+status)) {
         throw error;
     }
-    throw error;
 }
 exports.failedAttemptHandler = failedAttemptHandler;
 function ensureParams(params) {
@@ -16,12 +16,12 @@ export function failedAttemptHandler(error) {
     if (status === 0) {
         // What is this?
         console.error("failedAttemptHandler", error);
+        throw error;
     }
     // What errors shouldn't be retried?
     if (STATUS_NO_RETRY.includes(+status)) {
         throw error;
     }
-    throw error;
 }
 export function ensureParams(params) {
     const base = params ?? {};
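
This hunk changes retry semantics, not just layout. Previously the trailing throw meant every failed attempt re-threw, so nothing was ever retried through this handler; now only the unknown status 0 and the statuses in STATUS_NO_RETRY throw, and other failures (e.g. transient server errors) fall through, which in a p-retry style failed-attempt handler lets the caller retry. An outline of the new control flow (the status extraction is not shown in the diff, so getStatus below is a hypothetical stand-in):

function failedAttemptHandler(error) {
    const status = getStatus(error); // hypothetical: the real extraction is elided
    if (status === 0) {
        console.error("failedAttemptHandler", error);
        throw error; // unknown failure: abort retries
    }
    if (STATUS_NO_RETRY.includes(+status)) {
        throw error; // known non-retryable status: abort retries
    }
    // Falling through without throwing lets the retry wrapper try again.
}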
@@ -506,6 +506,33 @@ function getGeminiAPI(config) {
     function safeResponseToString(response) {
         return safeResponseTo(response, responseToString);
     }
+    function logprobResultToLogprob(result) {
+        const token = result?.token;
+        const logprob = result?.logProbability;
+        const encoder = new TextEncoder();
+        const bytes = Array.from(encoder.encode(token));
+        return {
+            token,
+            logprob,
+            bytes,
+        };
+    }
+    function candidateToLogprobs(candidate) {
+        const logprobs = candidate?.logprobsResult;
+        const chosenTokens = logprobs?.chosenCandidates ?? [];
+        const topTokens = logprobs?.topCandidates ?? [];
+        const content = [];
+        for (let co = 0; co < chosenTokens.length; co += 1) {
+            const chosen = chosenTokens[co];
+            const top = topTokens[co]?.candidates ?? [];
+            const logprob = logprobResultToLogprob(chosen);
+            logprob.top_logprobs = top.map((l) => logprobResultToLogprob(l));
+            content.push(logprob);
+        }
+        return {
+            content,
+        };
+    }
     function responseToGenerationInfo(response) {
         if (!Array.isArray(response.data)) {
             return {};
@@ -527,6 +554,8 @@ function getGeminiAPI(config) {
             citation_metadata: data.candidates[0]?.citationMetadata,
             grounding_metadata: data.candidates[0]?.groundingMetadata,
             finish_reason: data.candidates[0]?.finishReason,
+            avgLogprobs: data.candidates[0]?.avgLogprobs,
+            logprobs: candidateToLogprobs(data.candidates[0]),
         };
     }
     function responseToChatGeneration(response) {
@@ -665,6 +694,18 @@ function getGeminiAPI(config) {
                }),
            ];
        }
+        // Add logprobs information to the message
+        const candidate = response?.data
+            ?.candidates?.[0];
+        const avgLogprobs = candidate?.avgLogprobs;
+        const logprobs = candidateToLogprobs(candidate);
+        if (logprobs) {
+            ret[0].message.response_metadata = {
+                ...ret[0].message.response_metadata,
+                logprobs,
+                avgLogprobs,
+            };
+        }
        return ret;
    }
    function responseToBaseMessageFields(response) {
@@ -788,14 +829,25 @@ function getGeminiAPI(config) {
        }
    }
    function formatGenerationConfig(parameters) {
-        return {
+        const ret = {
            temperature: parameters.temperature,
            topK: parameters.topK,
            topP: parameters.topP,
+            presencePenalty: parameters.presencePenalty,
+            frequencyPenalty: parameters.frequencyPenalty,
            maxOutputTokens: parameters.maxOutputTokens,
            stopSequences: parameters.stopSequences,
            responseMimeType: parameters.responseMimeType,
        };
+        // Add the logprobs if explicitly set
+        if (typeof parameters.logprobs !== "undefined") {
+            ret.responseLogprobs = parameters.logprobs;
+            if (parameters.logprobs &&
+                typeof parameters.topLogprobs !== "undefined") {
+                ret.logprobs = parameters.topLogprobs;
+            }
+        }
+        return ret;
    }
    function formatSafetySettings(parameters) {
        return parameters.safetySettings ?? [];
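
The shape produced by candidateToLogprobs deliberately resembles OpenAI's chat-completions logprobs layout (a content array of { token, logprob, bytes, top_logprobs } entries), which keeps downstream consumers portable across providers. An invented example of what ends up in response_metadata:

// Values invented; shape follows logprobResultToLogprob/candidateToLogprobs above.
const responseMetadata = {
  avgLogprobs: -0.12,
  logprobs: {
    content: [
      {
        token: "Hello",
        logprob: -0.12,
        bytes: [72, 101, 108, 108, 111], // UTF-8 bytes of the token text
        top_logprobs: [
          { token: "Hello", logprob: -0.12, bytes: [72, 101, 108, 108, 111] },
          { token: "Hi", logprob: -2.31, bytes: [72, 105] },
        ],
      },
    ],
  },
};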
@@ -501,6 +501,33 @@ export function getGeminiAPI(config) {
     function safeResponseToString(response) {
         return safeResponseTo(response, responseToString);
     }
+    function logprobResultToLogprob(result) {
+        const token = result?.token;
+        const logprob = result?.logProbability;
+        const encoder = new TextEncoder();
+        const bytes = Array.from(encoder.encode(token));
+        return {
+            token,
+            logprob,
+            bytes,
+        };
+    }
+    function candidateToLogprobs(candidate) {
+        const logprobs = candidate?.logprobsResult;
+        const chosenTokens = logprobs?.chosenCandidates ?? [];
+        const topTokens = logprobs?.topCandidates ?? [];
+        const content = [];
+        for (let co = 0; co < chosenTokens.length; co += 1) {
+            const chosen = chosenTokens[co];
+            const top = topTokens[co]?.candidates ?? [];
+            const logprob = logprobResultToLogprob(chosen);
+            logprob.top_logprobs = top.map((l) => logprobResultToLogprob(l));
+            content.push(logprob);
+        }
+        return {
+            content,
+        };
+    }
     function responseToGenerationInfo(response) {
         if (!Array.isArray(response.data)) {
             return {};
@@ -522,6 +549,8 @@ export function getGeminiAPI(config) {
             citation_metadata: data.candidates[0]?.citationMetadata,
             grounding_metadata: data.candidates[0]?.groundingMetadata,
             finish_reason: data.candidates[0]?.finishReason,
+            avgLogprobs: data.candidates[0]?.avgLogprobs,
+            logprobs: candidateToLogprobs(data.candidates[0]),
         };
     }
     function responseToChatGeneration(response) {
@@ -660,6 +689,18 @@ export function getGeminiAPI(config) {
                }),
            ];
        }
+        // Add logprobs information to the message
+        const candidate = response?.data
+            ?.candidates?.[0];
+        const avgLogprobs = candidate?.avgLogprobs;
+        const logprobs = candidateToLogprobs(candidate);
+        if (logprobs) {
+            ret[0].message.response_metadata = {
+                ...ret[0].message.response_metadata,
+                logprobs,
+                avgLogprobs,
+            };
+        }
        return ret;
    }
    function responseToBaseMessageFields(response) {
@@ -783,14 +824,25 @@ export function getGeminiAPI(config) {
        }
    }
    function formatGenerationConfig(parameters) {
-        return {
+        const ret = {
            temperature: parameters.temperature,
            topK: parameters.topK,
            topP: parameters.topP,
+            presencePenalty: parameters.presencePenalty,
+            frequencyPenalty: parameters.frequencyPenalty,
            maxOutputTokens: parameters.maxOutputTokens,
            stopSequences: parameters.stopSequences,
            responseMimeType: parameters.responseMimeType,
        };
+        // Add the logprobs if explicitly set
+        if (typeof parameters.logprobs !== "undefined") {
+            ret.responseLogprobs = parameters.logprobs;
+            if (parameters.logprobs &&
+                typeof parameters.topLogprobs !== "undefined") {
+                ret.logprobs = parameters.topLogprobs;
+            }
+        }
+        return ret;
    }
    function formatSafetySettings(parameters) {
        return parameters.safetySettings ?? [];
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/google-common",
-  "version": "0.1.6",
+  "version": "0.1.8",
   "description": "Core types and classes for Google services.",
   "type": "module",
   "engines": {