@langchain/google-common 0.2.18 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +23 -0
- package/LICENSE +6 -6
- package/dist/_virtual/rolldown_runtime.cjs +25 -0
- package/dist/auth.cjs +82 -116
- package/dist/auth.cjs.map +1 -0
- package/dist/auth.d.cts +46 -0
- package/dist/auth.d.cts.map +1 -0
- package/dist/auth.d.ts +41 -36
- package/dist/auth.d.ts.map +1 -0
- package/dist/auth.js +80 -110
- package/dist/auth.js.map +1 -0
- package/dist/chat_models.cjs +264 -466
- package/dist/chat_models.cjs.map +1 -0
- package/dist/chat_models.d.cts +109 -0
- package/dist/chat_models.d.cts.map +1 -0
- package/dist/chat_models.d.ts +98 -73
- package/dist/chat_models.d.ts.map +1 -0
- package/dist/chat_models.js +258 -457
- package/dist/chat_models.js.map +1 -0
- package/dist/connection.cjs +321 -466
- package/dist/connection.cjs.map +1 -0
- package/dist/connection.d.cts +109 -0
- package/dist/connection.d.cts.map +1 -0
- package/dist/connection.d.ts +98 -91
- package/dist/connection.d.ts.map +1 -0
- package/dist/connection.js +317 -459
- package/dist/connection.js.map +1 -0
- package/dist/embeddings.cjs +135 -186
- package/dist/embeddings.cjs.map +1 -0
- package/dist/embeddings.d.cts +44 -0
- package/dist/embeddings.d.cts.map +1 -0
- package/dist/embeddings.d.ts +38 -32
- package/dist/embeddings.d.ts.map +1 -0
- package/dist/embeddings.js +133 -181
- package/dist/embeddings.js.map +1 -0
- package/dist/experimental/media.cjs +380 -482
- package/dist/experimental/media.cjs.map +1 -0
- package/dist/experimental/media.d.cts +198 -0
- package/dist/experimental/media.d.cts.map +1 -0
- package/dist/experimental/media.d.ts +190 -202
- package/dist/experimental/media.d.ts.map +1 -0
- package/dist/experimental/media.js +369 -468
- package/dist/experimental/media.js.map +1 -0
- package/dist/experimental/utils/media_core.cjs +403 -517
- package/dist/experimental/utils/media_core.cjs.map +1 -0
- package/dist/experimental/utils/media_core.d.cts +215 -0
- package/dist/experimental/utils/media_core.d.cts.map +1 -0
- package/dist/experimental/utils/media_core.d.ts +171 -165
- package/dist/experimental/utils/media_core.d.ts.map +1 -0
- package/dist/experimental/utils/media_core.js +395 -506
- package/dist/experimental/utils/media_core.js.map +1 -0
- package/dist/index.cjs +58 -27
- package/dist/index.d.cts +13 -0
- package/dist/index.d.ts +13 -11
- package/dist/index.js +13 -11
- package/dist/llms.cjs +157 -244
- package/dist/llms.cjs.map +1 -0
- package/dist/llms.d.cts +72 -0
- package/dist/llms.d.cts.map +1 -0
- package/dist/llms.d.ts +64 -54
- package/dist/llms.d.ts.map +1 -0
- package/dist/llms.js +154 -238
- package/dist/llms.js.map +1 -0
- package/dist/output_parsers.cjs +148 -173
- package/dist/output_parsers.cjs.map +1 -0
- package/dist/output_parsers.d.cts +53 -0
- package/dist/output_parsers.d.cts.map +1 -0
- package/dist/output_parsers.d.ts +46 -42
- package/dist/output_parsers.d.ts.map +1 -0
- package/dist/output_parsers.js +146 -168
- package/dist/output_parsers.js.map +1 -0
- package/dist/profiles.cjs +219 -0
- package/dist/profiles.cjs.map +1 -0
- package/dist/profiles.js +218 -0
- package/dist/profiles.js.map +1 -0
- package/dist/types-anthropic.d.cts +229 -0
- package/dist/types-anthropic.d.cts.map +1 -0
- package/dist/types-anthropic.d.ts +221 -215
- package/dist/types-anthropic.d.ts.map +1 -0
- package/dist/types.cjs +51 -62
- package/dist/types.cjs.map +1 -0
- package/dist/types.d.cts +748 -0
- package/dist/types.d.cts.map +1 -0
- package/dist/types.d.ts +669 -656
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +46 -45
- package/dist/types.js.map +1 -0
- package/dist/utils/anthropic.cjs +598 -821
- package/dist/utils/anthropic.cjs.map +1 -0
- package/dist/utils/anthropic.js +597 -818
- package/dist/utils/anthropic.js.map +1 -0
- package/dist/utils/common.cjs +130 -211
- package/dist/utils/common.cjs.map +1 -0
- package/dist/utils/common.d.cts +13 -0
- package/dist/utils/common.d.cts.map +1 -0
- package/dist/utils/common.d.ts +12 -7
- package/dist/utils/common.d.ts.map +1 -0
- package/dist/utils/common.js +128 -207
- package/dist/utils/common.js.map +1 -0
- package/dist/utils/failed_handler.cjs +28 -30
- package/dist/utils/failed_handler.cjs.map +1 -0
- package/dist/utils/failed_handler.d.cts +9 -0
- package/dist/utils/failed_handler.d.cts.map +1 -0
- package/dist/utils/failed_handler.d.ts +8 -2
- package/dist/utils/failed_handler.d.ts.map +1 -0
- package/dist/utils/failed_handler.js +28 -28
- package/dist/utils/failed_handler.js.map +1 -0
- package/dist/utils/gemini.cjs +1020 -1488
- package/dist/utils/gemini.cjs.map +1 -0
- package/dist/utils/gemini.d.cts +51 -0
- package/dist/utils/gemini.d.cts.map +1 -0
- package/dist/utils/gemini.d.ts +51 -48
- package/dist/utils/gemini.d.ts.map +1 -0
- package/dist/utils/gemini.js +1015 -1479
- package/dist/utils/gemini.js.map +1 -0
- package/dist/utils/index.cjs +38 -23
- package/dist/utils/index.d.cts +8 -0
- package/dist/utils/index.d.ts +8 -7
- package/dist/utils/index.js +8 -7
- package/dist/utils/palm.d.cts +11 -0
- package/dist/utils/palm.d.cts.map +1 -0
- package/dist/utils/palm.d.ts +9 -4
- package/dist/utils/palm.d.ts.map +1 -0
- package/dist/utils/safety.cjs +13 -22
- package/dist/utils/safety.cjs.map +1 -0
- package/dist/utils/safety.d.cts +12 -0
- package/dist/utils/safety.d.cts.map +1 -0
- package/dist/utils/safety.d.ts +10 -4
- package/dist/utils/safety.d.ts.map +1 -0
- package/dist/utils/safety.js +13 -19
- package/dist/utils/safety.js.map +1 -0
- package/dist/utils/stream.cjs +296 -475
- package/dist/utils/stream.cjs.map +1 -0
- package/dist/utils/stream.d.cts +165 -0
- package/dist/utils/stream.d.cts.map +1 -0
- package/dist/utils/stream.d.ts +156 -131
- package/dist/utils/stream.d.ts.map +1 -0
- package/dist/utils/stream.js +293 -469
- package/dist/utils/stream.js.map +1 -0
- package/dist/utils/zod_to_gemini_parameters.cjs +43 -81
- package/dist/utils/zod_to_gemini_parameters.cjs.map +1 -0
- package/dist/utils/zod_to_gemini_parameters.d.cts +22 -0
- package/dist/utils/zod_to_gemini_parameters.d.cts.map +1 -0
- package/dist/utils/zod_to_gemini_parameters.d.ts +21 -6
- package/dist/utils/zod_to_gemini_parameters.d.ts.map +1 -0
- package/dist/utils/zod_to_gemini_parameters.js +40 -76
- package/dist/utils/zod_to_gemini_parameters.js.map +1 -0
- package/package.json +72 -85
- package/dist/types-anthropic.cjs +0 -2
- package/dist/types-anthropic.js +0 -1
- package/dist/utils/anthropic.d.ts +0 -4
- package/dist/utils/palm.cjs +0 -2
- package/dist/utils/palm.js +0 -1
- package/experimental/media.cjs +0 -1
- package/experimental/media.d.cts +0 -1
- package/experimental/media.d.ts +0 -1
- package/experimental/media.js +0 -1
- package/experimental/utils/media_core.cjs +0 -1
- package/experimental/utils/media_core.d.cts +0 -1
- package/experimental/utils/media_core.d.ts +0 -1
- package/experimental/utils/media_core.js +0 -1
- package/index.cjs +0 -1
- package/index.d.cts +0 -1
- package/index.d.ts +0 -1
- package/index.js +0 -1
- package/types.cjs +0 -1
- package/types.d.cts +0 -1
- package/types.d.ts +0 -1
- package/types.js +0 -1
- package/utils.cjs +0 -1
- package/utils.d.cts +0 -1
- package/utils.d.ts +0 -1
- package/utils.js +0 -1
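Reading the listing: the 1.x build ships every module in parallel CommonJS (`.cjs`, typed by `.d.cts`) and ESM (`.js`, typed by `.d.ts`) forms with source maps, adds a shared `dist/_virtual/rolldown_runtime.cjs` helper, and deletes the one-line top-level entry stubs (`package/index.cjs`, `package/utils.js`, and so on). As a rough sketch of what the dual artifacts mean for consumers — the authoritative wiring is the rewritten `package.json` (+72 -85), whose contents this report does not show:

```ts
// Illustrative only: entry-point resolution is governed by the package's
// conditional "exports" map in package.json, which is not reproduced here.

// ESM consumers resolve the .js build and the .d.ts typings:
import { GoogleBaseLLM } from "@langchain/google-common";

// CommonJS consumers resolve the .cjs build and the new .d.cts typings:
// const { GoogleBaseLLM } = require("@langchain/google-common");
```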
package/dist/llms.d.ts
CHANGED
```diff
@@ -1,62 +1,72 @@
-import {
-import { LLM } from "@langchain/core/language_models/llms";
-import { type BaseLanguageModelCallOptions, BaseLanguageModelInput } from "@langchain/core/language_models/base";
-import { BaseMessage, MessageContent } from "@langchain/core/messages";
-import { AbstractGoogleLLMConnection } from "./connection.js";
-import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GooglePlatformType, GeminiContent, GoogleAIResponseMimeType } from "./types.js";
+import { GeminiContent, GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAIResponseMimeType, GoogleAISafetyHandler, GoogleAISafetySetting, GoogleBaseLLMInput, GooglePlatformType } from "./types.js";
 import { GoogleAbstractedClient } from "./auth.js";
+import { AbstractGoogleLLMConnection } from "./connection.js";
 import { ChatGoogleBase } from "./chat_models.js";
-import
-[old line 10: removed content not captured in this report]
+import { BaseMessage, MessageContent } from "@langchain/core/messages";
+import { BaseLanguageModelCallOptions, BaseLanguageModelInput } from "@langchain/core/language_models/base";
+import { Callbacks } from "@langchain/core/callbacks/manager";
+import { LLM } from "@langchain/core/language_models/llms";
+
+//#region src/llms.d.ts
 declare class GoogleLLMConnection<AuthOptions> extends AbstractGoogleLLMConnection<MessageContent, AuthOptions> {
-[old line 12: removed content not captured in this report]
+  formatContents(input: MessageContent, _parameters: GoogleAIModelParams): Promise<GeminiContent[]>;
 }
 /**
  * Integration with an LLM.
  */
-[old lines 17-61: removed content not captured in this report]
+declare abstract class GoogleBaseLLM<AuthOptions> extends LLM<BaseLanguageModelCallOptions> implements GoogleBaseLLMInput<AuthOptions> {
+  // Used for tracing, replace with the same name as your class
+  static lc_name(): string;
+  get lc_secrets(): {
+    [key: string]: string;
+  } | undefined;
+  originalFields?: GoogleBaseLLMInput<AuthOptions>;
+  lc_serializable: boolean;
+  modelName: string;
+  model: string;
+  temperature: number;
+  maxOutputTokens: number;
+  topP: number;
+  topK: number;
+  stopSequences: string[];
+  safetySettings: GoogleAISafetySetting[];
+  safetyHandler: GoogleAISafetyHandler;
+  responseMimeType: GoogleAIResponseMimeType;
+  protected connection: GoogleLLMConnection<AuthOptions>;
+  protected streamedConnection: GoogleLLMConnection<AuthOptions>;
+  constructor(fields?: GoogleBaseLLMInput<AuthOptions>);
+  abstract buildAbstractedClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
+  buildApiKeyClient(apiKey: string): GoogleAbstractedClient;
+  buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined;
+  buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
+  buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;
+  get platform(): GooglePlatformType;
+  // Replace
+  _llmType(): string;
+  formatPrompt(prompt: string): MessageContent;
+  /**
+   * For some given input string and options, return a string output.
+   *
+   * Despite the fact that `invoke` is overridden below, we still need this
+   * in order to handle public APi calls to `generate()`.
+   */
+  _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
+  // Normally, you should not override this method and instead should override
+  // _streamResponseChunks. We are doing so here to allow for multimodal inputs into
+  // the LLM.
+  _streamIterator(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): AsyncGenerator<string>;
+  predictMessages(messages: BaseMessage[], options?: string[] | BaseLanguageModelCallOptions, _callbacks?: Callbacks): Promise<BaseMessage>;
+  /**
+   * Internal implementation detail to allow Google LLMs to support
+   * multimodal input by delegating to the chat model implementation.
+   *
+   * TODO: Replace with something less hacky.
+   */
+  protected createProxyChat(): ChatGoogleBase<AuthOptions>;
+  // TODO: Remove the need to override this - we are doing it to
+  // allow the LLM to handle multimodal types of input.
+  invoke(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): Promise<string>;
 }
+//#endregion
+export { GoogleBaseLLM };
+//# sourceMappingURL=llms.d.ts.map
```
package/dist/llms.d.ts.map
ADDED
```diff
@@ -0,0 +1 @@
+{"version":3,"file":"llms.d.ts","names":["Callbacks","LLM","BaseLanguageModelCallOptions","BaseLanguageModelInput","BaseMessage","MessageContent","AbstractGoogleLLMConnection","GoogleAIBaseLLMInput","GoogleAIModelParams","GoogleAISafetySetting","GooglePlatformType","GeminiContent","GoogleAIResponseMimeType","GoogleAbstractedClient","ChatGoogleBase","GoogleBaseLLMInput","GoogleAISafetyHandler","GoogleLLMConnection","AuthOptions","Promise","GoogleBaseLLM","AsyncGenerator"],"sources":["../src/llms.d.ts"],"sourcesContent":["import { Callbacks } from \"@langchain/core/callbacks/manager\";\nimport { LLM } from \"@langchain/core/language_models/llms\";\nimport { type BaseLanguageModelCallOptions, BaseLanguageModelInput } from \"@langchain/core/language_models/base\";\nimport { BaseMessage, MessageContent } from \"@langchain/core/messages\";\nimport { AbstractGoogleLLMConnection } from \"./connection.js\";\nimport { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GooglePlatformType, GeminiContent, GoogleAIResponseMimeType } from \"./types.js\";\nimport { GoogleAbstractedClient } from \"./auth.js\";\nimport { ChatGoogleBase } from \"./chat_models.js\";\nimport type { GoogleBaseLLMInput, GoogleAISafetyHandler } from \"./types.js\";\nexport { GoogleBaseLLMInput };\ndeclare class GoogleLLMConnection<AuthOptions> extends AbstractGoogleLLMConnection<MessageContent, AuthOptions> {\n formatContents(input: MessageContent, _parameters: GoogleAIModelParams): Promise<GeminiContent[]>;\n}\n/**\n * Integration with an LLM.\n */\nexport declare abstract class GoogleBaseLLM<AuthOptions> extends LLM<BaseLanguageModelCallOptions> implements GoogleBaseLLMInput<AuthOptions> {\n // Used for tracing, replace with the same name as your class\n static lc_name(): string;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n originalFields?: GoogleBaseLLMInput<AuthOptions>;\n lc_serializable: boolean;\n modelName: string;\n model: string;\n temperature: number;\n maxOutputTokens: number;\n topP: number;\n topK: number;\n stopSequences: string[];\n safetySettings: GoogleAISafetySetting[];\n safetyHandler: GoogleAISafetyHandler;\n responseMimeType: GoogleAIResponseMimeType;\n protected connection: GoogleLLMConnection<AuthOptions>;\n protected streamedConnection: GoogleLLMConnection<AuthOptions>;\n constructor(fields?: GoogleBaseLLMInput<AuthOptions>);\n abstract buildAbstractedClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;\n buildApiKeyClient(apiKey: string): GoogleAbstractedClient;\n buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined;\n buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;\n buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;\n get platform(): GooglePlatformType;\n // Replace\n _llmType(): string;\n formatPrompt(prompt: string): MessageContent;\n /**\n * For some given input string and options, return a string output.\n *\n * Despite the fact that `invoke` is overridden below, we still need this\n * in order to handle public APi calls to `generate()`.\n */\n _call(prompt: string, options: this[\"ParsedCallOptions\"]): Promise<string>;\n // Normally, you should not override this method and instead should override\n // _streamResponseChunks. We are doing so here to allow for multimodal inputs into\n // the LLM.\n _streamIterator(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): AsyncGenerator<string>;\n predictMessages(messages: BaseMessage[], options?: string[] | BaseLanguageModelCallOptions, _callbacks?: Callbacks): Promise<BaseMessage>;\n /**\n * Internal implementation detail to allow Google LLMs to support\n * multimodal input by delegating to the chat model implementation.\n *\n * TODO: Replace with something less hacky.\n */\n protected createProxyChat(): ChatGoogleBase<AuthOptions>;\n // TODO: Remove the need to override this - we are doing it to\n // allow the LLM to handle multimodal types of input.\n invoke(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): Promise<string>;\n}\n"],"mappings":";;;;;;;;;;AAS8B,cAChBiB,mBAAmB,CAAA,WAAA,CAAA,SAAsBX,2BAAtB,CAAkDD,cAAlD,EAAkEa,WAAlE,CAAA,CAAA;EAAA,cAAA,CAAA,KAAA,EACPb,cADO,EAAA,WAAA,EACsBG,mBADtB,CAAA,EAC4CW,OAD5C,CACoDR,aADpD,EAAA,CAAA;;;;;AACoDA,uBAKvDS,aALuDT,CAAAA,WAAAA,CAAAA,SAKpBV,GALoBU,CAKhBT,4BALgBS,CAAAA,YAKyBI,kBALzBJ,CAK4CO,WAL5CP,CAAAA,CAAAA;EAAa;EAAd,OAD7BL,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAA2B,IAAA,UAAA,CAAA,CAAA,EAAA;IAMpDc,CAAAA,GAAAA,EAAAA,MAAa,CAAA,EAAA,MAAA;EAAA,CAAA,GAAA,SAAA;EAAA,cAA0BlB,CAAAA,EAMhDa,kBANgDb,CAM7BgB,WAN6BhB,CAAAA;EAA4B,eAAgCgB,EAAAA,OAAAA;EAAW,SAMpGA,EAAAA,MAAAA;EAAW,KAA9BH,EAAAA,MAAAA;EAAkB,WASnBN,EAAAA,MAAAA;EAAqB,eACtBO,EAAAA,MAAAA;EAAqB,IAClBJ,EAAAA,MAAAA;EAAwB,IACAM,EAAAA,MAAAA;EAAW,aAA/BD,EAAAA,MAAAA,EAAAA;EAAmB,cACSC,EAJlCT,qBAIkCS,EAAAA;EAAW,aAA/BD,EAHfD,qBAGeC;EAAmB,gBACTC,EAHtBN,wBAGsBM;EAAW,UAA9BH,UAAAA,EAFCE,mBAEDF,CAFqBG,WAErBH,CAAAA;EAAkB,UACsBG,kBAAAA,EAF/BD,mBAE+BC,CAFXA,WAEWA,CAAAA;EAAW,WAAhCX,CAAAA,MAAAA,CAAAA,EADnBQ,kBACmBR,CADAW,WACAX,CAAAA;EAAoB,SAAgBM,qBAAAA,CAAAA,MAAAA,CAAAA,EAApCN,oBAAoCM,CAAfK,WAAeL,CAAAA,CAAAA,EAAAA,sBAAAA;EAAsB,iBAC/DA,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA,EAAAA,sBAAAA;EAAsB,WACfK,CAAAA,MAAAA,CAAAA,EAArBX,oBAAqBW,CAAAA,WAAAA,CAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAW,WAAhCX,CAAAA,MAAAA,CAAAA,EACAA,oBADAA,CACqBW,WADrBX,CAAAA,CAAAA,EACoCM,sBADpCN;EAAoB,eACCW,CAAAA,MAAAA,EAClBH,kBADkBG,CACCA,WADDA,CAAAA,EAAAA,MAAAA,EACuBL,sBADvBK,CAAAA,EAAAA,IAAAA;EAAW,IAAhCX,QAAAA,CAAAA,CAAAA,EAELG,kBAFKH;EAAoB;EAAsC,QACpCW,CAAAA,CAAAA,EAAAA,MAAAA;EAAW,YAA9BH,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA,EAIMV,cAJNU;EAAkB;;;;;;EAe2C,KAAGM,CAAAA,MAAAA,EAAAA,MAAAA,EAAAA,OAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,CAAAA,EAJ7BF,OAI6BE,CAAAA,MAAAA,CAAAA;EAAc;EACjE;EAAqD;EAAwB,eAAWjB,CAAAA,KAAAA,EADtGD,sBACsGC,EAAAA,OAAAA,CAAAA,EADpEF,4BACoEE,CAAAA,EADrCiB,cACqCjB,CAAAA,MAAAA,CAAAA;EAAW,eAAnBe,CAAAA,QAAAA,EAA3Ff,WAA2Fe,EAAAA,EAAAA,OAAAA,CAAAA,EAAAA,MAAAA,EAAAA,GAAvDjB,4BAAuDiB,EAAAA,UAAAA,CAAAA,EAAZnB,SAAYmB,CAAAA,EAAAA,OAAAA,CAAQf,WAARe,CAAAA;EAAO;;;;;;EAzC5D,UAA0CJ,eAAAA,CAAAA,CAAAA,EAgD7ED,cAhD6EC,CAgD9DG,WAhD8DH,CAAAA;EAAkB;;gBAmD9GZ,kCAAkCD,+BAA+BiB"}
```
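Per the declarations above, `buildAbstractedClient` is the only abstract member of `GoogleBaseLLM`, so a concrete platform class mainly has to supply an auth client. A minimal sketch, assuming `GoogleBaseLLM`, `ApiKeyGoogleAuth`, and the supporting types are re-exported from the package root as in 0.2.x; `MyGoogleLLM` is a hypothetical name for illustration:

```ts
import {
  GoogleBaseLLM,
  ApiKeyGoogleAuth,
  type GoogleAbstractedClient,
  type GoogleAIBaseLLMInput,
  type GoogleBaseLLMInput,
} from "@langchain/google-common";

class MyGoogleLLM extends GoogleBaseLLM<unknown> {
  constructor(fields?: GoogleBaseLLMInput<unknown>) {
    super(fields);
  }

  // The one abstract member in the new llms.d.ts. buildClient() only falls
  // back here when no API key is found in the fields or in GOOGLE_API_KEY;
  // a real subclass would return a platform-specific auth client instead of
  // this placeholder API-key client.
  buildAbstractedClient(
    fields?: GoogleAIBaseLLMInput<unknown>
  ): GoogleAbstractedClient {
    return new ApiKeyGoogleAuth(fields?.apiKey ?? "");
  }
}

async function main() {
  const llm = new MyGoogleLLM({ model: "gemini-pro" });
  // invoke() resolves to a plain string, per the declaration above.
  const text: string = await llm.invoke("Say hello");
  console.log(text);
}

main().catch(console.error);
```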
package/dist/llms.js
CHANGED
```diff
@@ -1,242 +1,158 @@
-import { CallbackManager } from "@langchain/core/callbacks/manager";
-import { BaseLLM, LLM } from "@langchain/core/language_models/llms";
-import { GenerationChunk } from "@langchain/core/outputs";
-import { getEnvironmentVariable } from "@langchain/core/utils/env";
-import { AbstractGoogleLLMConnection } from "./connection.js";
-import { copyAIModelParams, copyAndValidateModelParamsInto, } from "./utils/common.js";
 import { DefaultGeminiSafetyHandler } from "./utils/gemini.js";
-import {
+import { copyAIModelParams, copyAndValidateModelParamsInto } from "./utils/common.js";
 import { ensureParams } from "./utils/failed_handler.js";
+import { AbstractGoogleLLMConnection } from "./connection.js";
+import { ApiKeyGoogleAuth } from "./auth.js";
 import { ChatGoogleBase } from "./chat_models.js";
-[old lines 11-29: removed content not captured in this report]
-}
+import { getEnvironmentVariable } from "@langchain/core/utils/env";
+import { GenerationChunk } from "@langchain/core/outputs";
+import { CallbackManager } from "@langchain/core/callbacks/manager";
+import { BaseLLM, LLM } from "@langchain/core/language_models/llms";
+
+//#region src/llms.ts
+var GoogleLLMConnection = class extends AbstractGoogleLLMConnection {
+  async formatContents(input, _parameters) {
+    const parts = await this.api.messageContentToParts(input);
+    const contents = [{
+      role: "user",
+      parts
+    }];
+    return contents;
+  }
+};
+var ProxyChatGoogle = class extends ChatGoogleBase {
+  constructor(fields) {
+    super(fields);
+  }
+  buildAbstractedClient(fields) {
+    return fields.connection.client;
+  }
+};
 /**
-[old lines 32-158: removed content not captured in this report]
-    // Replace
-    _llmType() {
-        return "googlellm";
-    }
-    formatPrompt(prompt) {
-        return prompt;
-    }
-    /**
-     * For some given input string and options, return a string output.
-     *
-     * Despite the fact that `invoke` is overridden below, we still need this
-     * in order to handle public APi calls to `generate()`.
-     */
-    async _call(prompt, options) {
-        const parameters = copyAIModelParams(this, options);
-        const result = await this.connection.request(prompt, parameters, options);
-        const ret = this.connection.api.responseToString(result);
-        return ret;
-    }
-    // Normally, you should not override this method and instead should override
-    // _streamResponseChunks. We are doing so here to allow for multimodal inputs into
-    // the LLM.
-    async *_streamIterator(input, options) {
-        // TODO: Refactor callback setup and teardown code into core
-        const prompt = BaseLLM._convertInputToPromptValue(input);
-        const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
-        const callbackManager_ = await CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
-        const extra = {
-            options: callOptions,
-            invocation_params: this?.invocationParams(callOptions),
-            batch_size: 1,
-        };
-        const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], undefined, undefined, extra, undefined, undefined, runnableConfig.runName);
-        let generation = new GenerationChunk({
-            text: "",
-        });
-        const proxyChat = this.createProxyChat();
-        try {
-            for await (const chunk of proxyChat._streamIterator(input, options)) {
-                const stringValue = this.connection.api.chunkToString(chunk);
-                const generationChunk = new GenerationChunk({
-                    text: stringValue,
-                });
-                generation = generation.concat(generationChunk);
-                yield stringValue;
-            }
-        }
-        catch (err) {
-            await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-            throw err;
-        }
-        await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({
-            generations: [[generation]],
-        })));
-    }
-    async predictMessages(messages, options, _callbacks) {
-        const { content } = messages[0];
-        const result = await this.connection.request(content, {}, options);
-        const ret = this.connection.api.responseToBaseMessage(result);
-        return ret;
-    }
-    /**
-     * Internal implementation detail to allow Google LLMs to support
-     * multimodal input by delegating to the chat model implementation.
-     *
-     * TODO: Replace with something less hacky.
-     */
-    createProxyChat() {
-        return new ProxyChatGoogle({
-            ...this.originalFields,
-            connection: this.connection,
-        });
-    }
-    // TODO: Remove the need to override this - we are doing it to
-    // allow the LLM to handle multimodal types of input.
-    async invoke(input, options) {
-        const stream = await this._streamIterator(input, options);
-        let generatedOutput = "";
-        for await (const chunk of stream) {
-            generatedOutput += chunk;
-        }
-        return generatedOutput;
-    }
-}
+ * Integration with an LLM.
+ */
+var GoogleBaseLLM = class extends LLM {
+  static lc_name() {
+    return "GoogleLLM";
+  }
+  get lc_secrets() {
+    return { authOptions: "GOOGLE_AUTH_OPTIONS" };
+  }
+  originalFields;
+  lc_serializable = true;
+  modelName = "gemini-pro";
+  model = "gemini-pro";
+  temperature = .7;
+  maxOutputTokens = 1024;
+  topP = .8;
+  topK = 40;
+  stopSequences = [];
+  safetySettings = [];
+  safetyHandler;
+  responseMimeType = "text/plain";
+  connection;
+  streamedConnection;
+  constructor(fields) {
+    super(ensureParams(fields));
+    this.originalFields = fields;
+    copyAndValidateModelParamsInto(fields, this);
+    this.safetyHandler = fields?.safetyHandler ?? new DefaultGeminiSafetyHandler();
+    const client = this.buildClient(fields);
+    this.buildConnection(fields ?? {}, client);
+  }
+  buildApiKeyClient(apiKey) {
+    return new ApiKeyGoogleAuth(apiKey);
+  }
+  buildApiKey(fields) {
+    return fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
+  }
+  buildClient(fields) {
+    const apiKey = this.buildApiKey(fields);
+    if (apiKey) return this.buildApiKeyClient(apiKey);
+    else return this.buildAbstractedClient(fields);
+  }
+  buildConnection(fields, client) {
+    this.connection = new GoogleLLMConnection({
+      ...fields,
+      ...this
+    }, this.caller, client, false);
+    this.streamedConnection = new GoogleLLMConnection({
+      ...fields,
+      ...this
+    }, this.caller, client, true);
+  }
+  get platform() {
+    return this.connection.platform;
+  }
+  _llmType() {
+    return "googlellm";
+  }
+  formatPrompt(prompt) {
+    return prompt;
+  }
+  /**
+   * For some given input string and options, return a string output.
+   *
+   * Despite the fact that `invoke` is overridden below, we still need this
+   * in order to handle public APi calls to `generate()`.
+   */
+  async _call(prompt, options) {
+    const parameters = copyAIModelParams(this, options);
+    const result = await this.connection.request(prompt, parameters, options);
+    const ret = this.connection.api.responseToString(result);
+    return ret;
+  }
+  async *_streamIterator(input, options) {
+    const prompt = BaseLLM._convertInputToPromptValue(input);
+    const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
+    const callbackManager_ = await CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
+    const extra = {
+      options: callOptions,
+      invocation_params: this?.invocationParams(callOptions),
+      batch_size: 1
+    };
+    const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], void 0, void 0, extra, void 0, void 0, runnableConfig.runName);
+    let generation = new GenerationChunk({ text: "" });
+    const proxyChat = this.createProxyChat();
+    try {
+      for await (const chunk of proxyChat._streamIterator(input, options)) {
+        const stringValue = this.connection.api.chunkToString(chunk);
+        const generationChunk = new GenerationChunk({ text: stringValue });
+        generation = generation.concat(generationChunk);
+        yield stringValue;
+      }
+    } catch (err) {
+      await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
+      throw err;
+    }
+    await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({ generations: [[generation]] })));
+  }
+  async predictMessages(messages, options, _callbacks) {
+    const { content } = messages[0];
+    const result = await this.connection.request(content, {}, options);
+    const ret = this.connection.api.responseToBaseMessage(result);
+    return ret;
+  }
+  /**
+   * Internal implementation detail to allow Google LLMs to support
+   * multimodal input by delegating to the chat model implementation.
+   *
+   * TODO: Replace with something less hacky.
+   */
+  createProxyChat() {
+    return new ProxyChatGoogle({
+      ...this.originalFields,
+      connection: this.connection
+    });
+  }
+  async invoke(input, options) {
+    const stream = await this._streamIterator(input, options);
+    let generatedOutput = "";
+    for await (const chunk of stream) generatedOutput += chunk;
+    return generatedOutput;
+  }
+};
+
+//#endregion
+export { GoogleBaseLLM };
+//# sourceMappingURL=llms.js.map
```
package/dist/llms.js.map
ADDED
```diff
@@ -0,0 +1 @@
+{"version":3,"file":"llms.js","names":["input: MessageContent","_parameters: GoogleAIModelParams","contents: GeminiContent[]","fields: ProxyChatInput<AuthOptions>","fields?: GoogleBaseLLMInput<AuthOptions>","apiKey: string","fields?: GoogleAIBaseLLMInput<AuthOptions>","fields: GoogleBaseLLMInput<AuthOptions>","client: GoogleAbstractedClient","prompt: string","options: this[\"ParsedCallOptions\"]","input: BaseLanguageModelInput","options?: BaseLanguageModelCallOptions","messages: BaseMessage[]","options?: string[] | BaseLanguageModelCallOptions","_callbacks?: Callbacks"],"sources":["../src/llms.ts"],"sourcesContent":["import { CallbackManager, Callbacks } from \"@langchain/core/callbacks/manager\";\nimport { BaseLLM, LLM } from \"@langchain/core/language_models/llms\";\nimport {\n type BaseLanguageModelCallOptions,\n BaseLanguageModelInput,\n} from \"@langchain/core/language_models/base\";\nimport { BaseMessage, MessageContent } from \"@langchain/core/messages\";\nimport { GenerationChunk } from \"@langchain/core/outputs\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\n\nimport { AbstractGoogleLLMConnection } from \"./connection.js\";\nimport {\n GoogleAIBaseLLMInput,\n GoogleAIModelParams,\n GoogleAISafetySetting,\n GooglePlatformType,\n GeminiContent,\n GoogleAIResponseMimeType,\n} from \"./types.js\";\nimport {\n copyAIModelParams,\n copyAndValidateModelParamsInto,\n} from \"./utils/common.js\";\nimport { DefaultGeminiSafetyHandler } from \"./utils/gemini.js\";\nimport { ApiKeyGoogleAuth, GoogleAbstractedClient } from \"./auth.js\";\nimport { ensureParams } from \"./utils/failed_handler.js\";\nimport { ChatGoogleBase } from \"./chat_models.js\";\nimport type { GoogleBaseLLMInput, GoogleAISafetyHandler } from \"./types.js\";\n\nexport { GoogleBaseLLMInput };\n\nclass GoogleLLMConnection<AuthOptions> extends AbstractGoogleLLMConnection<\n MessageContent,\n AuthOptions\n> {\n async formatContents(\n input: MessageContent,\n _parameters: GoogleAIModelParams\n ): Promise<GeminiContent[]> {\n const parts = await this.api.messageContentToParts!(input);\n const contents: GeminiContent[] = [\n {\n role: \"user\", // Required by Vertex AI\n parts,\n },\n ];\n return contents;\n }\n}\n\ntype ProxyChatInput<AuthOptions> = GoogleAIBaseLLMInput<AuthOptions> & {\n connection: GoogleLLMConnection<AuthOptions>;\n};\n\nclass ProxyChatGoogle<AuthOptions> extends ChatGoogleBase<AuthOptions> {\n constructor(fields: ProxyChatInput<AuthOptions>) {\n super(fields);\n }\n\n buildAbstractedClient(\n fields: ProxyChatInput<AuthOptions>\n ): GoogleAbstractedClient {\n return fields.connection.client;\n }\n}\n\n/**\n * Integration with an LLM.\n */\nexport abstract class GoogleBaseLLM<AuthOptions>\n extends LLM<BaseLanguageModelCallOptions>\n implements GoogleBaseLLMInput<AuthOptions>\n{\n // Used for tracing, replace with the same name as your class\n static lc_name() {\n return \"GoogleLLM\";\n }\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n authOptions: \"GOOGLE_AUTH_OPTIONS\",\n };\n }\n\n originalFields?: GoogleBaseLLMInput<AuthOptions>;\n\n lc_serializable = true;\n\n modelName = \"gemini-pro\";\n\n model = \"gemini-pro\";\n\n temperature = 0.7;\n\n maxOutputTokens = 1024;\n\n topP = 0.8;\n\n topK = 40;\n\n stopSequences: string[] = [];\n\n safetySettings: GoogleAISafetySetting[] = [];\n\n safetyHandler: GoogleAISafetyHandler;\n\n responseMimeType: GoogleAIResponseMimeType = \"text/plain\";\n\n protected connection: GoogleLLMConnection<AuthOptions>;\n\n protected streamedConnection: GoogleLLMConnection<AuthOptions>;\n\n constructor(fields?: GoogleBaseLLMInput<AuthOptions>) {\n super(ensureParams(fields));\n this.originalFields = fields;\n\n copyAndValidateModelParamsInto(fields, this);\n this.safetyHandler =\n fields?.safetyHandler ?? new DefaultGeminiSafetyHandler();\n\n const client = this.buildClient(fields);\n this.buildConnection(fields ?? {}, client);\n }\n\n abstract buildAbstractedClient(\n fields?: GoogleAIBaseLLMInput<AuthOptions>\n ): GoogleAbstractedClient;\n\n buildApiKeyClient(apiKey: string): GoogleAbstractedClient {\n return new ApiKeyGoogleAuth(apiKey);\n }\n\n buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined {\n return fields?.apiKey ?? getEnvironmentVariable(\"GOOGLE_API_KEY\");\n }\n\n buildClient(\n fields?: GoogleAIBaseLLMInput<AuthOptions>\n ): GoogleAbstractedClient {\n const apiKey = this.buildApiKey(fields);\n if (apiKey) {\n return this.buildApiKeyClient(apiKey);\n } else {\n return this.buildAbstractedClient(fields);\n }\n }\n\n buildConnection(\n fields: GoogleBaseLLMInput<AuthOptions>,\n client: GoogleAbstractedClient\n ) {\n this.connection = new GoogleLLMConnection(\n { ...fields, ...this },\n this.caller,\n client,\n false\n );\n\n this.streamedConnection = new GoogleLLMConnection(\n { ...fields, ...this },\n this.caller,\n client,\n true\n );\n }\n\n get platform(): GooglePlatformType {\n return this.connection.platform;\n }\n\n // Replace\n _llmType() {\n return \"googlellm\";\n }\n\n formatPrompt(prompt: string): MessageContent {\n return prompt;\n }\n\n /**\n * For some given input string and options, return a string output.\n *\n * Despite the fact that `invoke` is overridden below, we still need this\n * in order to handle public APi calls to `generate()`.\n */\n async _call(\n prompt: string,\n options: this[\"ParsedCallOptions\"]\n ): Promise<string> {\n const parameters = copyAIModelParams(this, options);\n const result = await this.connection.request(prompt, parameters, options);\n const ret = this.connection.api.responseToString(result);\n return ret;\n }\n\n // Normally, you should not override this method and instead should override\n // _streamResponseChunks. We are doing so here to allow for multimodal inputs into\n // the LLM.\n async *_streamIterator(\n input: BaseLanguageModelInput,\n options?: BaseLanguageModelCallOptions\n ): AsyncGenerator<string> {\n // TODO: Refactor callback setup and teardown code into core\n const prompt = BaseLLM._convertInputToPromptValue(input);\n const [runnableConfig, callOptions] =\n this._separateRunnableConfigFromCallOptions(options);\n const callbackManager_ = await CallbackManager.configure(\n runnableConfig.callbacks,\n this.callbacks,\n runnableConfig.tags,\n this.tags,\n runnableConfig.metadata,\n this.metadata,\n { verbose: this.verbose }\n );\n const extra = {\n options: callOptions,\n invocation_params: this?.invocationParams(callOptions),\n batch_size: 1,\n };\n const runManagers = await callbackManager_?.handleLLMStart(\n this.toJSON(),\n [prompt.toString()],\n undefined,\n undefined,\n extra,\n undefined,\n undefined,\n runnableConfig.runName\n );\n let generation = new GenerationChunk({\n text: \"\",\n });\n const proxyChat = this.createProxyChat();\n try {\n for await (const chunk of proxyChat._streamIterator(input, options)) {\n const stringValue = this.connection.api.chunkToString(chunk);\n const generationChunk = new GenerationChunk({\n text: stringValue,\n });\n generation = generation.concat(generationChunk);\n yield stringValue;\n }\n } catch (err) {\n await Promise.all(\n (runManagers ?? []).map((runManager) => runManager?.handleLLMError(err))\n );\n throw err;\n }\n await Promise.all(\n (runManagers ?? []).map((runManager) =>\n runManager?.handleLLMEnd({\n generations: [[generation]],\n })\n )\n );\n }\n\n async predictMessages(\n messages: BaseMessage[],\n options?: string[] | BaseLanguageModelCallOptions,\n _callbacks?: Callbacks\n ): Promise<BaseMessage> {\n const { content } = messages[0];\n const result = await this.connection.request(\n content,\n {},\n options as BaseLanguageModelCallOptions\n );\n const ret = this.connection.api.responseToBaseMessage(result);\n return ret;\n }\n\n /**\n * Internal implementation detail to allow Google LLMs to support\n * multimodal input by delegating to the chat model implementation.\n *\n * TODO: Replace with something less hacky.\n */\n protected createProxyChat(): ChatGoogleBase<AuthOptions> {\n return new ProxyChatGoogle<AuthOptions>({\n ...this.originalFields,\n connection: this.connection,\n });\n }\n\n // TODO: Remove the need to override this - we are doing it to\n // allow the LLM to handle multimodal types of input.\n async invoke(\n input: BaseLanguageModelInput,\n options?: BaseLanguageModelCallOptions\n ): Promise<string> {\n const stream = await this._streamIterator(input, options);\n let generatedOutput = \"\";\n for await (const chunk of stream) {\n generatedOutput += chunk;\n }\n return generatedOutput;\n }\n}\n"],"mappings":";;;;;;;;;;;;AA+BA,IAAM,sBAAN,cAA+C,4BAG7C;CACA,MAAM,eACJA,OACAC,aAC0B;EAC1B,MAAM,QAAQ,MAAM,KAAK,IAAI,sBAAuB,MAAM;EAC1D,MAAMC,WAA4B,CAChC;GACE,MAAM;GACN;EACD,CACF;AACD,SAAO;CACR;AACF;AAMD,IAAM,kBAAN,cAA2C,eAA4B;CACrE,YAAYC,QAAqC;EAC/C,MAAM,OAAO;CACd;CAED,sBACEA,QACwB;AACxB,SAAO,OAAO,WAAW;CAC1B;AACF;;;;AAKD,IAAsB,gBAAtB,cACU,IAEV;CAEE,OAAO,UAAU;AACf,SAAO;CACR;CAED,IAAI,aAAoD;AACtD,SAAO,EACL,aAAa,sBACd;CACF;CAED;CAEA,kBAAkB;CAElB,YAAY;CAEZ,QAAQ;CAER,cAAc;CAEd,kBAAkB;CAElB,OAAO;CAEP,OAAO;CAEP,gBAA0B,CAAE;CAE5B,iBAA0C,CAAE;CAE5C;CAEA,mBAA6C;CAE7C,AAAU;CAEV,AAAU;CAEV,YAAYC,QAA0C;EACpD,MAAM,aAAa,OAAO,CAAC;EAC3B,KAAK,iBAAiB;EAEtB,+BAA+B,QAAQ,KAAK;EAC5C,KAAK,gBACH,QAAQ,iBAAiB,IAAI;EAE/B,MAAM,SAAS,KAAK,YAAY,OAAO;EACvC,KAAK,gBAAgB,UAAU,CAAE,GAAE,OAAO;CAC3C;CAMD,kBAAkBC,QAAwC;AACxD,SAAO,IAAI,iBAAiB;CAC7B;CAED,YAAYC,QAAgE;AAC1E,SAAO,QAAQ,UAAU,uBAAuB,iBAAiB;CAClE;CAED,YACEA,QACwB;EACxB,MAAM,SAAS,KAAK,YAAY,OAAO;AACvC,MAAI,OACF,QAAO,KAAK,kBAAkB,OAAO;MAErC,QAAO,KAAK,sBAAsB,OAAO;CAE5C;CAED,gBACEC,QACAC,QACA;EACA,KAAK,aAAa,IAAI,oBACpB;GAAE,GAAG;GAAQ,GAAG;EAAM,GACtB,KAAK,QACL,QACA;EAGF,KAAK,qBAAqB,IAAI,oBAC5B;GAAE,GAAG;GAAQ,GAAG;EAAM,GACtB,KAAK,QACL,QACA;CAEH;CAED,IAAI,WAA+B;AACjC,SAAO,KAAK,WAAW;CACxB;CAGD,WAAW;AACT,SAAO;CACR;CAED,aAAaC,QAAgC;AAC3C,SAAO;CACR;;;;;;;CAQD,MAAM,MACJA,QACAC,SACiB;EACjB,MAAM,aAAa,kBAAkB,MAAM,QAAQ;EACnD,MAAM,SAAS,MAAM,KAAK,WAAW,QAAQ,QAAQ,YAAY,QAAQ;EACzE,MAAM,MAAM,KAAK,WAAW,IAAI,iBAAiB,OAAO;AACxD,SAAO;CACR;CAKD,OAAO,gBACLC,OACAC,SACwB;EAExB,MAAM,SAAS,QAAQ,2BAA2B,MAAM;EACxD,MAAM,CAAC,gBAAgB,YAAY,GACjC,KAAK,uCAAuC,QAAQ;EACtD,MAAM,mBAAmB,MAAM,gBAAgB,UAC7C,eAAe,WACf,KAAK,WACL,eAAe,MACf,KAAK,MACL,eAAe,UACf,KAAK,UACL,EAAE,SAAS,KAAK,QAAS,EAC1B;EACD,MAAM,QAAQ;GACZ,SAAS;GACT,mBAAmB,MAAM,iBAAiB,YAAY;GACtD,YAAY;EACb;EACD,MAAM,cAAc,MAAM,kBAAkB,eAC1C,KAAK,QAAQ,EACb,CAAC,OAAO,UAAU,AAAC,GACnB,QACA,QACA,OACA,QACA,QACA,eAAe,QAChB;EACD,IAAI,aAAa,IAAI,gBAAgB,EACnC,MAAM,GACP;EACD,MAAM,YAAY,KAAK,iBAAiB;AACxC,MAAI;AACF,cAAW,MAAM,SAAS,UAAU,gBAAgB,OAAO,QAAQ,EAAE;IACnE,MAAM,cAAc,KAAK,WAAW,IAAI,cAAc,MAAM;IAC5D,MAAM,kBAAkB,IAAI,gBAAgB,EAC1C,MAAM,YACP;IACD,aAAa,WAAW,OAAO,gBAAgB;IAC/C,MAAM;GACP;EACF,SAAQ,KAAK;GACZ,MAAM,QAAQ,KACX,eAAe,CAAE,GAAE,IAAI,CAAC,eAAe,YAAY,eAAe,IAAI,CAAC,CACzE;AACD,SAAM;EACP;EACD,MAAM,QAAQ,KACX,eAAe,CAAE,GAAE,IAAI,CAAC,eACvB,YAAY,aAAa,EACvB,aAAa,CAAC,CAAC,UAAW,CAAC,EAC5B,EAAC,CACH,CACF;CACF;CAED,MAAM,gBACJC,UACAC,SACAC,YACsB;EACtB,MAAM,EAAE,SAAS,GAAG,SAAS;EAC7B,MAAM,SAAS,MAAM,KAAK,WAAW,QACnC,SACA,CAAE,GACF,QACD;EACD,MAAM,MAAM,KAAK,WAAW,IAAI,sBAAsB,OAAO;AAC7D,SAAO;CACR;;;;;;;CAQD,AAAU,kBAA+C;AACvD,SAAO,IAAI,gBAA6B;GACtC,GAAG,KAAK;GACR,YAAY,KAAK;EAClB;CACF;CAID,MAAM,OACJJ,OACAC,SACiB;EACjB,MAAM,SAAS,MAAM,KAAK,gBAAgB,OAAO,QAAQ;EACzD,IAAI,kBAAkB;AACtB,aAAW,MAAM,SAAS,QACxB,mBAAmB;AAErB,SAAO;CACR;AACF"}
```
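One behavioral detail worth noting from the new `llms.js`: `invoke` is overridden to drain `_streamIterator`, and `_streamIterator` delegates to an internal proxy chat model via `createProxyChat()`, so streaming and non-streaming calls share one code path (this is what lets the LLM class accept multimodal input). A sketch of what that means at the call site, inside an async context, with `llm` standing in for any concrete `GoogleBaseLLM` instance such as the hypothetical subclass above:

```ts
// stream() comes from the Runnable base class in @langchain/core; for an
// LLM-class model each yielded chunk is a string.
let streamed = "";
for await (const chunk of await llm.stream("Tell me a joke")) {
  streamed += chunk;
}

// invoke() performs the same chunk accumulation internally (see the override
// above), so it resolves to one concatenated string rather than taking a
// separate non-streaming path.
const invoked = await llm.invoke("Tell me a joke");
```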