@langchain/google-genai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ The MIT License
2
+
3
+ Copyright (c) 2023 LangChain
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in
13
+ all copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21
+ THE SOFTWARE.
@@ -0,0 +1,203 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ChatGoogleGenerativeAI = void 0;
4
+ const generative_ai_1 = require("@google/generative-ai");
5
+ const env_1 = require("@langchain/core/utils/env");
6
+ const chat_models_1 = require("@langchain/core/language_models/chat_models");
7
+ const utils_js_1 = require("./utils.cjs");
8
+ /**
9
+ * A class that wraps the Google Palm chat model.
10
+ * @example
11
+ * ```typescript
12
+ * const model = new ChatGoogleGenerativeAI({
13
+ * apiKey: "<YOUR API KEY>",
14
+ * temperature: 0.7,
15
+ * modelName: "gemini-pro",
16
+ * topK: 40,
17
+ * topP: 1,
18
+ * });
19
+ * const questions = [
20
+ * new HumanMessage({
21
+ * content: [
22
+ * {
23
+ * type: "text",
24
+ * text: "You are a funny assistant that answers in pirate language.",
25
+ * },
26
+ * {
27
+ * type: "text",
28
+ * text: "What is your favorite food?",
29
+ * },
30
+ * ]
31
+ * })
32
+ * ];
33
+ * const res = await model.call(questions);
34
+ * console.log({ res });
35
+ * ```
36
+ */
37
+ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
38
+ static lc_name() {
39
+ return "googlegenerativeai";
40
+ }
41
+ get lc_secrets() {
42
+ return {
43
+ apiKey: "GOOGLE_API_KEY",
44
+ };
45
+ }
46
+ get _isMultimodalModel() {
47
+ return this.modelName.includes("vision");
48
+ }
49
+ constructor(fields) {
50
+ super(fields ?? {});
51
+ Object.defineProperty(this, "lc_serializable", {
52
+ enumerable: true,
53
+ configurable: true,
54
+ writable: true,
55
+ value: true
56
+ });
57
+ Object.defineProperty(this, "modelName", {
58
+ enumerable: true,
59
+ configurable: true,
60
+ writable: true,
61
+ value: "gemini-pro"
62
+ });
63
+ Object.defineProperty(this, "temperature", {
64
+ enumerable: true,
65
+ configurable: true,
66
+ writable: true,
67
+ value: void 0
68
+ }); // default value chosen based on model
69
+ Object.defineProperty(this, "maxOutputTokens", {
70
+ enumerable: true,
71
+ configurable: true,
72
+ writable: true,
73
+ value: void 0
74
+ });
75
+ Object.defineProperty(this, "topP", {
76
+ enumerable: true,
77
+ configurable: true,
78
+ writable: true,
79
+ value: void 0
80
+ }); // default value chosen based on model
81
+ Object.defineProperty(this, "topK", {
82
+ enumerable: true,
83
+ configurable: true,
84
+ writable: true,
85
+ value: void 0
86
+ }); // default value chosen based on model
87
+ Object.defineProperty(this, "stopSequences", {
88
+ enumerable: true,
89
+ configurable: true,
90
+ writable: true,
91
+ value: []
92
+ });
93
+ Object.defineProperty(this, "safetySettings", {
94
+ enumerable: true,
95
+ configurable: true,
96
+ writable: true,
97
+ value: void 0
98
+ });
99
+ Object.defineProperty(this, "apiKey", {
100
+ enumerable: true,
101
+ configurable: true,
102
+ writable: true,
103
+ value: void 0
104
+ });
105
+ Object.defineProperty(this, "client", {
106
+ enumerable: true,
107
+ configurable: true,
108
+ writable: true,
109
+ value: void 0
110
+ });
111
+ this.modelName =
112
+ fields?.modelName?.replace(/^models\//, "") ?? this.modelName;
113
+ this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens;
114
+ if (this.maxOutputTokens && this.maxOutputTokens < 0) {
115
+ throw new Error("`maxOutputTokens` must be a positive integer");
116
+ }
117
+ this.temperature = fields?.temperature ?? this.temperature;
118
+ if (this.temperature && (this.temperature < 0 || this.temperature > 1)) {
119
+ throw new Error("`temperature` must be in the range of [0.0,1.0]");
120
+ }
121
+ this.topP = fields?.topP ?? this.topP;
122
+ if (this.topP && this.topP < 0) {
123
+ throw new Error("`topP` must be a positive integer");
124
+ }
125
+ if (this.topP && this.topP > 1) {
126
+ throw new Error("`topP` must be below 1.");
127
+ }
128
+ this.topK = fields?.topK ?? this.topK;
129
+ if (this.topK && this.topK < 0) {
130
+ throw new Error("`topK` must be a positive integer");
131
+ }
132
+ this.apiKey = fields?.apiKey ?? (0, env_1.getEnvironmentVariable)("GOOGLE_API_KEY");
133
+ if (!this.apiKey) {
134
+ throw new Error("Please set an API key for Google GenerativeAI " +
135
+ "in the environment variable GOOGLE_API_KEY " +
136
+ "or in the `apiKey` field of the " +
137
+ "ChatGoogleGenerativeAI constructor");
138
+ }
139
+ this.safetySettings = fields?.safetySettings ?? this.safetySettings;
140
+ if (this.safetySettings && this.safetySettings.length > 0) {
141
+ const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category));
142
+ if (safetySettingsSet.size !== this.safetySettings.length) {
143
+ throw new Error("The categories in `safetySettings` array must be unique");
144
+ }
145
+ }
146
+ this.client = new generative_ai_1.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
147
+ model: this.modelName,
148
+ safetySettings: this.safetySettings,
149
+ generationConfig: {
150
+ candidateCount: 1,
151
+ stopSequences: this.stopSequences,
152
+ maxOutputTokens: this.maxOutputTokens,
153
+ temperature: this.temperature,
154
+ topP: this.topP,
155
+ topK: this.topK,
156
+ },
157
+ });
158
+ }
159
+ _combineLLMOutput() {
160
+ return [];
161
+ }
162
+ _llmType() {
163
+ return "googlegenerativeai";
164
+ }
165
+ async _generate(messages, options, _runManager) {
166
+ const prompt = (0, utils_js_1.convertBaseMessagesToContent)(messages, this._isMultimodalModel);
167
+ const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
168
+ let output;
169
+ try {
170
+ output = await this.client.generateContent({
171
+ contents: prompt,
172
+ });
173
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
174
+ }
175
+ catch (e) {
176
+ // TODO: Improve error handling
177
+ if (e.message?.includes("400 Bad Request")) {
178
+ e.status = 400;
179
+ }
180
+ throw e;
181
+ }
182
+ return output;
183
+ });
184
+ return (0, utils_js_1.mapGenerateContentResultToChatResult)(res.response);
185
+ }
186
+ async *_streamResponseChunks(messages, options, _runManager) {
187
+ const prompt = (0, utils_js_1.convertBaseMessagesToContent)(messages, this._isMultimodalModel);
188
+ const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
189
+ const { stream } = await this.client.generateContentStream({
190
+ contents: prompt,
191
+ });
192
+ return stream;
193
+ });
194
+ for await (const response of stream) {
195
+ const chunk = (0, utils_js_1.convertResponseContentToChatGenerationChunk)(response);
196
+ if (!chunk) {
197
+ continue;
198
+ }
199
+ yield chunk;
200
+ }
201
+ }
202
+ }
203
+ exports.ChatGoogleGenerativeAI = ChatGoogleGenerativeAI;
@@ -0,0 +1,130 @@
1
+ import type { SafetySetting } from "@google/generative-ai";
2
+ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
3
+ import { BaseMessage } from "@langchain/core/messages";
4
+ import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
5
+ import { BaseChatModel, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
6
+ export type BaseMessageExamplePair = {
7
+ input: BaseMessage;
8
+ output: BaseMessage;
9
+ };
10
+ /**
11
+ * An interface defining the input to the ChatGoogleGenerativeAI class.
12
+ */
13
+ export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
14
+ /**
15
+ * Model Name to use
16
+ *
17
+ * Note: The format must follow the pattern - `{model}`
18
+ */
19
+ modelName?: string;
20
+ /**
21
+ * Controls the randomness of the output.
22
+ *
23
+ * Values can range from [0.0,1.0], inclusive. A value closer to 1.0
24
+ * will produce responses that are more varied and creative, while
25
+ * a value closer to 0.0 will typically result in less surprising
26
+ * responses from the model.
27
+ *
28
+ * Note: The default value varies by model
29
+ */
30
+ temperature?: number;
31
+ /**
32
+ * Maximum number of tokens to generate in the completion.
33
+ */
34
+ maxOutputTokens?: number;
35
+ /**
36
+ * Top-p changes how the model selects tokens for output.
37
+ *
38
+ * Tokens are selected from most probable to least until the sum
39
+ * of their probabilities equals the top-p value.
40
+ *
41
+ * For example, if tokens A, B, and C have a probability of
42
+ * .3, .2, and .1 and the top-p value is .5, then the model will
43
+ * select either A or B as the next token (using temperature).
44
+ *
45
+ * Note: The default value varies by model
46
+ */
47
+ topP?: number;
48
+ /**
49
+ * Top-k changes how the model selects tokens for output.
50
+ *
51
+ * A top-k of 1 means the selected token is the most probable among
52
+ * all tokens in the model’s vocabulary (also called greedy decoding),
53
+ * while a top-k of 3 means that the next token is selected from
54
+ * among the 3 most probable tokens (using temperature).
55
+ *
56
+ * Note: The default value varies by model
57
+ */
58
+ topK?: number;
59
+ /**
60
+ * The set of character sequences (up to 5) that will stop output generation.
61
+ * If specified, the API will stop at the first appearance of a stop
62
+ * sequence.
63
+ *
64
+ * Note: The stop sequence will not be included as part of the response.
65
+ * Note: stopSequences is only supported for Gemini models
66
+ */
67
+ stopSequences?: string[];
68
+ /**
69
+ * A list of unique `SafetySetting` instances for blocking unsafe content. The API will block
70
+ * any prompts and responses that fail to meet the thresholds set by these settings. If there
71
+ * is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use
72
+ * the default safety setting for that category.
73
+ */
74
+ safetySettings?: SafetySetting[];
75
+ /**
76
+ * Google API key to use
77
+ */
78
+ apiKey?: string;
79
+ }
80
+ /**
81
+ * A class that wraps the Google Palm chat model.
82
+ * @example
83
+ * ```typescript
84
+ * const model = new ChatGoogleGenerativeAI({
85
+ * apiKey: "<YOUR API KEY>",
86
+ * temperature: 0.7,
87
+ * modelName: "gemini-pro",
88
+ * topK: 40,
89
+ * topP: 1,
90
+ * });
91
+ * const questions = [
92
+ * new HumanMessage({
93
+ * content: [
94
+ * {
95
+ * type: "text",
96
+ * text: "You are a funny assistant that answers in pirate language.",
97
+ * },
98
+ * {
99
+ * type: "text",
100
+ * text: "What is your favorite food?",
101
+ * },
102
+ * ]
103
+ * })
104
+ * ];
105
+ * const res = await model.call(questions);
106
+ * console.log({ res });
107
+ * ```
108
+ */
109
+ export declare class ChatGoogleGenerativeAI extends BaseChatModel implements GoogleGenerativeAIChatInput {
110
+ static lc_name(): string;
111
+ lc_serializable: boolean;
112
+ get lc_secrets(): {
113
+ [key: string]: string;
114
+ } | undefined;
115
+ modelName: string;
116
+ temperature?: number;
117
+ maxOutputTokens?: number;
118
+ topP?: number;
119
+ topK?: number;
120
+ stopSequences: string[];
121
+ safetySettings?: SafetySetting[];
122
+ apiKey?: string;
123
+ private client;
124
+ get _isMultimodalModel(): boolean;
125
+ constructor(fields?: GoogleGenerativeAIChatInput);
126
+ _combineLLMOutput(): never[];
127
+ _llmType(): string;
128
+ _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
129
+ _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
130
+ }
@@ -0,0 +1,199 @@
1
+ import { GoogleGenerativeAI as GenerativeAI, } from "@google/generative-ai";
2
+ import { getEnvironmentVariable } from "@langchain/core/utils/env";
3
+ import { BaseChatModel, } from "@langchain/core/language_models/chat_models";
4
+ import { convertBaseMessagesToContent, convertResponseContentToChatGenerationChunk, mapGenerateContentResultToChatResult, } from "./utils.js";
5
+ /**
6
+ * A class that wraps the Google Palm chat model.
7
+ * @example
8
+ * ```typescript
9
+ * const model = new ChatGoogleGenerativeAI({
10
+ * apiKey: "<YOUR API KEY>",
11
+ * temperature: 0.7,
12
+ * modelName: "gemini-pro",
13
+ * topK: 40,
14
+ * topP: 1,
15
+ * });
16
+ * const questions = [
17
+ * new HumanMessage({
18
+ * content: [
19
+ * {
20
+ * type: "text",
21
+ * text: "You are a funny assistant that answers in pirate language.",
22
+ * },
23
+ * {
24
+ * type: "text",
25
+ * text: "What is your favorite food?",
26
+ * },
27
+ * ]
28
+ * })
29
+ * ];
30
+ * const res = await model.call(questions);
31
+ * console.log({ res });
32
+ * ```
33
+ */
34
+ export class ChatGoogleGenerativeAI extends BaseChatModel {
35
+ static lc_name() {
36
+ return "googlegenerativeai";
37
+ }
38
+ get lc_secrets() {
39
+ return {
40
+ apiKey: "GOOGLE_API_KEY",
41
+ };
42
+ }
43
+ get _isMultimodalModel() {
44
+ return this.modelName.includes("vision");
45
+ }
46
+ constructor(fields) {
47
+ super(fields ?? {});
48
+ Object.defineProperty(this, "lc_serializable", {
49
+ enumerable: true,
50
+ configurable: true,
51
+ writable: true,
52
+ value: true
53
+ });
54
+ Object.defineProperty(this, "modelName", {
55
+ enumerable: true,
56
+ configurable: true,
57
+ writable: true,
58
+ value: "gemini-pro"
59
+ });
60
+ Object.defineProperty(this, "temperature", {
61
+ enumerable: true,
62
+ configurable: true,
63
+ writable: true,
64
+ value: void 0
65
+ }); // default value chosen based on model
66
+ Object.defineProperty(this, "maxOutputTokens", {
67
+ enumerable: true,
68
+ configurable: true,
69
+ writable: true,
70
+ value: void 0
71
+ });
72
+ Object.defineProperty(this, "topP", {
73
+ enumerable: true,
74
+ configurable: true,
75
+ writable: true,
76
+ value: void 0
77
+ }); // default value chosen based on model
78
+ Object.defineProperty(this, "topK", {
79
+ enumerable: true,
80
+ configurable: true,
81
+ writable: true,
82
+ value: void 0
83
+ }); // default value chosen based on model
84
+ Object.defineProperty(this, "stopSequences", {
85
+ enumerable: true,
86
+ configurable: true,
87
+ writable: true,
88
+ value: []
89
+ });
90
+ Object.defineProperty(this, "safetySettings", {
91
+ enumerable: true,
92
+ configurable: true,
93
+ writable: true,
94
+ value: void 0
95
+ });
96
+ Object.defineProperty(this, "apiKey", {
97
+ enumerable: true,
98
+ configurable: true,
99
+ writable: true,
100
+ value: void 0
101
+ });
102
+ Object.defineProperty(this, "client", {
103
+ enumerable: true,
104
+ configurable: true,
105
+ writable: true,
106
+ value: void 0
107
+ });
108
+ this.modelName =
109
+ fields?.modelName?.replace(/^models\//, "") ?? this.modelName;
110
+ this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens;
111
+ if (this.maxOutputTokens && this.maxOutputTokens < 0) {
112
+ throw new Error("`maxOutputTokens` must be a positive integer");
113
+ }
114
+ this.temperature = fields?.temperature ?? this.temperature;
115
+ if (this.temperature && (this.temperature < 0 || this.temperature > 1)) {
116
+ throw new Error("`temperature` must be in the range of [0.0,1.0]");
117
+ }
118
+ this.topP = fields?.topP ?? this.topP;
119
+ if (this.topP && this.topP < 0) {
120
+ throw new Error("`topP` must be a positive integer");
121
+ }
122
+ if (this.topP && this.topP > 1) {
123
+ throw new Error("`topP` must be below 1.");
124
+ }
125
+ this.topK = fields?.topK ?? this.topK;
126
+ if (this.topK && this.topK < 0) {
127
+ throw new Error("`topK` must be a positive integer");
128
+ }
129
+ this.apiKey = fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
130
+ if (!this.apiKey) {
131
+ throw new Error("Please set an API key for Google GenerativeAI " +
132
+ "in the environment variable GOOGLE_API_KEY " +
133
+ "or in the `apiKey` field of the " +
134
+ "ChatGoogleGenerativeAI constructor");
135
+ }
136
+ this.safetySettings = fields?.safetySettings ?? this.safetySettings;
137
+ if (this.safetySettings && this.safetySettings.length > 0) {
138
+ const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category));
139
+ if (safetySettingsSet.size !== this.safetySettings.length) {
140
+ throw new Error("The categories in `safetySettings` array must be unique");
141
+ }
142
+ }
143
+ this.client = new GenerativeAI(this.apiKey).getGenerativeModel({
144
+ model: this.modelName,
145
+ safetySettings: this.safetySettings,
146
+ generationConfig: {
147
+ candidateCount: 1,
148
+ stopSequences: this.stopSequences,
149
+ maxOutputTokens: this.maxOutputTokens,
150
+ temperature: this.temperature,
151
+ topP: this.topP,
152
+ topK: this.topK,
153
+ },
154
+ });
155
+ }
156
+ _combineLLMOutput() {
157
+ return [];
158
+ }
159
+ _llmType() {
160
+ return "googlegenerativeai";
161
+ }
162
+ async _generate(messages, options, _runManager) {
163
+ const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel);
164
+ const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
165
+ let output;
166
+ try {
167
+ output = await this.client.generateContent({
168
+ contents: prompt,
169
+ });
170
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
171
+ }
172
+ catch (e) {
173
+ // TODO: Improve error handling
174
+ if (e.message?.includes("400 Bad Request")) {
175
+ e.status = 400;
176
+ }
177
+ throw e;
178
+ }
179
+ return output;
180
+ });
181
+ return mapGenerateContentResultToChatResult(res.response);
182
+ }
183
+ async *_streamResponseChunks(messages, options, _runManager) {
184
+ const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel);
185
+ const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
186
+ const { stream } = await this.client.generateContentStream({
187
+ contents: prompt,
188
+ });
189
+ return stream;
190
+ });
191
+ for await (const response of stream) {
192
+ const chunk = convertResponseContentToChatGenerationChunk(response);
193
+ if (!chunk) {
194
+ continue;
195
+ }
196
+ yield chunk;
197
+ }
198
+ }
199
+ }
@@ -0,0 +1,126 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.GoogleGenerativeAIEmbeddings = void 0;
4
+ const generative_ai_1 = require("@google/generative-ai");
5
+ const env_1 = require("@langchain/core/utils/env");
6
+ const embeddings_1 = require("@langchain/core/embeddings");
7
+ /**
8
+ * Class that extends the Embeddings class and provides methods for
9
+ * generating embeddings using the Google Palm API.
10
+ * @example
11
+ * ```typescript
12
+ * const model = new GoogleGenerativeAIEmbeddings({
13
+ * apiKey: "<YOUR API KEY>",
14
+ * modelName: "embedding-001",
15
+ * });
16
+ *
17
+ * // Embed a single query
18
+ * const res = await model.embedQuery(
19
+ * "What would be a good company name for a company that makes colorful socks?"
20
+ * );
21
+ * console.log({ res });
22
+ *
23
+ * // Embed multiple documents
24
+ * const documentRes = await model.embedDocuments(["Hello world", "Bye bye"]);
25
+ * console.log({ documentRes });
26
+ * ```
27
+ */
28
+ class GoogleGenerativeAIEmbeddings extends embeddings_1.Embeddings {
29
+ constructor(fields) {
30
+ super(fields ?? {});
31
+ Object.defineProperty(this, "apiKey", {
32
+ enumerable: true,
33
+ configurable: true,
34
+ writable: true,
35
+ value: void 0
36
+ });
37
+ Object.defineProperty(this, "modelName", {
38
+ enumerable: true,
39
+ configurable: true,
40
+ writable: true,
41
+ value: "embedding-001"
42
+ });
43
+ Object.defineProperty(this, "taskType", {
44
+ enumerable: true,
45
+ configurable: true,
46
+ writable: true,
47
+ value: void 0
48
+ });
49
+ Object.defineProperty(this, "title", {
50
+ enumerable: true,
51
+ configurable: true,
52
+ writable: true,
53
+ value: void 0
54
+ });
55
+ Object.defineProperty(this, "stripNewLines", {
56
+ enumerable: true,
57
+ configurable: true,
58
+ writable: true,
59
+ value: true
60
+ });
61
+ Object.defineProperty(this, "client", {
62
+ enumerable: true,
63
+ configurable: true,
64
+ writable: true,
65
+ value: void 0
66
+ });
67
+ this.modelName =
68
+ fields?.modelName?.replace(/^models\//, "") ?? this.modelName;
69
+ this.taskType = fields?.taskType ?? this.taskType;
70
+ this.title = fields?.title ?? this.title;
71
+ if (this.title && this.taskType !== "RETRIEVAL_DOCUMENT") {
72
+ throw new Error("title can only be sepcified with TaskType.RETRIEVAL_DOCUMENT");
73
+ }
74
+ this.apiKey = fields?.apiKey ?? (0, env_1.getEnvironmentVariable)("GOOGLE_API_KEY");
75
+ if (!this.apiKey) {
76
+ throw new Error("Please set an API key for Google GenerativeAI " +
77
+ "in the environmentb variable GOOGLE_API_KEY " +
78
+ "or in the `apiKey` field of the " +
79
+ "GoogleGenerativeAIEmbeddings constructor");
80
+ }
81
+ this.client = new generative_ai_1.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
82
+ model: this.modelName,
83
+ });
84
+ }
85
+ _convertToContent(text) {
86
+ const cleanedText = this.stripNewLines ? text.replace(/\n/g, " ") : text;
87
+ return {
88
+ content: { role: "user", parts: [{ text: cleanedText }] },
89
+ taskType: this.taskType,
90
+ title: this.title,
91
+ };
92
+ }
93
+ async _embedQueryContent(text) {
94
+ const req = this._convertToContent(text);
95
+ const res = await this.client.embedContent(req);
96
+ return res.embedding.values ?? [];
97
+ }
98
+ async _embedDocumentsContent(documents) {
99
+ const req = {
100
+ requests: documents.map((doc) => this._convertToContent(doc)),
101
+ };
102
+ const res = await this.client.batchEmbedContents(req);
103
+ return res.embeddings.map((e) => e.values || []) ?? [];
104
+ }
105
+ /**
106
+ * Method that takes a document as input and returns a promise that
107
+ * resolves to an embedding for the document. It calls the _embedText
108
+ * method with the document as the input.
109
+ * @param document Document for which to generate an embedding.
110
+ * @returns Promise that resolves to an embedding for the input document.
111
+ */
112
+ embedQuery(document) {
113
+ return this.caller.call(this._embedQueryContent.bind(this), document);
114
+ }
115
+ /**
116
+ * Method that takes an array of documents as input and returns a promise
117
+ * that resolves to a 2D array of embeddings for each document. It calls
118
+ * the _embedText method for each document in the array.
119
+ * @param documents Array of documents for which to generate embeddings.
120
+ * @returns Promise that resolves to a 2D array of embeddings for each input document.
121
+ */
122
+ embedDocuments(documents) {
123
+ return this.caller.call(this._embedDocumentsContent.bind(this), documents);
124
+ }
125
+ }
126
+ exports.GoogleGenerativeAIEmbeddings = GoogleGenerativeAIEmbeddings;
@@ -0,0 +1,84 @@
1
+ import type { TaskType } from "@google/generative-ai";
2
+ import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings";
3
+ /**
4
+ * Interface that extends EmbeddingsParams and defines additional
5
+ * parameters specific to the GoogleGenerativeAIEmbeddings class.
6
+ */
7
+ export interface GoogleGenerativeAIEmbeddingsParams extends EmbeddingsParams {
8
+ /**
9
+ * Model Name to use
10
+ *
11
+ * Note: The format must follow the pattern - `{model}`
12
+ */
13
+ modelName?: string;
14
+ /**
15
+ * Type of task for which the embedding will be used
16
+ *
17
+ * Note: currently only supported by `embedding-001` model
18
+ */
19
+ taskType?: TaskType;
20
+ /**
21
+ * An optional title for the text. Only applicable when TaskType is
22
+ * `RETRIEVAL_DOCUMENT`
23
+ *
24
+ * Note: currently only supported by `embedding-001` model
25
+ */
26
+ title?: string;
27
+ /**
28
+ * Whether to strip new lines from the input text. Default to true
29
+ */
30
+ stripNewLines?: boolean;
31
+ /**
32
+ * Google API key to use
33
+ */
34
+ apiKey?: string;
35
+ }
36
+ /**
37
+ * Class that extends the Embeddings class and provides methods for
38
+ * generating embeddings using the Google Palm API.
39
+ * @example
40
+ * ```typescript
41
+ * const model = new GoogleGenerativeAIEmbeddings({
42
+ * apiKey: "<YOUR API KEY>",
43
+ * modelName: "embedding-001",
44
+ * });
45
+ *
46
+ * // Embed a single query
47
+ * const res = await model.embedQuery(
48
+ * "What would be a good company name for a company that makes colorful socks?"
49
+ * );
50
+ * console.log({ res });
51
+ *
52
+ * // Embed multiple documents
53
+ * const documentRes = await model.embedDocuments(["Hello world", "Bye bye"]);
54
+ * console.log({ documentRes });
55
+ * ```
56
+ */
57
+ export declare class GoogleGenerativeAIEmbeddings extends Embeddings implements GoogleGenerativeAIEmbeddingsParams {
58
+ apiKey?: string;
59
+ modelName: string;
60
+ taskType?: TaskType;
61
+ title?: string;
62
+ stripNewLines: boolean;
63
+ private client;
64
+ constructor(fields?: GoogleGenerativeAIEmbeddingsParams);
65
+ private _convertToContent;
66
+ protected _embedQueryContent(text: string): Promise<number[]>;
67
+ protected _embedDocumentsContent(documents: string[]): Promise<number[][]>;
68
+ /**
69
+ * Method that takes a document as input and returns a promise that
70
+ * resolves to an embedding for the document. It calls the _embedText
71
+ * method with the document as the input.
72
+ * @param document Document for which to generate an embedding.
73
+ * @returns Promise that resolves to an embedding for the input document.
74
+ */
75
+ embedQuery(document: string): Promise<number[]>;
76
+ /**
77
+ * Method that takes an array of documents as input and returns a promise
78
+ * that resolves to a 2D array of embeddings for each document. It calls
79
+ * the _embedText method for each document in the array.
80
+ * @param documents Array of documents for which to generate embeddings.
81
+ * @returns Promise that resolves to a 2D array of embeddings for each input document.
82
+ */
83
+ embedDocuments(documents: string[]): Promise<number[][]>;
84
+ }
@@ -0,0 +1,122 @@
1
+ import { GoogleGenerativeAI } from "@google/generative-ai";
2
+ import { getEnvironmentVariable } from "@langchain/core/utils/env";
3
+ import { Embeddings } from "@langchain/core/embeddings";
4
+ /**
5
+ * Class that extends the Embeddings class and provides methods for
6
+ * generating embeddings using the Google Palm API.
7
+ * @example
8
+ * ```typescript
9
+ * const model = new GoogleGenerativeAIEmbeddings({
10
+ * apiKey: "<YOUR API KEY>",
11
+ * modelName: "embedding-001",
12
+ * });
13
+ *
14
+ * // Embed a single query
15
+ * const res = await model.embedQuery(
16
+ * "What would be a good company name for a company that makes colorful socks?"
17
+ * );
18
+ * console.log({ res });
19
+ *
20
+ * // Embed multiple documents
21
+ * const documentRes = await model.embedDocuments(["Hello world", "Bye bye"]);
22
+ * console.log({ documentRes });
23
+ * ```
24
+ */
25
+ export class GoogleGenerativeAIEmbeddings extends Embeddings {
26
+ constructor(fields) {
27
+ super(fields ?? {});
28
+ Object.defineProperty(this, "apiKey", {
29
+ enumerable: true,
30
+ configurable: true,
31
+ writable: true,
32
+ value: void 0
33
+ });
34
+ Object.defineProperty(this, "modelName", {
35
+ enumerable: true,
36
+ configurable: true,
37
+ writable: true,
38
+ value: "embedding-001"
39
+ });
40
+ Object.defineProperty(this, "taskType", {
41
+ enumerable: true,
42
+ configurable: true,
43
+ writable: true,
44
+ value: void 0
45
+ });
46
+ Object.defineProperty(this, "title", {
47
+ enumerable: true,
48
+ configurable: true,
49
+ writable: true,
50
+ value: void 0
51
+ });
52
+ Object.defineProperty(this, "stripNewLines", {
53
+ enumerable: true,
54
+ configurable: true,
55
+ writable: true,
56
+ value: true
57
+ });
58
+ Object.defineProperty(this, "client", {
59
+ enumerable: true,
60
+ configurable: true,
61
+ writable: true,
62
+ value: void 0
63
+ });
64
+ this.modelName =
65
+ fields?.modelName?.replace(/^models\//, "") ?? this.modelName;
66
+ this.taskType = fields?.taskType ?? this.taskType;
67
+ this.title = fields?.title ?? this.title;
68
+ if (this.title && this.taskType !== "RETRIEVAL_DOCUMENT") {
69
+ throw new Error("title can only be sepcified with TaskType.RETRIEVAL_DOCUMENT");
70
+ }
71
+ this.apiKey = fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
72
+ if (!this.apiKey) {
73
+ throw new Error("Please set an API key for Google GenerativeAI " +
74
+ "in the environmentb variable GOOGLE_API_KEY " +
75
+ "or in the `apiKey` field of the " +
76
+ "GoogleGenerativeAIEmbeddings constructor");
77
+ }
78
+ this.client = new GoogleGenerativeAI(this.apiKey).getGenerativeModel({
79
+ model: this.modelName,
80
+ });
81
+ }
82
+ _convertToContent(text) {
83
+ const cleanedText = this.stripNewLines ? text.replace(/\n/g, " ") : text;
84
+ return {
85
+ content: { role: "user", parts: [{ text: cleanedText }] },
86
+ taskType: this.taskType,
87
+ title: this.title,
88
+ };
89
+ }
90
+ async _embedQueryContent(text) {
91
+ const req = this._convertToContent(text);
92
+ const res = await this.client.embedContent(req);
93
+ return res.embedding.values ?? [];
94
+ }
95
+ async _embedDocumentsContent(documents) {
96
+ const req = {
97
+ requests: documents.map((doc) => this._convertToContent(doc)),
98
+ };
99
+ const res = await this.client.batchEmbedContents(req);
100
+ return res.embeddings.map((e) => e.values || []) ?? [];
101
+ }
102
+ /**
103
+ * Method that takes a document as input and returns a promise that
104
+ * resolves to an embedding for the document. It calls the _embedText
105
+ * method with the document as the input.
106
+ * @param document Document for which to generate an embedding.
107
+ * @returns Promise that resolves to an embedding for the input document.
108
+ */
109
+ embedQuery(document) {
110
+ return this.caller.call(this._embedQueryContent.bind(this), document);
111
+ }
112
+ /**
113
+ * Method that takes an array of documents as input and returns a promise
114
+ * that resolves to a 2D array of embeddings for each document. It calls
115
+ * the _embedText method for each document in the array.
116
+ * @param documents Array of documents for which to generate embeddings.
117
+ * @returns Promise that resolves to a 2D array of embeddings for each input document.
118
+ */
119
+ embedDocuments(documents) {
120
+ return this.caller.call(this._embedDocumentsContent.bind(this), documents);
121
+ }
122
+ }
package/dist/index.cjs ADDED
@@ -0,0 +1,18 @@
1
"use strict";
// TypeScript-emitted CommonJS interop: re-export every named binding from
// the submodules below as live getters on this module's exports object.
var __createBinding = (this && this.__createBinding) || (Object.create
    ? function (target, source, sourceKey, targetKey) {
        if (targetKey === undefined) targetKey = sourceKey;
        var descriptor = Object.getOwnPropertyDescriptor(source, sourceKey);
        // Re-wrap as a live getter unless the source provides a compatible one.
        if (!descriptor || ("get" in descriptor ? !source.__esModule : descriptor.writable || descriptor.configurable)) {
            descriptor = { enumerable: true, get: function () { return source[sourceKey]; } };
        }
        Object.defineProperty(target, targetKey, descriptor);
    }
    : function (target, source, sourceKey, targetKey) {
        // Legacy engines without Object.create: plain copy instead of a getter.
        if (targetKey === undefined) targetKey = sourceKey;
        target[targetKey] = source[sourceKey];
    });
var __exportStar = (this && this.__exportStar) || function (sourceModule, exports) {
    for (var key in sourceModule) {
        // Skip the default export and anything already exported.
        if (key !== "default" && !Object.prototype.hasOwnProperty.call(exports, key)) {
            __createBinding(exports, sourceModule, key);
        }
    }
};
Object.defineProperty(exports, "__esModule", { value: true });
__exportStar(require("./chat_models.cjs"), exports);
__exportStar(require("./embeddings.cjs"), exports);
@@ -0,0 +1,2 @@
1
// Aggregate entrypoint: re-export the chat model and embeddings modules.
export * from "./chat_models.js";
export * from "./embeddings.js";
package/dist/index.js ADDED
@@ -0,0 +1,2 @@
1
// Aggregate entrypoint: re-export the chat model and embeddings modules.
export * from "./chat_models.js";
export * from "./embeddings.js";
package/dist/utils.cjs ADDED
@@ -0,0 +1,157 @@
1
"use strict";
// CommonJS module setup: mark as an ES-module interop target and
// pre-declare the named exports before they are bound below.
Object.defineProperty(exports, "__esModule", { value: true });
exports.getMessageAuthor = void 0;
exports.convertAuthorToRole = void 0;
exports.convertMessageContentToParts = void 0;
exports.convertBaseMessagesToContent = void 0;
exports.mapGenerateContentResultToChatResult = void 0;
exports.convertResponseContentToChatGenerationChunk = void 0;
const messages_1 = require("@langchain/core/messages");
const outputs_1 = require("@langchain/core/outputs");
6
/**
 * Resolves the author of a message: an explicit ChatMessage role wins,
 * otherwise the message name, falling back to the message type.
 * @param message Message whose author should be determined.
 * @returns The author string for the message.
 */
function getMessageAuthor(message) {
    const messageType = message._getType();
    return messages_1.ChatMessage.isInstance(message)
        ? message.role
        : message.name ?? messageType;
}
exports.getMessageAuthor = getMessageAuthor;
14
+ /**
15
+ * Maps a message type to a Google Generative AI chat author.
16
+ * @param message The message to map.
17
+ * @param model The model to use for mapping.
18
+ * @returns The message type mapped to a Google Generative AI chat author.
19
+ */
20
+ function convertAuthorToRole(author) {
21
+ switch (author) {
22
+ /**
23
+ * Note: Gemini currently is not supporting system messages
24
+ * we will convert them to human messages and merge with following
25
+ * */
26
+ case "ai":
27
+ return "model";
28
+ case "system":
29
+ case "human":
30
+ return "user";
31
+ default:
32
+ throw new Error(`Unknown / unsupported author: ${author}`);
33
+ }
34
+ }
35
+ exports.convertAuthorToRole = convertAuthorToRole;
36
/**
 * Converts LangChain message content into Gemini request parts.
 * Plain strings become a single text part; structured content supports
 * "text" items and, on multimodal models, "image_url" items given as
 * base64-encoded data URLs.
 * @param content Message content (string or array of content items).
 * @param isMultimodalModel Whether image parts are permitted.
 * @returns Array of Gemini parts.
 */
function convertMessageContentToParts(content, isMultimodalModel) {
    if (typeof content === "string") {
        return [{ text: content }];
    }
    return content.map((item) => {
        if (item.type === "text") {
            return { text: item.text };
        }
        if (item.type === "image_url") {
            if (!isMultimodalModel) {
                throw new Error(`This model does not support images`);
            }
            if (typeof item.image_url !== "string") {
                throw new Error("Please provide image as base64 encoded data URL");
            }
            // Expect a data URL of the form `data:<mime>;base64,<payload>`.
            const [header, data] = item.image_url.split(",");
            if (!header.startsWith("data:")) {
                throw new Error("Please provide image as base64 encoded data URL");
            }
            const [mimeType, encoding] = header.replace(/^data:/, "").split(";");
            if (encoding !== "base64") {
                throw new Error("Please provide image as base64 encoded data URL");
            }
            return { inlineData: { data, mimeType } };
        }
        throw new Error(`Unknown content type ${item.type}`);
    });
}
exports.convertMessageContentToParts = convertMessageContentToParts;
72
/**
 * Converts LangChain BaseMessages into Gemini `Content` turns.
 * A system message is only allowed in the first position and is merged
 * into the following turn (Gemini has no system role). Consecutive
 * messages from the same role are rejected, since the API requires
 * alternating authors.
 * @param messages Messages to convert.
 * @param isMultimodalModel Whether image parts are permitted.
 * @returns Array of Content objects alternating between user and model.
 */
function convertBaseMessagesToContent(messages, isMultimodalModel) {
    return messages.reduce((acc, message, index) => {
        if (!(0, messages_1.isBaseMessage)(message)) {
            throw new Error("Unsupported message input");
        }
        const author = getMessageAuthor(message);
        if (author === "system" && index !== 0) {
            throw new Error("System message should be the first one");
        }
        const role = convertAuthorToRole(author);
        // Fix: index the last accumulated turn with `length - 1`. The previous
        // code read `acc.content[acc.content.length]`, which is always
        // undefined, so the alternation check below could never fire.
        const prevContent = acc.content[acc.content.length - 1];
        if (!acc.mergeWithPreviousContent &&
            prevContent &&
            prevContent.role === role) {
            throw new Error("Google Generative AI requires alternate messages between authors");
        }
        const parts = convertMessageContentToParts(message.content, isMultimodalModel);
        if (acc.mergeWithPreviousContent) {
            // A preceding system message: append these parts to that turn.
            const lastContent = acc.content[acc.content.length - 1];
            if (!lastContent) {
                throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
            }
            lastContent.parts.push(...parts);
            return {
                mergeWithPreviousContent: false,
                content: acc.content,
            };
        }
        const content = {
            role,
            parts,
        };
        return {
            mergeWithPreviousContent: author === "system",
            content: [...acc.content, content],
        };
    }, { content: [], mergeWithPreviousContent: false }).content;
}
exports.convertBaseMessagesToContent = convertBaseMessagesToContent;
111
/**
 * Maps a Google Generative AI response to a LangChain ChatResult.
 * When the response was blocked or produced no candidates, an empty
 * generations list is returned with the prompt feedback surfaced under
 * `llmOutput.filters`.
 * @param response Raw response from the Generative AI SDK.
 * @returns ChatResult containing at most one generation.
 */
function mapGenerateContentResultToChatResult(response) {
    // if rejected or error, return empty generations with reason in filters
    if (!response.candidates ||
        response.candidates.length === 0 ||
        !response.candidates[0]) {
        return {
            generations: [],
            llmOutput: {
                filters: response.promptFeedback,
            },
        };
    }
    const [candidate] = response.candidates;
    const { content, ...generationInfo } = candidate;
    // Fix: guard the whole access chain. The old code dereferenced
    // `content.parts[0]` before its null check, so a null/undefined
    // `content` threw instead of producing an empty message.
    const text = content?.parts?.[0]?.text ?? "";
    const generation = {
        text,
        message: new messages_1.AIMessage({
            content: text,
            name: content?.role,
            additional_kwargs: {},
        }),
        generationInfo,
    };
    return {
        generations: [generation],
    };
}
exports.mapGenerateContentResultToChatResult = mapGenerateContentResultToChatResult;
140
/**
 * Converts a streamed response chunk into a ChatGenerationChunk, or null
 * when the chunk carries no candidates.
 * @param response Streamed response chunk from the Generative AI SDK.
 * @returns A ChatGenerationChunk, or null if the chunk is empty.
 */
function convertResponseContentToChatGenerationChunk(response) {
    if (!response.candidates || response.candidates.length === 0) {
        return null;
    }
    const [candidate] = response.candidates;
    const { content, ...generationInfo } = candidate;
    // Fix: guard the whole access chain. The old code dereferenced
    // `content.parts[0]` before its null check, so a null/undefined
    // `content` threw instead of producing an empty chunk.
    const text = content?.parts?.[0]?.text ?? "";
    return new outputs_1.ChatGenerationChunk({
        text,
        message: new messages_1.AIMessageChunk({
            content: text,
            name: content?.role,
            additional_kwargs: {},
        }),
        generationInfo,
    });
}
exports.convertResponseContentToChatGenerationChunk = convertResponseContentToChatGenerationChunk;
@@ -0,0 +1,15 @@
1
+ import { EnhancedGenerateContentResponse, Content, Part } from "@google/generative-ai";
2
+ import { BaseMessage, MessageContent } from "@langchain/core/messages";
3
+ import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
4
+ export declare function getMessageAuthor(message: BaseMessage): string;
5
+ /**
6
+ * Maps a message type to a Google Generative AI chat author.
7
+ * @param message The message to map.
8
+ * @param model The model to use for mapping.
9
+ * @returns The message type mapped to a Google Generative AI chat author.
10
+ */
11
+ export declare function convertAuthorToRole(author: string): "model" | "user";
12
+ export declare function convertMessageContentToParts(content: MessageContent, isMultimodalModel: boolean): Part[];
13
+ export declare function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean): Content[];
14
+ export declare function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse): ChatResult;
15
+ export declare function convertResponseContentToChatGenerationChunk(response: EnhancedGenerateContentResponse): ChatGenerationChunk | null;
package/dist/utils.js ADDED
@@ -0,0 +1,148 @@
1
+ import { AIMessage, AIMessageChunk, ChatMessage, isBaseMessage, } from "@langchain/core/messages";
2
+ import { ChatGenerationChunk, } from "@langchain/core/outputs";
3
/**
 * Resolves the author of a message: an explicit ChatMessage role wins,
 * otherwise the message name, falling back to the message type.
 * @param message Message whose author should be determined.
 * @returns The author string for the message.
 */
export function getMessageAuthor(message) {
    const messageType = message._getType();
    return ChatMessage.isInstance(message)
        ? message.role
        : message.name ?? messageType;
}
10
+ /**
11
+ * Maps a message type to a Google Generative AI chat author.
12
+ * @param message The message to map.
13
+ * @param model The model to use for mapping.
14
+ * @returns The message type mapped to a Google Generative AI chat author.
15
+ */
16
+ export function convertAuthorToRole(author) {
17
+ switch (author) {
18
+ /**
19
+ * Note: Gemini currently is not supporting system messages
20
+ * we will convert them to human messages and merge with following
21
+ * */
22
+ case "ai":
23
+ return "model";
24
+ case "system":
25
+ case "human":
26
+ return "user";
27
+ default:
28
+ throw new Error(`Unknown / unsupported author: ${author}`);
29
+ }
30
+ }
31
/**
 * Converts LangChain message content into Gemini request parts.
 * Plain strings become a single text part; structured content supports
 * "text" items and, on multimodal models, "image_url" items given as
 * base64-encoded data URLs.
 * @param content Message content (string or array of content items).
 * @param isMultimodalModel Whether image parts are permitted.
 * @returns Array of Gemini parts.
 */
export function convertMessageContentToParts(content, isMultimodalModel) {
    if (typeof content === "string") {
        return [{ text: content }];
    }
    return content.map((item) => {
        if (item.type === "text") {
            return { text: item.text };
        }
        if (item.type === "image_url") {
            if (!isMultimodalModel) {
                throw new Error(`This model does not support images`);
            }
            if (typeof item.image_url !== "string") {
                throw new Error("Please provide image as base64 encoded data URL");
            }
            // Expect a data URL of the form `data:<mime>;base64,<payload>`.
            const [header, data] = item.image_url.split(",");
            if (!header.startsWith("data:")) {
                throw new Error("Please provide image as base64 encoded data URL");
            }
            const [mimeType, encoding] = header.replace(/^data:/, "").split(";");
            if (encoding !== "base64") {
                throw new Error("Please provide image as base64 encoded data URL");
            }
            return { inlineData: { data, mimeType } };
        }
        throw new Error(`Unknown content type ${item.type}`);
    });
}
66
/**
 * Converts LangChain BaseMessages into Gemini `Content` turns.
 * A system message is only allowed in the first position and is merged
 * into the following turn (Gemini has no system role). Consecutive
 * messages from the same role are rejected, since the API requires
 * alternating authors.
 * @param messages Messages to convert.
 * @param isMultimodalModel Whether image parts are permitted.
 * @returns Array of Content objects alternating between user and model.
 */
export function convertBaseMessagesToContent(messages, isMultimodalModel) {
    return messages.reduce((acc, message, index) => {
        if (!isBaseMessage(message)) {
            throw new Error("Unsupported message input");
        }
        const author = getMessageAuthor(message);
        if (author === "system" && index !== 0) {
            throw new Error("System message should be the first one");
        }
        const role = convertAuthorToRole(author);
        // Fix: index the last accumulated turn with `length - 1`. The previous
        // code read `acc.content[acc.content.length]`, which is always
        // undefined, so the alternation check below could never fire.
        const prevContent = acc.content[acc.content.length - 1];
        if (!acc.mergeWithPreviousContent &&
            prevContent &&
            prevContent.role === role) {
            throw new Error("Google Generative AI requires alternate messages between authors");
        }
        const parts = convertMessageContentToParts(message.content, isMultimodalModel);
        if (acc.mergeWithPreviousContent) {
            // A preceding system message: append these parts to that turn.
            const lastContent = acc.content[acc.content.length - 1];
            if (!lastContent) {
                throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
            }
            lastContent.parts.push(...parts);
            return {
                mergeWithPreviousContent: false,
                content: acc.content,
            };
        }
        const content = {
            role,
            parts,
        };
        return {
            mergeWithPreviousContent: author === "system",
            content: [...acc.content, content],
        };
    }, { content: [], mergeWithPreviousContent: false }).content;
}
104
/**
 * Maps a Google Generative AI response to a LangChain ChatResult.
 * When the response was blocked or produced no candidates, an empty
 * generations list is returned with the prompt feedback surfaced under
 * `llmOutput.filters`.
 * @param response Raw response from the Generative AI SDK.
 * @returns ChatResult containing at most one generation.
 */
export function mapGenerateContentResultToChatResult(response) {
    // if rejected or error, return empty generations with reason in filters
    if (!response.candidates ||
        response.candidates.length === 0 ||
        !response.candidates[0]) {
        return {
            generations: [],
            llmOutput: {
                filters: response.promptFeedback,
            },
        };
    }
    const [candidate] = response.candidates;
    const { content, ...generationInfo } = candidate;
    // Fix: guard the whole access chain. The old code dereferenced
    // `content.parts[0]` before its null check, so a null/undefined
    // `content` threw instead of producing an empty message.
    const text = content?.parts?.[0]?.text ?? "";
    const generation = {
        text,
        message: new AIMessage({
            content: text,
            name: content?.role,
            additional_kwargs: {},
        }),
        generationInfo,
    };
    return {
        generations: [generation],
    };
}
132
/**
 * Converts a streamed response chunk into a ChatGenerationChunk, or null
 * when the chunk carries no candidates.
 * @param response Streamed response chunk from the Generative AI SDK.
 * @returns A ChatGenerationChunk, or null if the chunk is empty.
 */
export function convertResponseContentToChatGenerationChunk(response) {
    if (!response.candidates || response.candidates.length === 0) {
        return null;
    }
    const [candidate] = response.candidates;
    const { content, ...generationInfo } = candidate;
    // Fix: guard the whole access chain. The old code dereferenced
    // `content.parts[0]` before its null check, so a null/undefined
    // `content` threw instead of producing an empty chunk.
    const text = content?.parts?.[0]?.text ?? "";
    return new ChatGenerationChunk({
        text,
        message: new AIMessageChunk({
            content: text,
            name: content?.role,
            additional_kwargs: {},
        }),
        generationInfo,
    });
}
package/index.cjs ADDED
@@ -0,0 +1 @@
1
// CommonJS entrypoint: forward everything to the compiled dist bundle.
module.exports = require('./dist/index.cjs');
package/index.d.ts ADDED
@@ -0,0 +1 @@
1
+ export * from './dist/index.js'
package/index.js ADDED
@@ -0,0 +1 @@
1
// ESM entrypoint: re-export everything from the compiled dist bundle.
export * from './dist/index.js'
package/package.json ADDED
@@ -0,0 +1,81 @@
1
+ {
2
+ "name": "@langchain/google-genai",
3
+ "version": "0.0.1",
4
+ "description": "Google Generative AI (Gemini) integration for LangChain.js",
5
+ "type": "module",
6
+ "engines": {
7
+ "node": ">=18"
8
+ },
9
+ "main": "./index.js",
10
+ "types": "./index.d.ts",
11
+ "repository": {
12
+ "type": "git",
13
+ "url": "git@github.com:langchain-ai/langchainjs.git"
14
+ },
15
+ "scripts": {
16
+ "build": "yarn run build:deps && yarn clean && yarn build:esm && yarn build:cjs && yarn build:scripts",
17
+ "build:deps": "yarn run turbo:command build --filter=@langchain/core",
18
+ "build:esm": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist/ && rm -rf dist/tests dist/**/tests",
19
+ "build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && node scripts/move-cjs-to-dist.js && rm -rf dist-cjs",
20
+ "build:watch": "node scripts/create-entrypoints.js && tsc --outDir dist/ --watch",
21
+ "build:scripts": "node scripts/create-entrypoints.js && node scripts/check-tree-shaking.js",
22
+ "lint": "NODE_OPTIONS=--max-old-space-size=4096 eslint src && dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
23
+ "lint:fix": "yarn lint --fix",
24
+ "clean": "rm -rf dist/ && NODE_OPTIONS=--max-old-space-size=4096 node scripts/create-entrypoints.js pre",
25
+ "prepack": "yarn build",
26
+ "release": "release-it --only-version --config .release-it.json",
27
+ "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
28
+ "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
29
+ "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
30
+ "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
31
+ "format": "prettier --write \"src\"",
32
+ "format:check": "prettier --check \"src\""
33
+ },
34
+ "author": "LangChain",
35
+ "license": "MIT",
36
+ "dependencies": {
37
+ "@google/generative-ai": "^0.1.0",
38
+ "@langchain/core": "~0.1.1"
39
+ },
40
+ "devDependencies": {
41
+ "@jest/globals": "^29.5.0",
42
+ "@langchain/community": "workspace:*",
43
+ "@swc/core": "^1.3.90",
44
+ "@swc/jest": "^0.2.29",
45
+ "@tsconfig/recommended": "^1.0.3",
46
+ "@typescript-eslint/eslint-plugin": "^6.12.0",
47
+ "@typescript-eslint/parser": "^6.12.0",
48
+ "dotenv": "^16.3.1",
49
+ "dpdm": "^3.12.0",
50
+ "eslint": "^8.33.0",
51
+ "eslint-config-airbnb-base": "^15.0.0",
52
+ "eslint-config-prettier": "^8.6.0",
53
+ "eslint-plugin-import": "^2.27.5",
54
+ "eslint-plugin-no-instanceof": "^1.0.1",
55
+ "eslint-plugin-prettier": "^4.2.1",
56
+ "hnswlib-node": "^1.4.2",
57
+ "jest": "^29.5.0",
58
+ "jest-environment-node": "^29.6.4",
59
+ "prettier": "^2.8.3",
60
+ "rollup": "^4.5.2",
61
+ "ts-jest": "^29.1.0",
62
+ "typescript": "<5.2.0"
63
+ },
64
+ "publishConfig": {
65
+ "access": "public"
66
+ },
67
+ "exports": {
68
+ ".": {
69
+ "types": "./index.d.ts",
70
+ "import": "./index.js",
71
+ "require": "./index.cjs"
72
+ },
73
+ "./package.json": "./package.json"
74
+ },
75
+ "files": [
76
+ "dist/",
77
+ "index.cjs",
78
+ "index.js",
79
+ "index.d.ts"
80
+ ]
81
+ }