@langchain/google-common 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +47 -0
- package/dist/auth.cjs +90 -0
- package/dist/auth.d.ts +39 -0
- package/dist/auth.js +83 -0
- package/dist/chat_models.cjs +153 -0
- package/dist/chat_models.d.ts +46 -0
- package/dist/chat_models.js +149 -0
- package/dist/connection.cjs +231 -0
- package/dist/connection.d.ts +47 -0
- package/dist/connection.js +224 -0
- package/dist/index.cjs +23 -0
- package/dist/index.d.ts +7 -0
- package/dist/index.js +7 -0
- package/dist/llms.cjs +162 -0
- package/dist/llms.d.ts +48 -0
- package/dist/llms.js +158 -0
- package/dist/types.cjs +2 -0
- package/dist/types.d.ts +146 -0
- package/dist/types.js +1 -0
- package/dist/utils/common.cjs +48 -0
- package/dist/utils/common.d.ts +6 -0
- package/dist/utils/common.js +40 -0
- package/dist/utils/gemini.cjs +239 -0
- package/dist/utils/gemini.d.ts +20 -0
- package/dist/utils/gemini.js +219 -0
- package/dist/utils/palm.cjs +2 -0
- package/dist/utils/palm.d.ts +6 -0
- package/dist/utils/palm.js +1 -0
- package/dist/utils/stream.cjs +304 -0
- package/dist/utils/stream.d.ts +70 -0
- package/dist/utils/stream.js +296 -0
- package/index.cjs +1 -0
- package/index.d.ts +1 -0
- package/index.js +1 -0
- package/package.json +83 -0
package/dist/llms.cjs
ADDED
@@ -0,0 +1,162 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GoogleBaseLLM = void 0;
+const llms_1 = require("@langchain/core/language_models/llms");
+const outputs_1 = require("@langchain/core/outputs");
+const env_1 = require("@langchain/core/utils/env");
+const connection_js_1 = require("./connection.cjs");
+const common_js_1 = require("./utils/common.cjs");
+const gemini_js_1 = require("./utils/gemini.cjs");
+const auth_js_1 = require("./auth.cjs");
+class GoogleLLMConnection extends connection_js_1.AbstractGoogleLLMConnection {
+    formatContents(input, _parameters) {
+        const parts = (0, gemini_js_1.messageContentToParts)(input);
+        const contents = [
+            {
+                role: "user",
+                parts,
+            },
+        ];
+        return contents;
+    }
+}
+/**
+ * Integration with an LLM.
+ */
+class GoogleBaseLLM extends llms_1.LLM {
+    // Used for tracing, replace with the same name as your class
+    static lc_name() {
+        return "GoogleLLM";
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "gemini-pro"
+        });
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.7
+        });
+        Object.defineProperty(this, "maxOutputTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1024
+        });
+        Object.defineProperty(this, "topP", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.8
+        });
+        Object.defineProperty(this, "topK", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 40
+        });
+        Object.defineProperty(this, "stopSequences", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        Object.defineProperty(this, "safetySettings", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        Object.defineProperty(this, "connection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "streamedConnection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        (0, common_js_1.copyAndValidateModelParamsInto)(fields, this);
+        const client = this.buildClient(fields);
+        this.buildConnection(fields ?? {}, client);
+    }
+    buildApiKeyClient(apiKey) {
+        return new auth_js_1.ApiKeyGoogleAuth(apiKey);
+    }
+    buildApiKey(fields) {
+        return fields?.apiKey ?? (0, env_1.getEnvironmentVariable)("GOOGLE_API_KEY");
+    }
+    buildClient(fields) {
+        const apiKey = this.buildApiKey(fields);
+        if (apiKey) {
+            return this.buildApiKeyClient(apiKey);
+        }
+        else {
+            return this.buildAbstractedClient(fields);
+        }
+    }
+    buildConnection(fields, client) {
+        this.connection = new GoogleLLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new GoogleLLMConnection({ ...fields, ...this }, this.caller, client, true);
+    }
+    get platform() {
+        return this.connection.platform;
+    }
+    // Replace
+    _llmType() {
+        return "googlellm";
+    }
+    formatPrompt(prompt) {
+        return prompt;
+    }
+    /**
+     * For some given input string and options, return a string output.
+     */
+    async _call(_prompt, _options, _runManager) {
+        const parameters = (0, common_js_1.copyAIModelParams)(this);
+        const result = await this.connection.request(_prompt, parameters, _options);
+        const ret = (0, gemini_js_1.responseToString)(result);
+        return ret;
+    }
+    async *_streamResponseChunks(_prompt, _options, _runManager) {
+        // Make the call as a streaming request
+        const parameters = (0, common_js_1.copyAIModelParams)(this);
+        const result = await this.streamedConnection.request(_prompt, parameters, _options);
+        // Get the streaming parser of the response
+        const stream = result.data;
+        // Loop until the end of the stream
+        // During the loop, yield each time we get a chunk from the streaming parser
+        // that is either available or added to the queue
+        while (!stream.streamDone) {
+            const output = await stream.nextChunk();
+            const chunk = output !== null
+                ? new outputs_1.GenerationChunk((0, gemini_js_1.responseToGeneration)({ data: output }))
+                : new outputs_1.GenerationChunk({
+                    text: "",
+                    generationInfo: { finishReason: "stop" },
+                });
+            yield chunk;
+        }
+    }
+    async predictMessages(messages, options, _callbacks) {
+        const { content } = messages[0];
+        const result = await this.connection.request(content, {}, options);
+        const ret = (0, gemini_js_1.responseToBaseMessage)(result);
+        return ret;
+    }
+}
+exports.GoogleBaseLLM = GoogleBaseLLM;
package/dist/llms.d.ts
ADDED
@@ -0,0 +1,48 @@
+import { CallbackManagerForLLMRun, Callbacks } from "@langchain/core/callbacks/manager";
+import { LLM } from "@langchain/core/language_models/llms";
+import { type BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
+import { BaseMessage, MessageContent } from "@langchain/core/messages";
+import { GenerationChunk } from "@langchain/core/outputs";
+import { AbstractGoogleLLMConnection } from "./connection.js";
+import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GooglePlatformType, GeminiContent } from "./types.js";
+import { GoogleAbstractedClient } from "./auth.js";
+declare class GoogleLLMConnection<AuthOptions> extends AbstractGoogleLLMConnection<MessageContent, AuthOptions> {
+    formatContents(input: MessageContent, _parameters: GoogleAIModelParams): GeminiContent[];
+}
+/**
+ * Input to LLM class.
+ */
+export interface GoogleBaseLLMInput<AuthOptions> extends GoogleAIBaseLLMInput<AuthOptions> {
+}
+/**
+ * Integration with an LLM.
+ */
+export declare abstract class GoogleBaseLLM<AuthOptions> extends LLM<BaseLanguageModelCallOptions> implements GoogleBaseLLMInput<AuthOptions> {
+    static lc_name(): string;
+    lc_serializable: boolean;
+    model: string;
+    temperature: number;
+    maxOutputTokens: number;
+    topP: number;
+    topK: number;
+    stopSequences: string[];
+    safetySettings: GoogleAISafetySetting[];
+    protected connection: GoogleLLMConnection<AuthOptions>;
+    protected streamedConnection: GoogleLLMConnection<AuthOptions>;
+    constructor(fields?: GoogleBaseLLMInput<AuthOptions>);
+    abstract buildAbstractedClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
+    buildApiKeyClient(apiKey: string): GoogleAbstractedClient;
+    buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined;
+    buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
+    buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;
+    get platform(): GooglePlatformType;
+    _llmType(): string;
+    formatPrompt(prompt: string): MessageContent;
+    /**
+     * For some given input string and options, return a string output.
+     */
+    _call(_prompt: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<string>;
+    _streamResponseChunks(_prompt: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
+    predictMessages(messages: BaseMessage[], options?: string[] | BaseLanguageModelCallOptions, _callbacks?: Callbacks): Promise<BaseMessage>;
+}
+export {};
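Not part of the published diff: the declaration file above is the entire surface a platform-specific package has to implement, and buildAbstractedClient is its only abstract member. A minimal sketch of a concrete subclass, assuming the package root re-exports the symbols shown in this diff; the class name and the API-key-only behavior are hypothetical, not the real google-gauth/google-webauth implementations:

import {
  GoogleBaseLLM,
  type GoogleBaseLLMInput,
  type GoogleAbstractedClient,
} from "@langchain/google-common";

// Hypothetical subclass; a real platform package (e.g. one backed by
// google-auth-library) would return an OAuth-capable client here.
class MyGoogleLLM extends GoogleBaseLLM<string> {
  buildAbstractedClient(
    _fields?: GoogleBaseLLMInput<string>
  ): GoogleAbstractedClient {
    // buildClient() only falls through to this method when buildApiKey()
    // found neither fields.apiKey nor GOOGLE_API_KEY, so an API-key-only
    // sketch can simply refuse.
    throw new Error("Set apiKey or GOOGLE_API_KEY; this sketch has no OAuth client.");
  }
}

// With an API key present, the base class builds an ApiKeyGoogleAuth client itself:
const model = new MyGoogleLLM({ apiKey: "<your-key>" });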
package/dist/llms.js
ADDED
@@ -0,0 +1,158 @@
+import { LLM } from "@langchain/core/language_models/llms";
+import { GenerationChunk } from "@langchain/core/outputs";
+import { getEnvironmentVariable } from "@langchain/core/utils/env";
+import { AbstractGoogleLLMConnection } from "./connection.js";
+import { copyAIModelParams, copyAndValidateModelParamsInto, } from "./utils/common.js";
+import { messageContentToParts, responseToBaseMessage, responseToGeneration, responseToString, } from "./utils/gemini.js";
+import { ApiKeyGoogleAuth } from "./auth.js";
+class GoogleLLMConnection extends AbstractGoogleLLMConnection {
+    formatContents(input, _parameters) {
+        const parts = messageContentToParts(input);
+        const contents = [
+            {
+                role: "user",
+                parts,
+            },
+        ];
+        return contents;
+    }
+}
+/**
+ * Integration with an LLM.
+ */
+export class GoogleBaseLLM extends LLM {
+    // Used for tracing, replace with the same name as your class
+    static lc_name() {
+        return "GoogleLLM";
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "gemini-pro"
+        });
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.7
+        });
+        Object.defineProperty(this, "maxOutputTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1024
+        });
+        Object.defineProperty(this, "topP", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.8
+        });
+        Object.defineProperty(this, "topK", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 40
+        });
+        Object.defineProperty(this, "stopSequences", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        Object.defineProperty(this, "safetySettings", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        Object.defineProperty(this, "connection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "streamedConnection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        copyAndValidateModelParamsInto(fields, this);
+        const client = this.buildClient(fields);
+        this.buildConnection(fields ?? {}, client);
+    }
+    buildApiKeyClient(apiKey) {
+        return new ApiKeyGoogleAuth(apiKey);
+    }
+    buildApiKey(fields) {
+        return fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
+    }
+    buildClient(fields) {
+        const apiKey = this.buildApiKey(fields);
+        if (apiKey) {
+            return this.buildApiKeyClient(apiKey);
+        }
+        else {
+            return this.buildAbstractedClient(fields);
+        }
+    }
+    buildConnection(fields, client) {
+        this.connection = new GoogleLLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new GoogleLLMConnection({ ...fields, ...this }, this.caller, client, true);
+    }
+    get platform() {
+        return this.connection.platform;
+    }
+    // Replace
+    _llmType() {
+        return "googlellm";
+    }
+    formatPrompt(prompt) {
+        return prompt;
+    }
+    /**
+     * For some given input string and options, return a string output.
+     */
+    async _call(_prompt, _options, _runManager) {
+        const parameters = copyAIModelParams(this);
+        const result = await this.connection.request(_prompt, parameters, _options);
+        const ret = responseToString(result);
+        return ret;
+    }
+    async *_streamResponseChunks(_prompt, _options, _runManager) {
+        // Make the call as a streaming request
+        const parameters = copyAIModelParams(this);
+        const result = await this.streamedConnection.request(_prompt, parameters, _options);
+        // Get the streaming parser of the response
+        const stream = result.data;
+        // Loop until the end of the stream
+        // During the loop, yield each time we get a chunk from the streaming parser
+        // that is either available or added to the queue
+        while (!stream.streamDone) {
+            const output = await stream.nextChunk();
+            const chunk = output !== null
+                ? new GenerationChunk(responseToGeneration({ data: output }))
+                : new GenerationChunk({
+                    text: "",
+                    generationInfo: { finishReason: "stop" },
+                });
+            yield chunk;
+        }
+    }
+    async predictMessages(messages, options, _callbacks) {
+        const { content } = messages[0];
+        const result = await this.connection.request(content, {}, options);
+        const ret = responseToBaseMessage(result);
+        return ret;
+    }
+}
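The generator in _streamResponseChunks above is what backs LangChain's standard LLM streaming interface, so a concrete subclass inherits .stream() for free. A consumption sketch, reusing the hypothetical MyGoogleLLM subclass from the earlier sketch:

// LLM.stream() drives _streamResponseChunks under the hood and yields
// the text of each GenerationChunk as a plain string.
const model = new MyGoogleLLM({ apiKey: "<your-key>" });
const stream = await model.stream("Tell me a short story.");
for await (const chunk of stream) {
  process.stdout.write(chunk);
}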
package/dist/types.cjs
ADDED
package/dist/types.d.ts
ADDED
@@ -0,0 +1,146 @@
+import type { BaseLLMParams } from "@langchain/core/language_models/llms";
+import type { JsonStream } from "./utils/stream.js";
+/**
+ * Parameters needed to setup the client connection.
+ * AuthOptions are something like GoogleAuthOptions (from google-auth-library)
+ * or WebGoogleAuthOptions.
+ */
+export interface GoogleClientParams<AuthOptions> {
+    authOptions?: AuthOptions;
+    /** Some APIs allow an API key instead */
+    apiKey?: string;
+}
+/**
+ * What platform is this running on?
+ * gai - Google AI Studio / MakerSuite / Generative AI platform
+ * gcp - Google Cloud Platform
+ */
+export type GooglePlatformType = "gai" | "gcp";
+export interface GoogleConnectionParams<AuthOptions> extends GoogleClientParams<AuthOptions> {
+    /** Hostname for the API call (if this is running on GCP) */
+    endpoint?: string;
+    /** Region where the LLM is stored (if this is running on GCP) */
+    location?: string;
+    /** The version of the API functions. Part of the path. */
+    apiVersion?: string;
+    /**
+     * What platform to run the service on.
+     * If not specified, the class should determine this from other
+     * means. Either way, the platform actually used will be in
+     * the "platform" getter.
+     */
+    platformType?: GooglePlatformType;
+}
+export interface GoogleAISafetySetting {
+    category: string;
+    threshold: string;
+}
+export interface GoogleAIModelParams {
+    /** Model to use */
+    model?: string;
+    /** Sampling temperature to use */
+    temperature?: number;
+    /**
+     * Maximum number of tokens to generate in the completion.
+     */
+    maxOutputTokens?: number;
+    /**
+     * Top-p changes how the model selects tokens for output.
+     *
+     * Tokens are selected from most probable to least until the sum
+     * of their probabilities equals the top-p value.
+     *
+     * For example, if tokens A, B, and C have a probability of
+     * .3, .2, and .1 and the top-p value is .5, then the model will
+     * select either A or B as the next token (using temperature).
+     */
+    topP?: number;
+    /**
+     * Top-k changes how the model selects tokens for output.
+     *
+     * A top-k of 1 means the selected token is the most probable among
+     * all tokens in the model’s vocabulary (also called greedy decoding),
+     * while a top-k of 3 means that the next token is selected from
+     * among the 3 most probable tokens (using temperature).
+     */
+    topK?: number;
+    stopSequences?: string[];
+    safetySettings?: GoogleAISafetySetting[];
+}
+export interface GoogleAIBaseLLMInput<AuthOptions> extends BaseLLMParams, GoogleConnectionParams<AuthOptions>, GoogleAIModelParams {
+}
+export interface GoogleResponse {
+    data: any;
+}
+export interface GeminiPartText {
+    text: string;
+}
+export interface GeminiPartInlineData {
+    mimeType: string;
+    data: string;
+}
+export interface GeminiPartFileData {
+    mimeType: string;
+    fileUri: string;
+}
+export interface GeminiPartFunctionCall {
+    name: string;
+    args?: object;
+}
+export interface GeminiPartFunctionResponse {
+    name: string;
+    response: object;
+}
+export type GeminiPart = GeminiPartText | GeminiPartInlineData | GeminiPartFileData | GeminiPartFunctionCall | GeminiPartFunctionResponse;
+export interface GeminiSafetySetting {
+    category: string;
+    threshold: string;
+}
+export interface GeminiSafetyRating {
+    category: string;
+    probability: string;
+}
+export type GeminiRole = "user" | "model";
+export interface GeminiContent {
+    parts: GeminiPart[];
+    role: GeminiRole;
+}
+export interface GeminiTool {
+}
+export interface GeminiGenerationConfig {
+    stopSequences?: string[];
+    candidateCount?: number;
+    maxOutputTokens?: number;
+    temperature?: number;
+    topP?: number;
+    topK?: number;
+}
+export interface GeminiRequest {
+    contents?: GeminiContent[];
+    tools?: GeminiTool[];
+    safetySettings?: GeminiSafetySetting[];
+    generationConfig?: GeminiGenerationConfig;
+}
+interface GeminiResponseCandidate {
+    content: {
+        parts: GeminiPart[];
+        role: string;
+    };
+    finishReason: string;
+    index: number;
+    tokenCount?: number;
+    safetyRatings: GeminiSafetyRating[];
+}
+interface GeminiResponsePromptFeedback {
+    safetyRatings: GeminiSafetyRating[];
+}
+export interface GenerateContentResponseData {
+    candidates: GeminiResponseCandidate[];
+    promptFeedback: GeminiResponsePromptFeedback;
+}
+export type GoogleLLMModelFamily = null | "palm" | "gemini";
+export type GoogleLLMResponseData = JsonStream | GenerateContentResponseData | GenerateContentResponseData[];
+export interface GoogleLLMResponse extends GoogleResponse {
+    data: GoogleLLMResponseData;
+}
+export {};
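The Gemini wire types above compose into the JSON body the connection layer sends to a generateContent endpoint. A hand-written request that type-checks against GeminiRequest; the values are illustrative and the import path assumes the package's dist layout:

import type { GeminiRequest } from "@langchain/google-common/dist/types";

// contents comes from the formatted prompt; safetySettings and
// generationConfig mirror the model params copied in utils/common.
const request: GeminiRequest = {
  contents: [{ role: "user", parts: [{ text: "Hello!" }] }],
  safetySettings: [
    {
      category: "HARM_CATEGORY_DANGEROUS_CONTENT",
      threshold: "BLOCK_MEDIUM_AND_ABOVE",
    },
  ],
  generationConfig: {
    temperature: 0.7,
    maxOutputTokens: 1024,
    topP: 0.8,
    topK: 40,
  },
};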
package/dist/types.js
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/utils/common.cjs
ADDED
@@ -0,0 +1,48 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.copyAndValidateModelParamsInto = exports.validateModelParams = exports.modelToFamily = exports.copyAIModelParamsInto = exports.copyAIModelParams = void 0;
+const gemini_js_1 = require("./gemini.cjs");
+function copyAIModelParams(params) {
+    return copyAIModelParamsInto(params, {});
+}
+exports.copyAIModelParams = copyAIModelParams;
+function copyAIModelParamsInto(params, target) {
+    const ret = target || {};
+    ret.model = params?.model ?? target.model;
+    ret.temperature = params?.temperature ?? target.temperature;
+    ret.maxOutputTokens = params?.maxOutputTokens ?? target.maxOutputTokens;
+    ret.topP = params?.topP ?? target.topP;
+    ret.topK = params?.topK ?? target.topK;
+    ret.stopSequences = params?.stopSequences ?? target.stopSequences;
+    ret.safetySettings = params?.safetySettings ?? target.safetySettings;
+    return ret;
+}
+exports.copyAIModelParamsInto = copyAIModelParamsInto;
+function modelToFamily(modelName) {
+    if (!modelName) {
+        return null;
+    }
+    else if ((0, gemini_js_1.isModelGemini)(modelName)) {
+        return "gemini";
+    }
+    else {
+        return null;
+    }
+}
+exports.modelToFamily = modelToFamily;
+function validateModelParams(params) {
+    const testParams = params ?? {};
+    switch (modelToFamily(testParams.model)) {
+        case "gemini":
+            return (0, gemini_js_1.validateGeminiParams)(testParams);
+        default:
+            throw new Error(`Unable to verify model params: ${JSON.stringify(params)}`);
+    }
+}
+exports.validateModelParams = validateModelParams;
+function copyAndValidateModelParamsInto(params, target) {
+    copyAIModelParamsInto(params, target);
+    validateModelParams(target);
+    return target;
+}
+exports.copyAndValidateModelParamsInto = copyAndValidateModelParamsInto;
package/dist/utils/common.d.ts
ADDED
@@ -0,0 +1,6 @@
+import type { GoogleAIModelParams, GoogleLLMModelFamily } from "../types.js";
+export declare function copyAIModelParams(params: GoogleAIModelParams | undefined): GoogleAIModelParams;
+export declare function copyAIModelParamsInto(params: GoogleAIModelParams | undefined, target: GoogleAIModelParams): GoogleAIModelParams;
+export declare function modelToFamily(modelName: string | undefined): GoogleLLMModelFamily;
+export declare function validateModelParams(params: GoogleAIModelParams | undefined): void;
+export declare function copyAndValidateModelParamsInto(params: GoogleAIModelParams | undefined, target: GoogleAIModelParams): GoogleAIModelParams;
package/dist/utils/common.js
ADDED
@@ -0,0 +1,40 @@
+import { isModelGemini, validateGeminiParams } from "./gemini.js";
+export function copyAIModelParams(params) {
+    return copyAIModelParamsInto(params, {});
+}
+export function copyAIModelParamsInto(params, target) {
+    const ret = target || {};
+    ret.model = params?.model ?? target.model;
+    ret.temperature = params?.temperature ?? target.temperature;
+    ret.maxOutputTokens = params?.maxOutputTokens ?? target.maxOutputTokens;
+    ret.topP = params?.topP ?? target.topP;
+    ret.topK = params?.topK ?? target.topK;
+    ret.stopSequences = params?.stopSequences ?? target.stopSequences;
+    ret.safetySettings = params?.safetySettings ?? target.safetySettings;
+    return ret;
+}
+export function modelToFamily(modelName) {
+    if (!modelName) {
+        return null;
+    }
+    else if (isModelGemini(modelName)) {
+        return "gemini";
+    }
+    else {
+        return null;
+    }
+}
+export function validateModelParams(params) {
+    const testParams = params ?? {};
+    switch (modelToFamily(testParams.model)) {
+        case "gemini":
+            return validateGeminiParams(testParams);
+        default:
+            throw new Error(`Unable to verify model params: ${JSON.stringify(params)}`);
+    }
+}
+export function copyAndValidateModelParamsInto(params, target) {
+    copyAIModelParamsInto(params, target);
+    validateModelParams(target);
+    return target;
+}
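A short sketch of how these helpers combine; the import path assumes the dist layout. copyAndValidateModelParamsInto overlays caller params onto a target, then validateModelParams routes by model family and throws for any model it cannot recognize as Gemini:

import {
  copyAndValidateModelParamsInto,
  modelToFamily,
} from "@langchain/google-common/dist/utils/common";

// Family detection: this version only recognizes Gemini models.
modelToFamily("gemini-pro"); // "gemini"
modelToFamily("unknown-model"); // null

// Overlay caller params onto defaults, then validate in place.
// GoogleBaseLLM's constructor does exactly this with `this` as the target.
const target = { model: "gemini-pro", temperature: 0.7, topK: 40 };
copyAndValidateModelParamsInto({ temperature: 0.2 }, target);
// target is now { model: "gemini-pro", temperature: 0.2, topK: 40 }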