@langchain/google-common 0.2.14 → 0.2.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/connection.d.ts +2 -2
- package/dist/embeddings.cjs +93 -9
- package/dist/embeddings.d.ts +7 -42
- package/dist/embeddings.js +93 -9
- package/dist/types.d.ts +83 -1
- package/package.json +1 -1
package/dist/connection.d.ts
CHANGED
@@ -2,7 +2,7 @@ import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/ba
 import { AsyncCaller, AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
 import { BaseRunManager } from "@langchain/core/callbacks/manager";
 import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
-import type { GoogleAIBaseLLMInput, GoogleConnectionParams, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GoogleAIModelRequestParams, GoogleRawResponse, GoogleAIAPI, VertexModelFamily, GoogleAIAPIConfig } from "./types.js";
+import type { GoogleAIBaseLLMInput, GoogleConnectionParams, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GoogleAIModelRequestParams, GoogleRawResponse, GoogleAIAPI, VertexModelFamily, GoogleAIAPIConfig, GoogleModelParams } from "./types.js";
 import { GoogleAbstractedClient, GoogleAbstractedClientOps, GoogleAbstractedClientOpsMethod } from "./auth.js";
 export declare abstract class GoogleConnection<CallOptions extends AsyncCallerCallOptions, ResponseType extends GoogleResponse> {
     caller: AsyncCaller;
@@ -64,7 +64,7 @@ export declare abstract class GoogleAIConnection<CallOptions extends AsyncCaller
     buildUrlVertexLocation(): Promise<string>;
     buildUrlVertex(): Promise<string>;
     buildUrl(): Promise<string>;
-    abstract formatData(input: InputType, parameters:
+    abstract formatData(input: InputType, parameters: GoogleModelParams): Promise<unknown>;
     request(input: InputType, parameters: GoogleAIModelRequestParams, options: CallOptions, runManager?: BaseRunManager): Promise<ResponseType>;
 }
 export declare abstract class AbstractGoogleLLMConnection<MessageType, AuthOptions> extends GoogleAIConnection<BaseLanguageModelCallOptions, MessageType, AuthOptions, GoogleLLMResponse> {
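
The signature change above makes formatData asynchronous and types its parameters argument with the new GoogleModelParams (just the model/modelName fields). As a rough, hypothetical sketch of what an implementation of that abstract method can now look like, mirroring the embeddings connection later in this diff, with a locally defined stand-in for GoogleModelParams so the snippet stays self-contained:

    // Hypothetical illustration only; not part of the package.
    // Local stand-in mirroring GoogleModelParams from types.d.ts.
    type ModelParams = { model?: string; modelName?: string };

    type ExampleInput = { content: string }[];

    class ExampleConnection {
        platform: "gcp" | "gai" = "gcp";

        // Matches the new abstract shape: async, takes GoogleModelParams-like input,
        // returns the platform-specific request body.
        async formatData(input: ExampleInput, parameters: ModelParams): Promise<unknown> {
            return this.platform === "gcp"
                ? { instances: input, parameters }
                : { content: { parts: input.map((i) => ({ text: i.content })) } };
        }
    }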
package/dist/embeddings.cjs
CHANGED
@@ -16,19 +16,63 @@ class EmbeddingsConnection extends connection_js_1.GoogleAIConnection {
             value: void 0
         });
     }
-
+    buildUrlMethodAiStudio() {
+        return "embedContent";
+    }
+    buildUrlMethodVertex() {
         return "predict";
     }
+    async buildUrlMethod() {
+        switch (this.platform) {
+            case "gcp":
+                return this.buildUrlMethodVertex();
+            case "gai":
+                return this.buildUrlMethodAiStudio();
+            default:
+                throw new Error(`Unknown platform when building method: ${this.platform}`);
+        }
+    }
     get modelPublisher() {
         // All the embedding models are currently published by "google"
         return "google";
     }
-
+    formatDataAiStudio(input, parameters) {
+        const parts = input.map((instance) => ({
+            text: instance.content,
+        }));
+        const content = {
+            parts,
+        };
+        const outputDimensionality = parameters?.outputDimensionality;
+        const ret = {
+            content,
+            outputDimensionality,
+        };
+        // Remove undefined attributes
+        let key;
+        for (key in ret) {
+            if (ret[key] === undefined) {
+                delete ret[key];
+            }
+        }
+        return ret;
+    }
+    formatDataVertex(input, parameters) {
         return {
             instances: input,
             parameters,
         };
     }
+    async formatData(input, parameters) {
+        switch (this.platform) {
+            case "gcp":
+                return this.formatDataVertex(input, parameters);
+            case "gai":
+                return this.formatDataAiStudio(input, parameters);
+            default:
+                throw new Error(`Unknown platform to format embeddings ${this.platform}`);
+        }
+    }
 }
 /**
  * Enables calls to Google APIs for generating
@@ -43,6 +87,12 @@ class BaseGoogleEmbeddings extends embeddings_1.Embeddings {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "dimensions", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "connection", {
             enumerable: true,
             configurable: true,
@@ -50,6 +100,7 @@ class BaseGoogleEmbeddings extends embeddings_1.Embeddings {
             value: void 0
         });
         this.model = fields.model;
+        this.dimensions = fields.dimensions ?? fields.outputDimensionality;
         this.connection = new EmbeddingsConnection({ ...fields, ...this }, this.caller, this.buildClient(fields), false);
     }
     buildApiKeyClient(apiKey) {
@@ -67,6 +118,37 @@ class BaseGoogleEmbeddings extends embeddings_1.Embeddings {
             return this.buildAbstractedClient(fields);
         }
     }
+    buildParameters() {
+        const ret = {
+            outputDimensionality: this.dimensions,
+        };
+        // Remove undefined attributes
+        let key;
+        for (key in ret) {
+            if (ret[key] === undefined) {
+                delete ret[key];
+            }
+        }
+        return ret;
+    }
+    vertexResponseToValues(response) {
+        const predictions = response?.data?.predictions ?? [];
+        return predictions.map((prediction) => prediction.embeddings.values);
+    }
+    aiStudioResponseToValues(response) {
+        const value = response?.data?.embedding?.values ?? [];
+        return [value];
+    }
+    responseToValues(response) {
+        switch (this.connection.platform) {
+            case "gcp":
+                return this.vertexResponseToValues(response);
+            case "gai":
+                return this.aiStudioResponseToValues(response);
+            default:
+                throw new Error(`Unknown response platform: ${this.connection.platform}`);
+        }
+    }
     /**
      * Takes an array of documents as input and returns a promise that
      * resolves to a 2D array of embeddings for each document. It splits the
@@ -76,17 +158,19 @@ class BaseGoogleEmbeddings extends embeddings_1.Embeddings {
     * @returns A promise that resolves to a 2D array of embeddings for each document.
     */
    async embedDocuments(documents) {
+        // Vertex "text-" models could do up 5 documents at once,
+        // but the "gemini-embedding-001" can only do 1.
+        // AI Studio can only do a chunk size of 1.
+        // TODO: Make this configurable
+        const chunkSize = 1;
        const instanceChunks = (0, chunk_array_1.chunkArray)(documents.map((document) => ({
            content: document,
-        })),
-        const parameters =
+        })), chunkSize);
+        const parameters = this.buildParameters();
        const options = {};
        const responses = await Promise.all(instanceChunks.map((instances) => this.connection.request(instances, parameters, options)));
-        const result = responses
-
-        // eslint-disable-next-line @typescript-eslint/no-explicit-any
-        (result) => result.embeddings?.values) ?? [])
-        .flat() ?? [];
+        const result = responses?.map((response) => this.responseToValues(response)).flat() ??
+            [];
        return result;
    }
    /**
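
The compiled CJS output above carries the substantive change: EmbeddingsConnection now picks both its URL method ("predict" for Vertex/"gcp", "embedContent" for AI Studio/"gai") and its request body per platform, and BaseGoogleEmbeddings threads a new dimensions option through buildParameters() as outputDimensionality. A small illustration of the two request bodies formatData produces for the same chunk of input, based only on the code above (the concrete values are made up):

    // Illustrative values only; shapes follow formatDataVertex / formatDataAiStudio above.
    const input = [{ content: "hello world" }];
    const parameters = { outputDimensionality: 768 }; // what buildParameters() returns when `dimensions` is set

    // platform === "gcp" (Vertex, URL method "predict")
    const vertexBody = { instances: input, parameters };
    // => { instances: [{ content: "hello world" }], parameters: { outputDimensionality: 768 } }

    // platform === "gai" (AI Studio, URL method "embedContent")
    const aiStudioBody = {
        content: { parts: input.map((i) => ({ text: i.content })) },
        outputDimensionality: parameters.outputDimensionality,
    };
    // => { content: { parts: [{ text: "hello world" }] }, outputDimensionality: 768 }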
package/dist/embeddings.d.ts
CHANGED
@@ -1,58 +1,23 @@
-import { Embeddings
-import { AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
+import { Embeddings } from "@langchain/core/embeddings";
 import { GoogleAbstractedClient } from "./auth.js";
-import { GoogleConnectionParams,
-/**
- * Defines the parameters required to initialize a
- * GoogleEmbeddings instance. It extends EmbeddingsParams and
- * GoogleConnectionParams.
- */
-export interface BaseGoogleEmbeddingsParams<AuthOptions> extends EmbeddingsParams, GoogleConnectionParams<AuthOptions> {
-    model: string;
-}
-/**
- * Defines additional options specific to the
- * GoogleEmbeddingsInstance. It extends AsyncCallerCallOptions.
- */
-export interface BaseGoogleEmbeddingsOptions extends AsyncCallerCallOptions {
-}
-/**
- * Represents an instance for generating embeddings using the Google
- * Vertex AI API. It contains the content to be embedded.
- */
-export interface GoogleEmbeddingsInstance {
-    content: string;
-}
-/**
- * Defines the structure of the embeddings results returned by the Google
- * Vertex AI API. It extends GoogleBasePrediction and contains the
- * embeddings and their statistics.
- */
-export interface GoogleEmbeddingsResponse extends GoogleResponse {
-    data: {
-        predictions: {
-            embeddings: {
-                statistics: {
-                    token_count: number;
-                    truncated: boolean;
-                };
-                values: number[];
-            };
-        }[];
-    };
-}
+import { BaseGoogleEmbeddingsParams, GoogleConnectionParams, GoogleEmbeddingsResponse, VertexEmbeddingsParameters, VertexEmbeddingsResponse, AIStudioEmbeddingsResponse } from "./types.js";
 /**
  * Enables calls to Google APIs for generating
  * text embeddings.
  */
 export declare abstract class BaseGoogleEmbeddings<AuthOptions> extends Embeddings implements BaseGoogleEmbeddingsParams<AuthOptions> {
     model: string;
+    dimensions?: number;
     private connection;
     constructor(fields: BaseGoogleEmbeddingsParams<AuthOptions>);
     abstract buildAbstractedClient(fields?: GoogleConnectionParams<AuthOptions>): GoogleAbstractedClient;
     buildApiKeyClient(apiKey: string): GoogleAbstractedClient;
     buildApiKey(fields?: GoogleConnectionParams<AuthOptions>): string | undefined;
     buildClient(fields?: GoogleConnectionParams<AuthOptions>): GoogleAbstractedClient;
+    buildParameters(): VertexEmbeddingsParameters;
+    vertexResponseToValues(response: VertexEmbeddingsResponse): number[][];
+    aiStudioResponseToValues(response: AIStudioEmbeddingsResponse): number[][];
+    responseToValues(response: GoogleEmbeddingsResponse): number[][];
     /**
      * Takes an array of documents as input and returns a promise that
      * resolves to a 2D array of embeddings for each document. It splits the
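
The public type surface gains an optional dimensions field and the per-platform response mappers, while the embeddings-specific interfaces move into types.d.ts. A hedged, type-level sketch of the constructor params under the new declarations; the object literal is hypothetical, and it assumes BaseGoogleEmbeddingsParams is re-exported from the package root like the other types in types.js:

    import type { BaseGoogleEmbeddingsParams } from "@langchain/google-common";

    // Either `dimensions` or its alias `outputDimensionality` may be supplied;
    // the constructor resolves them with `fields.dimensions ?? fields.outputDimensionality`.
    const fields: BaseGoogleEmbeddingsParams<unknown> = {
        model: "gemini-embedding-001", // model name mentioned in the embedDocuments comment
        dimensions: 768,               // illustrative value
    };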
package/dist/embeddings.js
CHANGED
@@ -13,19 +13,63 @@ class EmbeddingsConnection extends GoogleAIConnection {
             value: void 0
         });
     }
-
+    buildUrlMethodAiStudio() {
+        return "embedContent";
+    }
+    buildUrlMethodVertex() {
         return "predict";
     }
+    async buildUrlMethod() {
+        switch (this.platform) {
+            case "gcp":
+                return this.buildUrlMethodVertex();
+            case "gai":
+                return this.buildUrlMethodAiStudio();
+            default:
+                throw new Error(`Unknown platform when building method: ${this.platform}`);
+        }
+    }
     get modelPublisher() {
         // All the embedding models are currently published by "google"
         return "google";
     }
-
+    formatDataAiStudio(input, parameters) {
+        const parts = input.map((instance) => ({
+            text: instance.content,
+        }));
+        const content = {
+            parts,
+        };
+        const outputDimensionality = parameters?.outputDimensionality;
+        const ret = {
+            content,
+            outputDimensionality,
+        };
+        // Remove undefined attributes
+        let key;
+        for (key in ret) {
+            if (ret[key] === undefined) {
+                delete ret[key];
+            }
+        }
+        return ret;
+    }
+    formatDataVertex(input, parameters) {
         return {
             instances: input,
             parameters,
         };
     }
+    async formatData(input, parameters) {
+        switch (this.platform) {
+            case "gcp":
+                return this.formatDataVertex(input, parameters);
+            case "gai":
+                return this.formatDataAiStudio(input, parameters);
+            default:
+                throw new Error(`Unknown platform to format embeddings ${this.platform}`);
+        }
+    }
 }
 /**
  * Enables calls to Google APIs for generating
@@ -40,6 +84,12 @@ export class BaseGoogleEmbeddings extends Embeddings {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "dimensions", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "connection", {
             enumerable: true,
             configurable: true,
@@ -47,6 +97,7 @@ export class BaseGoogleEmbeddings extends Embeddings {
             value: void 0
         });
         this.model = fields.model;
+        this.dimensions = fields.dimensions ?? fields.outputDimensionality;
         this.connection = new EmbeddingsConnection({ ...fields, ...this }, this.caller, this.buildClient(fields), false);
     }
     buildApiKeyClient(apiKey) {
@@ -64,6 +115,37 @@ export class BaseGoogleEmbeddings extends Embeddings {
             return this.buildAbstractedClient(fields);
         }
     }
+    buildParameters() {
+        const ret = {
+            outputDimensionality: this.dimensions,
+        };
+        // Remove undefined attributes
+        let key;
+        for (key in ret) {
+            if (ret[key] === undefined) {
+                delete ret[key];
+            }
+        }
+        return ret;
+    }
+    vertexResponseToValues(response) {
+        const predictions = response?.data?.predictions ?? [];
+        return predictions.map((prediction) => prediction.embeddings.values);
+    }
+    aiStudioResponseToValues(response) {
+        const value = response?.data?.embedding?.values ?? [];
+        return [value];
+    }
+    responseToValues(response) {
+        switch (this.connection.platform) {
+            case "gcp":
+                return this.vertexResponseToValues(response);
+            case "gai":
+                return this.aiStudioResponseToValues(response);
+            default:
+                throw new Error(`Unknown response platform: ${this.connection.platform}`);
+        }
+    }
     /**
      * Takes an array of documents as input and returns a promise that
      * resolves to a 2D array of embeddings for each document. It splits the
@@ -73,17 +155,19 @@ export class BaseGoogleEmbeddings extends Embeddings {
     * @returns A promise that resolves to a 2D array of embeddings for each document.
     */
    async embedDocuments(documents) {
+        // Vertex "text-" models could do up 5 documents at once,
+        // but the "gemini-embedding-001" can only do 1.
+        // AI Studio can only do a chunk size of 1.
+        // TODO: Make this configurable
+        const chunkSize = 1;
        const instanceChunks = chunkArray(documents.map((document) => ({
            content: document,
-        })),
-        const parameters =
+        })), chunkSize);
+        const parameters = this.buildParameters();
        const options = {};
        const responses = await Promise.all(instanceChunks.map((instances) => this.connection.request(instances, parameters, options)));
-        const result = responses
-
-        // eslint-disable-next-line @typescript-eslint/no-explicit-any
-        (result) => result.embeddings?.values) ?? [])
-        .flat() ?? [];
+        const result = responses?.map((response) => this.responseToValues(response)).flat() ??
+            [];
        return result;
    }
    /**
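
The ESM build mirrors the CJS changes above line for line. For reference, here is how the two response shapes (declared in types.d.ts below) are normalized to number[][], with made-up embedding values:

    // Illustrative payloads; shapes follow VertexEmbeddingsResponse and AIStudioEmbeddingsResponse.
    const vertexResponse = {
        data: {
            predictions: [
                { embeddings: { statistics: { token_count: 2, truncated: false }, values: [0.1, 0.2] } },
            ],
        },
    };
    // vertexResponseToValues: predictions.map((p) => p.embeddings.values) => [[0.1, 0.2]]

    const aiStudioResponse = { data: { embedding: { values: [0.3, 0.4] } } };
    // aiStudioResponseToValues: [values] => [[0.3, 0.4]]

    // embedDocuments flattens one such number[][] per chunk (chunkSize = 1)
    // into the final 2D array of embeddings.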
package/dist/types.d.ts
CHANGED
@@ -2,6 +2,8 @@ import type { BaseLLMParams } from "@langchain/core/language_models/llms";
 import type { BaseChatModelCallOptions, BindToolsInput } from "@langchain/core/language_models/chat_models";
 import { BaseMessage, BaseMessageChunk, MessageContent } from "@langchain/core/messages";
 import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
+import { EmbeddingsParams } from "@langchain/core/embeddings";
+import { AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
 import type { JsonStream } from "./utils/stream.js";
 import { MediaManager } from "./experimental/utils/media_core.js";
 import { AnthropicResponseData, AnthropicAPIConfig } from "./types-anthropic.js";
@@ -144,7 +146,7 @@ export type GoogleSpeechSimplifiedLanguage = GoogleSpeechVoiceLanguage | GoogleS
  * It can either be the voice (or voices), or the voice or voices with language configuration
  */
 export type GoogleSpeechConfigSimplified = GoogleSpeechVoice | GoogleSpeechSimplifiedLanguage;
-export interface
+export interface GoogleModelParams {
     /** Model to use */
     model?: string;
     /**
@@ -152,6 +154,8 @@ export interface GoogleAIModelParams {
      * Alias for `model`
      */
     modelName?: string;
+}
+export interface GoogleAIModelParams extends GoogleModelParams {
     /** Sampling temperature to use */
     temperature?: number;
     /**
@@ -630,3 +634,81 @@ export interface GoogleAIAPIParams {
     apiName?: string;
     apiConfig?: GoogleAIAPIConfig;
 }
+/**
+ * Defines the parameters required to initialize a
+ * GoogleEmbeddings instance. It extends EmbeddingsParams and
+ * GoogleConnectionParams.
+ */
+export interface BaseGoogleEmbeddingsParams<AuthOptions> extends EmbeddingsParams, GoogleConnectionParams<AuthOptions> {
+    model: string;
+    /**
+     * Used to specify output embedding size.
+     * If set, output embeddings will be truncated to the size specified.
+     */
+    dimensions?: number;
+    /**
+     * An alias for "dimensions"
+     */
+    outputDimensionality?: number;
+}
+/**
+ * Defines additional options specific to the
+ * GoogleEmbeddingsInstance. It extends AsyncCallerCallOptions.
+ */
+export interface BaseGoogleEmbeddingsOptions extends AsyncCallerCallOptions {
+}
+export type GoogleEmbeddingsTaskType = "RETRIEVAL_QUERY" | "RETRIEVAL_DOCUMENT" | "SEMANTIC_SIMILARITY" | "CLASSIFICATION" | "CLUSTERING" | "QUESTION_ANSWERING" | "FACT_VERIFICATION" | "CODE_RETRIEVAL_QUERY" | string;
+/**
+ * Represents an instance for generating embeddings using the Google
+ * Vertex AI API. It contains the content to be embedded.
+ */
+export interface VertexEmbeddingsInstance {
+    content: string;
+    taskType?: GoogleEmbeddingsTaskType;
+    title?: string;
+}
+export interface VertexEmbeddingsParameters extends GoogleModelParams {
+    autoTruncate?: boolean;
+    outputDimensionality?: number;
+}
+export interface VertexEmbeddingsRequest {
+    instances: VertexEmbeddingsInstance[];
+    parameters?: VertexEmbeddingsParameters;
+}
+export interface AIStudioEmbeddingsRequest {
+    content: {
+        parts: GeminiPartText[];
+    };
+    model?: string;
+    taskType?: GoogleEmbeddingsTaskType;
+    title?: string;
+    outputDimensionality?: number;
+}
+export type GoogleEmbeddingsRequest = VertexEmbeddingsRequest | AIStudioEmbeddingsRequest;
+export interface VertexEmbeddingsResponsePrediction {
+    embeddings: {
+        statistics: {
+            token_count: number;
+            truncated: boolean;
+        };
+        values: number[];
+    };
+}
+/**
+ * Defines the structure of the embeddings results returned by the Google
+ * Vertex AI API. It extends GoogleBasePrediction and contains the
+ * embeddings and their statistics.
+ */
+export interface VertexEmbeddingsResponse extends GoogleResponse {
+    data: {
+        predictions: VertexEmbeddingsResponsePrediction[];
+    };
+}
+export interface AIStudioEmbeddingsResponse extends GoogleResponse {
+    data: {
+        embedding: {
+            values: number[];
+        };
+    };
+}
+export type GoogleEmbeddingsResponse = VertexEmbeddingsResponse | AIStudioEmbeddingsResponse;
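
The new request and response types formalize the two wire formats side by side. A sketch of request objects typed against them; the values are illustrative, and it assumes these interfaces are re-exported from the package root and that GeminiPartText is the existing text-part type of the form { text: string }:

    import type { VertexEmbeddingsRequest, AIStudioEmbeddingsRequest } from "@langchain/google-common";

    // Vertex: instances plus optional parameters (autoTruncate, outputDimensionality).
    const vertexRequest: VertexEmbeddingsRequest = {
        instances: [{ content: "hello world", taskType: "RETRIEVAL_DOCUMENT" }],
        parameters: { outputDimensionality: 768 },
    };

    // AI Studio: a single content with text parts, plus optional task type and dimensionality.
    const aiStudioRequest: AIStudioEmbeddingsRequest = {
        content: { parts: [{ text: "hello world" }] },
        taskType: "RETRIEVAL_DOCUMENT",
        outputDimensionality: 768,
    };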