@maxim_mazurok/gapi.client.aiplatform-v1 0.0.20240507 → 0.0.20240520
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +83 -933
- package/package.json +1 -1
package/index.d.ts
CHANGED
@@ -9,7 +9,7 @@
 // This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
 // In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
 // Generated from: https://aiplatform.googleapis.com/$discovery/rest?version=v1
-// Revision:
+// Revision: 20240520

 /// <reference types="gapi.client" />

@@ -24,14 +24,10 @@ declare namespace gapi.client {
 function load(name: 'aiplatform', version: 'v1', callback: () => any): void;

 namespace aiplatform {
-interface CloudAiLargeModelsVisionEmbedVideoResponse {
-/** The embedding vector for the video. */
-videoEmbeddings?: any[];
-}
 interface CloudAiLargeModelsVisionFilteredText {
-/** Confidence level */
-category?: string;
 /** Filtered category */
+category?: string;
+/** Confidence score */
 confidence?: string;
 /** Input prompt */
 prompt?: string;
@@ -76,10 +72,6 @@ declare namespace gapi.client {
 /** Video */
 video?: CloudAiLargeModelsVisionVideo;
 }
-interface CloudAiLargeModelsVisionMediaGenerateContentResponse {
-/** Response to the user's request. */
-response?: CloudAiNlLlmProtoServiceGenerateMultiModalResponse;
-}
 interface CloudAiLargeModelsVisionNamedBoundingBox {
 classes?: string[];
 entities?: string[];
@@ -90,26 +82,19 @@ declare namespace gapi.client {
 y2?: number;
 }
 interface CloudAiLargeModelsVisionRaiInfo {
+detectedLabels?: CloudAiLargeModelsVisionRaiInfoDetectedLabels[];
 /** List of rai categories' information to return */
 raiCategories?: string[];
 /** List of rai scores mapping to the rai categories. Rounded to 1 decimal place. */
 scores?: number[];
 }
-interface
-/**
-
-
-
-/**
-
-/** Text information */
-text?: string;
-}
-interface CloudAiLargeModelsVisionRelativeTemporalPartition {
-/** End time offset of the partition. */
-endOffset?: string;
-/** Start time offset of the partition. */
-startOffset?: string;
+interface CloudAiLargeModelsVisionRaiInfoDetectedLabels {
+/** Descriptions of the detected labels. */
+descriptions?: string[];
+/** The RAI category for the deteceted labels. */
+raiCategory?: string;
+/** Confidence scores mapping to the labels. */
+scores?: number[];
 }
 interface CloudAiLargeModelsVisionSemanticFilterResponse {
 /** Class labels of the bounding boxes that failed the semantic filtering. Bounding box coordinates. */
@@ -123,229 +108,6 @@ declare namespace gapi.client {
 /** Raw bytes. */
 video?: string;
 }
-interface CloudAiNlLlmProtoServiceCandidate {
-/** Source attribution of the generated content. */
-citationMetadata?: CloudAiNlLlmProtoServiceCitationMetadata;
-/** Content of the candidate. */
-content?: CloudAiNlLlmProtoServiceContent;
-/** A string that describes the filtering behavior in more detail. Only filled when reason is set. */
-finishMessage?: string;
-/** The reason why the model stopped generating tokens. */
-finishReason?: string;
-/** Grounding metadata. Combine with the facts list from response to generate grounding citations for this choice. */
-groundingMetadata?: LearningGenaiRootGroundingMetadata;
-/** Index of the candidate. */
-index?: number;
-/** Safety ratings of the generated content. */
-safetyRatings?: CloudAiNlLlmProtoServiceSafetyRating[];
-}
-interface CloudAiNlLlmProtoServiceCitation {
-/** End index into the content. */
-endIndex?: number;
-/** License of the attribution. */
-license?: string;
-/** Publication date of the attribution. */
-publicationDate?: GoogleTypeDate;
-/** Start index into the content. */
-startIndex?: number;
-/** Title of the attribution. */
-title?: string;
-/** Url reference of the attribution. */
-uri?: string;
-}
-interface CloudAiNlLlmProtoServiceCitationMetadata {
-/** List of citations. */
-citations?: CloudAiNlLlmProtoServiceCitation[];
-}
-interface CloudAiNlLlmProtoServiceContent {
-/** If true, the content is from a cached content. */
-isCached?: boolean;
-/** The parts of the message. */
-parts?: CloudAiNlLlmProtoServicePart[];
-/** The role of the current conversation participant. */
-role?: string;
-}
-interface CloudAiNlLlmProtoServiceFact {
-/** Query that is used to retrieve this fact. */
-query?: string;
-/** If present, the summary/snippet of the fact. */
-summary?: string;
-/** If present, it refers to the title of this fact. */
-title?: string;
-/** If present, this URL links to the webpage of the fact. */
-url?: string;
-}
-interface CloudAiNlLlmProtoServiceFunctionCall {
-/** The function parameters and values in JSON format. */
-args?: {[P in string]: any};
-/** Required. The name of the function to call. */
-name?: string;
-}
-interface CloudAiNlLlmProtoServiceFunctionResponse {
-/** Required. The name of the function to call. */
-name?: string;
-/** Required. The function response in JSON object format. */
-response?: {[P in string]: any};
-}
-interface CloudAiNlLlmProtoServiceGenerateMultiModalResponse {
-/** Possible candidate responses to the conversation up until this point. */
-candidates?: CloudAiNlLlmProtoServiceCandidate[];
-/** Debug information containing message metadata. Clients should not consume this field, and this is only populated for Flow Runner path. */
-debugMetadata?: CloudAiNlLlmProtoServiceMessageMetadata;
-/** External facts retrieved for factuality/grounding. */
-facts?: CloudAiNlLlmProtoServiceFact[];
-/** Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. */
-promptFeedback?: CloudAiNlLlmProtoServicePromptFeedback;
-/** Billable prediction metrics. */
-reportingMetrics?: IntelligenceCloudAutomlXpsReportingMetrics;
-/** Usage metadata about the response(s). */
-usageMetadata?: CloudAiNlLlmProtoServiceUsageMetadata;
-}
-interface CloudAiNlLlmProtoServiceMessageMetadata {
-/** Factuality-related debug metadata. */
-factualityDebugMetadata?: LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata;
-/** Filter metadata of the input messages. */
-inputFilterInfo?: LearningServingLlmMessageMetadata;
-/** This score is generated by the router model to decide which model to use */
-modelRoutingDecision?: LearningGenaiRootRoutingDecision;
-/** Filter metadata of the output messages. */
-outputFilterInfo?: LearningServingLlmMessageMetadata[];
-}
-interface CloudAiNlLlmProtoServicePart {
-/** Document metadata. The metadata should only be used by the Cloud LLM when supporting document mime types. It will only be populated when this image input part is converted from a document input part. */
-documentMetadata?: CloudAiNlLlmProtoServicePartDocumentMetadata;
-/** URI-based data. */
-fileData?: CloudAiNlLlmProtoServicePartFileData;
-/** Function call data. */
-functionCall?: CloudAiNlLlmProtoServiceFunctionCall;
-/** Function response data. */
-functionResponse?: CloudAiNlLlmProtoServiceFunctionResponse;
-/** Inline bytes data */
-inlineData?: CloudAiNlLlmProtoServicePartBlob;
-/** Metadata provides extra info for building the LM Root request. Note: High enough tag number for internal only fields. */
-lmRootMetadata?: CloudAiNlLlmProtoServicePartLMRootMetadata;
-/** Text input. */
-text?: string;
-/** Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. */
-videoMetadata?: CloudAiNlLlmProtoServicePartVideoMetadata;
-}
-interface CloudAiNlLlmProtoServicePartBlob {
-/** Inline data. */
-data?: string;
-/** The mime type corresponding to this input. */
-mimeType?: string;
-/** Original file data where the blob comes from. */
-originalFileData?: CloudAiNlLlmProtoServicePartFileData;
-}
-interface CloudAiNlLlmProtoServicePartDocumentMetadata {
-/** The original document blob. */
-originalDocumentBlob?: CloudAiNlLlmProtoServicePartBlob;
-/** The (1-indexed) page number of the image in the original document. The first page carries the original document content and mime type. */
-pageNumber?: number;
-}
-interface CloudAiNlLlmProtoServicePartFileData {
-/** Inline data. */
-fileUri?: string;
-/** The mime type corresponding to this input. */
-mimeType?: string;
-}
-interface CloudAiNlLlmProtoServicePartLMRootMetadata {
-/** Chunk id that will be used when mapping the part to the LM Root's chunk. */
-chunkId?: string;
-}
-interface CloudAiNlLlmProtoServicePartVideoMetadata {
-/** The end offset of the video. */
-endOffset?: string;
-/** The start offset of the video. */
-startOffset?: string;
-}
-interface CloudAiNlLlmProtoServicePromptFeedback {
-/** Blocked reason. */
-blockReason?: string;
-/** A readable block reason message. */
-blockReasonMessage?: string;
-/** Safety ratings. */
-safetyRatings?: CloudAiNlLlmProtoServiceSafetyRating[];
-}
-interface CloudAiNlLlmProtoServiceRaiResult {
-/** Recitation result from Aida recitation checker. */
-aidaRecitationResult?: LanguageLabsAidaTrustRecitationProtoRecitationResult;
-/** Use `triggered_blocklist`. */
-blocked?: boolean;
-/** The error codes indicate which RAI filters block the response. */
-errorCodes?: number[];
-/** Whether the text should be filtered and not shown to the end user. This is determined based on a combination of `triggered_recitation`, `triggered_blocklist`, `language_filter_result`, and `triggered_safety_filter`. */
-filtered?: boolean;
-/** Language filter result from SAFT LangId. */
-languageFilterResult?: LearningGenaiRootLanguageFilterResult;
-/** Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. */
-mmRecitationResult?: LearningGenaiRecitationMMRecitationCheckResult;
-/** The RAI signals for the text. */
-raiSignals?: CloudAiNlLlmProtoServiceRaiSignal[];
-/** Translation request info during RAI for debugging purpose. Each TranslationRequestInfo corresponds to a request sent to the translation server. */
-translationRequestInfos?: LearningGenaiRootTranslationRequestInfo[];
-/** Whether the text triggered the blocklist. */
-triggeredBlocklist?: boolean;
-/** Whether the text should be blocked by the recitation result from Aida recitation checker. It is determined from aida_recitation_result. */
-triggeredRecitation?: boolean;
-/** Whether the text triggered the safety filter. Currently, this is due to CSAI triggering or one of four categories (derogatory, sexual, toxic, violent) having a score over the filter threshold. */
-triggeredSafetyFilter?: boolean;
-}
-interface CloudAiNlLlmProtoServiceRaiSignal {
-/** The confidence level for the RAI category. */
-confidence?: string;
-/** Whether the category is flagged as being present. Currently, this is set to true if score >= 0.5. */
-flagged?: boolean;
-/** The influential terms that could potentially block the response. */
-influentialTerms?: CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm[];
-/** The RAI category. */
-raiCategory?: string;
-/** The score for the category, in the range [0.0, 1.0]. */
-score?: number;
-}
-interface CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm {
-/** The beginning offset of the influential term. */
-beginOffset?: number;
-/** The confidence score of the influential term. */
-confidence?: number;
-/** The source of the influential term, prompt or response. */
-source?: string;
-/** The influential term. */
-term?: string;
-}
-interface CloudAiNlLlmProtoServiceSafetyRating {
-/** Indicates whether the content was filtered out because of this rating. */
-blocked?: boolean;
-/** Harm category. */
-category?: string;
-/** The influential terms that could potentially block the response. */
-influentialTerms?: CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm[];
-/** Harm probability levels in the content. */
-probability?: string;
-/** Harm probability score. */
-probabilityScore?: number;
-/** Harm severity levels in the content. */
-severity?: string;
-/** Harm severity score. */
-severityScore?: number;
-}
-interface CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm {
-/** The beginning offset of the influential term. */
-beginOffset?: number;
-/** The confidence score of the influential term. */
-confidence?: number;
-/** The source of the influential term, prompt or response. */
-source?: string;
-/** The influential term. */
-term?: string;
-}
-interface CloudAiNlLlmProtoServiceUsageMetadata {
-/** Number of tokens in the response(s). */
-candidatesTokenCount?: number;
-/** Number of tokens in the request. */
-promptTokenCount?: number;
-totalTokenCount?: number;
-}
 interface GoogleApiHttpBody {
 /** The HTTP Content-Type header value specifying the content type of the body. */
 contentType?: string;
@@ -1322,8 +1084,14 @@ declare namespace gapi.client {
 createTime?: string;
 /** Required. The underlying DedicatedResources that the DeploymentResourcePool uses. */
 dedicatedResources?: GoogleCloudAiplatformV1DedicatedResources;
+/** If the DeploymentResourcePool is deployed with custom-trained Models or AutoML Tabular Models, the container(s) of the DeploymentResourcePool will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. */
+disableContainerLogging?: boolean;
+/** Customer-managed encryption key spec for a DeploymentResourcePool. If set, this DeploymentResourcePool will be secured by this key. Endpoints and the DeploymentResourcePool they deploy in need to have the same EncryptionSpec. */
+encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec;
 /** Immutable. The resource name of the DeploymentResourcePool. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */
 name?: string;
+/** The service account that the DeploymentResourcePool's container(s) run as. Specify the email address of the service account. If this service account is not specified, the container(s) run as a service account that doesn't have access to the resource project. Users deploying the Models to this DeploymentResourcePool must have the `iam.serviceAccounts.actAs` permission on this service account. */
+serviceAccount?: string;
 }
 interface GoogleCloudAiplatformV1DeployModelOperationMetadata {
 /** The operation generic information. */
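Illustrative sketch (not part of the package): how the new DeploymentResourcePool fields in this hunk might be populated. The enclosing interface's opening line is outside the hunk, so the name GoogleCloudAiplatformV1DeploymentResourcePool, the DedicatedResources sub-fields, and every value below are assumptions for the example.

const pool: gapi.client.aiplatform.GoogleCloudAiplatformV1DeploymentResourcePool = {
  dedicatedResources: {
    machineSpec: { machineType: 'n1-standard-4' },
    minReplicaCount: 1,
  },
  disableContainerLogging: true, // new: opt out of stderr/stdout Cloud Logging
  encryptionSpec: { kmsKeyName: 'projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key' }, // new: CMEK
  serviceAccount: 'pool-runtime@my-project.iam.gserviceaccount.com', // new: identity the containers run as
};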
@@ -2007,6 +1775,8 @@ declare namespace gapi.client {
 stringArrayValue?: GoogleCloudAiplatformV1StringArray;
 /** String feature value. */
 stringValue?: string;
+/** A struct type feature value. */
+structValue?: GoogleCloudAiplatformV1StructValue;
 }
 interface GoogleCloudAiplatformV1FeatureValueDestination {
 /** Output in BigQuery format. BigQueryDestination.output_uri in FeatureValueDestination.bigquery_destination must refer to a table. */
@@ -2172,6 +1942,12 @@ declare namespace gapi.client {
 neighborCount?: number;
 /** Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag. */
 perCrowdingAttributeNeighborCount?: number;
+/** Optional. Represents RRF algorithm that combines search results. */
+rrf?: GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF;
+}
+interface GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF {
+/** Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense. */
+alpha?: number;
 }
 interface GoogleCloudAiplatformV1FindNeighborsResponse {
 /** The nearest neighbors of the query datapoints. */
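Illustrative sketch (not part of the package): a FindNeighbors query that blends dense and sparse matches via the new rrf option. The enclosing query interface is not named in this hunk, so GoogleCloudAiplatformV1FindNeighborsRequestQuery, its datapoint field, and the values below are assumptions.

const query: gapi.client.aiplatform.GoogleCloudAiplatformV1FindNeighborsRequestQuery = {
  datapoint: { datapointId: 'q-1', featureVector: [0.12, 0.34, 0.56] }, // assumed query datapoint
  neighborCount: 10,
  rrf: { alpha: 0.5 }, // 0 = sparse results only, 1 = dense results only
};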
@@ -2188,6 +1964,8 @@ declare namespace gapi.client {
 datapoint?: GoogleCloudAiplatformV1IndexDatapoint;
 /** The distance between the neighbor and the dense embedding query. */
 distance?: number;
+/** The distance between the neighbor and the query sparse_embedding. */
+sparseDistance?: number;
 }
 interface GoogleCloudAiplatformV1FractionSplit {
 /** The fraction of the input data that is to be used to evaluate the Model. */
@@ -2271,8 +2049,6 @@ declare namespace gapi.client {
 presencePenalty?: number;
 /** Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. */
 responseMimeType?: string;
-/** Optional. Control Three levels of creativity in the model output. Default: RESPONSE_STYLE_BALANCED */
-responseStyle?: string;
 /** Optional. Stop sequences. */
 stopSequences?: string[];
 /** Optional. Controls the randomness of predictions. */
@@ -2294,6 +2070,7 @@ declare namespace gapi.client {
 /** Required. The public base model URI. */
 baseModelUri?: string;
 }
+interface GoogleCloudAiplatformV1GoogleSearchRetrieval {}
 interface GoogleCloudAiplatformV1GroundingMetadata {
 /** Optional. Google search entry for the following-up web searches. */
 searchEntryPoint?: GoogleCloudAiplatformV1SearchEntryPoint;
@@ -2451,6 +2228,8 @@ declare namespace gapi.client {
 numericRestricts?: GoogleCloudAiplatformV1IndexDatapointNumericRestriction[];
 /** Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses categorical tokens. See: https://cloud.google.com/vertex-ai/docs/matching-engine/filtering */
 restricts?: GoogleCloudAiplatformV1IndexDatapointRestriction[];
+/** Optional. Feature embedding vector for sparse index. */
+sparseEmbedding?: GoogleCloudAiplatformV1IndexDatapointSparseEmbedding;
 }
 interface GoogleCloudAiplatformV1IndexDatapointCrowdingTag {
 /** The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. */
@@ -2476,6 +2255,12 @@ declare namespace gapi.client {
 /** The namespace of this restriction. e.g.: color. */
 namespace?: string;
 }
+interface GoogleCloudAiplatformV1IndexDatapointSparseEmbedding {
+/** Required. The list of indexes for the embedding values of the sparse vector. */
+dimensions?: string[];
+/** Required. The list of embedding values of the sparse vector. */
+values?: number[];
+}
 interface GoogleCloudAiplatformV1IndexEndpoint {
 /** Output only. Timestamp when this IndexEndpoint was created. */
 createTime?: string;
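Illustrative sketch (not part of the package): an IndexDatapoint carrying both a dense featureVector and the new sparseEmbedding, as a hybrid index would expect. The datapointId/featureVector fields come from the unchanged part of GoogleCloudAiplatformV1IndexDatapoint; all values are made up.

const datapoint: gapi.client.aiplatform.GoogleCloudAiplatformV1IndexDatapoint = {
  datapointId: 'doc-42',
  featureVector: [0.12, 0.53, 0.98], // dense part
  sparseEmbedding: {
    dimensions: ['7', '1024', '40960'], // sparse dimension indexes, typed as strings
    values: [0.4, 0.1, 0.25],           // one value per dimension index
  },
};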
@@ -2517,6 +2302,8 @@ declare namespace gapi.client {
 interface GoogleCloudAiplatformV1IndexStats {
 /** Output only. The number of shards in the Index. */
 shardsCount?: number;
+/** Output only. The number of sparse vectors in the Index. */
+sparseVectorsCount?: string;
 /** Output only. The number of dense vectors in the Index. */
 vectorsCount?: string;
 }
@@ -2903,6 +2690,8 @@ declare namespace gapi.client {
 interface GoogleCloudAiplatformV1MetadataStore {
 /** Output only. Timestamp when this MetadataStore was created. */
 createTime?: string;
+/** Optional. Dataplex integration settings. */
+dataplexConfig?: GoogleCloudAiplatformV1MetadataStoreDataplexConfig;
 /** Description of the MetadataStore. */
 description?: string;
 /** Customer-managed encryption key spec for a Metadata Store. If set, this Metadata Store and all sub-resources of this Metadata Store are secured using this key. */
@@ -2914,6 +2703,10 @@ declare namespace gapi.client {
 /** Output only. Timestamp when this MetadataStore was last updated. */
 updateTime?: string;
 }
+interface GoogleCloudAiplatformV1MetadataStoreDataplexConfig {
+/** Optional. Whether or not Data Lineage synchronization is enabled for Vertex Pipelines. */
+enabledPipelinesLineage?: boolean;
+}
 interface GoogleCloudAiplatformV1MetadataStoreMetadataStoreState {
 /** The disk utilization of the MetadataStore in bytes. */
 diskUtilizationBytes?: string;
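Illustrative sketch (not part of the package): opting a MetadataStore into the new Dataplex lineage synchronization. Only the new field is shown; the store's other fields and the create call are omitted.

const store: gapi.client.aiplatform.GoogleCloudAiplatformV1MetadataStore = {
  dataplexConfig: { enabledPipelinesLineage: true }, // new in this revision
};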
@@ -3628,14 +3421,6 @@ declare namespace gapi.client {
 /** Required. Duration is accurate to the second. In Notebook, Idle Timeout is accurate to minute so the range of idle_timeout (second) is: 10 * 60 ~ 1440 * 60. */
 idleTimeout?: string;
 }
-interface GoogleCloudAiplatformV1NotebookReservationAffinity {
-/** Required. Specifies the type of reservation from which this instance can consume resources: RESERVATION_ANY (default), RESERVATION_SPECIFIC, or RESERVATION_NONE. See Consuming reserved instances for examples. */
-consumeReservationType?: string;
-/** Optional. Corresponds to the label key of a reservation resource. To target a RESERVATION_SPECIFIC by name, use compute.googleapis.com/reservation-name as the key and specify the name of your reservation as its value. */
-key?: string;
-/** Optional. Corresponds to the label values of a reservation resource. This must be the full path name of Reservation. */
-values?: string[];
-}
 interface GoogleCloudAiplatformV1NotebookRuntime {
 /** Output only. Timestamp when this NotebookRuntime was created. */
 createTime?: string;
@@ -3647,6 +3432,8 @@ declare namespace gapi.client {
 expirationTime?: string;
 /** Output only. The health state of the NotebookRuntime. */
 healthState?: string;
+/** Output only. The idle shutdown configuration of the notebook runtime. */
+idleShutdownConfig?: GoogleCloudAiplatformV1NotebookIdleShutdownConfig;
 /** Output only. Whether NotebookRuntime is upgradable. */
 isUpgradable?: boolean;
 /** The labels with user-defined metadata to organize your NotebookRuntime. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one NotebookRuntime (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for NotebookRuntime: * "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": output only, its value is the Compute Engine instance id. * "aiplatform.googleapis.com/colab_enterprise_entry_service": its value is either "bigquery" or "vertex"; if absent, it should be "vertex". This is to describe the entry service, either BigQuery or Vertex. */
@@ -3661,8 +3448,6 @@ declare namespace gapi.client {
 notebookRuntimeType?: string;
 /** Output only. The proxy endpoint used to access the NotebookRuntime. */
 proxyUri?: string;
-/** Output only. Reservation Affinity of the notebook runtime. */
-reservationAffinity?: GoogleCloudAiplatformV1NotebookReservationAffinity;
 /** Output only. The runtime (instance) state of the NotebookRuntime. */
 runtimeState?: string;
 /** Required. The user email of the NotebookRuntime. */
@@ -3687,6 +3472,8 @@ declare namespace gapi.client {
 description?: string;
 /** Required. The display name of the NotebookRuntimeTemplate. The name can be up to 128 characters long and can consist of any UTF-8 characters. */
 displayName?: string;
+/** Customer-managed encryption key spec for the notebook runtime. */
+encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec;
 /** Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */
 etag?: string;
 /** EUC configuration of the NotebookRuntimeTemplate. */
@@ -3707,8 +3494,6 @@ declare namespace gapi.client {
 networkTags?: string[];
 /** Optional. Immutable. The type of the notebook runtime template. */
 notebookRuntimeType?: string;
-/** Optional. Reservation Affinity of the notebook runtime template. */
-reservationAffinity?: GoogleCloudAiplatformV1NotebookReservationAffinity;
 /** The service account that the runtime workload runs as. You can use any service account within the same project, but you must have the service account user permission to use the instance. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used. */
 serviceAccount?: string;
 /** Optional. Immutable. Runtime Shielded VM spec. */
@@ -3762,7 +3547,7 @@ declare namespace gapi.client {
 /** Required. The spec of the pools of different resources. */
 resourcePools?: GoogleCloudAiplatformV1ResourcePool[];
 /** Output only. Runtime information of the Persistent Resource. */
-resourceRuntime?:
+resourceRuntime?: GoogleCloudAiplatformV1ResourceRuntime;
 /** Optional. Persistent Resource runtime spec. For example, used for Ray cluster configuration. */
 resourceRuntimeSpec?: GoogleCloudAiplatformV1ResourceRuntimeSpec;
 /** Output only. Time when the PersistentResource for the first time entered the `RUNNING` state. */
@@ -3975,7 +3760,7 @@ declare namespace gapi.client {
 projectAllowlist?: string[];
 }
 interface GoogleCloudAiplatformV1Probe {
-/**
+/** ExecAction probes the health of a container by executing a command. */
 exec?: GoogleCloudAiplatformV1ProbeExecAction;
 /** How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Must be less than timeout_seconds. Maps to Kubernetes probe argument 'periodSeconds'. */
 periodSeconds?: number;
@@ -4190,7 +3975,20 @@ declare namespace gapi.client {
 /** The prediction input. Supports HTTP headers and arbitrary data payload. A DeployedModel may have an upper limit on the number of instances it supports per request. When this limit it is exceeded for an AutoML model, the RawPredict method returns an error. When this limit is exceeded for a custom-trained model, the behavior varies depending on the model. You can specify the schema for each instance in the predict_schemata.instance_schema_uri field when you create a Model. This schema applies when you deploy the `Model` as a `DeployedModel` to an Endpoint and use the `RawPredict` method. */
 httpBody?: GoogleApiHttpBody;
 }
-interface
+interface GoogleCloudAiplatformV1RayMetricSpec {
+/** Optional. Flag to disable the Ray metrics collection. */
+disabled?: boolean;
+}
+interface GoogleCloudAiplatformV1RaySpec {
+/** Optional. This will be used to indicate which resource pool will serve as the Ray head node(the first node within that pool). Will use the machine from the first workerpool as the head node by default if this field isn't set. */
+headNodeResourcePoolId?: string;
+/** Optional. Default image for user to choose a preferred ML framework (for example, TensorFlow or Pytorch) by choosing from [Vertex prebuilt images](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). Either this or the resource_pool_images is required. Use this field if you need all the resource pools to have the same Ray image. Otherwise, use the {@code resource_pool_images} field. */
+imageUri?: string;
+/** Optional. Ray metrics configurations. */
+rayMetricSpec?: GoogleCloudAiplatformV1RayMetricSpec;
+/** Optional. Required if image_uri isn't set. A map of resource_pool_id to prebuild Ray image if user need to use different images for different head/worker pools. This map needs to cover all the resource pool ids. Example: { "ray_head_node_pool": "head image" "ray_worker_node_pool1": "worker image" "ray_worker_node_pool2": "another worker image" } */
+resourcePoolImages?: {[P in string]: string};
+}
 interface GoogleCloudAiplatformV1ReadFeatureValuesRequest {
 /** Required. ID for a specific entity. For example, for a machine learning model predicting user clicks on a website, an entity ID could be `user_123`. */
 entityId?: string;
@@ -4300,10 +4098,13 @@ declare namespace gapi.client {
 /** Optional. min replicas in the node pool, must be ≤ replica_count and < max_replica_count or will throw error */
 minReplicaCount?: string;
 }
-interface GoogleCloudAiplatformV1ResourceRuntime {
+interface GoogleCloudAiplatformV1ResourceRuntime {
+/** Output only. URIs for user to connect to the Cluster. Example: { "RAY_HEAD_NODE_INTERNAL_IP": "head-node-IP:10001" "RAY_DASHBOARD_URI": "ray-dashboard-address:8888" } */
+accessUris?: {[P in string]: string};
+}
 interface GoogleCloudAiplatformV1ResourceRuntimeSpec {
 /** Optional. Ray cluster configuration. Required when creating a dedicated RayCluster on the PersistentResource. */
-raySpec?:
+raySpec?: GoogleCloudAiplatformV1RaySpec;
 /** Optional. Configure the use of workload identity on the PersistentResource */
 serviceAccountSpec?: GoogleCloudAiplatformV1ServiceAccountSpec;
 }
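Illustrative sketch (not part of the package): a PersistentResource runtime spec using the new RaySpec and RayMetricSpec types; once the resource is running, connection endpoints would appear in ResourceRuntime.accessUris. Pool ids and image URIs below are placeholders.

const runtimeSpec: gapi.client.aiplatform.GoogleCloudAiplatformV1ResourceRuntimeSpec = {
  raySpec: {
    headNodeResourcePoolId: 'ray_head_node_pool', // which resource pool hosts the Ray head node
    resourcePoolImages: {
      ray_head_node_pool: 'us-docker.pkg.dev/my-project/my-repo/ray-head:latest',
      ray_worker_node_pool1: 'us-docker.pkg.dev/my-project/my-repo/ray-worker:latest',
    },
    rayMetricSpec: { disabled: false }, // keep Ray metrics collection enabled
  },
};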
@@ -5161,6 +4962,8 @@ declare namespace gapi.client {
 gcsUri?: string;
 /** Grounding checking configuration. */
 groundingConfig?: GoogleCloudAiplatformV1SchemaPredictParamsGroundingConfig;
+/** Whether the prompt dataset has prompt variable. */
+hasPromptVariable?: boolean;
 /** Value of the maximum number of tokens generated set when the dataset was saved. */
 maxOutputTokens?: string;
 /** User-created prompt note. Note size limit is 2KB. */
@@ -5940,6 +5743,16 @@ declare namespace gapi.client {
 /** A list of string values. */
 values?: string[];
 }
+interface GoogleCloudAiplatformV1StructFieldValue {
+/** Name of the field in the struct feature. */
+name?: string;
+/** The value for this field. */
+value?: GoogleCloudAiplatformV1FeatureValue;
+}
+interface GoogleCloudAiplatformV1StructValue {
+/** A list of field values. */
+values?: GoogleCloudAiplatformV1StructFieldValue[];
+}
 interface GoogleCloudAiplatformV1Study {
 /** Output only. Time at which the study was created. */
 createTime?: string;
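Illustrative sketch (not part of the package): a FeatureValue holding the new struct type, built from the StructValue and StructFieldValue interfaces above. Field names and values are examples only; doubleValue/stringValue are other FeatureValue members not shown in this diff.

const featureValue: gapi.client.aiplatform.GoogleCloudAiplatformV1FeatureValue = {
  structValue: {
    values: [
      { name: 'color', value: { stringValue: 'blue' } },
      { name: 'weight_kg', value: { doubleValue: 1.5 } }, // each field nests a FeatureValue
    ],
  },
};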
@@ -6368,6 +6181,8 @@ declare namespace gapi.client {
 interface GoogleCloudAiplatformV1Tool {
 /** Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. */
 functionDeclarations?: GoogleCloudAiplatformV1FunctionDeclaration[];
+/** Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search. */
+googleSearchRetrieval?: any;
 /** Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. */
 retrieval?: GoogleCloudAiplatformV1Retrieval;
 }
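Illustrative sketch (not part of the package): enabling the new Google-Search-backed retrieval tool on a request. Note the field is declared as `any` in these typings even though an (empty) GoogleCloudAiplatformV1GoogleSearchRetrieval interface was added earlier in this diff, so an empty object literal type-checks.

const tool: gapi.client.aiplatform.GoogleCloudAiplatformV1Tool = {
  googleSearchRetrieval: {}, // no configuration fields yet
};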
@@ -6796,671 +6611,6 @@ declare namespace gapi.client {
|
|
|
6796
6611
|
/** The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. */
|
|
6797
6612
|
units?: string;
|
|
6798
6613
|
}
|
|
6799
|
-
interface IntelligenceCloudAutomlXpsMetricEntry {
|
|
6800
|
-
/** For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the "cloudbilling.googleapis.com/argentum_metric_id" label. Otherwise leave empty. */
|
|
6801
|
-
argentumMetricId?: string;
|
|
6802
|
-
/** A double value. */
|
|
6803
|
-
doubleValue?: number;
|
|
6804
|
-
/** A signed 64-bit integer value. */
|
|
6805
|
-
int64Value?: string;
|
|
6806
|
-
/** The metric name defined in the service configuration. */
|
|
6807
|
-
metricName?: string;
|
|
6808
|
-
/** Billing system labels for this (metric, value) pair. */
|
|
6809
|
-
systemLabels?: IntelligenceCloudAutomlXpsMetricEntryLabel[];
|
|
6810
|
-
}
|
|
6811
|
-
interface IntelligenceCloudAutomlXpsMetricEntryLabel {
|
|
6812
|
-
/** The name of the label. */
|
|
6813
|
-
labelName?: string;
|
|
6814
|
-
/** The value of the label. */
|
|
6815
|
-
labelValue?: string;
|
|
6816
|
-
}
|
|
6817
|
-
interface IntelligenceCloudAutomlXpsReportingMetrics {
|
|
6818
|
-
/** The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set. */
|
|
6819
|
-
effectiveTrainingDuration?: string;
|
|
6820
|
-
/** One entry per metric name. The values must be aggregated per metric name. */
|
|
6821
|
-
metricEntries?: IntelligenceCloudAutomlXpsMetricEntry[];
|
|
6822
|
-
}
|
|
6823
|
-
interface LanguageLabsAidaTrustRecitationProtoDocAttribution {
|
|
6824
|
-
amarnaId?: string;
|
|
6825
|
-
arxivId?: string;
|
|
6826
|
-
author?: string;
|
|
6827
|
-
bibkey?: string;
|
|
6828
|
-
/** ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517 */
|
|
6829
|
-
biorxivId?: string;
|
|
6830
|
-
bookTitle?: string;
|
|
6831
|
-
/** The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. */
|
|
6832
|
-
bookVolumeId?: string;
|
|
6833
|
-
category?: string;
|
|
6834
|
-
conversationId?: string;
|
|
6835
|
-
/** The dataset this document comes from. */
|
|
6836
|
-
dataset?: string;
|
|
6837
|
-
filepath?: string;
|
|
6838
|
-
geminiId?: string;
|
|
6839
|
-
gnewsArticleTitle?: string;
|
|
6840
|
-
goodallExampleId?: string;
|
|
6841
|
-
/** Whether the document is opted out. */
|
|
6842
|
-
isOptOut?: boolean;
|
|
6843
|
-
isPrompt?: boolean;
|
|
6844
|
-
lamdaExampleId?: string;
|
|
6845
|
-
license?: string;
|
|
6846
|
-
meenaConversationId?: string;
|
|
6847
|
-
/** Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. */
|
|
6848
|
-
naturalLanguageCode?: string;
|
|
6849
|
-
/** True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. */
|
|
6850
|
-
noAttribution?: boolean;
|
|
6851
|
-
podcastUtteranceId?: string;
|
|
6852
|
-
publicationDate?: GoogleTypeDate;
|
|
6853
|
-
/** This field is for opt-out experiment only, MUST never be used during actual production/serving. */
|
|
6854
|
-
qualityScoreExperimentOnly?: number;
|
|
6855
|
-
/** Github repository */
|
|
6856
|
-
repo?: string;
|
|
6857
|
-
/** URL of a webdoc */
|
|
6858
|
-
url?: string;
|
|
6859
|
-
volumeId?: string;
|
|
6860
|
-
/** Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. */
|
|
6861
|
-
wikipediaArticleTitle?: string;
|
|
6862
|
-
/** The unique video id from Youtube. Example: AkoGsW52Ir0 */
|
|
6863
|
-
youtubeVideoId?: string;
|
|
6864
|
-
}
|
|
6865
|
-
interface LanguageLabsAidaTrustRecitationProtoRecitationResult {
|
|
6866
|
-
dynamicSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
|
|
6867
|
-
/** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. When the given input is not found in any source, the recitation action will not be specified. */
|
|
6868
|
-
recitationAction?: string;
|
|
6869
|
-
trainingSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
|
|
6870
|
-
}
|
|
6871
|
-
interface LanguageLabsAidaTrustRecitationProtoSegmentResult {
|
|
6872
|
-
/** The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. */
|
|
6873
|
-
attributionDataset?: string;
|
|
6874
|
-
/** human-friendly string that contains information from doc_attribution which could be shown by clients */
|
|
6875
|
-
displayAttributionMessage?: string;
|
|
6876
|
-
docAttribution?: LanguageLabsAidaTrustRecitationProtoDocAttribution;
|
|
6877
|
-
/** number of documents that contained this segment */
|
|
6878
|
-
docOccurrences?: number;
|
|
6879
|
-
endIndex?: number;
|
|
6880
|
-
/** The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. */
|
|
6881
|
-
rawText?: string;
|
|
6882
|
-
segmentRecitationAction?: string;
|
|
6883
|
-
/** The category of the source dataset where the segment came from. This is more stable than Dataset. */
|
|
6884
|
-
sourceCategory?: string;
|
|
6885
|
-
/** The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. */
|
|
6886
|
-
startIndex?: number;
|
|
6887
|
-
}
|
|
6888
|
-
interface LanguageLabsAidaTrustRecitationProtoStreamRecitationResult {
|
|
6889
|
-
/** The recitation result against the given dynamic data source. */
|
|
6890
|
-
dynamicSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
|
|
6891
|
-
/** Last index of input text fully checked for recitation in the entire streaming context. Would return `-1` if no Input was checked for recitation. */
|
|
6892
|
-
fullyCheckedTextIndex?: number;
|
|
6893
|
-
/** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. */
|
|
6894
|
-
recitationAction?: string;
|
|
6895
|
-
/** The recitation result against model training data. */
|
|
6896
|
-
trainingSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
|
|
6897
|
-
}
|
|
6898
|
-
interface LearningGenaiRecitationContentChunkRecitationCheckResult {
|
|
6899
|
-
imageResult?: LearningGenaiRecitationImageRecitationCheckResult;
|
|
6900
|
-
textResult?: LearningGenaiRecitationRecitationResult;
|
|
6901
|
-
}
|
|
6902
|
-
interface LearningGenaiRecitationDocAttribution {
|
|
6903
|
-
amarnaId?: string;
|
|
6904
|
-
arxivId?: string;
|
|
6905
|
-
author?: string;
|
|
6906
|
-
bibkey?: string;
|
|
6907
|
-
/** ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517 */
|
|
6908
|
-
biorxivId?: string;
|
|
6909
|
-
bookTitle?: string;
|
|
6910
|
-
/** The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. */
|
|
6911
|
-
bookVolumeId?: string;
|
|
6912
|
-
conversationId?: string;
|
|
6913
|
-
/** The dataset this document comes from. */
|
|
6914
|
-
dataset?: string;
|
|
6915
|
-
filepath?: string;
|
|
6916
|
-
geminiId?: string;
|
|
6917
|
-
gnewsArticleTitle?: string;
|
|
6918
|
-
goodallExampleId?: string;
|
|
6919
|
-
/** Whether the document is opted out. */
|
|
6920
|
-
isOptOut?: boolean;
|
|
6921
|
-
/** When true, this attribution came from the user's prompt. */
|
|
6922
|
-
isPrompt?: boolean;
|
|
6923
|
-
lamdaExampleId?: string;
|
|
6924
|
-
license?: string;
|
|
6925
|
-
meenaConversationId?: string;
|
|
6926
|
-
/** Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. */
|
|
6927
|
-
naturalLanguageCode?: string;
|
|
6928
|
-
/** True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. */
|
|
6929
|
-
noAttribution?: boolean;
|
|
6930
|
-
podcastUtteranceId?: string;
|
|
6931
|
-
publicationDate?: GoogleTypeDate;
|
|
6932
|
-
/** This field is for opt-out experiment only, MUST never be used during actual production/serving. */
|
|
6933
|
-
qualityScoreExperimentOnly?: number;
|
|
6934
|
-
/** Github repository */
|
|
6935
|
-
repo?: string;
|
|
6936
|
-
/** URL of a webdoc */
|
|
6937
|
-
url?: string;
|
|
6938
|
-
volumeId?: string;
|
|
6939
|
-
/** Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. */
|
|
6940
|
-
wikipediaArticleTitle?: string;
|
|
6941
|
-
youtubeVideoId?: string;
|
|
6942
|
-
}
|
|
6943
|
-
interface LearningGenaiRecitationImageDocAttribution {
|
|
6944
|
-
/** Unique ID of the image. */
|
|
6945
|
-
datasetName?: string;
|
|
6946
|
-
/** Doc ID to identify the image. These could be urls of images or amarna id. */
|
|
6947
|
-
stringDocids?: string;
|
|
6948
|
-
}
|
|
6949
|
-
interface LearningGenaiRecitationImageRecitationCheckResult {
|
|
6950
|
-
/** Only has NO_ACTION or BLOCK to start with. */
|
|
6951
|
-
recitationAction?: string;
|
|
6952
|
-
/** Images that are similar to the requested image. */
|
|
6953
|
-
recitedImages?: LearningGenaiRecitationImageRecitationCheckResultSimilarImage[];
|
|
6954
|
-
}
|
|
6955
|
-
interface LearningGenaiRecitationImageRecitationCheckResultSimilarImage {
|
|
6956
|
-
/** Attribution information about the image */
|
|
6957
|
-
docAttribution?: LearningGenaiRecitationImageDocAttribution;
|
|
6958
|
-
/** The memorization embedding model that returned this image */
|
|
6959
|
-
embeddingModel?: string;
|
|
6960
|
-
/** Image ID corresponding of the image corresponding to the score. `image_id` serves for debugging purposes and can't be used by clients to retrieve an image. */
|
|
6961
|
-
imageId?: string;
|
|
6962
|
-
/** Similarity score of requested image compared with image in training data. */
|
|
6963
|
-
scores?: number;
|
|
6964
|
-
}
|
|
6965
|
-
interface LearningGenaiRecitationMMRecitationCheckResult {
|
|
6966
|
-
chunkResults?: LearningGenaiRecitationContentChunkRecitationCheckResult[];
|
|
6967
|
-
/** Overall recommended recitation action for the content. */
|
|
6968
|
-
recitationAction?: string;
|
|
6969
|
-
}
|
|
6970
|
-
interface LearningGenaiRecitationRecitationResult {
|
|
6971
|
-
dynamicSegmentResults?: LearningGenaiRecitationSegmentResult[];
|
|
6972
|
-
/** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. When the given input is not found in any source, the recitation action will be NO_ACTION. */
|
|
6973
|
-
recitationAction?: string;
|
|
6974
|
-
trainingSegmentResults?: LearningGenaiRecitationSegmentResult[];
|
|
6975
|
-
}
|
|
6976
|
-
interface LearningGenaiRecitationSegmentResult {
|
|
6977
|
-
/** The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. */
|
|
6978
|
-
attributionDataset?: string;
|
|
6979
|
-
/** human-friendly string that contains information from doc_attribution which could be shown by clients */
|
|
6980
|
-
displayAttributionMessage?: string;
|
|
6981
|
-
docAttribution?: LearningGenaiRecitationDocAttribution;
|
|
6982
|
-
/** number of documents that contained this segment */
|
|
6983
|
-
docOccurrences?: number;
|
|
6984
|
-
endIndex?: number;
|
|
6985
|
-
/** The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. */
|
|
6986
|
-
rawText?: string;
|
|
6987
|
-
segmentRecitationAction?: string;
|
|
6988
|
-
/** The category of the source dataset where the segment came from. This is more stable than Dataset. */
|
|
6989
|
-
sourceCategory?: string;
|
|
6990
|
-
/** The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. */
|
|
6991
|
-
startIndex?: number;
|
|
6992
|
-
}
|
|
6993
|
-
interface LearningGenaiRootCalculationType {
|
|
6994
|
-
scoreType?: string;
|
|
6995
|
-
weights?: number;
|
|
6996
|
-
}
|
|
6997
|
-
interface LearningGenaiRootClassifierOutput {
|
|
6998
|
-
/** If set, this is the output of the first matching rule. */
|
|
6999
|
-
ruleOutput?: LearningGenaiRootRuleOutput;
|
|
7000
|
-
/** outputs of all matching rule. */
|
|
7001
|
-
ruleOutputs?: LearningGenaiRootRuleOutput[];
|
|
7002
|
-
/** The results of data_providers and metrics. */
|
|
7003
|
-
state?: LearningGenaiRootClassifierState;
|
|
7004
|
-
}
|
|
7005
|
-
interface LearningGenaiRootClassifierOutputSummary {
|
|
7006
|
-
metrics?: LearningGenaiRootMetricOutput[];
|
|
7007
|
-
/** Output of the first matching rule. */
|
|
7008
|
-
ruleOutput?: LearningGenaiRootRuleOutput;
|
|
7009
|
-
/** outputs of all matching rule. */
|
|
7010
|
-
ruleOutputs?: LearningGenaiRootRuleOutput[];
|
|
7011
|
-
}
|
|
7012
|
-
interface LearningGenaiRootClassifierState {
|
|
7013
|
-
dataProviderOutput?: LearningGenaiRootDataProviderOutput[];
|
|
7014
|
-
metricOutput?: LearningGenaiRootMetricOutput[];
|
|
7015
|
-
}
|
|
7016
|
-
interface LearningGenaiRootCodeyChatMetadata {
|
|
7017
|
-
/** Indicates the programming language of the code if the message is a code chunk. */
|
|
7018
|
-
codeLanguage?: string;
|
|
7019
|
-
}
|
|
7020
|
-
interface LearningGenaiRootCodeyCheckpoint {
|
|
7021
|
-
/** Metadata that describes what was truncated at this checkpoint. */
|
|
7022
|
-
codeyTruncatorMetadata?: LearningGenaiRootCodeyTruncatorMetadata;
|
|
7023
|
-
/** Current state of the sample after truncator. */
|
|
7024
|
-
currentSample?: string;
|
|
7025
|
-
/** Postprocessor run that yielded this checkpoint. */
|
|
7026
|
-
postInferenceStep?: string;
|
|
7027
|
-
}
|
|
7028
|
-
interface LearningGenaiRootCodeyCompletionMetadata {
|
|
7029
|
-
checkpoints?: LearningGenaiRootCodeyCheckpoint[];
|
|
7030
|
-
}
|
|
7031
|
-
interface LearningGenaiRootCodeyGenerationMetadata {
|
|
7032
|
-
/** Last state of the sample before getting dropped/returned. */
|
|
7033
|
-
output?: string;
|
|
7034
|
-
/** Last Codey postprocessing step for this sample before getting dropped/returned. */
|
|
7035
|
-
postInferenceStep?: string;
|
|
7036
|
-
}
|
|
7037
|
-
interface LearningGenaiRootCodeyOutput {
|
|
7038
|
-
codeyChatMetadata?: LearningGenaiRootCodeyChatMetadata;
|
|
7039
|
-
codeyCompletionMetadata?: LearningGenaiRootCodeyCompletionMetadata;
|
|
7040
|
-
codeyGenerationMetadata?: LearningGenaiRootCodeyGenerationMetadata;
|
|
7041
|
-
}
|
|
7042
|
-
interface LearningGenaiRootCodeyTruncatorMetadata {
|
|
7043
|
-
/** Index of the current sample that trims off truncated text. */
|
|
7044
|
-
cutoffIndex?: number;
|
|
7045
|
-
/** Text that was truncated at a specific checkpoint. */
|
|
7046
|
-
truncatedText?: string;
|
|
7047
|
-
}
|
|
7048
|
-
interface LearningGenaiRootControlDecodingConfigThreshold {
|
|
7049
|
-
policy?: string;
|
|
7050
|
-
scoreMax?: number;
|
|
7051
|
-
}
|
|
7052
|
-
interface LearningGenaiRootControlDecodingRecord {
|
|
7053
|
-
/** Prefixes feeded into scorer. */
|
|
7054
|
-
prefixes?: string;
|
|
7055
|
-
/** Per policy scores returned from Scorer. Expect to have the same number of scores as in `thresholds`. */
|
|
7056
|
-
scores?: LearningGenaiRootControlDecodingRecordPolicyScore[];
|
|
7057
|
-
/** Suffixes feeded into scorer. */
|
|
7058
|
-
suffiexes?: string;
|
|
7059
|
-
/** Per policy thresholds from user config. */
|
|
7060
|
-
thresholds?: LearningGenaiRootControlDecodingConfigThreshold[];
|
|
7061
|
-
}
|
|
7062
|
-
interface LearningGenaiRootControlDecodingRecordPolicyScore {
|
|
7063
|
-
policy?: string;
|
|
7064
|
-
score?: number;
|
|
7065
|
-
}
|
|
7066
|
-
interface LearningGenaiRootControlDecodingRecords {
|
|
7067
|
-
/** One ControlDecodingRecord record maps to one rewind. */
|
|
7068
|
-
records?: LearningGenaiRootControlDecodingRecord[];
|
|
7069
|
-
}
-    interface LearningGenaiRootDataProviderOutput {
-      name?: string;
-      /** If set, this DataProvider failed and this is the error message. */
-      status?: UtilStatusProto;
-    }
-    interface LearningGenaiRootFilterMetadata {
-      /** Filter confidence. */
-      confidence?: string;
-      /** Debug info for the message. */
-      debugInfo?: LearningGenaiRootFilterMetadataFilterDebugInfo;
-      /** A fallback message chosen by the applied filter. */
-      fallback?: string;
-      /** Additional info for the filter. */
-      info?: string;
-      /** Name of the filter that triggered. */
-      name?: string;
-      /** Filter reason. */
-      reason?: string;
-      /** The input query or generated response that is getting filtered. */
-      text?: string;
-    }
-    interface LearningGenaiRootFilterMetadataFilterDebugInfo {
-      classifierOutput?: LearningGenaiRootClassifierOutput;
-      defaultMetadata?: string;
-      languageFilterResult?: LearningGenaiRootLanguageFilterResult;
-      /** Safety filter output information for LLM Root RAI harm check. */
-      raiOutput?: LearningGenaiRootRAIOutput;
-      raiResult?: CloudAiNlLlmProtoServiceRaiResult;
-      raiSignal?: CloudAiNlLlmProtoServiceRaiSignal;
-      /** Number of rewinds by controlled decoding. */
-      records?: LearningGenaiRootControlDecodingRecords;
-      streamRecitationResult?: LanguageLabsAidaTrustRecitationProtoStreamRecitationResult;
-      takedownResult?: LearningGenaiRootTakedownResult;
-      toxicityResult?: LearningGenaiRootToxicityResult;
-    }
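LearningGenaiRootFilterMetadata, also removed here, is attached per triggered filter with a name, reason, and confidence. A minimal logging sketch over a local stand-in of that shape (the helper is illustrative only):

interface FilterMetadataShape {
  confidence?: string;
  fallback?: string;
  info?: string;
  name?: string;
  reason?: string;
  text?: string;
}

// Produces one human-readable line per triggered filter, e.g. for debug logging.
function summarizeFilters(filterMeta: FilterMetadataShape[]): string[] {
  return filterMeta.map(
    f => `${f.name ?? 'unknown filter'}: ${f.reason ?? 'no reason'} (confidence ${f.confidence ?? 'n/a'})`
  );
}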
-    interface LearningGenaiRootGroundingMetadata {
-      citations?: LearningGenaiRootGroundingMetadataCitation[];
-      /** True if grounding is cancelled, for example, no facts being retrieved. */
-      groundingCancelled?: boolean;
-      searchQueries?: string[];
-    }
-    interface LearningGenaiRootGroundingMetadataCitation {
-      /** Index in the prediction output where the citation ends (exclusive). Must be > start_index and <= len(output). */
-      endIndex?: number;
-      /** Index of the fact supporting this claim. Should be within the range of the `world_facts` in the GenerateResponse. */
-      factIndex?: number;
-      /** Confidence score of this entailment. Value is [0,1] with 1 is the most confidence. */
-      score?: number;
-      /** Index in the prediction output where the citation starts (inclusive). Must be >= 0 and < end_index. */
-      startIndex?: number;
-    }
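The removed citation fields document startIndex as inclusive and endIndex as exclusive, which lines up with String.prototype.slice. A minimal sketch, using a local stand-in for the citation shape:

interface CitationShape { endIndex?: number; factIndex?: number; score?: number; startIndex?: number; }

// startIndex is inclusive and endIndex exclusive, so slice() returns exactly the cited span.
function citedSpan(output: string, citation: CitationShape): string {
  return output.slice(citation.startIndex ?? 0, citation.endIndex ?? output.length);
}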
-    interface LearningGenaiRootHarm {
-      /** Please do not use, this is still under development. */
-      contextualDangerous?: boolean;
-      csam?: boolean;
-      fringe?: boolean;
-      grailImageHarmType?: LearningGenaiRootHarmGrailImageHarmType;
-      grailTextHarmType?: LearningGenaiRootHarmGrailTextHarmType;
-      imageChild?: boolean;
-      imageCsam?: boolean;
-      imagePedo?: boolean;
-      /** Image signals */
-      imagePorn?: boolean;
-      imageViolence?: boolean;
-      pqc?: boolean;
-      safetycat?: LearningGenaiRootHarmSafetyCatCategories;
-      /** Spii Filter uses buckets http://google3/google/privacy/dlp/v2/storage.proto;l=77;rcl=584719820 to classify the input. LMRoot converts the bucket into double score. For example the score for "POSSIBLE" is 3 / 5 = 0.6 . */
-      spii?: LearningGenaiRootHarmSpiiFilter;
-      threshold?: number;
-      videoFrameChild?: boolean;
-      videoFrameCsam?: boolean;
-      videoFramePedo?: boolean;
-      /** Video frame signals */
-      videoFramePorn?: boolean;
-      videoFrameViolence?: boolean;
-    }
-    interface LearningGenaiRootHarmGrailImageHarmType {
-      imageHarmType?: string[];
-    }
-    interface LearningGenaiRootHarmGrailTextHarmType {
-      harmType?: string[];
-    }
-    interface LearningGenaiRootHarmSafetyCatCategories {
-      categories?: string[];
-    }
-    interface LearningGenaiRootHarmSpiiFilter {
-      usBankRoutingMicr?: boolean;
-      usEmployerIdentificationNumber?: boolean;
-      usSocialSecurityNumber?: boolean;
-    }
-    interface LearningGenaiRootInternalMetadata {
-      scoredTokens?: LearningGenaiRootScoredToken[];
-    }
-    interface LearningGenaiRootLanguageFilterResult {
-      /** False when query or response should be filtered out due to unsupported language. */
-      allowed?: boolean;
-      /** Language of the query or response. */
-      detectedLanguage?: string;
-      /** Probability of the language predicted as returned by LangID. */
-      detectedLanguageProbability?: number;
-    }
-    interface LearningGenaiRootMetricOutput {
-      debug?: string;
-      /** Name of the metric. */
-      name?: string;
-      numericValue?: number;
-      status?: UtilStatusProto;
-      stringValue?: string;
-    }
-    interface LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata {
-      /** Latency spent on fact retrievals. There might be multiple retrievals from different fact providers. */
-      factRetrievalMillisecondsByProvider?: {[P in string]: string};
-      /** Latency spent on prompt2query. The procedure generates a search-friendly query given the original prompt. */
-      prompt2queryMilliseconds?: string;
-      /** Latency if use GroundedGeneration service for the whole retrieval & augmentation. */
-      retrievalAugmentMilliseconds?: string;
-    }
-    interface LearningGenaiRootRAIOutput {
-      allowed?: boolean;
-      harm?: LearningGenaiRootHarm;
-      name?: string;
-      score?: number;
-    }
-    interface LearningGenaiRootRegexTakedownResult {
-      /** False when query or response should be taken down due to match with a blocked regex, true otherwise. */
-      allowed?: boolean;
-      /** Regex used to decide that query or response should be taken down. Empty when query or response is kept. */
-      takedownRegex?: string;
-    }
-    interface LearningGenaiRootRequestMetrics {
-      /** Metrics for audio samples in the request. */
-      audioMetrics?: LearningGenaiRootRequestMetricsAudioMetrics;
-      /** Metrics for image samples in the request. */
-      imageMetrics?: LearningGenaiRootRequestMetricsImageMetrics;
-      /** Number of text tokens extracted from the request. */
-      textTokenCount?: number;
-      /** Total number of tokens in the request. */
-      totalTokenCount?: number;
-      /** Metrics for video samples in the request. */
-      videoMetrics?: LearningGenaiRootRequestMetricsVideoMetrics;
-    }
-    interface LearningGenaiRootRequestMetricsAudioMetrics {
-      /** Duration of the audio sample in seconds. */
-      audioDuration?: string;
-      /** Number of tokens derived directly from audio data. */
-      audioTokenCount?: number;
-      /** Number of audio frames in the audio. */
-      numAudioFrames?: number;
-    }
-    interface LearningGenaiRootRequestMetricsImageMetrics {
-      /** Number of tokens extracted from image bytes. */
-      imageTokenCount?: number;
-      /** Number of images in the request. */
-      numImages?: number;
-    }
-    interface LearningGenaiRootRequestMetricsVideoMetrics {
-      /** Metrics associated with audio sample in the video. */
-      audioSample?: LearningGenaiRootRequestMetricsAudioMetrics;
-      /** Number of video frames in the video. */
-      numVideoFrames?: number;
-      /** Duration of the video sample in seconds. */
-      videoDuration?: string;
-      /** Number of tokens extracted from video frames. */
-      videoFramesTokenCount?: number;
-    }
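The removed request-metrics shapes break token counts down by modality alongside a totalTokenCount. A minimal sketch that sums the per-modality counts over local stand-ins of those shapes; whether this sum exactly equals totalTokenCount is not stated in the declarations, so treat the comparison as a heuristic:

interface AudioMetricsShape { audioDuration?: string; audioTokenCount?: number; numAudioFrames?: number; }
interface ImageMetricsShape { imageTokenCount?: number; numImages?: number; }
interface VideoMetricsShape { audioSample?: AudioMetricsShape; numVideoFrames?: number; videoDuration?: string; videoFramesTokenCount?: number; }
interface RequestMetricsShape {
  audioMetrics?: AudioMetricsShape;
  imageMetrics?: ImageMetricsShape;
  textTokenCount?: number;
  totalTokenCount?: number;
  videoMetrics?: VideoMetricsShape;
}

// Adds up the per-modality token counts exposed by the removed metrics shapes.
function perModalityTokenSum(m: RequestMetricsShape): number {
  return (
    (m.textTokenCount ?? 0) +
    (m.audioMetrics?.audioTokenCount ?? 0) +
    (m.imageMetrics?.imageTokenCount ?? 0) +
    (m.videoMetrics?.videoFramesTokenCount ?? 0) +
    (m.videoMetrics?.audioSample?.audioTokenCount ?? 0)
  );
}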
-    interface LearningGenaiRootRequestResponseTakedownResult {
-      /** False when response has to be taken down per above config. */
-      allowed?: boolean;
-      /** Regex used to match the request. */
-      requestTakedownRegex?: string;
-      /** Regex used to decide that response should be taken down. Empty when response is kept. */
-      responseTakedownRegex?: string;
-    }
-    interface LearningGenaiRootRoutingDecision {
-      metadata?: LearningGenaiRootRoutingDecisionMetadata;
-      /** The selected model to route traffic to. */
-      modelConfigId?: string;
-    }
-    interface LearningGenaiRootRoutingDecisionMetadata {
-      scoreBasedRoutingMetadata?: LearningGenaiRootRoutingDecisionMetadataScoreBased;
-      tokenLengthBasedRoutingMetadata?: LearningGenaiRootRoutingDecisionMetadataTokenLengthBased;
-    }
-    interface LearningGenaiRootRoutingDecisionMetadataScoreBased {
-      /** The rule that was matched. */
-      matchedRule?: LearningGenaiRootScoreBasedRoutingConfigRule;
-      /** The score that was generated by the router i.e. the model. */
-      score?: LearningGenaiRootScore;
-      /** No rules were matched & therefore used the default fallback. */
-      usedDefaultFallback?: boolean;
-    }
-    interface LearningGenaiRootRoutingDecisionMetadataTokenLengthBased {
-      modelInputTokenMetadata?: LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata[];
-      modelMaxTokenMetadata?: LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata[];
-    }
-    interface LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata {
-      /** The length computed by backends using the formatter & tokenizer specific to the model */
-      computedInputTokenLength?: number;
-      modelId?: string;
-      /** If true, the model was selected as a fallback, since no model met requirements. */
-      pickedAsFallback?: boolean;
-      /** If true, the model was selected since it met the requriements. */
-      selected?: boolean;
-    }
-    interface LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata {
-      maxNumInputTokens?: number;
-      maxNumOutputTokens?: number;
-      modelId?: string;
-    }
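The removed token-length-based routing metadata marks one model entry as selected or, failing that, as pickedAsFallback. A minimal sketch of recovering the routed model id from a local stand-in of that entry type (the helper is illustrative only):

interface InputTokenEntryShape {
  computedInputTokenLength?: number;
  modelId?: string;
  pickedAsFallback?: boolean;
  selected?: boolean;
}

// Returns the routed model id: the explicitly selected entry wins, otherwise the fallback entry.
function routedModelId(entries: InputTokenEntryShape[]): string | undefined {
  return (entries.find(e => e.selected) ?? entries.find(e => e.pickedAsFallback))?.modelId;
}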
-    interface LearningGenaiRootRuleOutput {
-      decision?: string;
-      name?: string;
-    }
-    interface LearningGenaiRootScore {
-      calculationType?: LearningGenaiRootCalculationType;
-      /** The internal_metadata is intended to be used by internal processors and will be cleared before returns. */
-      internalMetadata?: LearningGenaiRootInternalMetadata;
-      thresholdType?: LearningGenaiRootThresholdType;
-      /** Top candidate tokens and log probabilities at each decoding step. */
-      tokensAndLogprobPerDecodingStep?: LearningGenaiRootTokensAndLogProbPerDecodingStep;
-      value?: number;
-    }
-    interface LearningGenaiRootScoreBasedRoutingConfigRule {
-      /** NOTE: Hardest examples have smaller values in their routing scores. */
-      equalOrGreaterThan?: LearningGenaiRootScore;
-      lessThan?: LearningGenaiRootScore;
-      /** This model_config_id points to ModelConfig::id which allows us to find the ModelConfig to route to. This is part of the banks specified in the ModelBankConfig. */
-      modelConfigId?: string;
-    }
-    interface LearningGenaiRootScoredSimilarityTakedownPhrase {
-      phrase?: LearningGenaiRootSimilarityTakedownPhrase;
-      similarityScore?: number;
-    }
-    interface LearningGenaiRootScoredToken {
-      /** Each end_token_score is a logprob for how well the completion would end at a particular token. See http://google3/labs/language/aida/config/proto/model_config.proto;l=376;rcl=573039459 */
-      endTokenScore?: number;
-      /** Each score is the logprob for the token in model response. */
-      score?: number;
-      token?: string;
-    }
-    interface LearningGenaiRootSimilarityTakedownPhrase {
-      blockedPhrase?: string;
-    }
-    interface LearningGenaiRootSimilarityTakedownResult {
-      /** False when query or response should be taken down by any of the takedown rules, true otherwise. */
-      allowed?: boolean;
-      /** List of similar phrases with score. Set only if allowed=false. */
-      scoredPhrases?: LearningGenaiRootScoredSimilarityTakedownPhrase[];
-    }
-    interface LearningGenaiRootTakedownResult {
-      /** False when query or response should be taken down by any of the takedown rules, true otherwise. */
-      allowed?: boolean;
-      regexTakedownResult?: LearningGenaiRootRegexTakedownResult;
-      requestResponseTakedownResult?: LearningGenaiRootRequestResponseTakedownResult;
-      similarityTakedownResult?: LearningGenaiRootSimilarityTakedownResult;
-    }
-    interface LearningGenaiRootThresholdType {
-      scoreType?: string;
-      threshold?: number;
-    }
-    interface LearningGenaiRootTokensAndLogProbPerDecodingStep {
-      /** Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates. */
-      chosenCandidates?: LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[];
-      /** Length = total number of decoding steps. */
-      topCandidates?: LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates[];
-    }
-    interface LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate {
-      /** The candidate's log probability. */
-      logProbability?: number;
-      /** The candidate’s token value. */
-      token?: string;
-    }
-    interface LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates {
-      /** Sorted by log probability in descending order. */
-      candidates?: LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[];
-    }
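The removed per-decoding-step structures record a chosen candidate and its log probability at each step. A minimal sketch that totals the chosen tokens' log probabilities, again over local stand-in interfaces (the helper name is illustrative only):

interface DecodingCandidateShape { logProbability?: number; token?: string; }
interface TokensAndLogProbShape { chosenCandidates?: DecodingCandidateShape[]; }

// Sums the log probability of the chosen token at each decoding step.
function chosenSequenceLogProb(steps: TokensAndLogProbShape): number {
  return (steps.chosenCandidates ?? []).reduce((sum, c) => sum + (c.logProbability ?? 0), 0);
}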
-    interface LearningGenaiRootToxicityResult {
-      signals?: LearningGenaiRootToxicitySignal[];
-    }
-    interface LearningGenaiRootToxicitySignal {
-      allowed?: boolean;
-      label?: string;
-      score?: number;
-    }
-    interface LearningGenaiRootTranslationRequestInfo {
-      /** The ISO-639 language code of source text in the initial request, detected automatically, if no source language was passed within the initial request. If the source language was passed, auto-detection of the language does not occur and this field is empty. */
-      detectedLanguageCodes?: string[];
-      /** The sum of the size of all the contents in the request. */
-      totalContentSize?: string;
-    }
-    interface LearningServingLlmAtlasOutputMetadata {
-      requestTopic?: string;
-      source?: string;
-    }
-    interface LearningServingLlmMessageMetadata {
-      atlasMetadata?: LearningServingLlmAtlasOutputMetadata;
-      /** Summary of classifier output. We attach this to all messages regardless of whether classification rules triggered or not. */
-      classifierSummary?: LearningGenaiRootClassifierOutputSummary;
-      /** Contains metadata related to Codey Processors. */
-      codeyOutput?: LearningGenaiRootCodeyOutput;
-      currentStreamTextLength?: number;
-      /** Whether the corresponding message has been deleted. */
-      deleted?: boolean;
-      /** Metadata for filters that triggered. */
-      filterMeta?: LearningGenaiRootFilterMetadata[];
-      /** This score is finally used for ranking the message. This will be same as the score present in `Message.score` field. */
-      finalMessageScore?: LearningGenaiRootScore;
-      /** NOT YET IMPLEMENTED. */
-      finishReason?: string;
-      groundingMetadata?: LearningGenaiRootGroundingMetadata;
-      /** Applies to streaming response message only. Whether the message is a code. */
-      isCode?: boolean;
-      /** Applies to Response message only. Indicates whether the message is a fallback and the response would have otherwise been empty. */
-      isFallback?: boolean;
-      /** Result from nlp_saft DetectLanguage method. Currently the predicted language code and language probability is used. */
-      langidResult?: NlpSaftLangIdResult;
-      /** Detected language. */
-      language?: string;
-      /** The LM prefix used to generate this response. */
-      lmPrefix?: string;
-      /** FOR LMROOT INTERNAL USE ONLY. Externally, use learning.genai.root.RequestMetadata.RequestMetrics. Request metrics per modality including token count, duration, num_frames. */
-      lmrootInternalRequestMetrics?: LearningGenaiRootRequestMetrics;
-      /** Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. */
-      mmRecitationResult?: LearningGenaiRecitationMMRecitationCheckResult;
-      /** Number of Controlled Decoding rewind and repeats that have happened for this response. */
-      numRewinds?: number;
-      /** The original text generated by LLM. This is the raw output for debugging purposes. */
-      originalText?: string;
-      /** Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only. */
-      perStreamDecodedTokenCount?: number;
-      /** Number of tokens returned per stream in a response candidate after any response rewriting or truncation. Applies to streaming response only. Applies to Gemini models only. */
-      perStreamReturnedTokenCount?: number;
-      /** Results of running RAI on the query or this response candidate. One output per rai_config. It will be populated regardless of whether the threshold is exceeded or not. */
-      raiOutputs?: LearningGenaiRootRAIOutput[];
-      /** Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome. */
-      recitationResult?: LearningGenaiRecitationRecitationResult;
-      /** All the different scores for a message are logged here. */
-      scores?: LearningGenaiRootScore[];
-      /** Whether the response is terminated during streaming return. Only used for streaming requests. */
-      streamTerminated?: boolean;
-      /** Total tokens decoded so far per response_candidate. For streaming: Count of all the tokens decoded so far (aggregated count). For unary: Count of all the tokens decoded per response_candidate. */
-      totalDecodedTokenCount?: number;
-      /** Total number of tokens returned in a response candidate. For streaming, it is the aggregated count (i.e. total so far) Applies to Gemini models only. */
-      totalReturnedTokenCount?: number;
-      /** Translated user-prompt used for RAI post processing. This is for internal processing only. We will translate in pre-processor and pass the translated text to the post processor using this field. It will be empty if non of the signals requested need translation. */
-      translatedUserPrompts?: string[];
-      /** The metadata from Vertex SafetyCat processors */
-      vertexRaiResult?: CloudAiNlLlmProtoServiceRaiResult;
-    }
-    interface NlpSaftLangIdLocalesResult {
-      /** List of locales in which the text would be considered acceptable. Sorted in descending order according to each locale's respective likelihood. For example, if a Portuguese text is acceptable in both Brazil and Portugal, but is more strongly associated with Brazil, then the predictions would be ["pt-BR", "pt-PT"], in that order. May be empty, indicating that the model did not predict any acceptable locales. */
-      predictions?: NlpSaftLangIdLocalesResultLocale[];
-    }
-    interface NlpSaftLangIdLocalesResultLocale {
-      /** A BCP 47 language code that includes region information. For example, "pt-BR" or "pt-PT". This field will always be populated. */
-      languageCode?: string;
-    }
-    interface NlpSaftLangIdResult {
-      /** The version of the model used to create these annotations. */
-      modelVersion?: string;
-      /** This field stores the n-best list of possible BCP 47 language code strings for a given input sorted in descending order according to each code's respective probability. */
-      predictions?: NlpSaftLanguageSpan[];
-      /** This field stores language predictions of subspans of the input, when available. Each LanguageSpanSequence is a sequence of LanguageSpans. A particular sequence of LanguageSpans has an associated probability, and need not necessarily cover the entire input. If no language could be predicted for any span, then this field may be empty. */
-      spanPredictions?: NlpSaftLanguageSpanSequence[];
-    }
-    interface NlpSaftLanguageSpan {
-      end?: number;
-      /** A BCP 47 language code for this span. */
-      languageCode?: string;
-      /** Optional field containing any information that was predicted about the specific locale(s) of the span. */
-      locales?: NlpSaftLangIdLocalesResult;
-      /** A probability associated with this prediction. */
-      probability?: number;
-      /** Start and end byte offsets, inclusive, within the given input string. A value of -1 implies that this field is not set. Both fields must either be set with a nonnegative value or both are unset. If both are unset then this LanguageSpan applies to the entire input. */
-      start?: number;
-    }
-    interface NlpSaftLanguageSpanSequence {
-      /** A sequence of LanguageSpan objects, each assigning a language to a subspan of the input. */
-      languageSpans?: NlpSaftLanguageSpan[];
-      /** The probability of this sequence of LanguageSpans. */
-      probability?: number;
-    }
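Per the removed comments, NlpSaftLangIdResult.predictions is sorted by descending probability, so the first entry is the most likely language. A minimal sketch over local stand-ins of those shapes:

interface LanguageSpanShape { end?: number; languageCode?: string; probability?: number; start?: number; }
interface LangIdResultShape { modelVersion?: string; predictions?: LanguageSpanShape[]; }

// Returns the most likely BCP 47 language code, relying on the documented descending-probability order.
function topLanguage(result: LangIdResultShape): string | undefined {
  return result.predictions?.[0]?.languageCode;
}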
-    interface Proto2BridgeMessageSet {}
-    interface UtilStatusProto {
-      /** The canonical error code (see codes.proto) that most closely corresponds to this status. This may be missing, and in the common case of the generic space, it definitely will be. */
-      canonicalCode?: number;
-      /** Numeric code drawn from the space specified below. Often, this is the canonical error space, and code is drawn from google3/util/task/codes.proto */
-      code?: number;
-      /** Detail message */
-      message?: string;
-      /** message_set associates an arbitrary proto message with the status. */
-      messageSet?: any;
-      /** The following are usually only present when code != 0 Space to which this status belongs */
-      space?: string;
-    }
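The removed UtilStatusProto comments note that most fields are usually only present when code != 0. A minimal sketch that treats a missing or zero code as success (local stand-in shape; the helper is illustrative only):

interface UtilStatusShape { canonicalCode?: number; code?: number; message?: string; space?: string; }

// Formats a status for logging; a missing or zero code is reported as OK.
function describeStatus(status?: UtilStatusShape): string {
  if (!status || !status.code) return 'OK';
  return `error ${status.code} in space ${status.space ?? 'generic'}: ${status.message ?? ''}`;
}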
    interface BatchPredictionJobsResource {
      /** Cancels a BatchPredictionJob. Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use JobService.GetBatchPredictionJob or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its BatchPredictionJob.state is set to `CANCELLED`. Any files already outputted by the job are not deleted. */
      cancel(request: {