@maxim_mazurok/gapi.client.aiplatform-v1beta1 0.0.20240507 → 0.0.20240520
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +218 -931
- package/package.json +1 -1
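For orientation (not part of the published diff): a minimal TypeScript sketch of how the regenerated typings might be consumed, assuming the usual triple-slash reference to this package and the `gapi.client.aiplatform` namespace declared in index.d.ts. The field names come from the diff below; the bucket paths and literal values are hypothetical placeholders.

/// <reference types="@maxim_mazurok/gapi.client.aiplatform-v1beta1" />

// Sketch: populate the new distillationSpec field added to TuningJob in this release.
// All literal values are illustrative; the typings only constrain the shapes.
const tuningJob: gapi.client.aiplatform.GoogleCloudAiplatformV1beta1TuningJob = {
  description: 'Distillation tuning job (example)',
  distillationSpec: {
    baseTeacherModel: 'gemini-1.0-pro-002',             // example value from the field's doc comment
    studentModel: 'google/gemma-2b-it',                  // example value from the field's doc comment
    trainingDatasetUri: 'gs://example-bucket/train.jsonl',       // hypothetical path
    pipelineRootDirectory: 'gs://example-bucket/pipeline-root',  // hypothetical path
    hyperParameters: { epochCount: '5', learningRateMultiplier: 1 },
  },
};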
package/index.d.ts
CHANGED
@@ -9,7 +9,7 @@
 // This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
 // In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
 // Generated from: https://aiplatform.googleapis.com/$discovery/rest?version=v1beta1
-// Revision:
+// Revision: 20240520

 /// <reference types="gapi.client" />

@@ -28,14 +28,10 @@ declare namespace gapi.client {
 ): void;

 namespace aiplatform {
-interface CloudAiLargeModelsVisionEmbedVideoResponse {
-/** The embedding vector for the video. */
-videoEmbeddings?: any[];
-}
 interface CloudAiLargeModelsVisionFilteredText {
-/** Confidence level */
-category?: string;
 /** Filtered category */
+category?: string;
+/** Confidence score */
 confidence?: string;
 /** Input prompt */
 prompt?: string;
@@ -80,10 +76,6 @@ declare namespace gapi.client {
 /** Video */
 video?: CloudAiLargeModelsVisionVideo;
 }
-interface CloudAiLargeModelsVisionMediaGenerateContentResponse {
-/** Response to the user's request. */
-response?: CloudAiNlLlmProtoServiceGenerateMultiModalResponse;
-}
 interface CloudAiLargeModelsVisionNamedBoundingBox {
 classes?: string[];
 entities?: string[];
@@ -94,26 +86,19 @@ declare namespace gapi.client {
 y2?: number;
 }
 interface CloudAiLargeModelsVisionRaiInfo {
+detectedLabels?: CloudAiLargeModelsVisionRaiInfoDetectedLabels[];
 /** List of rai categories' information to return */
 raiCategories?: string[];
 /** List of rai scores mapping to the rai categories. Rounded to 1 decimal place. */
 scores?: number[];
 }
-interface
-/**
-
-
-
-/**
-
-/** Text information */
-text?: string;
-}
-interface CloudAiLargeModelsVisionRelativeTemporalPartition {
-/** End time offset of the partition. */
-endOffset?: string;
-/** Start time offset of the partition. */
-startOffset?: string;
+interface CloudAiLargeModelsVisionRaiInfoDetectedLabels {
+/** Descriptions of the detected labels. */
+descriptions?: string[];
+/** The RAI category for the deteceted labels. */
+raiCategory?: string;
+/** Confidence scores mapping to the labels. */
+scores?: number[];
 }
 interface CloudAiLargeModelsVisionSemanticFilterResponse {
 /** Class labels of the bounding boxes that failed the semantic filtering. Bounding box coordinates. */
@@ -127,229 +112,6 @@ declare namespace gapi.client {
 /** Raw bytes. */
 video?: string;
 }
-interface CloudAiNlLlmProtoServiceCandidate {
-/** Source attribution of the generated content. */
-citationMetadata?: CloudAiNlLlmProtoServiceCitationMetadata;
-/** Content of the candidate. */
-content?: CloudAiNlLlmProtoServiceContent;
-/** A string that describes the filtering behavior in more detail. Only filled when reason is set. */
-finishMessage?: string;
-/** The reason why the model stopped generating tokens. */
-finishReason?: string;
-/** Grounding metadata. Combine with the facts list from response to generate grounding citations for this choice. */
-groundingMetadata?: LearningGenaiRootGroundingMetadata;
-/** Index of the candidate. */
-index?: number;
-/** Safety ratings of the generated content. */
-safetyRatings?: CloudAiNlLlmProtoServiceSafetyRating[];
-}
-interface CloudAiNlLlmProtoServiceCitation {
-/** End index into the content. */
-endIndex?: number;
-/** License of the attribution. */
-license?: string;
-/** Publication date of the attribution. */
-publicationDate?: GoogleTypeDate;
-/** Start index into the content. */
-startIndex?: number;
-/** Title of the attribution. */
-title?: string;
-/** Url reference of the attribution. */
-uri?: string;
-}
-interface CloudAiNlLlmProtoServiceCitationMetadata {
-/** List of citations. */
-citations?: CloudAiNlLlmProtoServiceCitation[];
-}
-interface CloudAiNlLlmProtoServiceContent {
-/** If true, the content is from a cached content. */
-isCached?: boolean;
-/** The parts of the message. */
-parts?: CloudAiNlLlmProtoServicePart[];
-/** The role of the current conversation participant. */
-role?: string;
-}
-interface CloudAiNlLlmProtoServiceFact {
-/** Query that is used to retrieve this fact. */
-query?: string;
-/** If present, the summary/snippet of the fact. */
-summary?: string;
-/** If present, it refers to the title of this fact. */
-title?: string;
-/** If present, this URL links to the webpage of the fact. */
-url?: string;
-}
-interface CloudAiNlLlmProtoServiceFunctionCall {
-/** The function parameters and values in JSON format. */
-args?: {[P in string]: any};
-/** Required. The name of the function to call. */
-name?: string;
-}
-interface CloudAiNlLlmProtoServiceFunctionResponse {
-/** Required. The name of the function to call. */
-name?: string;
-/** Required. The function response in JSON object format. */
-response?: {[P in string]: any};
-}
-interface CloudAiNlLlmProtoServiceGenerateMultiModalResponse {
-/** Possible candidate responses to the conversation up until this point. */
-candidates?: CloudAiNlLlmProtoServiceCandidate[];
-/** Debug information containing message metadata. Clients should not consume this field, and this is only populated for Flow Runner path. */
-debugMetadata?: CloudAiNlLlmProtoServiceMessageMetadata;
-/** External facts retrieved for factuality/grounding. */
-facts?: CloudAiNlLlmProtoServiceFact[];
-/** Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. */
-promptFeedback?: CloudAiNlLlmProtoServicePromptFeedback;
-/** Billable prediction metrics. */
-reportingMetrics?: IntelligenceCloudAutomlXpsReportingMetrics;
-/** Usage metadata about the response(s). */
-usageMetadata?: CloudAiNlLlmProtoServiceUsageMetadata;
-}
-interface CloudAiNlLlmProtoServiceMessageMetadata {
-/** Factuality-related debug metadata. */
-factualityDebugMetadata?: LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata;
-/** Filter metadata of the input messages. */
-inputFilterInfo?: LearningServingLlmMessageMetadata;
-/** This score is generated by the router model to decide which model to use */
-modelRoutingDecision?: LearningGenaiRootRoutingDecision;
-/** Filter metadata of the output messages. */
-outputFilterInfo?: LearningServingLlmMessageMetadata[];
-}
-interface CloudAiNlLlmProtoServicePart {
-/** Document metadata. The metadata should only be used by the Cloud LLM when supporting document mime types. It will only be populated when this image input part is converted from a document input part. */
-documentMetadata?: CloudAiNlLlmProtoServicePartDocumentMetadata;
-/** URI-based data. */
-fileData?: CloudAiNlLlmProtoServicePartFileData;
-/** Function call data. */
-functionCall?: CloudAiNlLlmProtoServiceFunctionCall;
-/** Function response data. */
-functionResponse?: CloudAiNlLlmProtoServiceFunctionResponse;
-/** Inline bytes data */
-inlineData?: CloudAiNlLlmProtoServicePartBlob;
-/** Metadata provides extra info for building the LM Root request. Note: High enough tag number for internal only fields. */
-lmRootMetadata?: CloudAiNlLlmProtoServicePartLMRootMetadata;
-/** Text input. */
-text?: string;
-/** Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. */
-videoMetadata?: CloudAiNlLlmProtoServicePartVideoMetadata;
-}
-interface CloudAiNlLlmProtoServicePartBlob {
-/** Inline data. */
-data?: string;
-/** The mime type corresponding to this input. */
-mimeType?: string;
-/** Original file data where the blob comes from. */
-originalFileData?: CloudAiNlLlmProtoServicePartFileData;
-}
-interface CloudAiNlLlmProtoServicePartDocumentMetadata {
-/** The original document blob. */
-originalDocumentBlob?: CloudAiNlLlmProtoServicePartBlob;
-/** The (1-indexed) page number of the image in the original document. The first page carries the original document content and mime type. */
-pageNumber?: number;
-}
-interface CloudAiNlLlmProtoServicePartFileData {
-/** Inline data. */
-fileUri?: string;
-/** The mime type corresponding to this input. */
-mimeType?: string;
-}
-interface CloudAiNlLlmProtoServicePartLMRootMetadata {
-/** Chunk id that will be used when mapping the part to the LM Root's chunk. */
-chunkId?: string;
-}
-interface CloudAiNlLlmProtoServicePartVideoMetadata {
-/** The end offset of the video. */
-endOffset?: string;
-/** The start offset of the video. */
-startOffset?: string;
-}
-interface CloudAiNlLlmProtoServicePromptFeedback {
-/** Blocked reason. */
-blockReason?: string;
-/** A readable block reason message. */
-blockReasonMessage?: string;
-/** Safety ratings. */
-safetyRatings?: CloudAiNlLlmProtoServiceSafetyRating[];
-}
-interface CloudAiNlLlmProtoServiceRaiResult {
-/** Recitation result from Aida recitation checker. */
-aidaRecitationResult?: LanguageLabsAidaTrustRecitationProtoRecitationResult;
-/** Use `triggered_blocklist`. */
-blocked?: boolean;
-/** The error codes indicate which RAI filters block the response. */
-errorCodes?: number[];
-/** Whether the text should be filtered and not shown to the end user. This is determined based on a combination of `triggered_recitation`, `triggered_blocklist`, `language_filter_result`, and `triggered_safety_filter`. */
-filtered?: boolean;
-/** Language filter result from SAFT LangId. */
-languageFilterResult?: LearningGenaiRootLanguageFilterResult;
-/** Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. */
-mmRecitationResult?: LearningGenaiRecitationMMRecitationCheckResult;
-/** The RAI signals for the text. */
-raiSignals?: CloudAiNlLlmProtoServiceRaiSignal[];
-/** Translation request info during RAI for debugging purpose. Each TranslationRequestInfo corresponds to a request sent to the translation server. */
-translationRequestInfos?: LearningGenaiRootTranslationRequestInfo[];
-/** Whether the text triggered the blocklist. */
-triggeredBlocklist?: boolean;
-/** Whether the text should be blocked by the recitation result from Aida recitation checker. It is determined from aida_recitation_result. */
-triggeredRecitation?: boolean;
-/** Whether the text triggered the safety filter. Currently, this is due to CSAI triggering or one of four categories (derogatory, sexual, toxic, violent) having a score over the filter threshold. */
-triggeredSafetyFilter?: boolean;
-}
-interface CloudAiNlLlmProtoServiceRaiSignal {
-/** The confidence level for the RAI category. */
-confidence?: string;
-/** Whether the category is flagged as being present. Currently, this is set to true if score >= 0.5. */
-flagged?: boolean;
-/** The influential terms that could potentially block the response. */
-influentialTerms?: CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm[];
-/** The RAI category. */
-raiCategory?: string;
-/** The score for the category, in the range [0.0, 1.0]. */
-score?: number;
-}
-interface CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm {
-/** The beginning offset of the influential term. */
-beginOffset?: number;
-/** The confidence score of the influential term. */
-confidence?: number;
-/** The source of the influential term, prompt or response. */
-source?: string;
-/** The influential term. */
-term?: string;
-}
-interface CloudAiNlLlmProtoServiceSafetyRating {
-/** Indicates whether the content was filtered out because of this rating. */
-blocked?: boolean;
-/** Harm category. */
-category?: string;
-/** The influential terms that could potentially block the response. */
-influentialTerms?: CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm[];
-/** Harm probability levels in the content. */
-probability?: string;
-/** Harm probability score. */
-probabilityScore?: number;
-/** Harm severity levels in the content. */
-severity?: string;
-/** Harm severity score. */
-severityScore?: number;
-}
-interface CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm {
-/** The beginning offset of the influential term. */
-beginOffset?: number;
-/** The confidence score of the influential term. */
-confidence?: number;
-/** The source of the influential term, prompt or response. */
-source?: string;
-/** The influential term. */
-term?: string;
-}
-interface CloudAiNlLlmProtoServiceUsageMetadata {
-/** Number of tokens in the response(s). */
-candidatesTokenCount?: number;
-/** Number of tokens in the request. */
-promptTokenCount?: number;
-totalTokenCount?: number;
-}
 interface GoogleApiHttpBody {
 /** The HTTP Content-Type header value specifying the content type of the body. */
 contentType?: string;
@@ -1459,8 +1221,14 @@ declare namespace gapi.client {
 createTime?: string;
 /** Required. The underlying DedicatedResources that the DeploymentResourcePool uses. */
 dedicatedResources?: GoogleCloudAiplatformV1beta1DedicatedResources;
+/** If the DeploymentResourcePool is deployed with custom-trained Models or AutoML Tabular Models, the container(s) of the DeploymentResourcePool will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. */
+disableContainerLogging?: boolean;
+/** Customer-managed encryption key spec for a DeploymentResourcePool. If set, this DeploymentResourcePool will be secured by this key. Endpoints and the DeploymentResourcePool they deploy in need to have the same EncryptionSpec. */
+encryptionSpec?: GoogleCloudAiplatformV1beta1EncryptionSpec;
 /** Immutable. The resource name of the DeploymentResourcePool. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */
 name?: string;
+/** The service account that the DeploymentResourcePool's container(s) run as. Specify the email address of the service account. If this service account is not specified, the container(s) run as a service account that doesn't have access to the resource project. Users deploying the Models to this DeploymentResourcePool must have the `iam.serviceAccounts.actAs` permission on this service account. */
+serviceAccount?: string;
 }
 interface GoogleCloudAiplatformV1beta1DeployModelOperationMetadata {
 /** The operation generic information. */
@@ -1515,6 +1283,30 @@ declare namespace gapi.client {
 /** Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). */
 bootDiskType?: string;
 }
+interface GoogleCloudAiplatformV1beta1DistillationHyperParameters {
+/** Optional. Adapter size for distillation. */
+adapterSize?: string;
+/** Optional. Number of complete passes the model makes over the entire training dataset during training. */
+epochCount?: string;
+/** Optional. Multiplier for adjusting the default learning rate. */
+learningRateMultiplier?: number;
+}
+interface GoogleCloudAiplatformV1beta1DistillationSpec {
+/** The base teacher model that is being distilled, e.g., "gemini-1.0-pro-002". */
+baseTeacherModel?: string;
+/** Optional. Hyperparameters for Distillation. */
+hyperParameters?: GoogleCloudAiplatformV1beta1DistillationHyperParameters;
+/** Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the distillation pipeline. It is used by the system to generate the paths of output artifacts. */
+pipelineRootDirectory?: string;
+/** The student model that is being tuned, e.g., "google/gemma-2b-it". */
+studentModel?: string;
+/** Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file. */
+trainingDatasetUri?: string;
+/** The resource name of the Tuned teacher model. Format: `projects/{project}/locations/{location}/models/{model}`. */
+tunedTeacherModelSource?: string;
+/** Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file. */
+validationDatasetUri?: string;
+}
 interface GoogleCloudAiplatformV1beta1DoubleArray {
 /** A list of double values. */
 values?: number[];
@@ -2332,6 +2124,8 @@ declare namespace gapi.client {
 stringArrayValue?: GoogleCloudAiplatformV1beta1StringArray;
 /** String feature value. */
 stringValue?: string;
+/** A struct type feature value. */
+structValue?: GoogleCloudAiplatformV1beta1StructValue;
 }
 interface GoogleCloudAiplatformV1beta1FeatureValueDestination {
 /** Output in BigQuery format. BigQueryDestination.output_uri in FeatureValueDestination.bigquery_destination must refer to a table. */
@@ -2528,6 +2322,12 @@ declare namespace gapi.client {
 neighborCount?: number;
 /** Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag. */
 perCrowdingAttributeNeighborCount?: number;
+/** Optional. Represents RRF algorithm that combines search results. */
+rrf?: GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF;
+}
+interface GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF {
+/** Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense. */
+alpha?: number;
 }
 interface GoogleCloudAiplatformV1beta1FindNeighborsResponse {
 /** The nearest neighbors of the query datapoints. */
@@ -2544,6 +2344,8 @@ declare namespace gapi.client {
 datapoint?: GoogleCloudAiplatformV1beta1IndexDatapoint;
 /** The distance between the neighbor and the dense embedding query. */
 distance?: number;
+/** The distance between the neighbor and the query sparse_embedding. */
+sparseDistance?: number;
 }
 interface GoogleCloudAiplatformV1beta1FluencyInput {
 /** Required. Fluency instance. */
@@ -2697,8 +2499,6 @@ declare namespace gapi.client {
 presencePenalty?: number;
 /** Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. */
 responseMimeType?: string;
-/** Optional. Control Three levels of creativity in the model output. Default: RESPONSE_STYLE_BALANCED */
-responseStyle?: string;
 /** Optional. Stop sequences. */
 stopSequences?: string[];
 /** Optional. Controls the randomness of predictions. */
@@ -2730,6 +2530,7 @@ declare namespace gapi.client {
 /** Required. The type of the Google Drive resource. */
 resourceType?: string;
 }
+interface GoogleCloudAiplatformV1beta1GoogleSearchRetrieval {}
 interface GoogleCloudAiplatformV1beta1GroundednessInput {
 /** Required. Groundedness instance. */
 instance?: GoogleCloudAiplatformV1beta1GroundednessInstance;
@@ -2929,6 +2730,8 @@ declare namespace gapi.client {
 numericRestricts?: GoogleCloudAiplatformV1beta1IndexDatapointNumericRestriction[];
 /** Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses categorical tokens. See: https://cloud.google.com/vertex-ai/docs/matching-engine/filtering */
 restricts?: GoogleCloudAiplatformV1beta1IndexDatapointRestriction[];
+/** Optional. Feature embedding vector for sparse index. */
+sparseEmbedding?: GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding;
 }
 interface GoogleCloudAiplatformV1beta1IndexDatapointCrowdingTag {
 /** The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. */
@@ -2954,6 +2757,12 @@ declare namespace gapi.client {
 /** The namespace of this restriction. e.g.: color. */
 namespace?: string;
 }
+interface GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding {
+/** Required. The list of indexes for the embedding values of the sparse vector. */
+dimensions?: string[];
+/** Required. The list of embedding values of the sparse vector. */
+values?: number[];
+}
 interface GoogleCloudAiplatformV1beta1IndexEndpoint {
 /** Output only. Timestamp when this IndexEndpoint was created. */
 createTime?: string;
@@ -2995,6 +2804,8 @@ declare namespace gapi.client {
 interface GoogleCloudAiplatformV1beta1IndexStats {
 /** Output only. The number of shards in the Index. */
 shardsCount?: number;
+/** Output only. The number of sparse vectors in the Index. */
+sparseVectorsCount?: string;
 /** Output only. The number of dense vectors in the Index. */
 vectorsCount?: string;
 }
@@ -3435,6 +3246,8 @@ declare namespace gapi.client {
 interface GoogleCloudAiplatformV1beta1MetadataStore {
 /** Output only. Timestamp when this MetadataStore was created. */
 createTime?: string;
+/** Optional. Dataplex integration settings. */
+dataplexConfig?: GoogleCloudAiplatformV1beta1MetadataStoreDataplexConfig;
 /** Description of the MetadataStore. */
 description?: string;
 /** Customer-managed encryption key spec for a Metadata Store. If set, this Metadata Store and all sub-resources of this Metadata Store are secured using this key. */
@@ -3446,6 +3259,10 @@ declare namespace gapi.client {
 /** Output only. Timestamp when this MetadataStore was last updated. */
 updateTime?: string;
 }
+interface GoogleCloudAiplatformV1beta1MetadataStoreDataplexConfig {
+/** Optional. Whether or not Data Lineage synchronization is enabled for Vertex Pipelines. */
+enabledPipelinesLineage?: boolean;
+}
 interface GoogleCloudAiplatformV1beta1MetadataStoreMetadataStoreState {
 /** The disk utilization of the MetadataStore in bytes. */
 diskUtilizationBytes?: string;
@@ -4175,7 +3992,7 @@ declare namespace gapi.client {
 doubleValue?: number;
 }
 interface GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValueDistributionDataValue {
-/** tensorflow.metadata.v0.DatasetFeatureStatistics format. */
+/** Predictive monitoring drift distribution in `tensorflow.metadata.v0.DatasetFeatureStatistics` format. */
 distribution?: any;
 /** Distribution distance deviation from the current dataset's statistics to baseline dataset's statistics. * For categorical feature, the distribution distance is calculated by L-inifinity norm or Jensen–Shannon divergence. * For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. */
 distributionDeviation?: number;
@@ -4441,8 +4258,6 @@ declare namespace gapi.client {
 interface GoogleCloudAiplatformV1beta1NotebookExecutionJob {
 /** Output only. Timestamp when this NotebookExecutionJob was created. */
 createTime?: string;
-/** The custom compute configuration for an execution job. */
-customEnvironmentSpec?: GoogleCloudAiplatformV1beta1NotebookExecutionJobCustomEnvironmentSpec;
 /** The Dataform Repository pointing to a single file notebook repository. */
 dataformRepositorySource?: GoogleCloudAiplatformV1beta1NotebookExecutionJobDataformRepositorySource;
 /** The contents of an input notebook file. */
@@ -4474,14 +4289,6 @@ declare namespace gapi.client {
 /** Output only. Timestamp when this NotebookExecutionJob was most recently updated. */
 updateTime?: string;
 }
-interface GoogleCloudAiplatformV1beta1NotebookExecutionJobCustomEnvironmentSpec {
-/** The specification of a single machine for the execution job. */
-machineSpec?: GoogleCloudAiplatformV1beta1MachineSpec;
-/** The network configuration to use for the execution job. */
-networkSpec?: GoogleCloudAiplatformV1beta1NetworkSpec;
-/** The specification of a persistent disk to attach for the execution job. */
-persistentDiskSpec?: GoogleCloudAiplatformV1beta1PersistentDiskSpec;
-}
 interface GoogleCloudAiplatformV1beta1NotebookExecutionJobDataformRepositorySource {
 /** The commit SHA to read repository with. If unset, the file will be read at HEAD. */
 commitSha?: string;
@@ -4519,10 +4326,14 @@ declare namespace gapi.client {
 description?: string;
 /** Required. The display name of the NotebookRuntime. The name can be up to 128 characters long and can consist of any UTF-8 characters. */
 displayName?: string;
+/** Output only. Customer-managed encryption key spec for the notebook runtime. */
+encryptionSpec?: GoogleCloudAiplatformV1beta1EncryptionSpec;
 /** Output only. Timestamp when this NotebookRuntime will be expired: 1. System Predefined NotebookRuntime: 24 hours after creation. After expiration, system predifined runtime will be deleted. 2. User created NotebookRuntime: 6 months after last upgrade. After expiration, user created runtime will be stopped and allowed for upgrade. */
 expirationTime?: string;
 /** Output only. The health state of the NotebookRuntime. */
 healthState?: string;
+/** Output only. The idle shutdown configuration of the notebook runtime. */
+idleShutdownConfig?: GoogleCloudAiplatformV1beta1NotebookIdleShutdownConfig;
 /** Output only. Whether NotebookRuntime is upgradable. */
 isUpgradable?: boolean;
 /** The labels with user-defined metadata to organize your NotebookRuntime. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one NotebookRuntime (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for NotebookRuntime: * "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": output only, its value is the Compute Engine instance id. * "aiplatform.googleapis.com/colab_enterprise_entry_service": its value is either "bigquery" or "vertex"; if absent, it should be "vertex". This is to describe the entry service, either BigQuery or Vertex. */
@@ -4563,6 +4374,8 @@ declare namespace gapi.client {
 description?: string;
 /** Required. The display name of the NotebookRuntimeTemplate. The name can be up to 128 characters long and can consist of any UTF-8 characters. */
 displayName?: string;
+/** Customer-managed encryption key spec for the notebook runtime. */
+encryptionSpec?: GoogleCloudAiplatformV1beta1EncryptionSpec;
 /** Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */
 etag?: string;
 /** EUC configuration of the NotebookRuntimeTemplate. */
@@ -4917,7 +4730,7 @@ declare namespace gapi.client {
 projectAllowlist?: string[];
 }
 interface GoogleCloudAiplatformV1beta1Probe {
-/**
+/** ExecAction probes the health of a container by executing a command. */
 exec?: GoogleCloudAiplatformV1beta1ProbeExecAction;
 /** How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Must be less than timeout_seconds. Maps to Kubernetes probe argument 'periodSeconds'. */
 periodSeconds?: number;
@@ -5616,9 +5429,9 @@ declare namespace gapi.client {
 fileOutputGcsBucket?: string;
 }
 interface GoogleCloudAiplatformV1beta1RuntimeConfigVertexAISearchRuntimeConfig {
-/** Vertex AI Search
-
-/**
+/** Optional. Vertex AI Search engine ID. This is used to construct the search request. By setting this engine_id, API will construct the serving config using the default value to call search API for the user. The engine_id and serving_config_name cannot both be empty at the same time. */
+engineId?: string;
+/** Optional. Vertex AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` */
 servingConfigName?: string;
 }
 interface GoogleCloudAiplatformV1beta1SafetyInput {
@@ -7280,6 +7093,16 @@ declare namespace gapi.client {
 /** A list of string values. */
 values?: string[];
 }
+interface GoogleCloudAiplatformV1beta1StructFieldValue {
+/** Name of the field in the struct feature. */
+name?: string;
+/** The value for this field. */
+value?: GoogleCloudAiplatformV1beta1FeatureValue;
+}
+interface GoogleCloudAiplatformV1beta1StructValue {
+/** A list of field values. */
+values?: GoogleCloudAiplatformV1beta1StructFieldValue[];
+}
 interface GoogleCloudAiplatformV1beta1Study {
 /** Output only. Time at which the study was created. */
 createTime?: string;
@@ -7820,6 +7643,8 @@ declare namespace gapi.client {
 interface GoogleCloudAiplatformV1beta1Tool {
 /** Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. */
 functionDeclarations?: GoogleCloudAiplatformV1beta1FunctionDeclaration[];
+/** Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search. */
+googleSearchRetrieval?: any;
 /** Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. */
 retrieval?: GoogleCloudAiplatformV1beta1Retrieval;
 }
@@ -8031,6 +7856,8 @@ declare namespace gapi.client {
 createTime?: string;
 /** Optional. The description of the TuningJob. */
 description?: string;
+/** Tuning Spec for Distillation. */
+distillationSpec?: GoogleCloudAiplatformV1beta1DistillationSpec;
 /** Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key. */
 encryptionSpec?: GoogleCloudAiplatformV1beta1EncryptionSpec;
 /** Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. */
@@ -8043,6 +7870,8 @@ declare namespace gapi.client {
 labels?: {[P in string]: string};
 /** Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}` */
 name?: string;
+/** Output only. The resource name of the PipelineJob associated with the TuningJob. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`. */
+pipelineJob?: string;
 /** Output only. Time when the TuningJob for the first time entered the `JOB_STATE_RUNNING` state. */
 startTime?: string;
 /** Output only. The detailed state of the job. */
@@ -8413,671 +8242,6 @@ declare namespace gapi.client {
 /** The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. */
 units?: string;
 }
-interface IntelligenceCloudAutomlXpsMetricEntry {
-/** For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the "cloudbilling.googleapis.com/argentum_metric_id" label. Otherwise leave empty. */
-argentumMetricId?: string;
-/** A double value. */
-doubleValue?: number;
-/** A signed 64-bit integer value. */
-int64Value?: string;
-/** The metric name defined in the service configuration. */
-metricName?: string;
-/** Billing system labels for this (metric, value) pair. */
-systemLabels?: IntelligenceCloudAutomlXpsMetricEntryLabel[];
-}
-interface IntelligenceCloudAutomlXpsMetricEntryLabel {
-/** The name of the label. */
-labelName?: string;
-/** The value of the label. */
-labelValue?: string;
-}
-interface IntelligenceCloudAutomlXpsReportingMetrics {
-/** The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set. */
-effectiveTrainingDuration?: string;
-/** One entry per metric name. The values must be aggregated per metric name. */
-metricEntries?: IntelligenceCloudAutomlXpsMetricEntry[];
-}
-interface LanguageLabsAidaTrustRecitationProtoDocAttribution {
-amarnaId?: string;
-arxivId?: string;
-author?: string;
-bibkey?: string;
-/** ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517 */
-biorxivId?: string;
-bookTitle?: string;
-/** The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. */
-bookVolumeId?: string;
-category?: string;
-conversationId?: string;
-/** The dataset this document comes from. */
-dataset?: string;
-filepath?: string;
-geminiId?: string;
-gnewsArticleTitle?: string;
-goodallExampleId?: string;
-/** Whether the document is opted out. */
-isOptOut?: boolean;
-isPrompt?: boolean;
-lamdaExampleId?: string;
-license?: string;
-meenaConversationId?: string;
-/** Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. */
-naturalLanguageCode?: string;
-/** True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. */
-noAttribution?: boolean;
-podcastUtteranceId?: string;
-publicationDate?: GoogleTypeDate;
-/** This field is for opt-out experiment only, MUST never be used during actual production/serving. */
-qualityScoreExperimentOnly?: number;
-/** Github repository */
-repo?: string;
-/** URL of a webdoc */
-url?: string;
-volumeId?: string;
-/** Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. */
-wikipediaArticleTitle?: string;
-/** The unique video id from Youtube. Example: AkoGsW52Ir0 */
-youtubeVideoId?: string;
-}
-interface LanguageLabsAidaTrustRecitationProtoRecitationResult {
-dynamicSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
-/** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. When the given input is not found in any source, the recitation action will not be specified. */
-recitationAction?: string;
-trainingSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
-}
-interface LanguageLabsAidaTrustRecitationProtoSegmentResult {
-/** The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. */
-attributionDataset?: string;
-/** human-friendly string that contains information from doc_attribution which could be shown by clients */
-displayAttributionMessage?: string;
-docAttribution?: LanguageLabsAidaTrustRecitationProtoDocAttribution;
-/** number of documents that contained this segment */
-docOccurrences?: number;
-endIndex?: number;
-/** The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. */
-rawText?: string;
-segmentRecitationAction?: string;
-/** The category of the source dataset where the segment came from. This is more stable than Dataset. */
-sourceCategory?: string;
-/** The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. */
-startIndex?: number;
-}
-interface LanguageLabsAidaTrustRecitationProtoStreamRecitationResult {
-/** The recitation result against the given dynamic data source. */
-dynamicSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
-/** Last index of input text fully checked for recitation in the entire streaming context. Would return `-1` if no Input was checked for recitation. */
-fullyCheckedTextIndex?: number;
-/** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. */
-recitationAction?: string;
-/** The recitation result against model training data. */
-trainingSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
-}
-interface LearningGenaiRecitationContentChunkRecitationCheckResult {
-imageResult?: LearningGenaiRecitationImageRecitationCheckResult;
-textResult?: LearningGenaiRecitationRecitationResult;
-}
-interface LearningGenaiRecitationDocAttribution {
-amarnaId?: string;
-arxivId?: string;
-author?: string;
-bibkey?: string;
-/** ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517 */
-biorxivId?: string;
-bookTitle?: string;
-/** The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. */
-bookVolumeId?: string;
-conversationId?: string;
-/** The dataset this document comes from. */
-dataset?: string;
-filepath?: string;
-geminiId?: string;
-gnewsArticleTitle?: string;
-goodallExampleId?: string;
-/** Whether the document is opted out. */
-isOptOut?: boolean;
-/** When true, this attribution came from the user's prompt. */
-isPrompt?: boolean;
-lamdaExampleId?: string;
-license?: string;
-meenaConversationId?: string;
-/** Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. */
-naturalLanguageCode?: string;
-/** True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. */
-noAttribution?: boolean;
-podcastUtteranceId?: string;
-publicationDate?: GoogleTypeDate;
-/** This field is for opt-out experiment only, MUST never be used during actual production/serving. */
-qualityScoreExperimentOnly?: number;
-/** Github repository */
-repo?: string;
-/** URL of a webdoc */
-url?: string;
-volumeId?: string;
-/** Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. */
-wikipediaArticleTitle?: string;
-youtubeVideoId?: string;
-}
-interface LearningGenaiRecitationImageDocAttribution {
-/** Unique ID of the image. */
-datasetName?: string;
-/** Doc ID to identify the image. These could be urls of images or amarna id. */
-stringDocids?: string;
-}
-interface LearningGenaiRecitationImageRecitationCheckResult {
-/** Only has NO_ACTION or BLOCK to start with. */
-recitationAction?: string;
-/** Images that are similar to the requested image. */
-recitedImages?: LearningGenaiRecitationImageRecitationCheckResultSimilarImage[];
-}
-interface LearningGenaiRecitationImageRecitationCheckResultSimilarImage {
-/** Attribution information about the image */
-docAttribution?: LearningGenaiRecitationImageDocAttribution;
-/** The memorization embedding model that returned this image */
-embeddingModel?: string;
-/** Image ID corresponding of the image corresponding to the score. `image_id` serves for debugging purposes and can't be used by clients to retrieve an image. */
-imageId?: string;
-/** Similarity score of requested image compared with image in training data. */
-scores?: number;
-}
-interface LearningGenaiRecitationMMRecitationCheckResult {
-chunkResults?: LearningGenaiRecitationContentChunkRecitationCheckResult[];
-/** Overall recommended recitation action for the content. */
-recitationAction?: string;
-}
-interface LearningGenaiRecitationRecitationResult {
-dynamicSegmentResults?: LearningGenaiRecitationSegmentResult[];
-/** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. When the given input is not found in any source, the recitation action will be NO_ACTION. */
-recitationAction?: string;
-trainingSegmentResults?: LearningGenaiRecitationSegmentResult[];
-}
-interface LearningGenaiRecitationSegmentResult {
-/** The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. */
-attributionDataset?: string;
-/** human-friendly string that contains information from doc_attribution which could be shown by clients */
-displayAttributionMessage?: string;
-docAttribution?: LearningGenaiRecitationDocAttribution;
-/** number of documents that contained this segment */
-docOccurrences?: number;
-endIndex?: number;
-/** The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. */
-rawText?: string;
-segmentRecitationAction?: string;
-/** The category of the source dataset where the segment came from. This is more stable than Dataset. */
-sourceCategory?: string;
-/** The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. */
-startIndex?: number;
-}
-interface LearningGenaiRootCalculationType {
-scoreType?: string;
-weights?: number;
-}
-interface LearningGenaiRootClassifierOutput {
-/** If set, this is the output of the first matching rule. */
-ruleOutput?: LearningGenaiRootRuleOutput;
-/** outputs of all matching rule. */
-ruleOutputs?: LearningGenaiRootRuleOutput[];
-/** The results of data_providers and metrics. */
-state?: LearningGenaiRootClassifierState;
-}
-interface LearningGenaiRootClassifierOutputSummary {
-metrics?: LearningGenaiRootMetricOutput[];
-/** Output of the first matching rule. */
-ruleOutput?: LearningGenaiRootRuleOutput;
-/** outputs of all matching rule. */
-ruleOutputs?: LearningGenaiRootRuleOutput[];
-}
-interface LearningGenaiRootClassifierState {
-dataProviderOutput?: LearningGenaiRootDataProviderOutput[];
-metricOutput?: LearningGenaiRootMetricOutput[];
-}
-interface LearningGenaiRootCodeyChatMetadata {
-/** Indicates the programming language of the code if the message is a code chunk. */
-codeLanguage?: string;
-}
-interface LearningGenaiRootCodeyCheckpoint {
-/** Metadata that describes what was truncated at this checkpoint. */
-codeyTruncatorMetadata?: LearningGenaiRootCodeyTruncatorMetadata;
-/** Current state of the sample after truncator. */
-currentSample?: string;
-/** Postprocessor run that yielded this checkpoint. */
-postInferenceStep?: string;
-}
-interface LearningGenaiRootCodeyCompletionMetadata {
-checkpoints?: LearningGenaiRootCodeyCheckpoint[];
-}
-interface LearningGenaiRootCodeyGenerationMetadata {
-/** Last state of the sample before getting dropped/returned. */
-output?: string;
-/** Last Codey postprocessing step for this sample before getting dropped/returned. */
-postInferenceStep?: string;
-}
-interface LearningGenaiRootCodeyOutput {
-codeyChatMetadata?: LearningGenaiRootCodeyChatMetadata;
-codeyCompletionMetadata?: LearningGenaiRootCodeyCompletionMetadata;
-codeyGenerationMetadata?: LearningGenaiRootCodeyGenerationMetadata;
-}
-interface LearningGenaiRootCodeyTruncatorMetadata {
-/** Index of the current sample that trims off truncated text. */
-cutoffIndex?: number;
-/** Text that was truncated at a specific checkpoint. */
-truncatedText?: string;
-}
-interface LearningGenaiRootControlDecodingConfigThreshold {
-policy?: string;
-scoreMax?: number;
-}
-interface LearningGenaiRootControlDecodingRecord {
-/** Prefixes feeded into scorer. */
-prefixes?: string;
-/** Per policy scores returned from Scorer. Expect to have the same number of scores as in `thresholds`. */
-scores?: LearningGenaiRootControlDecodingRecordPolicyScore[];
-/** Suffixes feeded into scorer. */
-suffiexes?: string;
-/** Per policy thresholds from user config. */
-thresholds?: LearningGenaiRootControlDecodingConfigThreshold[];
-}
-interface LearningGenaiRootControlDecodingRecordPolicyScore {
-policy?: string;
-score?: number;
-}
-interface LearningGenaiRootControlDecodingRecords {
-/** One ControlDecodingRecord record maps to one rewind. */
-records?: LearningGenaiRootControlDecodingRecord[];
-}
-interface LearningGenaiRootDataProviderOutput {
-name?: string;
-/** If set, this DataProvider failed and this is the error message. */
-status?: UtilStatusProto;
-}
-interface LearningGenaiRootFilterMetadata {
-/** Filter confidence. */
-confidence?: string;
-/** Debug info for the message. */
-debugInfo?: LearningGenaiRootFilterMetadataFilterDebugInfo;
-/** A fallback message chosen by the applied filter. */
-fallback?: string;
-/** Additional info for the filter. */
-info?: string;
-/** Name of the filter that triggered. */
-name?: string;
-/** Filter reason. */
-reason?: string;
-/** The input query or generated response that is getting filtered. */
-text?: string;
-}
-interface LearningGenaiRootFilterMetadataFilterDebugInfo {
-classifierOutput?: LearningGenaiRootClassifierOutput;
-defaultMetadata?: string;
-languageFilterResult?: LearningGenaiRootLanguageFilterResult;
-/** Safety filter output information for LLM Root RAI harm check. */
-raiOutput?: LearningGenaiRootRAIOutput;
-raiResult?: CloudAiNlLlmProtoServiceRaiResult;
-raiSignal?: CloudAiNlLlmProtoServiceRaiSignal;
-/** Number of rewinds by controlled decoding. */
-records?: LearningGenaiRootControlDecodingRecords;
-streamRecitationResult?: LanguageLabsAidaTrustRecitationProtoStreamRecitationResult;
-takedownResult?: LearningGenaiRootTakedownResult;
-toxicityResult?: LearningGenaiRootToxicityResult;
-}
-interface LearningGenaiRootGroundingMetadata {
-citations?: LearningGenaiRootGroundingMetadataCitation[];
-/** True if grounding is cancelled, for example, no facts being retrieved. */
-groundingCancelled?: boolean;
-searchQueries?: string[];
-}
-interface LearningGenaiRootGroundingMetadataCitation {
-/** Index in the prediction output where the citation ends (exclusive). Must be > start_index and <= len(output). */
-endIndex?: number;
-/** Index of the fact supporting this claim. Should be within the range of the `world_facts` in the GenerateResponse. */
-factIndex?: number;
-/** Confidence score of this entailment. Value is [0,1] with 1 is the most confidence. */
-score?: number;
-/** Index in the prediction output where the citation starts (inclusive). Must be >= 0 and < end_index. */
-startIndex?: number;
-}
-interface LearningGenaiRootHarm {
-/** Please do not use, this is still under development. */
-contextualDangerous?: boolean;
-csam?: boolean;
-fringe?: boolean;
-grailImageHarmType?: LearningGenaiRootHarmGrailImageHarmType;
-grailTextHarmType?: LearningGenaiRootHarmGrailTextHarmType;
-imageChild?: boolean;
-imageCsam?: boolean;
-imagePedo?: boolean;
-/** Image signals */
-imagePorn?: boolean;
-imageViolence?: boolean;
-pqc?: boolean;
-safetycat?: LearningGenaiRootHarmSafetyCatCategories;
-/** Spii Filter uses buckets http://google3/google/privacy/dlp/v2/storage.proto;l=77;rcl=584719820 to classify the input. LMRoot converts the bucket into double score. For example the score for "POSSIBLE" is 3 / 5 = 0.6 . */
-spii?: LearningGenaiRootHarmSpiiFilter;
-threshold?: number;
-videoFrameChild?: boolean;
-videoFrameCsam?: boolean;
-videoFramePedo?: boolean;
-/** Video frame signals */
-videoFramePorn?: boolean;
-videoFrameViolence?: boolean;
-}
-interface LearningGenaiRootHarmGrailImageHarmType {
-imageHarmType?: string[];
-}
-interface LearningGenaiRootHarmGrailTextHarmType {
-harmType?: string[];
-}
-interface LearningGenaiRootHarmSafetyCatCategories {
|
|
8770
|
-
categories?: string[];
|
|
8771
|
-
}
|
|
8772
|
-
interface LearningGenaiRootHarmSpiiFilter {
|
|
8773
|
-
usBankRoutingMicr?: boolean;
|
|
8774
|
-
usEmployerIdentificationNumber?: boolean;
|
|
8775
|
-
usSocialSecurityNumber?: boolean;
|
|
8776
|
-
}
|
|
8777
|
-
interface LearningGenaiRootInternalMetadata {
|
|
8778
|
-
scoredTokens?: LearningGenaiRootScoredToken[];
|
|
8779
|
-
}
|
|
8780
|
-
interface LearningGenaiRootLanguageFilterResult {
|
|
8781
|
-
/** False when query or response should be filtered out due to unsupported language. */
|
|
8782
|
-
allowed?: boolean;
|
|
8783
|
-
/** Language of the query or response. */
|
|
8784
|
-
detectedLanguage?: string;
|
|
8785
|
-
/** Probability of the language predicted as returned by LangID. */
|
|
8786
|
-
detectedLanguageProbability?: number;
|
|
8787
|
-
}
|
|
8788
|
-
interface LearningGenaiRootMetricOutput {
|
|
8789
|
-
debug?: string;
|
|
8790
|
-
/** Name of the metric. */
|
|
8791
|
-
name?: string;
|
|
8792
|
-
numericValue?: number;
|
|
8793
|
-
status?: UtilStatusProto;
|
|
8794
|
-
stringValue?: string;
|
|
8795
|
-
}
|
|
8796
|
-
interface LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata {
|
|
8797
|
-
/** Latency spent on fact retrievals. There might be multiple retrievals from different fact providers. */
|
|
8798
|
-
factRetrievalMillisecondsByProvider?: {[P in string]: string};
|
|
8799
|
-
/** Latency spent on prompt2query. The procedure generates a search-friendly query given the original prompt. */
|
|
8800
|
-
prompt2queryMilliseconds?: string;
|
|
8801
|
-
/** Latency if use GroundedGeneration service for the whole retrieval & augmentation. */
|
|
8802
|
-
retrievalAugmentMilliseconds?: string;
|
|
8803
|
-
}
|
|
8804
|
-
interface LearningGenaiRootRAIOutput {
|
|
8805
|
-
allowed?: boolean;
|
|
8806
|
-
harm?: LearningGenaiRootHarm;
|
|
8807
|
-
name?: string;
|
|
8808
|
-
score?: number;
|
|
8809
|
-
}
|
|
8810
|
-
interface LearningGenaiRootRegexTakedownResult {
|
|
8811
|
-
/** False when query or response should be taken down due to match with a blocked regex, true otherwise. */
|
|
8812
|
-
allowed?: boolean;
|
|
8813
|
-
/** Regex used to decide that query or response should be taken down. Empty when query or response is kept. */
|
|
8814
|
-
takedownRegex?: string;
|
|
8815
|
-
}
|
|
8816
|
-
interface LearningGenaiRootRequestMetrics {
|
|
8817
|
-
/** Metrics for audio samples in the request. */
|
|
8818
|
-
audioMetrics?: LearningGenaiRootRequestMetricsAudioMetrics;
|
|
8819
|
-
/** Metrics for image samples in the request. */
|
|
8820
|
-
imageMetrics?: LearningGenaiRootRequestMetricsImageMetrics;
|
|
8821
|
-
/** Number of text tokens extracted from the request. */
|
|
8822
|
-
textTokenCount?: number;
|
|
8823
|
-
/** Total number of tokens in the request. */
|
|
8824
|
-
totalTokenCount?: number;
|
|
8825
|
-
/** Metrics for video samples in the request. */
|
|
8826
|
-
videoMetrics?: LearningGenaiRootRequestMetricsVideoMetrics;
|
|
8827
|
-
}
|
|
8828
|
-
interface LearningGenaiRootRequestMetricsAudioMetrics {
|
|
8829
|
-
/** Duration of the audio sample in seconds. */
|
|
8830
|
-
audioDuration?: string;
|
|
8831
|
-
/** Number of tokens derived directly from audio data. */
|
|
8832
|
-
audioTokenCount?: number;
|
|
8833
|
-
/** Number of audio frames in the audio. */
|
|
8834
|
-
numAudioFrames?: number;
|
|
8835
|
-
}
|
|
8836
|
-
interface LearningGenaiRootRequestMetricsImageMetrics {
|
|
8837
|
-
/** Number of tokens extracted from image bytes. */
|
|
8838
|
-
imageTokenCount?: number;
|
|
8839
|
-
/** Number of images in the request. */
|
|
8840
|
-
numImages?: number;
|
|
8841
|
-
}
|
|
8842
|
-
interface LearningGenaiRootRequestMetricsVideoMetrics {
|
|
8843
|
-
/** Metrics associated with audio sample in the video. */
|
|
8844
|
-
audioSample?: LearningGenaiRootRequestMetricsAudioMetrics;
|
|
8845
|
-
/** Number of video frames in the video. */
|
|
8846
|
-
numVideoFrames?: number;
|
|
8847
|
-
/** Duration of the video sample in seconds. */
|
|
8848
|
-
videoDuration?: string;
|
|
8849
|
-
/** Number of tokens extracted from video frames. */
|
|
8850
|
-
videoFramesTokenCount?: number;
|
|
8851
|
-
}
|
|
8852
|
-
interface LearningGenaiRootRequestResponseTakedownResult {
|
|
8853
|
-
/** False when response has to be taken down per above config. */
|
|
8854
|
-
allowed?: boolean;
|
|
8855
|
-
/** Regex used to match the request. */
|
|
8856
|
-
requestTakedownRegex?: string;
|
|
8857
|
-
/** Regex used to decide that response should be taken down. Empty when response is kept. */
|
|
8858
|
-
responseTakedownRegex?: string;
|
|
8859
|
-
}
|
|
8860
|
-
interface LearningGenaiRootRoutingDecision {
|
|
8861
|
-
metadata?: LearningGenaiRootRoutingDecisionMetadata;
|
|
8862
|
-
/** The selected model to route traffic to. */
|
|
8863
|
-
modelConfigId?: string;
|
|
8864
|
-
}
|
|
8865
|
-
interface LearningGenaiRootRoutingDecisionMetadata {
|
|
8866
|
-
scoreBasedRoutingMetadata?: LearningGenaiRootRoutingDecisionMetadataScoreBased;
|
|
8867
|
-
tokenLengthBasedRoutingMetadata?: LearningGenaiRootRoutingDecisionMetadataTokenLengthBased;
|
|
8868
|
-
}
|
|
8869
|
-
interface LearningGenaiRootRoutingDecisionMetadataScoreBased {
|
|
8870
|
-
/** The rule that was matched. */
|
|
8871
|
-
matchedRule?: LearningGenaiRootScoreBasedRoutingConfigRule;
|
|
8872
|
-
/** The score that was generated by the router i.e. the model. */
|
|
8873
|
-
score?: LearningGenaiRootScore;
|
|
8874
|
-
/** No rules were matched & therefore used the default fallback. */
|
|
8875
|
-
usedDefaultFallback?: boolean;
|
|
8876
|
-
}
|
|
8877
|
-
interface LearningGenaiRootRoutingDecisionMetadataTokenLengthBased {
|
|
8878
|
-
modelInputTokenMetadata?: LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata[];
|
|
8879
|
-
modelMaxTokenMetadata?: LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata[];
|
|
8880
|
-
}
|
|
8881
|
-
interface LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata {
|
|
8882
|
-
/** The length computed by backends using the formatter & tokenizer specific to the model */
|
|
8883
|
-
computedInputTokenLength?: number;
|
|
8884
|
-
modelId?: string;
|
|
8885
|
-
/** If true, the model was selected as a fallback, since no model met requirements. */
|
|
8886
|
-
pickedAsFallback?: boolean;
|
|
8887
|
-
/** If true, the model was selected since it met the requriements. */
|
|
8888
|
-
selected?: boolean;
|
|
8889
|
-
}
|
|
8890
|
-
interface LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata {
|
|
8891
|
-
maxNumInputTokens?: number;
|
|
8892
|
-
maxNumOutputTokens?: number;
|
|
8893
|
-
modelId?: string;
|
|
8894
|
-
}
|
|
8895
|
-
interface LearningGenaiRootRuleOutput {
|
|
8896
|
-
decision?: string;
|
|
8897
|
-
name?: string;
|
|
8898
|
-
}
|
|
8899
|
-
interface LearningGenaiRootScore {
|
|
8900
|
-
calculationType?: LearningGenaiRootCalculationType;
|
|
8901
|
-
/** The internal_metadata is intended to be used by internal processors and will be cleared before returns. */
|
|
8902
|
-
internalMetadata?: LearningGenaiRootInternalMetadata;
|
|
8903
|
-
thresholdType?: LearningGenaiRootThresholdType;
|
|
8904
|
-
/** Top candidate tokens and log probabilities at each decoding step. */
|
|
8905
|
-
tokensAndLogprobPerDecodingStep?: LearningGenaiRootTokensAndLogProbPerDecodingStep;
|
|
8906
|
-
value?: number;
|
|
8907
|
-
}
|
|
8908
|
-
interface LearningGenaiRootScoreBasedRoutingConfigRule {
|
|
8909
|
-
/** NOTE: Hardest examples have smaller values in their routing scores. */
|
|
8910
|
-
equalOrGreaterThan?: LearningGenaiRootScore;
|
|
8911
|
-
lessThan?: LearningGenaiRootScore;
|
|
8912
|
-
/** This model_config_id points to ModelConfig::id which allows us to find the ModelConfig to route to. This is part of the banks specified in the ModelBankConfig. */
|
|
8913
|
-
modelConfigId?: string;
|
|
8914
|
-
}
|
|
8915
|
-
interface LearningGenaiRootScoredSimilarityTakedownPhrase {
|
|
8916
|
-
phrase?: LearningGenaiRootSimilarityTakedownPhrase;
|
|
8917
|
-
similarityScore?: number;
|
|
8918
|
-
}
|
|
8919
|
-
interface LearningGenaiRootScoredToken {
|
|
8920
|
-
/** Each end_token_score is a logprob for how well the completion would end at a particular token. See http://google3/labs/language/aida/config/proto/model_config.proto;l=376;rcl=573039459 */
|
|
8921
|
-
endTokenScore?: number;
|
|
8922
|
-
/** Each score is the logprob for the token in model response. */
|
|
8923
|
-
score?: number;
|
|
8924
|
-
token?: string;
|
|
8925
|
-
}
|
|
8926
|
-
interface LearningGenaiRootSimilarityTakedownPhrase {
|
|
8927
|
-
blockedPhrase?: string;
|
|
8928
|
-
}
|
|
8929
|
-
interface LearningGenaiRootSimilarityTakedownResult {
|
|
8930
|
-
/** False when query or response should be taken down by any of the takedown rules, true otherwise. */
|
|
8931
|
-
allowed?: boolean;
|
|
8932
|
-
/** List of similar phrases with score. Set only if allowed=false. */
|
|
8933
|
-
scoredPhrases?: LearningGenaiRootScoredSimilarityTakedownPhrase[];
|
|
8934
|
-
}
|
|
8935
|
-
interface LearningGenaiRootTakedownResult {
|
|
8936
|
-
/** False when query or response should be taken down by any of the takedown rules, true otherwise. */
|
|
8937
|
-
allowed?: boolean;
|
|
8938
|
-
regexTakedownResult?: LearningGenaiRootRegexTakedownResult;
|
|
8939
|
-
requestResponseTakedownResult?: LearningGenaiRootRequestResponseTakedownResult;
|
|
8940
|
-
similarityTakedownResult?: LearningGenaiRootSimilarityTakedownResult;
|
|
8941
|
-
}
|
|
8942
|
-
interface LearningGenaiRootThresholdType {
|
|
8943
|
-
scoreType?: string;
|
|
8944
|
-
threshold?: number;
|
|
8945
|
-
}
|
|
8946
|
-
interface LearningGenaiRootTokensAndLogProbPerDecodingStep {
|
|
8947
|
-
/** Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates. */
|
|
8948
|
-
chosenCandidates?: LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[];
|
|
8949
|
-
/** Length = total number of decoding steps. */
|
|
8950
|
-
topCandidates?: LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates[];
|
|
8951
|
-
}
|
|
8952
|
-
interface LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate {
|
|
8953
|
-
/** The candidate's log probability. */
|
|
8954
|
-
logProbability?: number;
|
|
8955
|
-
/** The candidate’s token value. */
|
|
8956
|
-
token?: string;
|
|
8957
|
-
}
|
|
8958
|
-
interface LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates {
|
|
8959
|
-
/** Sorted by log probability in descending order. */
|
|
8960
|
-
candidates?: LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[];
|
|
8961
|
-
}
|
|
8962
|
-
interface LearningGenaiRootToxicityResult {
|
|
8963
|
-
signals?: LearningGenaiRootToxicitySignal[];
|
|
8964
|
-
}
|
|
8965
|
-
interface LearningGenaiRootToxicitySignal {
|
|
8966
|
-
allowed?: boolean;
|
|
8967
|
-
label?: string;
|
|
8968
|
-
score?: number;
|
|
8969
|
-
}
|
|
8970
|
-
interface LearningGenaiRootTranslationRequestInfo {
|
|
8971
|
-
/** The ISO-639 language code of source text in the initial request, detected automatically, if no source language was passed within the initial request. If the source language was passed, auto-detection of the language does not occur and this field is empty. */
|
|
8972
|
-
detectedLanguageCodes?: string[];
|
|
8973
|
-
/** The sum of the size of all the contents in the request. */
|
|
8974
|
-
totalContentSize?: string;
|
|
8975
|
-
}
|
|
8976
|
-
interface LearningServingLlmAtlasOutputMetadata {
|
|
8977
|
-
requestTopic?: string;
|
|
8978
|
-
source?: string;
|
|
8979
|
-
}
|
|
8980
|
-
interface LearningServingLlmMessageMetadata {
|
|
8981
|
-
atlasMetadata?: LearningServingLlmAtlasOutputMetadata;
|
|
8982
|
-
/** Summary of classifier output. We attach this to all messages regardless of whether classification rules triggered or not. */
|
|
8983
|
-
classifierSummary?: LearningGenaiRootClassifierOutputSummary;
|
|
8984
|
-
/** Contains metadata related to Codey Processors. */
|
|
8985
|
-
codeyOutput?: LearningGenaiRootCodeyOutput;
|
|
8986
|
-
currentStreamTextLength?: number;
|
|
8987
|
-
/** Whether the corresponding message has been deleted. */
|
|
8988
|
-
deleted?: boolean;
|
|
8989
|
-
/** Metadata for filters that triggered. */
|
|
8990
|
-
filterMeta?: LearningGenaiRootFilterMetadata[];
|
|
8991
|
-
/** This score is finally used for ranking the message. This will be same as the score present in `Message.score` field. */
|
|
8992
|
-
finalMessageScore?: LearningGenaiRootScore;
|
|
8993
|
-
/** NOT YET IMPLEMENTED. */
|
|
8994
|
-
finishReason?: string;
|
|
8995
|
-
groundingMetadata?: LearningGenaiRootGroundingMetadata;
|
|
8996
|
-
/** Applies to streaming response message only. Whether the message is a code. */
|
|
8997
|
-
isCode?: boolean;
|
|
8998
|
-
/** Applies to Response message only. Indicates whether the message is a fallback and the response would have otherwise been empty. */
|
|
8999
|
-
isFallback?: boolean;
|
|
9000
|
-
/** Result from nlp_saft DetectLanguage method. Currently the predicted language code and language probability is used. */
|
|
9001
|
-
langidResult?: NlpSaftLangIdResult;
|
|
9002
|
-
/** Detected language. */
|
|
9003
|
-
language?: string;
|
|
9004
|
-
/** The LM prefix used to generate this response. */
|
|
9005
|
-
lmPrefix?: string;
|
|
9006
|
-
/** FOR LMROOT INTERNAL USE ONLY. Externally, use learning.genai.root.RequestMetadata.RequestMetrics. Request metrics per modality including token count, duration, num_frames. */
|
|
9007
|
-
lmrootInternalRequestMetrics?: LearningGenaiRootRequestMetrics;
|
|
9008
|
-
/** Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. */
|
|
9009
|
-
mmRecitationResult?: LearningGenaiRecitationMMRecitationCheckResult;
|
|
9010
|
-
/** Number of Controlled Decoding rewind and repeats that have happened for this response. */
|
|
9011
|
-
numRewinds?: number;
|
|
9012
|
-
/** The original text generated by LLM. This is the raw output for debugging purposes. */
|
|
9013
|
-
originalText?: string;
|
|
9014
|
-
/** Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only. */
|
|
9015
|
-
perStreamDecodedTokenCount?: number;
|
|
9016
|
-
/** Number of tokens returned per stream in a response candidate after any response rewriting or truncation. Applies to streaming response only. Applies to Gemini models only. */
|
|
9017
|
-
perStreamReturnedTokenCount?: number;
|
|
9018
|
-
/** Results of running RAI on the query or this response candidate. One output per rai_config. It will be populated regardless of whether the threshold is exceeded or not. */
|
|
9019
|
-
raiOutputs?: LearningGenaiRootRAIOutput[];
|
|
9020
|
-
/** Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome. */
|
|
9021
|
-
recitationResult?: LearningGenaiRecitationRecitationResult;
|
|
9022
|
-
/** All the different scores for a message are logged here. */
|
|
9023
|
-
scores?: LearningGenaiRootScore[];
|
|
9024
|
-
/** Whether the response is terminated during streaming return. Only used for streaming requests. */
|
|
9025
|
-
streamTerminated?: boolean;
|
|
9026
|
-
/** Total tokens decoded so far per response_candidate. For streaming: Count of all the tokens decoded so far (aggregated count). For unary: Count of all the tokens decoded per response_candidate. */
|
|
9027
|
-
totalDecodedTokenCount?: number;
|
|
9028
|
-
/** Total number of tokens returned in a response candidate. For streaming, it is the aggregated count (i.e. total so far) Applies to Gemini models only. */
|
|
9029
|
-
totalReturnedTokenCount?: number;
|
|
9030
|
-
/** Translated user-prompt used for RAI post processing. This is for internal processing only. We will translate in pre-processor and pass the translated text to the post processor using this field. It will be empty if non of the signals requested need translation. */
|
|
9031
|
-
translatedUserPrompts?: string[];
|
|
9032
|
-
/** The metadata from Vertex SafetyCat processors */
|
|
9033
|
-
vertexRaiResult?: CloudAiNlLlmProtoServiceRaiResult;
|
|
9034
|
-
}
|
|
9035
|
-
interface NlpSaftLangIdLocalesResult {
|
|
9036
|
-
/** List of locales in which the text would be considered acceptable. Sorted in descending order according to each locale's respective likelihood. For example, if a Portuguese text is acceptable in both Brazil and Portugal, but is more strongly associated with Brazil, then the predictions would be ["pt-BR", "pt-PT"], in that order. May be empty, indicating that the model did not predict any acceptable locales. */
|
|
9037
|
-
predictions?: NlpSaftLangIdLocalesResultLocale[];
|
|
9038
|
-
}
|
|
9039
|
-
interface NlpSaftLangIdLocalesResultLocale {
|
|
9040
|
-
/** A BCP 47 language code that includes region information. For example, "pt-BR" or "pt-PT". This field will always be populated. */
|
|
9041
|
-
languageCode?: string;
|
|
9042
|
-
}
|
|
9043
|
-
interface NlpSaftLangIdResult {
|
|
9044
|
-
/** The version of the model used to create these annotations. */
|
|
9045
|
-
modelVersion?: string;
|
|
9046
|
-
/** This field stores the n-best list of possible BCP 47 language code strings for a given input sorted in descending order according to each code's respective probability. */
|
|
9047
|
-
predictions?: NlpSaftLanguageSpan[];
|
|
9048
|
-
/** This field stores language predictions of subspans of the input, when available. Each LanguageSpanSequence is a sequence of LanguageSpans. A particular sequence of LanguageSpans has an associated probability, and need not necessarily cover the entire input. If no language could be predicted for any span, then this field may be empty. */
|
|
9049
|
-
spanPredictions?: NlpSaftLanguageSpanSequence[];
|
|
9050
|
-
}
|
|
9051
|
-
interface NlpSaftLanguageSpan {
|
|
9052
|
-
end?: number;
|
|
9053
|
-
/** A BCP 47 language code for this span. */
|
|
9054
|
-
languageCode?: string;
|
|
9055
|
-
/** Optional field containing any information that was predicted about the specific locale(s) of the span. */
|
|
9056
|
-
locales?: NlpSaftLangIdLocalesResult;
|
|
9057
|
-
/** A probability associated with this prediction. */
|
|
9058
|
-
probability?: number;
|
|
9059
|
-
/** Start and end byte offsets, inclusive, within the given input string. A value of -1 implies that this field is not set. Both fields must either be set with a nonnegative value or both are unset. If both are unset then this LanguageSpan applies to the entire input. */
|
|
9060
|
-
start?: number;
|
|
9061
|
-
}
|
|
9062
|
-
interface NlpSaftLanguageSpanSequence {
|
|
9063
|
-
/** A sequence of LanguageSpan objects, each assigning a language to a subspan of the input. */
|
|
9064
|
-
languageSpans?: NlpSaftLanguageSpan[];
|
|
9065
|
-
/** The probability of this sequence of LanguageSpans. */
|
|
9066
|
-
probability?: number;
|
|
9067
|
-
}
|
|
9068
|
-
interface Proto2BridgeMessageSet {}
|
|
9069
|
-
interface UtilStatusProto {
|
|
9070
|
-
/** The canonical error code (see codes.proto) that most closely corresponds to this status. This may be missing, and in the common case of the generic space, it definitely will be. */
|
|
9071
|
-
canonicalCode?: number;
|
|
9072
|
-
/** Numeric code drawn from the space specified below. Often, this is the canonical error space, and code is drawn from google3/util/task/codes.proto */
|
|
9073
|
-
code?: number;
|
|
9074
|
-
/** Detail message */
|
|
9075
|
-
message?: string;
|
|
9076
|
-
/** message_set associates an arbitrary proto message with the status. */
|
|
9077
|
-
messageSet?: any;
|
|
9078
|
-
/** The following are usually only present when code != 0 Space to which this status belongs */
|
|
9079
|
-
space?: string;
|
|
9080
|
-
}
|
|
9081
8245
|
interface MediaResource {
|
|
9082
8246
|
/** Upload a file into a RagCorpus. */
|
|
9083
8247
|
upload(request: {
|
|
@@ -12355,6 +11519,66 @@ declare namespace gapi.client {
     interface EdgeDevicesResource {
       operations: OperationsResource;
     }
+    interface ChatResource {
+      /** Exposes an OpenAI-compatible endpoint for chat completions. */
+      completions(request: {
+        /** V1 error format. */
+        '$.xgafv'?: string;
+        /** OAuth access token. */
+        access_token?: string;
+        /** Data format for response. */
+        alt?: string;
+        /** JSONP */
+        callback?: string;
+        /** Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/openapi` */
+        endpoint: string;
+        /** Selector specifying which fields to include in a partial response. */
+        fields?: string;
+        /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+        key?: string;
+        /** OAuth 2.0 token for the current user. */
+        oauth_token?: string;
+        /** Returns response with indentations and line breaks. */
+        prettyPrint?: boolean;
+        /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+        quotaUser?: string;
+        /** Upload protocol for media (e.g. "raw", "multipart"). */
+        upload_protocol?: string;
+        /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+        uploadType?: string;
+        /** Request body */
+        resource: GoogleApiHttpBody;
+      }): Request<GoogleApiHttpBody>;
+      completions(
+        request: {
+          /** V1 error format. */
+          '$.xgafv'?: string;
+          /** OAuth access token. */
+          access_token?: string;
+          /** Data format for response. */
+          alt?: string;
+          /** JSONP */
+          callback?: string;
+          /** Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/openapi` */
+          endpoint: string;
+          /** Selector specifying which fields to include in a partial response. */
+          fields?: string;
+          /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+          key?: string;
+          /** OAuth 2.0 token for the current user. */
+          oauth_token?: string;
+          /** Returns response with indentations and line breaks. */
+          prettyPrint?: boolean;
+          /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+          quotaUser?: string;
+          /** Upload protocol for media (e.g. "raw", "multipart"). */
+          upload_protocol?: string;
+          /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+          uploadType?: string;
+        },
+        body: GoogleApiHttpBody
+      ): Request<GoogleApiHttpBody>;
+    }
     interface OperationsResource {
       /** Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`. */
       cancel(request?: {
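The hunk above introduces a ChatResource whose completions() method exposes an OpenAI-compatible chat completions endpoint, and the next hunk attaches it to the endpoints resource as `chat`. The sketch below shows one way the new method could be invoked from the typed client. It is illustrative only: the resource path (projects.locations.endpoints.chat), the placeholder project and location values, and the JSON payload packed into the GoogleApiHttpBody are assumptions, not part of this package.

// Minimal usage sketch (assumes gapi.client is initialized, the aiplatform
// client is loaded, and the caller is authorized; names below are placeholders).
async function chatCompletionsSketch(): Promise<void> {
  const response = await gapi.client.aiplatform.projects.locations.endpoints.chat.completions({
    // Per the declaration, the endpoint name must use the fixed "openapi" collection.
    endpoint: 'projects/my-project/locations/us-central1/endpoints/openapi',
    // GoogleApiHttpBody wraps an arbitrary payload as a string; an OpenAI-style chat body is
    // assumed here, and whether it must be base64-encoded is not covered by this diff.
    resource: {
      contentType: 'application/json',
      data: JSON.stringify({
        model: 'google/gemini-1.5-flash-001',
        messages: [{role: 'user', content: 'Say hello in one sentence.'}],
      }),
    },
  });
  // The response is likewise an HttpBody; its data field carries the returned JSON.
  console.log(response.result.data);
}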
@@ -13556,6 +12780,7 @@ declare namespace gapi.client {
         },
         body: GoogleCloudAiplatformV1beta1UndeployModelRequest
       ): Request<GoogleLongrunningOperation>;
+      chat: ChatResource;
       operations: OperationsResource;
     }
     interface OperationsResource {
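The final hunk below adds patch() overloads to the reasoning engines resource, so a deployed ReasoningEngine can now be updated in place. A minimal, hypothetical call is sketched here; the resource path (projects.locations.reasoningEngines), the placeholder names, and the choice of displayName as the updated field are assumptions drawn from the declarations rather than verified behavior.

// Minimal usage sketch (assumes the aiplatform client is loaded and authorized;
// all identifiers below are placeholders).
async function renameReasoningEngineSketch(): Promise<void> {
  const operation = await gapi.client.aiplatform.projects.locations.reasoningEngines.patch({
    name: 'projects/my-project/locations/us-central1/reasoningEngines/1234567890',
    updateMask: 'displayName',
    // Request body: only the fields named in updateMask need to be populated.
    resource: {displayName: 'My renamed reasoning engine'},
  });
  // patch() returns a long-running operation; production code should poll it until done.
  console.log(operation.result.name);
}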
@@ -27550,6 +26775,68 @@ declare namespace gapi.client {
         /** Legacy upload protocol for media (e.g. "media", "multipart"). */
         uploadType?: string;
       }): Request<GoogleCloudAiplatformV1beta1ListReasoningEnginesResponse>;
+      /** Updates a reasoning engine. */
+      patch(request: {
+        /** V1 error format. */
+        '$.xgafv'?: string;
+        /** OAuth access token. */
+        access_token?: string;
+        /** Data format for response. */
+        alt?: string;
+        /** JSONP */
+        callback?: string;
+        /** Selector specifying which fields to include in a partial response. */
+        fields?: string;
+        /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+        key?: string;
+        /** Identifier. The resource name of the ReasoningEngine. */
+        name: string;
+        /** OAuth 2.0 token for the current user. */
+        oauth_token?: string;
+        /** Returns response with indentations and line breaks. */
+        prettyPrint?: boolean;
+        /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+        quotaUser?: string;
+        /** Required. Mask specifying which fields to update. */
+        updateMask?: string;
+        /** Upload protocol for media (e.g. "raw", "multipart"). */
+        upload_protocol?: string;
+        /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+        uploadType?: string;
+        /** Request body */
+        resource: GoogleCloudAiplatformV1beta1ReasoningEngine;
+      }): Request<GoogleLongrunningOperation>;
+      patch(
+        request: {
+          /** V1 error format. */
+          '$.xgafv'?: string;
+          /** OAuth access token. */
+          access_token?: string;
+          /** Data format for response. */
+          alt?: string;
+          /** JSONP */
+          callback?: string;
+          /** Selector specifying which fields to include in a partial response. */
+          fields?: string;
+          /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+          key?: string;
+          /** Identifier. The resource name of the ReasoningEngine. */
+          name: string;
+          /** OAuth 2.0 token for the current user. */
+          oauth_token?: string;
+          /** Returns response with indentations and line breaks. */
+          prettyPrint?: boolean;
+          /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+          quotaUser?: string;
+          /** Required. Mask specifying which fields to update. */
+          updateMask?: string;
+          /** Upload protocol for media (e.g. "raw", "multipart"). */
+          upload_protocol?: string;
+          /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+          uploadType?: string;
+        },
+        body: GoogleCloudAiplatformV1beta1ReasoningEngine
+      ): Request<GoogleLongrunningOperation>;
       /** Queries using a reasoning engine. */
       query(request: {
         /** V1 error format. */