@maxim_mazurok/gapi.client.aiplatform-v1beta1 0.0.20240507 → 0.0.20240510

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.d.ts +19 -913
  2. package/package.json +1 -1
package/index.d.ts CHANGED
@@ -9,7 +9,7 @@
  // This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
  // In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
  // Generated from: https://aiplatform.googleapis.com/$discovery/rest?version=v1beta1
- // Revision: 20240507
+ // Revision: 20240510

  /// <reference types="gapi.client" />

@@ -28,10 +28,6 @@ declare namespace gapi.client {
  ): void;

  namespace aiplatform {
- interface CloudAiLargeModelsVisionEmbedVideoResponse {
- /** The embedding vector for the video. */
- videoEmbeddings?: any[];
- }
  interface CloudAiLargeModelsVisionFilteredText {
  /** Confidence level */
  category?: string;
@@ -80,10 +76,6 @@ declare namespace gapi.client {
  /** Video */
  video?: CloudAiLargeModelsVisionVideo;
  }
- interface CloudAiLargeModelsVisionMediaGenerateContentResponse {
- /** Response to the user's request. */
- response?: CloudAiNlLlmProtoServiceGenerateMultiModalResponse;
- }
  interface CloudAiLargeModelsVisionNamedBoundingBox {
  classes?: string[];
  entities?: string[];
@@ -99,22 +91,6 @@ declare namespace gapi.client {
  /** List of rai scores mapping to the rai categories. Rounded to 1 decimal place. */
  scores?: number[];
  }
- interface CloudAiLargeModelsVisionReasonVideoResponse {
- /** Generated text responses. The generated responses for different segments within the same video. */
- responses?: CloudAiLargeModelsVisionReasonVideoResponseTextResponse[];
- }
- interface CloudAiLargeModelsVisionReasonVideoResponseTextResponse {
- /** Partition of the caption's video in time. This field is intended for video captioning. To represent the start time and end time of the caption's video. */
- relativeTemporalPartition?: CloudAiLargeModelsVisionRelativeTemporalPartition;
- /** Text information */
- text?: string;
- }
- interface CloudAiLargeModelsVisionRelativeTemporalPartition {
- /** End time offset of the partition. */
- endOffset?: string;
- /** Start time offset of the partition. */
- startOffset?: string;
- }
  interface CloudAiLargeModelsVisionSemanticFilterResponse {
  /** Class labels of the bounding boxes that failed the semantic filtering. Bounding box coordinates. */
  namedBoundingBoxes?: CloudAiLargeModelsVisionNamedBoundingBox[];
@@ -127,229 +103,6 @@ declare namespace gapi.client {
  /** Raw bytes. */
  video?: string;
  }
- interface CloudAiNlLlmProtoServiceCandidate {
- /** Source attribution of the generated content. */
- citationMetadata?: CloudAiNlLlmProtoServiceCitationMetadata;
- /** Content of the candidate. */
- content?: CloudAiNlLlmProtoServiceContent;
- /** A string that describes the filtering behavior in more detail. Only filled when reason is set. */
- finishMessage?: string;
- /** The reason why the model stopped generating tokens. */
- finishReason?: string;
- /** Grounding metadata. Combine with the facts list from response to generate grounding citations for this choice. */
- groundingMetadata?: LearningGenaiRootGroundingMetadata;
- /** Index of the candidate. */
- index?: number;
- /** Safety ratings of the generated content. */
- safetyRatings?: CloudAiNlLlmProtoServiceSafetyRating[];
- }
- interface CloudAiNlLlmProtoServiceCitation {
- /** End index into the content. */
- endIndex?: number;
- /** License of the attribution. */
- license?: string;
- /** Publication date of the attribution. */
- publicationDate?: GoogleTypeDate;
- /** Start index into the content. */
- startIndex?: number;
- /** Title of the attribution. */
- title?: string;
- /** Url reference of the attribution. */
- uri?: string;
- }
- interface CloudAiNlLlmProtoServiceCitationMetadata {
- /** List of citations. */
- citations?: CloudAiNlLlmProtoServiceCitation[];
- }
- interface CloudAiNlLlmProtoServiceContent {
- /** If true, the content is from a cached content. */
- isCached?: boolean;
- /** The parts of the message. */
- parts?: CloudAiNlLlmProtoServicePart[];
- /** The role of the current conversation participant. */
- role?: string;
- }
- interface CloudAiNlLlmProtoServiceFact {
- /** Query that is used to retrieve this fact. */
- query?: string;
- /** If present, the summary/snippet of the fact. */
- summary?: string;
- /** If present, it refers to the title of this fact. */
- title?: string;
- /** If present, this URL links to the webpage of the fact. */
- url?: string;
- }
- interface CloudAiNlLlmProtoServiceFunctionCall {
- /** The function parameters and values in JSON format. */
- args?: {[P in string]: any};
- /** Required. The name of the function to call. */
- name?: string;
- }
- interface CloudAiNlLlmProtoServiceFunctionResponse {
- /** Required. The name of the function to call. */
- name?: string;
- /** Required. The function response in JSON object format. */
- response?: {[P in string]: any};
- }
- interface CloudAiNlLlmProtoServiceGenerateMultiModalResponse {
- /** Possible candidate responses to the conversation up until this point. */
- candidates?: CloudAiNlLlmProtoServiceCandidate[];
- /** Debug information containing message metadata. Clients should not consume this field, and this is only populated for Flow Runner path. */
- debugMetadata?: CloudAiNlLlmProtoServiceMessageMetadata;
- /** External facts retrieved for factuality/grounding. */
- facts?: CloudAiNlLlmProtoServiceFact[];
- /** Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. */
- promptFeedback?: CloudAiNlLlmProtoServicePromptFeedback;
- /** Billable prediction metrics. */
- reportingMetrics?: IntelligenceCloudAutomlXpsReportingMetrics;
- /** Usage metadata about the response(s). */
- usageMetadata?: CloudAiNlLlmProtoServiceUsageMetadata;
- }
- interface CloudAiNlLlmProtoServiceMessageMetadata {
- /** Factuality-related debug metadata. */
- factualityDebugMetadata?: LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata;
- /** Filter metadata of the input messages. */
- inputFilterInfo?: LearningServingLlmMessageMetadata;
- /** This score is generated by the router model to decide which model to use */
- modelRoutingDecision?: LearningGenaiRootRoutingDecision;
- /** Filter metadata of the output messages. */
- outputFilterInfo?: LearningServingLlmMessageMetadata[];
- }
- interface CloudAiNlLlmProtoServicePart {
- /** Document metadata. The metadata should only be used by the Cloud LLM when supporting document mime types. It will only be populated when this image input part is converted from a document input part. */
- documentMetadata?: CloudAiNlLlmProtoServicePartDocumentMetadata;
- /** URI-based data. */
- fileData?: CloudAiNlLlmProtoServicePartFileData;
- /** Function call data. */
- functionCall?: CloudAiNlLlmProtoServiceFunctionCall;
- /** Function response data. */
- functionResponse?: CloudAiNlLlmProtoServiceFunctionResponse;
- /** Inline bytes data */
- inlineData?: CloudAiNlLlmProtoServicePartBlob;
- /** Metadata provides extra info for building the LM Root request. Note: High enough tag number for internal only fields. */
- lmRootMetadata?: CloudAiNlLlmProtoServicePartLMRootMetadata;
- /** Text input. */
- text?: string;
- /** Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. */
- videoMetadata?: CloudAiNlLlmProtoServicePartVideoMetadata;
- }
- interface CloudAiNlLlmProtoServicePartBlob {
- /** Inline data. */
- data?: string;
- /** The mime type corresponding to this input. */
- mimeType?: string;
- /** Original file data where the blob comes from. */
- originalFileData?: CloudAiNlLlmProtoServicePartFileData;
- }
- interface CloudAiNlLlmProtoServicePartDocumentMetadata {
- /** The original document blob. */
- originalDocumentBlob?: CloudAiNlLlmProtoServicePartBlob;
- /** The (1-indexed) page number of the image in the original document. The first page carries the original document content and mime type. */
- pageNumber?: number;
- }
- interface CloudAiNlLlmProtoServicePartFileData {
- /** Inline data. */
- fileUri?: string;
- /** The mime type corresponding to this input. */
- mimeType?: string;
- }
- interface CloudAiNlLlmProtoServicePartLMRootMetadata {
- /** Chunk id that will be used when mapping the part to the LM Root's chunk. */
- chunkId?: string;
- }
- interface CloudAiNlLlmProtoServicePartVideoMetadata {
- /** The end offset of the video. */
- endOffset?: string;
- /** The start offset of the video. */
- startOffset?: string;
- }
- interface CloudAiNlLlmProtoServicePromptFeedback {
- /** Blocked reason. */
- blockReason?: string;
- /** A readable block reason message. */
- blockReasonMessage?: string;
- /** Safety ratings. */
- safetyRatings?: CloudAiNlLlmProtoServiceSafetyRating[];
- }
- interface CloudAiNlLlmProtoServiceRaiResult {
- /** Recitation result from Aida recitation checker. */
- aidaRecitationResult?: LanguageLabsAidaTrustRecitationProtoRecitationResult;
- /** Use `triggered_blocklist`. */
- blocked?: boolean;
- /** The error codes indicate which RAI filters block the response. */
- errorCodes?: number[];
- /** Whether the text should be filtered and not shown to the end user. This is determined based on a combination of `triggered_recitation`, `triggered_blocklist`, `language_filter_result`, and `triggered_safety_filter`. */
- filtered?: boolean;
- /** Language filter result from SAFT LangId. */
- languageFilterResult?: LearningGenaiRootLanguageFilterResult;
- /** Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. */
- mmRecitationResult?: LearningGenaiRecitationMMRecitationCheckResult;
- /** The RAI signals for the text. */
- raiSignals?: CloudAiNlLlmProtoServiceRaiSignal[];
- /** Translation request info during RAI for debugging purpose. Each TranslationRequestInfo corresponds to a request sent to the translation server. */
- translationRequestInfos?: LearningGenaiRootTranslationRequestInfo[];
- /** Whether the text triggered the blocklist. */
- triggeredBlocklist?: boolean;
- /** Whether the text should be blocked by the recitation result from Aida recitation checker. It is determined from aida_recitation_result. */
- triggeredRecitation?: boolean;
- /** Whether the text triggered the safety filter. Currently, this is due to CSAI triggering or one of four categories (derogatory, sexual, toxic, violent) having a score over the filter threshold. */
- triggeredSafetyFilter?: boolean;
- }
- interface CloudAiNlLlmProtoServiceRaiSignal {
- /** The confidence level for the RAI category. */
- confidence?: string;
- /** Whether the category is flagged as being present. Currently, this is set to true if score >= 0.5. */
- flagged?: boolean;
- /** The influential terms that could potentially block the response. */
- influentialTerms?: CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm[];
- /** The RAI category. */
- raiCategory?: string;
- /** The score for the category, in the range [0.0, 1.0]. */
- score?: number;
- }
- interface CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm {
- /** The beginning offset of the influential term. */
- beginOffset?: number;
- /** The confidence score of the influential term. */
- confidence?: number;
- /** The source of the influential term, prompt or response. */
- source?: string;
- /** The influential term. */
- term?: string;
- }
- interface CloudAiNlLlmProtoServiceSafetyRating {
- /** Indicates whether the content was filtered out because of this rating. */
- blocked?: boolean;
- /** Harm category. */
- category?: string;
- /** The influential terms that could potentially block the response. */
- influentialTerms?: CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm[];
- /** Harm probability levels in the content. */
- probability?: string;
- /** Harm probability score. */
- probabilityScore?: number;
- /** Harm severity levels in the content. */
- severity?: string;
- /** Harm severity score. */
- severityScore?: number;
- }
- interface CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm {
- /** The beginning offset of the influential term. */
- beginOffset?: number;
- /** The confidence score of the influential term. */
- confidence?: number;
- /** The source of the influential term, prompt or response. */
- source?: string;
- /** The influential term. */
- term?: string;
- }
- interface CloudAiNlLlmProtoServiceUsageMetadata {
- /** Number of tokens in the response(s). */
- candidatesTokenCount?: number;
- /** Number of tokens in the request. */
- promptTokenCount?: number;
- totalTokenCount?: number;
- }
  interface GoogleApiHttpBody {
  /** The HTTP Content-Type header value specifying the content type of the body. */
  contentType?: string;
@@ -2528,6 +2281,12 @@ declare namespace gapi.client {
  neighborCount?: number;
  /** Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag. */
  perCrowdingAttributeNeighborCount?: number;
+ /** Optional. Represents RRF algorithm that combines search results. */
+ rrf?: GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF;
+ }
+ interface GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF {
+ /** Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense. */
+ alpha?: number;
  }
  interface GoogleCloudAiplatformV1beta1FindNeighborsResponse {
  /** The nearest neighbors of the query datapoints. */
@@ -2544,6 +2303,8 @@ declare namespace gapi.client {
  datapoint?: GoogleCloudAiplatformV1beta1IndexDatapoint;
  /** The distance between the neighbor and the dense embedding query. */
  distance?: number;
+ /** The distance between the neighbor and the query sparse_embedding. */
+ sparseDistance?: number;
  }
  interface GoogleCloudAiplatformV1beta1FluencyInput {
  /** Required. Fluency instance. */
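
The two hunks above add hybrid-search support to FindNeighbors: a per-query `rrf` option whose `alpha` weights dense against sparse results, and a `sparseDistance` on each returned neighbor. The following is a minimal sketch of how these typings might be used, assuming the generated client is loaded and exposes `projects.locations.indexEndpoints.findNeighbors` in the usual gapi.client `(params, body)` shape, and that the response follows the standard FindNeighbors shape (`nearestNeighbors[].neighbors[]`); resource names, IDs, and embedding values are placeholders:

```ts
// Hybrid (dense + sparse) nearest-neighbor query using the new RRF typings.
gapi.client.aiplatform.projects.locations.indexEndpoints
  .findNeighbors(
    {indexEndpoint: 'projects/my-project/locations/us-central1/indexEndpoints/123'},
    {
      deployedIndexId: 'my_deployed_index',
      queries: [
        {
          neighborCount: 10,
          // alpha weights dense vs. sparse: 1 = dense only, 0 = sparse only.
          rrf: {alpha: 0.5},
          datapoint: {
            datapointId: 'q0',
            featureVector: [0.1, 0.2, 0.3], // dense part of the query
            sparseEmbedding: {dimensions: ['12', '345'], values: [0.9, 0.4]},
          },
        },
      ],
    }
  )
  .then((res) => {
    for (const nn of res.result.nearestNeighbors ?? []) {
      for (const n of nn.neighbors ?? []) {
        // `distance` is the dense distance; `sparseDistance` is new in this revision.
        console.log(n.datapoint?.datapointId, n.distance, n.sparseDistance);
      }
    }
  });
```
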
@@ -2929,6 +2690,8 @@ declare namespace gapi.client {
  numericRestricts?: GoogleCloudAiplatformV1beta1IndexDatapointNumericRestriction[];
  /** Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses categorical tokens. See: https://cloud.google.com/vertex-ai/docs/matching-engine/filtering */
  restricts?: GoogleCloudAiplatformV1beta1IndexDatapointRestriction[];
+ /** Optional. Feature embedding vector for sparse index. */
+ sparseEmbedding?: GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding;
  }
  interface GoogleCloudAiplatformV1beta1IndexDatapointCrowdingTag {
  /** The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. */
@@ -2954,6 +2717,12 @@ declare namespace gapi.client {
  /** The namespace of this restriction. e.g.: color. */
  namespace?: string;
  }
+ interface GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding {
+ /** Required. The list of indexes for the embedding values of the sparse vector. */
+ dimensions?: string[];
+ /** Required. The list of embedding values of the sparse vector. */
+ values?: number[];
+ }
  interface GoogleCloudAiplatformV1beta1IndexEndpoint {
  /** Output only. Timestamp when this IndexEndpoint was created. */
  createTime?: string;
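
The new `sparseEmbedding` field on `IndexDatapoint` (previous hunk) is typed by the `SparseEmbedding` interface added here: `dimensions` holds string-encoded int64 indexes and `values` the matching weights. A sketch of writing a hybrid datapoint, assuming the client exposes `projects.locations.indexes.upsertDatapoints` as in the REST API; all identifiers are placeholders:

```ts
// A datapoint carrying both a dense vector and the new sparse embedding.
const datapoint: gapi.client.aiplatform.GoogleCloudAiplatformV1beta1IndexDatapoint = {
  datapointId: 'doc-42',
  featureVector: [0.12, 0.34, 0.56],
  sparseEmbedding: {
    dimensions: ['3', '17', '256'], // string-encoded int64 indexes
    values: [0.8, 0.1, 0.3],        // corresponding weights
  },
};

gapi.client.aiplatform.projects.locations.indexes.upsertDatapoints(
  {index: 'projects/my-project/locations/us-central1/indexes/456'},
  {datapoints: [datapoint]}
);
```
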
@@ -2995,6 +2764,8 @@ declare namespace gapi.client {
  interface GoogleCloudAiplatformV1beta1IndexStats {
  /** Output only. The number of shards in the Index. */
  shardsCount?: number;
+ /** Output only. The number of sparse vectors in the Index. */
+ sparseVectorsCount?: string;
  /** Output only. The number of dense vectors in the Index. */
  vectorsCount?: string;
  }
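
`IndexStats` now reports sparse and dense vector counts separately, both as string-encoded int64s. A short sketch, assuming `indexStats` is returned on the Index resource as in the REST API; the resource name is a placeholder:

```ts
// Reading the new sparseVectorsCount next to the existing vectorsCount.
gapi.client.aiplatform.projects.locations.indexes
  .get({name: 'projects/my-project/locations/us-central1/indexes/456'})
  .then((res) => {
    const stats = res.result.indexStats;
    console.log('dense vectors:', stats?.vectorsCount);
    console.log('sparse vectors:', stats?.sparseVectorsCount);
  });
```
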
@@ -8413,671 +8184,6 @@ declare namespace gapi.client {
  /** The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. */
  units?: string;
  }
- interface IntelligenceCloudAutomlXpsMetricEntry {
- /** For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the "cloudbilling.googleapis.com/argentum_metric_id" label. Otherwise leave empty. */
- argentumMetricId?: string;
- /** A double value. */
- doubleValue?: number;
- /** A signed 64-bit integer value. */
- int64Value?: string;
- /** The metric name defined in the service configuration. */
- metricName?: string;
- /** Billing system labels for this (metric, value) pair. */
- systemLabels?: IntelligenceCloudAutomlXpsMetricEntryLabel[];
- }
- interface IntelligenceCloudAutomlXpsMetricEntryLabel {
- /** The name of the label. */
- labelName?: string;
- /** The value of the label. */
- labelValue?: string;
- }
- interface IntelligenceCloudAutomlXpsReportingMetrics {
- /** The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set. */
- effectiveTrainingDuration?: string;
- /** One entry per metric name. The values must be aggregated per metric name. */
- metricEntries?: IntelligenceCloudAutomlXpsMetricEntry[];
- }
- interface LanguageLabsAidaTrustRecitationProtoDocAttribution {
- amarnaId?: string;
- arxivId?: string;
- author?: string;
- bibkey?: string;
- /** ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517 */
- biorxivId?: string;
- bookTitle?: string;
- /** The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. */
- bookVolumeId?: string;
- category?: string;
- conversationId?: string;
- /** The dataset this document comes from. */
- dataset?: string;
- filepath?: string;
- geminiId?: string;
- gnewsArticleTitle?: string;
- goodallExampleId?: string;
- /** Whether the document is opted out. */
- isOptOut?: boolean;
- isPrompt?: boolean;
- lamdaExampleId?: string;
- license?: string;
- meenaConversationId?: string;
- /** Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. */
- naturalLanguageCode?: string;
- /** True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. */
- noAttribution?: boolean;
- podcastUtteranceId?: string;
- publicationDate?: GoogleTypeDate;
- /** This field is for opt-out experiment only, MUST never be used during actual production/serving. */
- qualityScoreExperimentOnly?: number;
- /** Github repository */
- repo?: string;
- /** URL of a webdoc */
- url?: string;
- volumeId?: string;
- /** Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. */
- wikipediaArticleTitle?: string;
- /** The unique video id from Youtube. Example: AkoGsW52Ir0 */
- youtubeVideoId?: string;
- }
- interface LanguageLabsAidaTrustRecitationProtoRecitationResult {
- dynamicSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
- /** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. When the given input is not found in any source, the recitation action will not be specified. */
- recitationAction?: string;
- trainingSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
- }
- interface LanguageLabsAidaTrustRecitationProtoSegmentResult {
- /** The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. */
- attributionDataset?: string;
- /** human-friendly string that contains information from doc_attribution which could be shown by clients */
- displayAttributionMessage?: string;
- docAttribution?: LanguageLabsAidaTrustRecitationProtoDocAttribution;
- /** number of documents that contained this segment */
- docOccurrences?: number;
- endIndex?: number;
- /** The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. */
- rawText?: string;
- segmentRecitationAction?: string;
- /** The category of the source dataset where the segment came from. This is more stable than Dataset. */
- sourceCategory?: string;
- /** The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. */
- startIndex?: number;
- }
- interface LanguageLabsAidaTrustRecitationProtoStreamRecitationResult {
- /** The recitation result against the given dynamic data source. */
- dynamicSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
- /** Last index of input text fully checked for recitation in the entire streaming context. Would return `-1` if no Input was checked for recitation. */
- fullyCheckedTextIndex?: number;
- /** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. */
- recitationAction?: string;
- /** The recitation result against model training data. */
- trainingSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
- }
- interface LearningGenaiRecitationContentChunkRecitationCheckResult {
- imageResult?: LearningGenaiRecitationImageRecitationCheckResult;
- textResult?: LearningGenaiRecitationRecitationResult;
- }
- interface LearningGenaiRecitationDocAttribution {
- amarnaId?: string;
- arxivId?: string;
- author?: string;
- bibkey?: string;
- /** ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517 */
- biorxivId?: string;
- bookTitle?: string;
- /** The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. */
- bookVolumeId?: string;
- conversationId?: string;
- /** The dataset this document comes from. */
- dataset?: string;
- filepath?: string;
- geminiId?: string;
- gnewsArticleTitle?: string;
- goodallExampleId?: string;
- /** Whether the document is opted out. */
- isOptOut?: boolean;
- /** When true, this attribution came from the user's prompt. */
- isPrompt?: boolean;
- lamdaExampleId?: string;
- license?: string;
- meenaConversationId?: string;
- /** Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. */
- naturalLanguageCode?: string;
- /** True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. */
- noAttribution?: boolean;
- podcastUtteranceId?: string;
- publicationDate?: GoogleTypeDate;
- /** This field is for opt-out experiment only, MUST never be used during actual production/serving. */
- qualityScoreExperimentOnly?: number;
- /** Github repository */
- repo?: string;
- /** URL of a webdoc */
- url?: string;
- volumeId?: string;
- /** Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. */
- wikipediaArticleTitle?: string;
- youtubeVideoId?: string;
- }
- interface LearningGenaiRecitationImageDocAttribution {
- /** Unique ID of the image. */
- datasetName?: string;
- /** Doc ID to identify the image. These could be urls of images or amarna id. */
- stringDocids?: string;
- }
- interface LearningGenaiRecitationImageRecitationCheckResult {
- /** Only has NO_ACTION or BLOCK to start with. */
- recitationAction?: string;
- /** Images that are similar to the requested image. */
- recitedImages?: LearningGenaiRecitationImageRecitationCheckResultSimilarImage[];
- }
- interface LearningGenaiRecitationImageRecitationCheckResultSimilarImage {
- /** Attribution information about the image */
- docAttribution?: LearningGenaiRecitationImageDocAttribution;
- /** The memorization embedding model that returned this image */
- embeddingModel?: string;
- /** Image ID corresponding of the image corresponding to the score. `image_id` serves for debugging purposes and can't be used by clients to retrieve an image. */
- imageId?: string;
- /** Similarity score of requested image compared with image in training data. */
- scores?: number;
- }
- interface LearningGenaiRecitationMMRecitationCheckResult {
- chunkResults?: LearningGenaiRecitationContentChunkRecitationCheckResult[];
- /** Overall recommended recitation action for the content. */
- recitationAction?: string;
- }
- interface LearningGenaiRecitationRecitationResult {
- dynamicSegmentResults?: LearningGenaiRecitationSegmentResult[];
- /** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. When the given input is not found in any source, the recitation action will be NO_ACTION. */
- recitationAction?: string;
- trainingSegmentResults?: LearningGenaiRecitationSegmentResult[];
- }
- interface LearningGenaiRecitationSegmentResult {
- /** The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. */
- attributionDataset?: string;
- /** human-friendly string that contains information from doc_attribution which could be shown by clients */
- displayAttributionMessage?: string;
- docAttribution?: LearningGenaiRecitationDocAttribution;
- /** number of documents that contained this segment */
- docOccurrences?: number;
- endIndex?: number;
- /** The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. */
- rawText?: string;
- segmentRecitationAction?: string;
- /** The category of the source dataset where the segment came from. This is more stable than Dataset. */
- sourceCategory?: string;
- /** The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. */
- startIndex?: number;
- }
- interface LearningGenaiRootCalculationType {
- scoreType?: string;
- weights?: number;
- }
- interface LearningGenaiRootClassifierOutput {
- /** If set, this is the output of the first matching rule. */
- ruleOutput?: LearningGenaiRootRuleOutput;
- /** outputs of all matching rule. */
- ruleOutputs?: LearningGenaiRootRuleOutput[];
- /** The results of data_providers and metrics. */
- state?: LearningGenaiRootClassifierState;
- }
- interface LearningGenaiRootClassifierOutputSummary {
- metrics?: LearningGenaiRootMetricOutput[];
- /** Output of the first matching rule. */
- ruleOutput?: LearningGenaiRootRuleOutput;
- /** outputs of all matching rule. */
- ruleOutputs?: LearningGenaiRootRuleOutput[];
- }
- interface LearningGenaiRootClassifierState {
- dataProviderOutput?: LearningGenaiRootDataProviderOutput[];
- metricOutput?: LearningGenaiRootMetricOutput[];
- }
- interface LearningGenaiRootCodeyChatMetadata {
- /** Indicates the programming language of the code if the message is a code chunk. */
- codeLanguage?: string;
- }
- interface LearningGenaiRootCodeyCheckpoint {
- /** Metadata that describes what was truncated at this checkpoint. */
- codeyTruncatorMetadata?: LearningGenaiRootCodeyTruncatorMetadata;
- /** Current state of the sample after truncator. */
- currentSample?: string;
- /** Postprocessor run that yielded this checkpoint. */
- postInferenceStep?: string;
- }
- interface LearningGenaiRootCodeyCompletionMetadata {
- checkpoints?: LearningGenaiRootCodeyCheckpoint[];
- }
- interface LearningGenaiRootCodeyGenerationMetadata {
- /** Last state of the sample before getting dropped/returned. */
- output?: string;
- /** Last Codey postprocessing step for this sample before getting dropped/returned. */
- postInferenceStep?: string;
- }
- interface LearningGenaiRootCodeyOutput {
- codeyChatMetadata?: LearningGenaiRootCodeyChatMetadata;
- codeyCompletionMetadata?: LearningGenaiRootCodeyCompletionMetadata;
- codeyGenerationMetadata?: LearningGenaiRootCodeyGenerationMetadata;
- }
- interface LearningGenaiRootCodeyTruncatorMetadata {
- /** Index of the current sample that trims off truncated text. */
- cutoffIndex?: number;
- /** Text that was truncated at a specific checkpoint. */
- truncatedText?: string;
- }
- interface LearningGenaiRootControlDecodingConfigThreshold {
- policy?: string;
- scoreMax?: number;
- }
- interface LearningGenaiRootControlDecodingRecord {
- /** Prefixes feeded into scorer. */
- prefixes?: string;
- /** Per policy scores returned from Scorer. Expect to have the same number of scores as in `thresholds`. */
- scores?: LearningGenaiRootControlDecodingRecordPolicyScore[];
- /** Suffixes feeded into scorer. */
- suffiexes?: string;
- /** Per policy thresholds from user config. */
- thresholds?: LearningGenaiRootControlDecodingConfigThreshold[];
- }
- interface LearningGenaiRootControlDecodingRecordPolicyScore {
- policy?: string;
- score?: number;
- }
- interface LearningGenaiRootControlDecodingRecords {
- /** One ControlDecodingRecord record maps to one rewind. */
- records?: LearningGenaiRootControlDecodingRecord[];
- }
- interface LearningGenaiRootDataProviderOutput {
- name?: string;
- /** If set, this DataProvider failed and this is the error message. */
- status?: UtilStatusProto;
- }
- interface LearningGenaiRootFilterMetadata {
- /** Filter confidence. */
- confidence?: string;
- /** Debug info for the message. */
- debugInfo?: LearningGenaiRootFilterMetadataFilterDebugInfo;
- /** A fallback message chosen by the applied filter. */
- fallback?: string;
- /** Additional info for the filter. */
- info?: string;
- /** Name of the filter that triggered. */
- name?: string;
- /** Filter reason. */
- reason?: string;
- /** The input query or generated response that is getting filtered. */
- text?: string;
- }
- interface LearningGenaiRootFilterMetadataFilterDebugInfo {
- classifierOutput?: LearningGenaiRootClassifierOutput;
- defaultMetadata?: string;
- languageFilterResult?: LearningGenaiRootLanguageFilterResult;
- /** Safety filter output information for LLM Root RAI harm check. */
- raiOutput?: LearningGenaiRootRAIOutput;
- raiResult?: CloudAiNlLlmProtoServiceRaiResult;
- raiSignal?: CloudAiNlLlmProtoServiceRaiSignal;
- /** Number of rewinds by controlled decoding. */
- records?: LearningGenaiRootControlDecodingRecords;
- streamRecitationResult?: LanguageLabsAidaTrustRecitationProtoStreamRecitationResult;
- takedownResult?: LearningGenaiRootTakedownResult;
- toxicityResult?: LearningGenaiRootToxicityResult;
- }
- interface LearningGenaiRootGroundingMetadata {
- citations?: LearningGenaiRootGroundingMetadataCitation[];
- /** True if grounding is cancelled, for example, no facts being retrieved. */
- groundingCancelled?: boolean;
- searchQueries?: string[];
- }
- interface LearningGenaiRootGroundingMetadataCitation {
- /** Index in the prediction output where the citation ends (exclusive). Must be > start_index and <= len(output). */
- endIndex?: number;
- /** Index of the fact supporting this claim. Should be within the range of the `world_facts` in the GenerateResponse. */
- factIndex?: number;
- /** Confidence score of this entailment. Value is [0,1] with 1 is the most confidence. */
- score?: number;
- /** Index in the prediction output where the citation starts (inclusive). Must be >= 0 and < end_index. */
- startIndex?: number;
- }
- interface LearningGenaiRootHarm {
- /** Please do not use, this is still under development. */
- contextualDangerous?: boolean;
- csam?: boolean;
- fringe?: boolean;
- grailImageHarmType?: LearningGenaiRootHarmGrailImageHarmType;
- grailTextHarmType?: LearningGenaiRootHarmGrailTextHarmType;
- imageChild?: boolean;
- imageCsam?: boolean;
- imagePedo?: boolean;
- /** Image signals */
- imagePorn?: boolean;
- imageViolence?: boolean;
- pqc?: boolean;
- safetycat?: LearningGenaiRootHarmSafetyCatCategories;
- /** Spii Filter uses buckets http://google3/google/privacy/dlp/v2/storage.proto;l=77;rcl=584719820 to classify the input. LMRoot converts the bucket into double score. For example the score for "POSSIBLE" is 3 / 5 = 0.6 . */
- spii?: LearningGenaiRootHarmSpiiFilter;
- threshold?: number;
- videoFrameChild?: boolean;
- videoFrameCsam?: boolean;
- videoFramePedo?: boolean;
- /** Video frame signals */
- videoFramePorn?: boolean;
- videoFrameViolence?: boolean;
- }
- interface LearningGenaiRootHarmGrailImageHarmType {
- imageHarmType?: string[];
- }
- interface LearningGenaiRootHarmGrailTextHarmType {
- harmType?: string[];
- }
- interface LearningGenaiRootHarmSafetyCatCategories {
- categories?: string[];
- }
- interface LearningGenaiRootHarmSpiiFilter {
- usBankRoutingMicr?: boolean;
- usEmployerIdentificationNumber?: boolean;
- usSocialSecurityNumber?: boolean;
- }
- interface LearningGenaiRootInternalMetadata {
- scoredTokens?: LearningGenaiRootScoredToken[];
- }
- interface LearningGenaiRootLanguageFilterResult {
- /** False when query or response should be filtered out due to unsupported language. */
- allowed?: boolean;
- /** Language of the query or response. */
- detectedLanguage?: string;
- /** Probability of the language predicted as returned by LangID. */
- detectedLanguageProbability?: number;
- }
- interface LearningGenaiRootMetricOutput {
- debug?: string;
- /** Name of the metric. */
- name?: string;
- numericValue?: number;
- status?: UtilStatusProto;
- stringValue?: string;
- }
- interface LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata {
- /** Latency spent on fact retrievals. There might be multiple retrievals from different fact providers. */
- factRetrievalMillisecondsByProvider?: {[P in string]: string};
- /** Latency spent on prompt2query. The procedure generates a search-friendly query given the original prompt. */
- prompt2queryMilliseconds?: string;
- /** Latency if use GroundedGeneration service for the whole retrieval & augmentation. */
- retrievalAugmentMilliseconds?: string;
- }
- interface LearningGenaiRootRAIOutput {
- allowed?: boolean;
- harm?: LearningGenaiRootHarm;
- name?: string;
- score?: number;
- }
- interface LearningGenaiRootRegexTakedownResult {
- /** False when query or response should be taken down due to match with a blocked regex, true otherwise. */
- allowed?: boolean;
- /** Regex used to decide that query or response should be taken down. Empty when query or response is kept. */
- takedownRegex?: string;
- }
- interface LearningGenaiRootRequestMetrics {
- /** Metrics for audio samples in the request. */
- audioMetrics?: LearningGenaiRootRequestMetricsAudioMetrics;
- /** Metrics for image samples in the request. */
- imageMetrics?: LearningGenaiRootRequestMetricsImageMetrics;
- /** Number of text tokens extracted from the request. */
- textTokenCount?: number;
- /** Total number of tokens in the request. */
- totalTokenCount?: number;
- /** Metrics for video samples in the request. */
- videoMetrics?: LearningGenaiRootRequestMetricsVideoMetrics;
- }
- interface LearningGenaiRootRequestMetricsAudioMetrics {
- /** Duration of the audio sample in seconds. */
- audioDuration?: string;
- /** Number of tokens derived directly from audio data. */
- audioTokenCount?: number;
- /** Number of audio frames in the audio. */
- numAudioFrames?: number;
- }
- interface LearningGenaiRootRequestMetricsImageMetrics {
- /** Number of tokens extracted from image bytes. */
- imageTokenCount?: number;
- /** Number of images in the request. */
- numImages?: number;
- }
- interface LearningGenaiRootRequestMetricsVideoMetrics {
- /** Metrics associated with audio sample in the video. */
- audioSample?: LearningGenaiRootRequestMetricsAudioMetrics;
- /** Number of video frames in the video. */
- numVideoFrames?: number;
- /** Duration of the video sample in seconds. */
- videoDuration?: string;
- /** Number of tokens extracted from video frames. */
- videoFramesTokenCount?: number;
- }
- interface LearningGenaiRootRequestResponseTakedownResult {
- /** False when response has to be taken down per above config. */
- allowed?: boolean;
- /** Regex used to match the request. */
- requestTakedownRegex?: string;
- /** Regex used to decide that response should be taken down. Empty when response is kept. */
- responseTakedownRegex?: string;
- }
- interface LearningGenaiRootRoutingDecision {
- metadata?: LearningGenaiRootRoutingDecisionMetadata;
- /** The selected model to route traffic to. */
- modelConfigId?: string;
- }
- interface LearningGenaiRootRoutingDecisionMetadata {
- scoreBasedRoutingMetadata?: LearningGenaiRootRoutingDecisionMetadataScoreBased;
- tokenLengthBasedRoutingMetadata?: LearningGenaiRootRoutingDecisionMetadataTokenLengthBased;
- }
- interface LearningGenaiRootRoutingDecisionMetadataScoreBased {
- /** The rule that was matched. */
- matchedRule?: LearningGenaiRootScoreBasedRoutingConfigRule;
- /** The score that was generated by the router i.e. the model. */
- score?: LearningGenaiRootScore;
- /** No rules were matched & therefore used the default fallback. */
- usedDefaultFallback?: boolean;
- }
- interface LearningGenaiRootRoutingDecisionMetadataTokenLengthBased {
- modelInputTokenMetadata?: LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata[];
- modelMaxTokenMetadata?: LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata[];
- }
- interface LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata {
- /** The length computed by backends using the formatter & tokenizer specific to the model */
- computedInputTokenLength?: number;
- modelId?: string;
- /** If true, the model was selected as a fallback, since no model met requirements. */
- pickedAsFallback?: boolean;
- /** If true, the model was selected since it met the requriements. */
- selected?: boolean;
- }
- interface LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata {
- maxNumInputTokens?: number;
- maxNumOutputTokens?: number;
- modelId?: string;
- }
- interface LearningGenaiRootRuleOutput {
- decision?: string;
- name?: string;
- }
- interface LearningGenaiRootScore {
- calculationType?: LearningGenaiRootCalculationType;
- /** The internal_metadata is intended to be used by internal processors and will be cleared before returns. */
- internalMetadata?: LearningGenaiRootInternalMetadata;
- thresholdType?: LearningGenaiRootThresholdType;
- /** Top candidate tokens and log probabilities at each decoding step. */
- tokensAndLogprobPerDecodingStep?: LearningGenaiRootTokensAndLogProbPerDecodingStep;
- value?: number;
- }
- interface LearningGenaiRootScoreBasedRoutingConfigRule {
- /** NOTE: Hardest examples have smaller values in their routing scores. */
- equalOrGreaterThan?: LearningGenaiRootScore;
- lessThan?: LearningGenaiRootScore;
- /** This model_config_id points to ModelConfig::id which allows us to find the ModelConfig to route to. This is part of the banks specified in the ModelBankConfig. */
- modelConfigId?: string;
- }
- interface LearningGenaiRootScoredSimilarityTakedownPhrase {
- phrase?: LearningGenaiRootSimilarityTakedownPhrase;
- similarityScore?: number;
- }
- interface LearningGenaiRootScoredToken {
- /** Each end_token_score is a logprob for how well the completion would end at a particular token. See http://google3/labs/language/aida/config/proto/model_config.proto;l=376;rcl=573039459 */
- endTokenScore?: number;
- /** Each score is the logprob for the token in model response. */
- score?: number;
- token?: string;
- }
- interface LearningGenaiRootSimilarityTakedownPhrase {
- blockedPhrase?: string;
- }
- interface LearningGenaiRootSimilarityTakedownResult {
- /** False when query or response should be taken down by any of the takedown rules, true otherwise. */
- allowed?: boolean;
- /** List of similar phrases with score. Set only if allowed=false. */
- scoredPhrases?: LearningGenaiRootScoredSimilarityTakedownPhrase[];
- }
- interface LearningGenaiRootTakedownResult {
- /** False when query or response should be taken down by any of the takedown rules, true otherwise. */
- allowed?: boolean;
- regexTakedownResult?: LearningGenaiRootRegexTakedownResult;
- requestResponseTakedownResult?: LearningGenaiRootRequestResponseTakedownResult;
- similarityTakedownResult?: LearningGenaiRootSimilarityTakedownResult;
- }
- interface LearningGenaiRootThresholdType {
- scoreType?: string;
- threshold?: number;
- }
- interface LearningGenaiRootTokensAndLogProbPerDecodingStep {
- /** Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates. */
- chosenCandidates?: LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[];
- /** Length = total number of decoding steps. */
- topCandidates?: LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates[];
- }
- interface LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate {
- /** The candidate's log probability. */
- logProbability?: number;
- /** The candidate’s token value. */
- token?: string;
- }
- interface LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates {
- /** Sorted by log probability in descending order. */
- candidates?: LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[];
- }
- interface LearningGenaiRootToxicityResult {
- signals?: LearningGenaiRootToxicitySignal[];
- }
- interface LearningGenaiRootToxicitySignal {
- allowed?: boolean;
- label?: string;
- score?: number;
- }
- interface LearningGenaiRootTranslationRequestInfo {
- /** The ISO-639 language code of source text in the initial request, detected automatically, if no source language was passed within the initial request. If the source language was passed, auto-detection of the language does not occur and this field is empty. */
- detectedLanguageCodes?: string[];
- /** The sum of the size of all the contents in the request. */
- totalContentSize?: string;
- }
- interface LearningServingLlmAtlasOutputMetadata {
- requestTopic?: string;
- source?: string;
- }
- interface LearningServingLlmMessageMetadata {
- atlasMetadata?: LearningServingLlmAtlasOutputMetadata;
- /** Summary of classifier output. We attach this to all messages regardless of whether classification rules triggered or not. */
- classifierSummary?: LearningGenaiRootClassifierOutputSummary;
- /** Contains metadata related to Codey Processors. */
- codeyOutput?: LearningGenaiRootCodeyOutput;
- currentStreamTextLength?: number;
- /** Whether the corresponding message has been deleted. */
- deleted?: boolean;
- /** Metadata for filters that triggered. */
- filterMeta?: LearningGenaiRootFilterMetadata[];
- /** This score is finally used for ranking the message. This will be same as the score present in `Message.score` field. */
- finalMessageScore?: LearningGenaiRootScore;
- /** NOT YET IMPLEMENTED. */
- finishReason?: string;
- groundingMetadata?: LearningGenaiRootGroundingMetadata;
- /** Applies to streaming response message only. Whether the message is a code. */
- isCode?: boolean;
- /** Applies to Response message only. Indicates whether the message is a fallback and the response would have otherwise been empty. */
- isFallback?: boolean;
- /** Result from nlp_saft DetectLanguage method. Currently the predicted language code and language probability is used. */
- langidResult?: NlpSaftLangIdResult;
- /** Detected language. */
- language?: string;
- /** The LM prefix used to generate this response. */
- lmPrefix?: string;
- /** FOR LMROOT INTERNAL USE ONLY. Externally, use learning.genai.root.RequestMetadata.RequestMetrics. Request metrics per modality including token count, duration, num_frames. */
- lmrootInternalRequestMetrics?: LearningGenaiRootRequestMetrics;
- /** Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. */
- mmRecitationResult?: LearningGenaiRecitationMMRecitationCheckResult;
- /** Number of Controlled Decoding rewind and repeats that have happened for this response. */
- numRewinds?: number;
- /** The original text generated by LLM. This is the raw output for debugging purposes. */
- originalText?: string;
- /** Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only. */
- perStreamDecodedTokenCount?: number;
- /** Number of tokens returned per stream in a response candidate after any response rewriting or truncation. Applies to streaming response only. Applies to Gemini models only. */
- perStreamReturnedTokenCount?: number;
- /** Results of running RAI on the query or this response candidate. One output per rai_config. It will be populated regardless of whether the threshold is exceeded or not. */
- raiOutputs?: LearningGenaiRootRAIOutput[];
- /** Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome. */
- recitationResult?: LearningGenaiRecitationRecitationResult;
- /** All the different scores for a message are logged here. */
- scores?: LearningGenaiRootScore[];
- /** Whether the response is terminated during streaming return. Only used for streaming requests. */
- streamTerminated?: boolean;
- /** Total tokens decoded so far per response_candidate. For streaming: Count of all the tokens decoded so far (aggregated count). For unary: Count of all the tokens decoded per response_candidate. */
- totalDecodedTokenCount?: number;
- /** Total number of tokens returned in a response candidate. For streaming, it is the aggregated count (i.e. total so far) Applies to Gemini models only. */
- totalReturnedTokenCount?: number;
- /** Translated user-prompt used for RAI post processing. This is for internal processing only. We will translate in pre-processor and pass the translated text to the post processor using this field. It will be empty if non of the signals requested need translation. */
- translatedUserPrompts?: string[];
- /** The metadata from Vertex SafetyCat processors */
- vertexRaiResult?: CloudAiNlLlmProtoServiceRaiResult;
- }
- interface NlpSaftLangIdLocalesResult {
- /** List of locales in which the text would be considered acceptable. Sorted in descending order according to each locale's respective likelihood. For example, if a Portuguese text is acceptable in both Brazil and Portugal, but is more strongly associated with Brazil, then the predictions would be ["pt-BR", "pt-PT"], in that order. May be empty, indicating that the model did not predict any acceptable locales. */
- predictions?: NlpSaftLangIdLocalesResultLocale[];
- }
- interface NlpSaftLangIdLocalesResultLocale {
- /** A BCP 47 language code that includes region information. For example, "pt-BR" or "pt-PT". This field will always be populated. */
- languageCode?: string;
- }
- interface NlpSaftLangIdResult {
- /** The version of the model used to create these annotations. */
- modelVersion?: string;
- /** This field stores the n-best list of possible BCP 47 language code strings for a given input sorted in descending order according to each code's respective probability. */
- predictions?: NlpSaftLanguageSpan[];
- /** This field stores language predictions of subspans of the input, when available. Each LanguageSpanSequence is a sequence of LanguageSpans. A particular sequence of LanguageSpans has an associated probability, and need not necessarily cover the entire input. If no language could be predicted for any span, then this field may be empty. */
- spanPredictions?: NlpSaftLanguageSpanSequence[];
- }
- interface NlpSaftLanguageSpan {
- end?: number;
- /** A BCP 47 language code for this span. */
- languageCode?: string;
- /** Optional field containing any information that was predicted about the specific locale(s) of the span. */
- locales?: NlpSaftLangIdLocalesResult;
- /** A probability associated with this prediction. */
- probability?: number;
- /** Start and end byte offsets, inclusive, within the given input string. A value of -1 implies that this field is not set. Both fields must either be set with a nonnegative value or both are unset. If both are unset then this LanguageSpan applies to the entire input. */
- start?: number;
- }
- interface NlpSaftLanguageSpanSequence {
- /** A sequence of LanguageSpan objects, each assigning a language to a subspan of the input. */
- languageSpans?: NlpSaftLanguageSpan[];
- /** The probability of this sequence of LanguageSpans. */
- probability?: number;
- }
- interface Proto2BridgeMessageSet {}
- interface UtilStatusProto {
- /** The canonical error code (see codes.proto) that most closely corresponds to this status. This may be missing, and in the common case of the generic space, it definitely will be. */
- canonicalCode?: number;
- /** Numeric code drawn from the space specified below. Often, this is the canonical error space, and code is drawn from google3/util/task/codes.proto */
- code?: number;
- /** Detail message */
- message?: string;
- /** message_set associates an arbitrary proto message with the status. */
- messageSet?: any;
- /** The following are usually only present when code != 0 Space to which this status belongs */
- space?: string;
- }
  interface MediaResource {
  /** Upload a file into a RagCorpus. */
  upload(request: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@maxim_mazurok/gapi.client.aiplatform-v1beta1",
- "version": "0.0.20240507",
+ "version": "0.0.20240510",
  "description": "TypeScript typings for Vertex AI API v1beta1",
  "repository": {
  "type": "git",