@maxim_mazurok/gapi.client.aiplatform-v1 0.0.20240501 → 0.0.20240507

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.d.ts +159 -22
  2. package/package.json +1 -1
package/index.d.ts CHANGED
@@ -9,7 +9,7 @@
9
9
  // This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
10
10
  // In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
11
11
  // Generated from: https://aiplatform.googleapis.com/$discovery/rest?version=v1
12
- // Revision: 20240501
12
+ // Revision: 20240507
13
13
 
14
14
  /// <reference types="gapi.client" />
15
15
 
@@ -158,6 +158,8 @@ declare namespace gapi.client {
158
158
  citations?: CloudAiNlLlmProtoServiceCitation[];
159
159
  }
160
160
  interface CloudAiNlLlmProtoServiceContent {
161
+ /** If true, the content is from a cached content. */
162
+ isCached?: boolean;
161
163
  /** The parts of the message. */
162
164
  parts?: CloudAiNlLlmProtoServicePart[];
163
165
  /** The role of the current conversation participant. */
@@ -276,6 +278,8 @@ declare namespace gapi.client {
276
278
  filtered?: boolean;
277
279
  /** Language filter result from SAFT LangId. */
278
280
  languageFilterResult?: LearningGenaiRootLanguageFilterResult;
281
+ /** Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. */
282
+ mmRecitationResult?: LearningGenaiRecitationMMRecitationCheckResult;
279
283
  /** The RAI signals for the text. */
280
284
  raiSignals?: CloudAiNlLlmProtoServiceRaiSignal[];
281
285
  /** Translation request info during RAI for debugging purpose. Each TranslationRequestInfo corresponds to a request sent to the translation server. */
@@ -1132,6 +1136,8 @@ declare namespace gapi.client {
1132
1136
  metadataArtifact?: string;
1133
1137
  /** Required. Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/. */
1134
1138
  metadataSchemaUri?: string;
1139
+ /** Optional. Reference to the public base model last used by the dataset. Only set for prompt datasets. */
1140
+ modelReference?: string;
1135
1141
  /** Output only. The resource name of the Dataset. */
1136
1142
  name?: string;
1137
1143
  /** All SavedQueries belong to the Dataset will be returned in List/Get Dataset response. The annotation_specs field will not be populated except for UI cases which will only use annotation_spec_count. In CreateDataset request, a SavedQuery is created together if this field is set, up to one SavedQuery can be set in CreateDatasetRequest. The SavedQuery should not contain any AnnotationSpec. */
@@ -1150,6 +1156,8 @@ declare namespace gapi.client {
1150
1156
  etag?: string;
1151
1157
  /** Required. Output only. Additional information about the DatasetVersion. */
1152
1158
  metadata?: any;
1159
+ /** Output only. Reference to the public base model last used by the dataset version. Only set for prompt dataset versions. */
1160
+ modelReference?: string;
1153
1161
  /** Output only. The resource name of the DatasetVersion. */
1154
1162
  name?: string;
1155
1163
  /** Output only. Timestamp when this DatasetVersion was last updated. */
@@ -1822,7 +1830,7 @@ declare namespace gapi.client {
1822
1830
  versionColumnName?: string;
1823
1831
  }
1824
1832
  interface GoogleCloudAiplatformV1FeatureGroup {
1825
- /** Indicates that features for this group come from BigQuery Table/View. By default treats the source as a sparse time series source, which is required to have an entity_id and a feature_timestamp column in the source. */
1833
+ /** Indicates that features for this group come from BigQuery Table/View. By default treats the source as a sparse time series source. The BigQuery source table or view must have at least one entity ID column and a column named `feature_timestamp`. */
1826
1834
  bigQuery?: GoogleCloudAiplatformV1FeatureGroupBigQuery;
1827
1835
  /** Output only. Timestamp when this FeatureGroup was created. */
1828
1836
  createTime?: string;
@@ -2164,12 +2172,6 @@ declare namespace gapi.client {
2164
2172
  neighborCount?: number;
2165
2173
  /** Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag. */
2166
2174
  perCrowdingAttributeNeighborCount?: number;
2167
- /** Optional. Represents RRF algorithm that combines search results. */
2168
- rrf?: GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF;
2169
- }
2170
- interface GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF {
2171
- /** Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense. */
2172
- alpha?: number;
2173
2175
  }
2174
2176
  interface GoogleCloudAiplatformV1FindNeighborsResponse {
2175
2177
  /** The nearest neighbors of the query datapoints. */
@@ -2186,8 +2188,6 @@ declare namespace gapi.client {
2186
2188
  datapoint?: GoogleCloudAiplatformV1IndexDatapoint;
2187
2189
  /** The distance between the neighbor and the dense embedding query. */
2188
2190
  distance?: number;
2189
- /** The distance between the neighbor and the query sparse_embedding. */
2190
- sparseDistance?: number;
2191
2191
  }
2192
2192
  interface GoogleCloudAiplatformV1FractionSplit {
2193
2193
  /** The fraction of the input data that is to be used to evaluate the Model. */
@@ -2271,6 +2271,8 @@ declare namespace gapi.client {
2271
2271
  presencePenalty?: number;
2272
2272
  /** Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. */
2273
2273
  responseMimeType?: string;
2274
+ /** Optional. Control Three levels of creativity in the model output. Default: RESPONSE_STYLE_BALANCED */
2275
+ responseStyle?: string;
2274
2276
  /** Optional. Stop sequences. */
2275
2277
  stopSequences?: string[];
2276
2278
  /** Optional. Controls the randomness of predictions. */
@@ -2449,8 +2451,6 @@ declare namespace gapi.client {
2449
2451
  numericRestricts?: GoogleCloudAiplatformV1IndexDatapointNumericRestriction[];
2450
2452
  /** Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses categorical tokens. See: https://cloud.google.com/vertex-ai/docs/matching-engine/filtering */
2451
2453
  restricts?: GoogleCloudAiplatformV1IndexDatapointRestriction[];
2452
- /** Optional. Feature embedding vector for sparse index. */
2453
- sparseEmbedding?: GoogleCloudAiplatformV1IndexDatapointSparseEmbedding;
2454
2454
  }
2455
2455
  interface GoogleCloudAiplatformV1IndexDatapointCrowdingTag {
2456
2456
  /** The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. */
@@ -2476,12 +2476,6 @@ declare namespace gapi.client {
2476
2476
  /** The namespace of this restriction. e.g.: color. */
2477
2477
  namespace?: string;
2478
2478
  }
2479
- interface GoogleCloudAiplatformV1IndexDatapointSparseEmbedding {
2480
- /** Optional. The list of indexes for the embedding values of the sparse vector. */
2481
- dimensions?: string[];
2482
- /** Optional. The list of embedding values of the sparse vector. */
2483
- values?: number[];
2484
- }
2485
2479
  interface GoogleCloudAiplatformV1IndexEndpoint {
2486
2480
  /** Output only. Timestamp when this IndexEndpoint was created. */
2487
2481
  createTime?: string;
@@ -2523,8 +2517,6 @@ declare namespace gapi.client {
2523
2517
  interface GoogleCloudAiplatformV1IndexStats {
2524
2518
  /** Output only. The number of shards in the Index. */
2525
2519
  shardsCount?: number;
2526
- /** Output only. The number of sparse vectors in the Index. */
2527
- sparseVectorsCount?: string;
2528
2520
  /** Output only. The number of dense vectors in the Index. */
2529
2521
  vectorsCount?: string;
2530
2522
  }
@@ -3571,12 +3563,16 @@ declare namespace gapi.client {
3571
3563
  interface GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats {
3572
3564
  /** Number of records in this file we skipped due to validate errors. */
3573
3565
  invalidRecordCount?: string;
3566
+ /** Number of sparse records in this file we skipped due to validate errors. */
3567
+ invalidSparseRecordCount?: string;
3574
3568
  /** The detail information of the partial failures encountered for those invalid records that couldn't be parsed. Up to 50 partial errors will be reported. */
3575
3569
  partialErrors?: GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataRecordError[];
3576
3570
  /** Cloud Storage URI pointing to the original file in user's bucket. */
3577
3571
  sourceGcsUri?: string;
3578
3572
  /** Number of records in this file that were successfully processed. */
3579
3573
  validRecordCount?: string;
3574
+ /** Number of sparse records in this file that were successfully processed. */
3575
+ validSparseRecordCount?: string;
3580
3576
  }
3581
3577
  interface GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataRecordError {
3582
3578
  /** Empty if the embedding id is failed to parse. */
@@ -3671,6 +3667,10 @@ declare namespace gapi.client {
3671
3667
  runtimeState?: string;
3672
3668
  /** Required. The user email of the NotebookRuntime. */
3673
3669
  runtimeUser?: string;
3670
+ /** Output only. Reserved for future use. */
3671
+ satisfiesPzi?: boolean;
3672
+ /** Output only. Reserved for future use. */
3673
+ satisfiesPzs?: boolean;
3674
3674
  /** Output only. The service account that the NotebookRuntime workload runs as. */
3675
3675
  serviceAccount?: string;
3676
3676
  /** Output only. Timestamp when this NotebookRuntime was most recently updated. */
@@ -6466,6 +6466,8 @@ declare namespace gapi.client {
6466
6466
  createTime?: string;
6467
6467
  /** Optional. The description of the TuningJob. */
6468
6468
  description?: string;
6469
+ /** Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key. */
6470
+ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec;
6469
6471
  /** Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. */
6470
6472
  endTime?: string;
6471
6473
  /** Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. */
@@ -6893,6 +6895,10 @@ declare namespace gapi.client {
6893
6895
  /** The recitation result against model training data. */
6894
6896
  trainingSegmentResults?: LanguageLabsAidaTrustRecitationProtoSegmentResult[];
6895
6897
  }
6898
+ interface LearningGenaiRecitationContentChunkRecitationCheckResult {
6899
+ imageResult?: LearningGenaiRecitationImageRecitationCheckResult;
6900
+ textResult?: LearningGenaiRecitationRecitationResult;
6901
+ }
6896
6902
  interface LearningGenaiRecitationDocAttribution {
6897
6903
  amarnaId?: string;
6898
6904
  arxivId?: string;
@@ -6934,6 +6940,33 @@ declare namespace gapi.client {
6934
6940
  wikipediaArticleTitle?: string;
6935
6941
  youtubeVideoId?: string;
6936
6942
  }
6943
+ interface LearningGenaiRecitationImageDocAttribution {
6944
+ /** Unique ID of the image. */
6945
+ datasetName?: string;
6946
+ /** Doc ID to identify the image. These could be urls of images or amarna id. */
6947
+ stringDocids?: string;
6948
+ }
6949
+ interface LearningGenaiRecitationImageRecitationCheckResult {
6950
+ /** Only has NO_ACTION or BLOCK to start with. */
6951
+ recitationAction?: string;
6952
+ /** Images that are similar to the requested image. */
6953
+ recitedImages?: LearningGenaiRecitationImageRecitationCheckResultSimilarImage[];
6954
+ }
6955
+ interface LearningGenaiRecitationImageRecitationCheckResultSimilarImage {
6956
+ /** Attribution information about the image */
6957
+ docAttribution?: LearningGenaiRecitationImageDocAttribution;
6958
+ /** The memorization embedding model that returned this image */
6959
+ embeddingModel?: string;
6960
+ /** Image ID corresponding of the image corresponding to the score. `image_id` serves for debugging purposes and can't be used by clients to retrieve an image. */
6961
+ imageId?: string;
6962
+ /** Similarity score of requested image compared with image in training data. */
6963
+ scores?: number;
6964
+ }
6965
+ interface LearningGenaiRecitationMMRecitationCheckResult {
6966
+ chunkResults?: LearningGenaiRecitationContentChunkRecitationCheckResult[];
6967
+ /** Overall recommended recitation action for the content. */
6968
+ recitationAction?: string;
6969
+ }
6937
6970
  interface LearningGenaiRecitationRecitationResult {
6938
6971
  dynamicSegmentResults?: LearningGenaiRecitationSegmentResult[];
6939
6972
  /** The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. When the given input is not found in any source, the recitation action will be NO_ACTION. */
@@ -7148,6 +7181,8 @@ declare namespace gapi.client {
7148
7181
  factRetrievalMillisecondsByProvider?: {[P in string]: string};
7149
7182
  /** Latency spent on prompt2query. The procedure generates a search-friendly query given the original prompt. */
7150
7183
  prompt2queryMilliseconds?: string;
7184
+ /** Latency if use GroundedGeneration service for the whole retrieval & augmentation. */
7185
+ retrievalAugmentMilliseconds?: string;
7151
7186
  }
7152
7187
  interface LearningGenaiRootRAIOutput {
7153
7188
  allowed?: boolean;
@@ -7161,6 +7196,42 @@ declare namespace gapi.client {
7161
7196
  /** Regex used to decide that query or response should be taken down. Empty when query or response is kept. */
7162
7197
  takedownRegex?: string;
7163
7198
  }
7199
+ interface LearningGenaiRootRequestMetrics {
7200
+ /** Metrics for audio samples in the request. */
7201
+ audioMetrics?: LearningGenaiRootRequestMetricsAudioMetrics;
7202
+ /** Metrics for image samples in the request. */
7203
+ imageMetrics?: LearningGenaiRootRequestMetricsImageMetrics;
7204
+ /** Number of text tokens extracted from the request. */
7205
+ textTokenCount?: number;
7206
+ /** Total number of tokens in the request. */
7207
+ totalTokenCount?: number;
7208
+ /** Metrics for video samples in the request. */
7209
+ videoMetrics?: LearningGenaiRootRequestMetricsVideoMetrics;
7210
+ }
7211
+ interface LearningGenaiRootRequestMetricsAudioMetrics {
7212
+ /** Duration of the audio sample in seconds. */
7213
+ audioDuration?: string;
7214
+ /** Number of tokens derived directly from audio data. */
7215
+ audioTokenCount?: number;
7216
+ /** Number of audio frames in the audio. */
7217
+ numAudioFrames?: number;
7218
+ }
7219
+ interface LearningGenaiRootRequestMetricsImageMetrics {
7220
+ /** Number of tokens extracted from image bytes. */
7221
+ imageTokenCount?: number;
7222
+ /** Number of images in the request. */
7223
+ numImages?: number;
7224
+ }
7225
+ interface LearningGenaiRootRequestMetricsVideoMetrics {
7226
+ /** Metrics associated with audio sample in the video. */
7227
+ audioSample?: LearningGenaiRootRequestMetricsAudioMetrics;
7228
+ /** Number of video frames in the video. */
7229
+ numVideoFrames?: number;
7230
+ /** Duration of the video sample in seconds. */
7231
+ videoDuration?: string;
7232
+ /** Number of tokens extracted from video frames. */
7233
+ videoFramesTokenCount?: number;
7234
+ }
7164
7235
  interface LearningGenaiRootRequestResponseTakedownResult {
7165
7236
  /** False when response has to be taken down per above config. */
7166
7237
  allowed?: boolean;
@@ -7315,6 +7386,12 @@ declare namespace gapi.client {
7315
7386
  language?: string;
7316
7387
  /** The LM prefix used to generate this response. */
7317
7388
  lmPrefix?: string;
7389
+ /** FOR LMROOT INTERNAL USE ONLY. Externally, use learning.genai.root.RequestMetadata.RequestMetrics. Request metrics per modality including token count, duration, num_frames. */
7390
+ lmrootInternalRequestMetrics?: LearningGenaiRootRequestMetrics;
7391
+ /** Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. */
7392
+ mmRecitationResult?: LearningGenaiRecitationMMRecitationCheckResult;
7393
+ /** Number of Controlled Decoding rewind and repeats that have happened for this response. */
7394
+ numRewinds?: number;
7318
7395
  /** The original text generated by LLM. This is the raw output for debugging purposes. */
7319
7396
  originalText?: string;
7320
7397
  /** Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only. */
@@ -7325,8 +7402,6 @@ declare namespace gapi.client {
7325
7402
  raiOutputs?: LearningGenaiRootRAIOutput[];
7326
7403
  /** Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome. */
7327
7404
  recitationResult?: LearningGenaiRecitationRecitationResult;
7328
- /** NOT IMPLEMENTED TODO (b/334187574) Remove this field after Labs migrates to per_stream_returned_token_count and total_returned_token_count. */
7329
- returnTokenCount?: number;
7330
7405
  /** All the different scores for a message are logged here. */
7331
7406
  scores?: LearningGenaiRootScore[];
7332
7407
  /** Whether the response is terminated during streaming return. Only used for streaming requests. */
@@ -9001,6 +9076,68 @@ declare namespace gapi.client {
9001
9076
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
9002
9077
  uploadType?: string;
9003
9078
  }): Request<GoogleCloudAiplatformV1ListDatasetVersionsResponse>;
9079
+ /** Updates a DatasetVersion. */
9080
+ patch(request: {
9081
+ /** V1 error format. */
9082
+ '$.xgafv'?: string;
9083
+ /** OAuth access token. */
9084
+ access_token?: string;
9085
+ /** Data format for response. */
9086
+ alt?: string;
9087
+ /** JSONP */
9088
+ callback?: string;
9089
+ /** Selector specifying which fields to include in a partial response. */
9090
+ fields?: string;
9091
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
9092
+ key?: string;
9093
+ /** Output only. The resource name of the DatasetVersion. */
9094
+ name: string;
9095
+ /** OAuth 2.0 token for the current user. */
9096
+ oauth_token?: string;
9097
+ /** Returns response with indentations and line breaks. */
9098
+ prettyPrint?: boolean;
9099
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
9100
+ quotaUser?: string;
9101
+ /** Required. The update mask applies to the resource. For the `FieldMask` definition, see google.protobuf.FieldMask. Updatable fields: * `display_name` */
9102
+ updateMask?: string;
9103
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
9104
+ upload_protocol?: string;
9105
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
9106
+ uploadType?: string;
9107
+ /** Request body */
9108
+ resource: GoogleCloudAiplatformV1DatasetVersion;
9109
+ }): Request<GoogleCloudAiplatformV1DatasetVersion>;
9110
+ patch(
9111
+ request: {
9112
+ /** V1 error format. */
9113
+ '$.xgafv'?: string;
9114
+ /** OAuth access token. */
9115
+ access_token?: string;
9116
+ /** Data format for response. */
9117
+ alt?: string;
9118
+ /** JSONP */
9119
+ callback?: string;
9120
+ /** Selector specifying which fields to include in a partial response. */
9121
+ fields?: string;
9122
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
9123
+ key?: string;
9124
+ /** Output only. The resource name of the DatasetVersion. */
9125
+ name: string;
9126
+ /** OAuth 2.0 token for the current user. */
9127
+ oauth_token?: string;
9128
+ /** Returns response with indentations and line breaks. */
9129
+ prettyPrint?: boolean;
9130
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
9131
+ quotaUser?: string;
9132
+ /** Required. The update mask applies to the resource. For the `FieldMask` definition, see google.protobuf.FieldMask. Updatable fields: * `display_name` */
9133
+ updateMask?: string;
9134
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
9135
+ upload_protocol?: string;
9136
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
9137
+ uploadType?: string;
9138
+ },
9139
+ body: GoogleCloudAiplatformV1DatasetVersion
9140
+ ): Request<GoogleCloudAiplatformV1DatasetVersion>;
9004
9141
  /** Restores a dataset version. */
9005
9142
  restore(request?: {
9006
9143
  /** V1 error format. */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@maxim_mazurok/gapi.client.aiplatform-v1",
3
- "version": "0.0.20240501",
3
+ "version": "0.0.20240507",
4
4
  "description": "TypeScript typings for Vertex AI API v1",
5
5
  "repository": {
6
6
  "type": "git",