@maxim_mazurok/gapi.client.aiplatform-v1 0.0.20231129 → 0.0.20231214

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/index.d.ts +264 -26
  2. package/package.json +1 -1
package/index.d.ts CHANGED
@@ -9,7 +9,7 @@
9
9
  // This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
10
10
  // In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
11
11
  // Generated from: https://aiplatform.googleapis.com/$discovery/rest?version=v1
12
- // Revision: 20231129
12
+ // Revision: 20231214
13
13
 
14
14
  /// <reference types="gapi.client" />
15
15
 
@@ -24,6 +24,218 @@ declare namespace gapi.client {
24
24
  function load(name: 'aiplatform', version: 'v1', callback: () => any): void;
25
25
 
26
26
  namespace aiplatform {
27
+ interface CloudAiLargeModelsVisionEmbedVideoResponse {
28
+ /** The embedding vector for the video. */
29
+ videoEmbeddings?: any[];
30
+ }
31
+ interface CloudAiLargeModelsVisionFilteredText {
32
+ /** Filtered category */
33
+ category?: string;
34
+ /** Confidence level */
35
+ confidence?: string;
36
+ /** Input prompt */
37
+ prompt?: string;
38
+ /** Score for category */
39
+ score?: number;
40
+ }
41
+ interface CloudAiLargeModelsVisionGenerateVideoResponse {
42
+ /** The generated samples. */
43
+ generatedSamples?: CloudAiLargeModelsVisionMedia[];
44
+ /** Returns if any videos were filtered due to RAI policies. */
45
+ raiMediaFilteredCount?: number;
46
+ /** Returns rai failure reasons if any. */
47
+ raiMediaFilteredReasons?: string[];
48
+ /** Returns filtered text rai info. */
49
+ raiTextFilteredReason?: CloudAiLargeModelsVisionFilteredText;
50
+ }
51
+ interface CloudAiLargeModelsVisionImage {
52
+ /** Image encoding, encoded as "image/png" or "image/jpg". */
53
+ encoding?: string;
54
+ /** Raw bytes. */
55
+ image?: string;
56
+ /** RAI scores for generated image. */
57
+ imageRaiScores?: CloudAiLargeModelsVisionImageRAIScores;
58
+ /** RAI info for image */
59
+ raiInfo?: CloudAiLargeModelsVisionRaiInfo;
60
+ /** Semantic filter info for image. */
61
+ semanticFilterResponse?: CloudAiLargeModelsVisionSemanticFilterResponse;
62
+ /** Path to another storage (typically Google Cloud Storage). */
63
+ uri?: string;
64
+ }
65
+ interface CloudAiLargeModelsVisionImageRAIScores {
66
+ /** Agile watermark score for image. */
67
+ agileWatermarkDetectionScore?: number;
68
+ }
69
+ interface CloudAiLargeModelsVisionMedia {
70
+ /** Image. */
71
+ image?: CloudAiLargeModelsVisionImage;
72
+ /** Video */
73
+ video?: CloudAiLargeModelsVisionVideo;
74
+ }
75
+ interface CloudAiLargeModelsVisionMediaGenerateContentResponse {
76
+ /** Response to the user's request. */
77
+ response?: CloudAiNlLlmProtoServiceGenerateMultiModalResponse;
78
+ }
79
+ interface CloudAiLargeModelsVisionNamedBoundingBox {
80
+ classes?: string[];
81
+ entities?: string[];
82
+ scores?: number[];
83
+ x1?: number;
84
+ x2?: number;
85
+ y1?: number;
86
+ y2?: number;
87
+ }
88
+ interface CloudAiLargeModelsVisionRaiInfo {
89
+ /** List of rai categories' information to return */
90
+ raiCategories?: string[];
91
+ /** List of rai scores mapping to the rai categories. Rounded to 1 decimal place. */
92
+ scores?: number[];
93
+ }
94
+ interface CloudAiLargeModelsVisionReasonVideoResponse {
95
+ /** Generated text responses. The generated responses for different segments within the same video. */
96
+ responses?: CloudAiLargeModelsVisionReasonVideoResponseTextResponse[];
97
+ }
98
+ interface CloudAiLargeModelsVisionReasonVideoResponseTextResponse {
99
+ /** Partition of the caption's video in time. This field is intended for video captioning. To represent the start time and end time of the caption's video. */
100
+ relativeTemporalPartition?: CloudAiLargeModelsVisionRelativeTemporalPartition;
101
+ /** Text information */
102
+ text?: string;
103
+ }
104
+ interface CloudAiLargeModelsVisionRelativeTemporalPartition {
105
+ /** End time offset of the partition. */
106
+ endOffset?: string;
107
+ /** Start time offset of the partition. */
108
+ startOffset?: string;
109
+ }
110
+ interface CloudAiLargeModelsVisionSemanticFilterResponse {
111
+ /** Class labels of the bounding boxes that failed the semantic filtering. Bounding box coordinates. */
112
+ namedBoundingBoxes?: CloudAiLargeModelsVisionNamedBoundingBox[];
113
+ /** This response is added when semantic filter config is turned on in EditConfig. It reports if this image is passed semantic filter response. If passed_semantic_filter is false, the bounding box information will be populated for user to check what caused the semantic filter to fail. */
114
+ passedSemanticFilter?: boolean;
115
+ }
116
+ interface CloudAiLargeModelsVisionVideo {
117
+ /** Path to another storage (typically Google Cloud Storage). */
118
+ uri?: string;
119
+ /** Raw bytes. */
120
+ video?: string;
121
+ }
122
+ interface CloudAiNlLlmProtoServiceCandidate {
123
+ /** Source attribution of the generated content. */
124
+ citationMetadata?: CloudAiNlLlmProtoServiceCitationMetadata;
125
+ /** Content of the candidate. */
126
+ content?: CloudAiNlLlmProtoServiceContent;
127
+ /** A string that describes the filtering behavior in more detail. Only filled when reason is set. */
128
+ finishMessage?: string;
129
+ /** The reason why the model stopped generating tokens. */
130
+ finishReason?: string;
131
+ /** Index of the candidate. */
132
+ index?: number;
133
+ /** Safety ratings of the generated content. */
134
+ safetyRatings?: CloudAiNlLlmProtoServiceSafetyRating[];
135
+ }
136
+ interface CloudAiNlLlmProtoServiceCitation {
137
+ /** End index into the content. */
138
+ endIndex?: number;
139
+ /** License of the attribution. */
140
+ license?: string;
141
+ /** Publication date of the attribution. */
142
+ publicationDate?: GoogleTypeDate;
143
+ /** Start index into the content. */
144
+ startIndex?: number;
145
+ /** Title of the attribution. */
146
+ title?: string;
147
+ /** Url reference of the attribution. */
148
+ uri?: string;
149
+ }
150
+ interface CloudAiNlLlmProtoServiceCitationMetadata {
151
+ /** List of citations. */
152
+ citations?: CloudAiNlLlmProtoServiceCitation[];
153
+ }
154
+ interface CloudAiNlLlmProtoServiceContent {
155
+ /** The parts of the message. */
156
+ parts?: CloudAiNlLlmProtoServicePart[];
157
+ /** The role of the current conversation participant. */
158
+ role?: string;
159
+ }
160
+ interface CloudAiNlLlmProtoServiceFunctionCall {
161
+ /** The function parameters and values in JSON format. */
162
+ args?: {[P in string]: any};
163
+ /** Required. The name of the function to call. */
164
+ name?: string;
165
+ }
166
+ interface CloudAiNlLlmProtoServiceFunctionResponse {
167
+ /** Required. The name of the function to call. */
168
+ name?: string;
169
+ /** Required. The function response in JSON object format. */
170
+ response?: {[P in string]: any};
171
+ }
172
+ interface CloudAiNlLlmProtoServiceGenerateMultiModalResponse {
173
+ /** Possible candidate responses to the conversation up until this point. */
174
+ candidates?: CloudAiNlLlmProtoServiceCandidate[];
175
+ /** Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. */
176
+ promptFeedback?: CloudAiNlLlmProtoServicePromptFeedback;
177
+ /** Billable prediction metrics. */
178
+ reportingMetrics?: IntelligenceCloudAutomlXpsReportingMetrics;
179
+ /** Usage metadata about the response(s). */
180
+ usageMetadata?: CloudAiNlLlmProtoServiceUsageMetadata;
181
+ }
182
+ interface CloudAiNlLlmProtoServicePart {
183
+ /** URI-based data. */
184
+ fileData?: CloudAiNlLlmProtoServicePartFileData;
185
+ /** Function call data. */
186
+ functionCall?: CloudAiNlLlmProtoServiceFunctionCall;
187
+ /** Function response data. */
188
+ functionResponse?: CloudAiNlLlmProtoServiceFunctionResponse;
189
+ /** Inline bytes data */
190
+ inlineData?: CloudAiNlLlmProtoServicePartBlob;
191
+ /** Text input. */
192
+ text?: string;
193
+ /** Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. */
194
+ videoMetadata?: CloudAiNlLlmProtoServicePartVideoMetadata;
195
+ }
196
+ interface CloudAiNlLlmProtoServicePartBlob {
197
+ /** Inline data. */
198
+ data?: string;
199
+ /** The mime type corresponding to this input. */
200
+ mimeType?: string;
201
+ /** Original file data where the blob comes from. */
202
+ originalFileData?: CloudAiNlLlmProtoServicePartFileData;
203
+ }
204
+ interface CloudAiNlLlmProtoServicePartFileData {
205
+ /** URI based data. */
206
+ fileUri?: string;
207
+ /** The mime type corresponding to this input. */
208
+ mimeType?: string;
209
+ }
210
+ interface CloudAiNlLlmProtoServicePartVideoMetadata {
211
+ /** The end offset of the video. */
212
+ endOffset?: string;
213
+ /** The start offset of the video. */
214
+ startOffset?: string;
215
+ }
216
+ interface CloudAiNlLlmProtoServicePromptFeedback {
217
+ /** Blocked reason. */
218
+ blockReason?: string;
219
+ /** A readable block reason message. */
220
+ blockReasonMessage?: string;
221
+ /** Safety ratings. */
222
+ safetyRatings?: CloudAiNlLlmProtoServiceSafetyRating[];
223
+ }
224
+ interface CloudAiNlLlmProtoServiceSafetyRating {
225
+ /** Indicates whether the content was filtered out because of this rating. */
226
+ blocked?: boolean;
227
+ /** Harm category. */
228
+ category?: string;
229
+ /** Harm probability levels in the content. */
230
+ probability?: string;
231
+ }
232
+ interface CloudAiNlLlmProtoServiceUsageMetadata {
233
+ /** Number of tokens in the response(s). */
234
+ candidatesTokenCount?: number;
235
+ /** Number of tokens in the request. */
236
+ promptTokenCount?: number;
237
+ totalTokenCount?: number;
238
+ }
27
239
  interface GoogleApiHttpBody {
28
240
  /** The HTTP Content-Type header value specifying the content type of the body. */
29
241
  contentType?: string;
@@ -306,9 +518,9 @@ declare namespace gapi.client {
306
518
  instancesFormat?: string;
307
519
  }
308
520
  interface GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig {
309
- /** Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. */
521
+ /** Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. */
310
522
  excludedFields?: string[];
311
- /** Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. */
523
+ /** Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. */
312
524
  includedFields?: string[];
313
525
  /** The format of the instance that the Model accepts. Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. */
314
526
  instanceType?: string;
@@ -375,9 +587,9 @@ declare namespace gapi.client {
375
587
  inputUri?: string;
376
588
  }
377
589
  interface GoogleCloudAiplatformV1Blob {
378
- /** Required. Data. */
590
+ /** Required. Raw bytes for media formats. */
379
591
  data?: string;
380
- /** Required. Mime type of the data. */
592
+ /** Required. The IANA standard MIME type of the source data. */
381
593
  mimeType?: string;
382
594
  }
383
595
  interface GoogleCloudAiplatformV1BlurBaselineConfig {
@@ -449,9 +661,9 @@ declare namespace gapi.client {
449
661
  imageUri?: string;
450
662
  }
451
663
  interface GoogleCloudAiplatformV1Content {
452
- /** Required. Ordered parts that make up a message. Parts may have different MIME types. */
664
+ /** Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. */
453
665
  parts?: GoogleCloudAiplatformV1Part[];
454
- /** Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model. */
666
+ /** Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. */
455
667
  role?: string;
456
668
  }
457
669
  interface GoogleCloudAiplatformV1Context {
@@ -665,6 +877,8 @@ declare namespace gapi.client {
665
877
  experiment?: string;
666
878
  /** Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` */
667
879
  experimentRun?: string;
880
+ /** Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. */
881
+ models?: string[];
668
882
  /** Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. */
669
883
  network?: string;
670
884
  /** The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations */
@@ -1301,7 +1515,7 @@ declare namespace gapi.client {
1301
1515
  parameters?: GoogleCloudAiplatformV1ExplanationParameters;
1302
1516
  }
1303
1517
  interface GoogleCloudAiplatformV1ExportDataConfig {
1304
- /** Only used for custom training data export use cases. Only applicable to Datasets that have DataItems and Annotations. Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri. */
1518
+ /** The Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only used for custom training data export use cases. Only applicable to Datasets that have DataItems and Annotations. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri. */
1305
1519
  annotationSchemaUri?: string;
1306
1520
  /** An expression for filtering what part of the Dataset is to be exported. Only Annotations that match this filter will be exported. The filter syntax is the same as in ListAnnotations. */
1307
1521
  annotationsFilter?: string;
@@ -1313,7 +1527,7 @@ declare namespace gapi.client {
1313
1527
  fractionSplit?: GoogleCloudAiplatformV1ExportFractionSplit;
1314
1528
  /** The Google Cloud Storage location where the output is to be written to. In the given directory a new directory will be created with name: `export-data--` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. Inside that directory, annotations with the same schema will be grouped into sub directories which are named with the corresponding annotations' schema title. Inside these sub directories, a schema.yaml will be created to describe the output format. */
1315
1529
  gcsDestination?: GoogleCloudAiplatformV1GcsDestination;
1316
- /** Only used for custom training data export use cases. Only applicable to Datasets that have SavedQueries. The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type. */
1530
+ /** The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only used for custom training data export use cases. Only applicable to Datasets that have SavedQueries. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type. */
1317
1531
  savedQueryId?: string;
1318
1532
  }
1319
1533
  interface GoogleCloudAiplatformV1ExportDataOperationMetadata {
@@ -1703,7 +1917,7 @@ declare namespace gapi.client {
1703
1917
  interface GoogleCloudAiplatformV1FileData {
1704
1918
  /** Required. URI. */
1705
1919
  fileUri?: string;
1706
- /** Required. Mime type of the data. */
1920
+ /** Required. The IANA standard MIME type of the source data. */
1707
1921
  mimeType?: string;
1708
1922
  }
1709
1923
  interface GoogleCloudAiplatformV1FilterSplit {
@@ -1758,18 +1972,6 @@ declare namespace gapi.client {
1758
1972
  /** The fraction of the input data that is to be used to validate the Model. */
1759
1973
  validationFraction?: number;
1760
1974
  }
1761
- interface GoogleCloudAiplatformV1FunctionCall {
1762
- /** Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. */
1763
- args?: {[P in string]: any};
1764
- /** Required. The name of the function to call. Matches [FunctionDeclaration.name]. */
1765
- name?: string;
1766
- }
1767
- interface GoogleCloudAiplatformV1FunctionResponse {
1768
- /** Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. */
1769
- name?: string;
1770
- /** Required. The function response in JSON object format. */
1771
- response?: {[P in string]: any};
1772
- }
1773
1975
  interface GoogleCloudAiplatformV1GcsDestination {
1774
1976
  /** Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. */
1775
1977
  outputUriPrefix?: string;
@@ -3110,10 +3312,6 @@ declare namespace gapi.client {
3110
3312
  interface GoogleCloudAiplatformV1Part {
3111
3313
  /** Optional. URI based data. */
3112
3314
  fileData?: GoogleCloudAiplatformV1FileData;
3113
- /** Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. */
3114
- functionCall?: GoogleCloudAiplatformV1FunctionCall;
3115
- /** Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. */
3116
- functionResponse?: GoogleCloudAiplatformV1FunctionResponse;
3117
3315
  /** Optional. Inlined bytes data. */
3118
3316
  inlineData?: GoogleCloudAiplatformV1Blob;
3119
3317
  /** Optional. Text part (can be code). */
@@ -3360,6 +3558,8 @@ declare namespace gapi.client {
3360
3558
  supportedActions?: GoogleCloudAiplatformV1PublisherModelCallToAction;
3361
3559
  /** Output only. Immutable. The version ID of the PublisherModel. A new version is committed when a new model version is uploaded under an existing model id. It is an auto-incrementing decimal number in string representation. */
3362
3560
  versionId?: string;
3561
+ /** Optional. Indicates the state of the model version. */
3562
+ versionState?: string;
3363
3563
  }
3364
3564
  interface GoogleCloudAiplatformV1PublisherModelCallToAction {
3365
3565
  /** Optional. Create application using the PublisherModel. */
@@ -3396,6 +3596,8 @@ declare namespace gapi.client {
3396
3596
  largeModelReference?: GoogleCloudAiplatformV1LargeModelReference;
3397
3597
  /** Optional. Default model display name. */
3398
3598
  modelDisplayName?: string;
3599
+ /** Optional. The signed URI for ephemeral Cloud Storage access to model artifact. */
3600
+ publicArtifactUri?: string;
3399
3601
  /** The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */
3400
3602
  sharedResources?: string;
3401
3603
  /** Required. The title of the regional resource reference. */
@@ -3422,10 +3624,14 @@ declare namespace gapi.client {
3422
3624
  title?: string;
3423
3625
  }
3424
3626
  interface GoogleCloudAiplatformV1PublisherModelResourceReference {
3627
+ /** Description of the resource. */
3628
+ description?: string;
3425
3629
  /** The resource name of the Google Cloud resource. */
3426
3630
  resourceName?: string;
3427
3631
  /** The URI of the resource. */
3428
3632
  uri?: string;
3633
+ /** Use case (CUJ) of the resource. */
3634
+ useCase?: string;
3429
3635
  }
3430
3636
  interface GoogleCloudAiplatformV1PurgeArtifactsMetadata {
3431
3637
  /** Operation metadata for purging Artifacts. */
@@ -5790,6 +5996,14 @@ declare namespace gapi.client {
5790
5996
  /** The amount of red in the color as a value in the interval [0, 1]. */
5791
5997
  red?: number;
5792
5998
  }
5999
+ interface GoogleTypeDate {
6000
+ /** Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */
6001
+ day?: number;
6002
+ /** Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */
6003
+ month?: number;
6004
+ /** Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */
6005
+ year?: number;
6006
+ }
5793
6007
  interface GoogleTypeExpr {
5794
6008
  /** Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */
5795
6009
  description?: string;
@@ -5814,6 +6028,30 @@ declare namespace gapi.client {
5814
6028
  /** The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. */
5815
6029
  units?: string;
5816
6030
  }
6031
+ interface IntelligenceCloudAutomlXpsMetricEntry {
6032
+ /** For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the "cloudbilling.googleapis.com/argentum_metric_id" label. Otherwise leave empty. */
6033
+ argentumMetricId?: string;
6034
+ /** A double value. */
6035
+ doubleValue?: number;
6036
+ /** A signed 64-bit integer value. */
6037
+ int64Value?: string;
6038
+ /** The metric name defined in the service configuration. */
6039
+ metricName?: string;
6040
+ /** Billing system labels for this (metric, value) pair. */
6041
+ systemLabels?: IntelligenceCloudAutomlXpsMetricEntryLabel[];
6042
+ }
6043
+ interface IntelligenceCloudAutomlXpsMetricEntryLabel {
6044
+ /** The name of the label. */
6045
+ labelName?: string;
6046
+ /** The value of the label. */
6047
+ labelValue?: string;
6048
+ }
6049
+ interface IntelligenceCloudAutomlXpsReportingMetrics {
6050
+ /** The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set. */
6051
+ effectiveTrainingDuration?: string;
6052
+ /** One entry per metric name. The values must be aggregated per metric name. */
6053
+ metricEntries?: IntelligenceCloudAutomlXpsMetricEntry[];
6054
+ }
5817
6055
  interface BatchPredictionJobsResource {
5818
6056
  /** Cancels a BatchPredictionJob. Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use JobService.GetBatchPredictionJob or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its BatchPredictionJob.state is set to `CANCELLED`. Any files already outputted by the job are not deleted. */
5819
6057
  cancel(request: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@maxim_mazurok/gapi.client.aiplatform-v1",
3
- "version": "0.0.20231129",
3
+ "version": "0.0.20231214",
4
4
  "description": "TypeScript typings for Vertex AI API v1",
5
5
  "repository": {
6
6
  "type": "git",