@maxim_mazurok/gapi.client.aiplatform-v1 0.0.20250205 → 0.0.20250212

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.d.ts +103 -9
  2. package/package.json +1 -1
package/index.d.ts CHANGED
@@ -9,7 +9,7 @@
  // This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
  // In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
  // Generated from: https://aiplatform.googleapis.com/$discovery/rest?version=v1
- // Revision: 20250205
+ // Revision: 20250212

  /// <reference types="gapi.client" />

@@ -292,9 +292,9 @@ declare namespace gapi.client {
  facts?: GoogleCloudAiplatformV1Fact[];
  }
  interface GoogleCloudAiplatformV1AutomaticResources {
- /** Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. */
+ /** Immutable. The maximum number of replicas that may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale to that many replicas is guaranteed (barring service outages). If traffic increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. */
  maxReplicaCount?: number;
- /** Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. */
+ /** Immutable. The minimum number of replicas that will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. */
  minReplicaCount?: number;
  }
  interface GoogleCloudAiplatformV1AutoraterConfig {
@@ -591,7 +591,7 @@ declare namespace gapi.client {
  displayName?: string;
  /** Timestamp of when this resource is considered expired. This is *always* provided on output, regardless of what was sent on input. */
  expireTime?: string;
- /** Immutable. The name of the publisher model to use for cached content. Format: projects/{project}/locations/{location}/publishers/{publisher}/models/{model} */
+ /** Immutable. The name of the `Model` to use for cached content. Currently, only the published Gemini base models are supported, in form of projects/{PROJECT}/locations/{LOCATION}/publishers/google/models/{MODEL} */
  model?: string;
  /** Immutable. Identifier. The server-generated resource name of the cached content Format: projects/{project}/locations/{location}/cachedContents/{cached_content} */
  name?: string;
@@ -1206,13 +1206,13 @@ declare namespace gapi.client {
  interface GoogleCloudAiplatformV1DedicatedResources {
  /** Immutable. The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. */
  autoscalingMetricSpecs?: GoogleCloudAiplatformV1AutoscalingMetricSpec[];
- /** Required. Immutable. The specification of a single machine used by the prediction. */
+ /** Required. Immutable. The specification of a single machine being used. */
  machineSpec?: GoogleCloudAiplatformV1MachineSpec;
- /** Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). */
+ /** Immutable. The maximum number of replicas that may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale to that many replicas is guaranteed (barring service outages). If traffic increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). */
  maxReplicaCount?: number;
- /** Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. */
+ /** Required. Immutable. The minimum number of machine replicas that will be always deployed on. This value must be greater than or equal to 1. If traffic increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. */
  minReplicaCount?: number;
- /** Optional. Number of required available replicas for the deployment to succeed. This field is only needed when partial model deployment/mutation is desired. If set, the model deploy/mutate operation will succeed once available_replica_count reaches required_replica_count, and the rest of the replicas will be retried. If not set, the default required_replica_count will be min_replica_count. */
+ /** Optional. Number of required available replicas for the deployment to succeed. This field is only needed when partial deployment/mutation is desired. If set, the deploy/mutate operation will succeed once available_replica_count reaches required_replica_count, and the rest of the replicas will be retried. If not set, the default required_replica_count will be min_replica_count. */
  requiredReplicaCount?: number;
  /** Optional. If true, schedule the deployment workload on [spot VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). */
  spot?: boolean;
@@ -1576,6 +1576,16 @@ declare namespace gapi.client {
  /** Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` */
  explanationType?: string;
  }
+ interface GoogleCloudAiplatformV1EvaluateDatasetRequest {
+ /** Optional. Autorater config used for evaluation. */
+ autoraterConfig?: GoogleCloudAiplatformV1AutoraterConfig;
+ /** Required. The dataset used for evaluation. */
+ dataset?: GoogleCloudAiplatformV1EvaluationDataset;
+ /** Required. The metrics used for evaluation. */
+ metrics?: GoogleCloudAiplatformV1Metric[];
+ /** Required. Config for evaluation output. */
+ outputConfig?: GoogleCloudAiplatformV1OutputConfig;
+ }
  interface GoogleCloudAiplatformV1EvaluateInstancesRequest {
  /** Optional. Autorater config used for evaluation. */
  autoraterConfig?: GoogleCloudAiplatformV1AutoraterConfig;
@@ -1706,6 +1716,12 @@ declare namespace gapi.client {
  /** Results for trajectory single tool use metric. */
  trajectorySingleToolUseResults?: GoogleCloudAiplatformV1TrajectorySingleToolUseResults;
  }
+ interface GoogleCloudAiplatformV1EvaluationDataset {
+ /** BigQuery source holds the dataset. */
+ bigquerySource?: GoogleCloudAiplatformV1BigQuerySource;
+ /** Cloud storage source holds the dataset. */
+ gcsSource?: GoogleCloudAiplatformV1GcsSource;
+ }
  interface GoogleCloudAiplatformV1Event {
  /** Required. The relative resource name of the Artifact in the Event. */
  artifact?: string;
@@ -3575,6 +3591,20 @@ declare namespace gapi.client {
  /** The disk utilization of the MetadataStore in bytes. */
  diskUtilizationBytes?: string;
  }
+ interface GoogleCloudAiplatformV1Metric {
+ /** Optional. The aggregation metrics to use. */
+ aggregationMetrics?: string[];
+ /** Spec for bleu metric. */
+ bleuSpec?: GoogleCloudAiplatformV1BleuSpec;
+ /** Spec for exact match metric. */
+ exactMatchSpec?: any;
+ /** Spec for pairwise metric. */
+ pairwiseMetricSpec?: GoogleCloudAiplatformV1PairwiseMetricSpec;
+ /** Spec for pointwise metric. */
+ pointwiseMetricSpec?: GoogleCloudAiplatformV1PointwiseMetricSpec;
+ /** Spec for rouge metric. */
+ rougeSpec?: GoogleCloudAiplatformV1RougeSpec;
+ }
  interface GoogleCloudAiplatformV1MetricxInput {
  /** Required. Metricx instance. */
  instance?: GoogleCloudAiplatformV1MetricxInstance;
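
The new GoogleCloudAiplatformV1Metric interface is effectively a one-of wrapper: every member is optional and a caller sets exactly one *Spec field per metric. A minimal sketch of a metric list against these typings follows. The empty spec objects type-check because the generated spec interfaces contain only optional members, the 'AVERAGE' aggregation value is a placeholder not taken from this diff, and the gapi.client.aiplatform namespace nesting is assumed from the generator's usual layout.

// Sketch only: selecting metrics with the interfaces added in this revision.
// Assumes the generator nests these types under gapi.client.aiplatform.
const metrics: gapi.client.aiplatform.GoogleCloudAiplatformV1Metric[] = [
  { exactMatchSpec: {} },                              // exactMatchSpec is typed as `any`
  { bleuSpec: {} },                                    // GoogleCloudAiplatformV1BleuSpec (all fields optional)
  { rougeSpec: {}, aggregationMetrics: ['AVERAGE'] },  // 'AVERAGE' is a placeholder string
];
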
@@ -4102,6 +4132,8 @@ declare namespace gapi.client {
  checkpointId?: string;
  /** The epoch of the checkpoint. */
  epoch?: string;
+ /** Identifier. The resource name of the ModelVersionCheckpoint. Format: `projects/{project}/locations/{location}/models/{model}/versions/{version}/checkpoints/{checkpoint}` */
+ name?: string;
  /** The step of the checkpoint. */
  step?: string;
  }
@@ -4532,6 +4564,10 @@ declare namespace gapi.client {
  /** Optional. Post startup script config. */
  postStartupScriptConfig?: GoogleCloudAiplatformV1PostStartupScriptConfig;
  }
+ interface GoogleCloudAiplatformV1OutputConfig {
+ /** Cloud storage destination for evaluation output. */
+ gcsDestination?: GoogleCloudAiplatformV1GcsDestination;
+ }
  interface GoogleCloudAiplatformV1PairwiseMetricInput {
  /** Required. Pairwise metric instance. */
  instance?: GoogleCloudAiplatformV1PairwiseMetricInstance;
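
With GoogleCloudAiplatformV1EvaluationDataset, GoogleCloudAiplatformV1Metric, and the new GoogleCloudAiplatformV1OutputConfig in place, a complete evaluation request body can be assembled as sketched below. This is a hedged sketch only: the uris and outputUriPrefix fields come from the pre-existing GcsSource/GcsDestination typings rather than from this diff, the bucket paths are placeholders, and the namespace nesting under gapi.client.aiplatform is an assumption.

// Sketch: assembling a GoogleCloudAiplatformV1EvaluateDatasetRequest from the
// interfaces added in revision 20250212 (namespace nesting assumed).
const request: gapi.client.aiplatform.GoogleCloudAiplatformV1EvaluateDatasetRequest = {
  dataset: {
    // GcsSource.uris comes from the existing typings; the URI is a placeholder.
    gcsSource: { uris: ['gs://example-bucket/eval/dataset.jsonl'] },
  },
  metrics: [{ exactMatchSpec: {} }, { bleuSpec: {} }],
  outputConfig: {
    // GcsDestination.outputUriPrefix comes from the existing typings.
    gcsDestination: { outputUriPrefix: 'gs://example-bucket/eval/output/' },
  },
};
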
@@ -6686,7 +6722,7 @@ declare namespace gapi.client {
  displayName?: string;
  }
  interface GoogleCloudAiplatformV1SchemaTextDataItem {
- /** Output only. Google Cloud Storage URI points to the original text in user's bucket. The text file is up to 10MB in size. */
+ /** Output only. Google Cloud Storage URI points to a copy of the original text in the Vertex-managed bucket in the user's project. The text file is up to 10MB in size. */
  gcsUri?: string;
  }
  interface GoogleCloudAiplatformV1SchemaTextDatasetMetadata {
@@ -31905,6 +31941,64 @@ declare namespace gapi.client {
  },
  body: GoogleCloudAiplatformV1CorroborateContentRequest,
  ): Request<GoogleCloudAiplatformV1CorroborateContentResponse>;
+ /** Evaluates a dataset based on a set of given metrics. */
+ evaluateDataset(request: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The resource name of the Location to evaluate the dataset. Format: `projects/{project}/locations/{location}` */
+ location: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ /** Request body */
+ resource: GoogleCloudAiplatformV1EvaluateDatasetRequest;
+ }): Request<GoogleLongrunningOperation>;
+ evaluateDataset(
+ request: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The resource name of the Location to evaluate the dataset. Format: `projects/{project}/locations/{location}` */
+ location: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ },
+ body: GoogleCloudAiplatformV1EvaluateDatasetRequest,
+ ): Request<GoogleLongrunningOperation>;
  /** Evaluates instances based on a given metric. */
  evaluateInstances(request: {
  /** V1 error format. */
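
The new evaluateDataset method returns a Request<GoogleLongrunningOperation>, so callers poll the returned operation rather than reading evaluation results from the response directly. Below is a hedged sketch of a call, assuming the method is exposed on the projects.locations resource the same way its sibling evaluateInstances is, and that the aiplatform client has already been loaded and authorized; both assumptions come from the existing typings and API layout, not from this diff.

// Sketch: starting a dataset evaluation with the method added in this revision.
// Resource nesting (projects.locations) and prior gapi.client setup are assumptions.
async function runDatasetEvaluation(
  body: gapi.client.aiplatform.GoogleCloudAiplatformV1EvaluateDatasetRequest,
): Promise<void> {
  const response = await gapi.client.aiplatform.projects.locations.evaluateDataset({
    // `location` is the only required request parameter in the new signature.
    location: 'projects/example-project/locations/us-central1', // placeholder
    resource: body,
  });
  // The call resolves to a long-running operation; poll it via the operations
  // resource until `done` is true before reading the evaluation output.
  console.log('Started evaluation operation:', response.result.name);
}
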
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@maxim_mazurok/gapi.client.aiplatform-v1",
- "version": "0.0.20250205",
+ "version": "0.0.20250212",
  "description": "TypeScript typings for Vertex AI API v1",
  "repository": {
  "type": "git",