@maxim_mazurok/gapi.client.aiplatform-v1 0.3.20251028 → 0.4.20251028

This diff shows the changes between two publicly released versions of this package, as they appear in one of the supported registries. It is provided for informational purposes only.
Files changed (2)
  1. package/index.d.ts +216 -122
  2. package/package.json +1 -1
package/index.d.ts CHANGED
@@ -641,9 +641,9 @@ declare namespace gapi.client {
641
641
  useEffectiveOrder?: boolean;
642
642
  }
643
643
  interface GoogleCloudAiplatformV1Blob {
644
- /** Required. Raw bytes. */
644
+ /** Required. The raw bytes of the data. */
645
645
  data?: string;
646
- /** Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. */
646
+ /** Optional. The display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server-side tools (`code_execution`, `google_search`, and `url_context`) are enabled. */
647
647
  displayName?: string;
648
648
  /** Required. The IANA standard MIME type of the source data. */
649
649
  mimeType?: string;
@@ -712,25 +712,25 @@ declare namespace gapi.client {
712
712
  interface GoogleCloudAiplatformV1CancelTrainingPipelineRequest {}
713
713
  interface GoogleCloudAiplatformV1CancelTuningJobRequest {}
714
714
  interface GoogleCloudAiplatformV1Candidate {
715
- /** Output only. Average log probability score of the candidate. */
715
+ /** Output only. The average log probability of the tokens in this candidate. This is a length-normalized score that can be used to compare the quality of candidates of different lengths. A higher average log probability suggests a more confident and coherent response. */
716
716
  avgLogprobs?: number;
717
- /** Output only. Source attribution of the generated content. */
717
+ /** Output only. A collection of citations that apply to the generated content. */
718
718
  citationMetadata?: GoogleCloudAiplatformV1CitationMetadata;
719
- /** Output only. Content parts of the candidate. */
719
+ /** Output only. The content of the candidate. */
720
720
  content?: GoogleCloudAiplatformV1Content;
721
- /** Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. */
721
+ /** Output only. Describes the reason the model stopped generating tokens in more detail. This field is returned only when `finish_reason` is set. */
722
722
  finishMessage?: string;
723
- /** Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. */
723
+ /** Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating. */
724
724
  finishReason?: string;
725
- /** Output only. Metadata specifies sources used to ground generated content. */
725
+ /** Output only. Metadata returned when grounding is enabled. It contains the sources used to ground the generated content. */
726
726
  groundingMetadata?: GoogleCloudAiplatformV1GroundingMetadata;
727
- /** Output only. Index of the candidate. */
727
+ /** Output only. The 0-based index of this candidate in the list of generated responses. This is useful for distinguishing between multiple candidates when `candidate_count` > 1. */
728
728
  index?: number;
729
- /** Output only. Log-likelihood scores for the response tokens and top tokens */
729
+ /** Output only. The detailed log probability information for the tokens in this candidate. This is useful for debugging, understanding model uncertainty, and identifying potential "hallucinations". */
730
730
  logprobsResult?: GoogleCloudAiplatformV1LogprobsResult;
731
- /** Output only. List of ratings for the safety of a response candidate. There is at most one rating per category. */
731
+ /** Output only. A list of ratings for the safety of a response candidate. There is at most one rating per category. */
732
732
  safetyRatings?: GoogleCloudAiplatformV1SafetyRating[];
733
- /** Output only. Metadata related to url context retrieval tool. */
733
+ /** Output only. Metadata returned when the model uses the `url_context` tool to get information from a user-provided URL. */
734
734
  urlContextMetadata?: GoogleCloudAiplatformV1UrlContextMetadata;
735
735
  }
736
736
  interface GoogleCloudAiplatformV1CandidateResponse {
@@ -777,21 +777,21 @@ declare namespace gapi.client {
777
777
  shouldStop?: boolean;
778
778
  }
779
779
  interface GoogleCloudAiplatformV1Citation {
780
- /** Output only. End index into the content. */
780
+ /** Output only. The end index of the citation in the content. */
781
781
  endIndex?: number;
782
- /** Output only. License of the attribution. */
782
+ /** Output only. The license of the source of the citation. */
783
783
  license?: string;
784
- /** Output only. Publication date of the attribution. */
784
+ /** Output only. The publication date of the source of the citation. */
785
785
  publicationDate?: GoogleTypeDate;
786
- /** Output only. Start index into the content. */
786
+ /** Output only. The start index of the citation in the content. */
787
787
  startIndex?: number;
788
- /** Output only. Title of the attribution. */
788
+ /** Output only. The title of the source of the citation. */
789
789
  title?: string;
790
- /** Output only. Url reference of the attribution. */
790
+ /** Output only. The URI of the source of the citation. */
791
791
  uri?: string;
792
792
  }
793
793
  interface GoogleCloudAiplatformV1CitationMetadata {
794
- /** Output only. List of citations. */
794
+ /** Output only. A list of citations for the content. */
795
795
  citations?: GoogleCloudAiplatformV1Citation[];
796
796
  }
797
797
  interface GoogleCloudAiplatformV1Claim {
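
A minimal sketch of consuming the reworded `Candidate` and `Citation` fields (assuming these typings are loaded; the candidate list would come from a `GenerateContent` response, which is not shown here):

```ts
// Hypothetical helper: log the per-candidate metadata described above.
type Candidate = gapi.client.GoogleCloudAiplatformV1Candidate;

function summarizeCandidates(candidates: Candidate[]): void {
  for (const candidate of candidates) {
    // `index` is 0-based and mainly useful when candidate_count > 1.
    console.log(`candidate #${candidate.index ?? 0}`);
    // `finishReason` stays empty while the model is still generating.
    if (candidate.finishReason) {
      console.log(`  finished: ${candidate.finishReason} ${candidate.finishMessage ?? ''}`);
    }
    // A higher (less negative) average log probability suggests a more confident response.
    console.log(`  avgLogprobs: ${candidate.avgLogprobs ?? 'n/a'}`);
    for (const citation of candidate.citationMetadata?.citations ?? []) {
      console.log(`  cites: ${citation.title ?? citation.uri} [${citation.startIndex}, ${citation.endIndex})`);
    }
  }
}
```
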
@@ -913,9 +913,9 @@ declare namespace gapi.client {
913
913
  imageUri?: string;
914
914
  }
915
915
  interface GoogleCloudAiplatformV1Content {
916
- /** Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. */
916
+ /** Required. A list of Part objects that make up a single message. Parts of a message can have different MIME types. A Content message must have at least one Part. */
917
917
  parts?: GoogleCloudAiplatformV1Part[];
918
- /** Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. */
918
+ /** Optional. The producer of the content. Must be either 'user' or 'model'. If not set, the service will default to 'user'. */
919
919
  role?: string;
920
920
  }
921
921
  interface GoogleCloudAiplatformV1ContentMap {
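
A minimal sketch of building a single-turn `Content` message as described above; the Cloud Storage URI is a placeholder:

```ts
// One message with mixed-MIME parts; role may be omitted and defaults to 'user'.
const userTurn: gapi.client.GoogleCloudAiplatformV1Content = {
  role: 'user',
  parts: [
    { text: 'Summarize the attached report in two sentences.' },
    { fileData: { fileUri: 'gs://example-bucket/report.pdf', mimeType: 'application/pdf' } },
  ],
};
```
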
@@ -1154,6 +1154,10 @@ declare namespace gapi.client {
1154
1154
  /** Required. Google Cloud Storage location. */
1155
1155
  gcsSource?: GoogleCloudAiplatformV1GcsSource;
1156
1156
  }
1157
+ interface GoogleCloudAiplatformV1CustomCodeExecutionSpec {
1158
+ /** Required. Python function. Expected user to define the following function, e.g.: def evaluate(instance: dict[str, Any]) -> float: Please include this function signature in the code snippet. Instance is the evaluation instance, any fields populated in the instance are available to the function as instance[field_name]. Example: Example input: ``` instance= EvaluationInstance( response=EvaluationInstance.InstanceData(text="The answer is 4."), reference=EvaluationInstance.InstanceData(text="4") ) ``` Example converted input: ``` { 'response': {'text': 'The answer is 4.'}, 'reference': {'text': '4'} } ``` Example python function: ``` def evaluate(instance: dict[str, Any]) -> float: if instance'response' == instance'reference': return 1.0 return 0.0 ``` */
1159
+ evaluationFunction?: string;
1160
+ }
1157
1161
  interface GoogleCloudAiplatformV1CustomJob {
1158
1162
  /** Output only. Time when the CustomJob was created. */
1159
1163
  createTime?: string;
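
A minimal sketch of the new `CustomCodeExecutionSpec`, with an `evaluationFunction` that follows the documented `def evaluate(instance: dict[str, Any]) -> float` signature (the Python body is illustrative only):

```ts
// Exact-match style metric expressed as the documented Python evaluate() function.
const exactMatchSpec: gapi.client.GoogleCloudAiplatformV1CustomCodeExecutionSpec = {
  evaluationFunction: [
    'def evaluate(instance: dict[str, Any]) -> float:',
    "    if instance['response'] == instance['reference']:",
    '        return 1.0',
    '    return 0.0',
  ].join('\n'),
};
```

Later in this diff, the metric spec gains a `customCodeExecutionSpec` field that accepts an object like this.
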
@@ -1322,6 +1326,32 @@ declare namespace gapi.client {
1322
1326
  /** Output only. Timestamp when this Dataset was last updated. */
1323
1327
  updateTime?: string;
1324
1328
  }
1329
+ interface GoogleCloudAiplatformV1DatasetDistribution {
1330
+ /** Output only. Defines the histogram bucket. */
1331
+ buckets?: GoogleCloudAiplatformV1DatasetDistributionDistributionBucket[];
1332
+ /** Output only. The maximum of the population values. */
1333
+ max?: number;
1334
+ /** Output only. The arithmetic mean of the values in the population. */
1335
+ mean?: number;
1336
+ /** Output only. The median of the values in the population. */
1337
+ median?: number;
1338
+ /** Output only. The minimum of the population values. */
1339
+ min?: number;
1340
+ /** Output only. The 5th percentile of the values in the population. */
1341
+ p5?: number;
1342
+ /** Output only. The 95th percentile of the values in the population. */
1343
+ p95?: number;
1344
+ /** Output only. Sum of a given population of values. */
1345
+ sum?: number;
1346
+ }
1347
+ interface GoogleCloudAiplatformV1DatasetDistributionDistributionBucket {
1348
+ /** Output only. Number of values in the bucket. */
1349
+ count?: string;
1350
+ /** Output only. Left bound of the bucket. */
1351
+ left?: number;
1352
+ /** Output only. Right bound of the bucket. */
1353
+ right?: number;
1354
+ }
1325
1355
  interface GoogleCloudAiplatformV1DatasetVersion {
1326
1356
  /** Output only. Name of the associated BigQuery dataset. */
1327
1357
  bigQueryDatasetName?: string;
@@ -3116,9 +3146,9 @@ declare namespace gapi.client {
3116
3146
  operationName?: string;
3117
3147
  }
3118
3148
  interface GoogleCloudAiplatformV1FileData {
3119
- /** Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. */
3149
+ /** Optional. The display name of the file. Used to provide a label or filename to distinguish files. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server side tools (`code_execution`, `google_search`, and `url_context`) are enabled. */
3120
3150
  displayName?: string;
3121
- /** Required. URI. */
3151
+ /** Required. The URI of the file in Google Cloud Storage. */
3122
3152
  fileUri?: string;
3123
3153
  /** Required. The IANA standard MIME type of the source data. */
3124
3154
  mimeType?: string;
@@ -3303,6 +3333,18 @@ declare namespace gapi.client {
3303
3333
  /** GDC zone. A cluster will be designated for the Vertex AI workload in this zone. */
3304
3334
  zone?: string;
3305
3335
  }
3336
+ interface GoogleCloudAiplatformV1GeminiPreferenceExample {
3337
+ /** List of completions for a given prompt. */
3338
+ completions?: GoogleCloudAiplatformV1GeminiPreferenceExampleCompletion[];
3339
+ /** Multi-turn contents that represents the Prompt. */
3340
+ contents?: GoogleCloudAiplatformV1Content[];
3341
+ }
3342
+ interface GoogleCloudAiplatformV1GeminiPreferenceExampleCompletion {
3343
+ /** Single turn completion for the given prompt. */
3344
+ completion?: GoogleCloudAiplatformV1Content;
3345
+ /** The score for the given completion. */
3346
+ score?: number;
3347
+ }
3306
3348
  interface GoogleCloudAiplatformV1GenAiAdvancedFeaturesConfig {
3307
3349
  /** Configuration for Retrieval Augmented Generation feature. */
3308
3350
  ragConfig?: GoogleCloudAiplatformV1GenAiAdvancedFeaturesConfigRagConfig;
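
A minimal sketch of the new `GeminiPreferenceExample`: one prompt plus two completions with hypothetical preference scores:

```ts
const preferenceExample: gapi.client.GoogleCloudAiplatformV1GeminiPreferenceExample = {
  contents: [{ role: 'user', parts: [{ text: 'Write a haiku about the sea.' }] }],
  completions: [
    { score: 1.0, completion: { role: 'model', parts: [{ text: 'Salt wind on the bow...' }] } },
    { score: 0.2, completion: { role: 'model', parts: [{ text: 'The sea is big and wet.' }] } },
  ],
};
```
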
@@ -3431,55 +3473,55 @@ declare namespace gapi.client {
3431
3473
  mimeType?: string;
3432
3474
  }
3433
3475
  interface GoogleCloudAiplatformV1GenerationConfig {
3434
- /** Optional. If enabled, audio timestamp will be included in the request to the model. */
3476
+ /** Optional. If enabled, audio timestamps will be included in the request to the model. This can be useful for synchronizing audio with other modalities in the response. */
3435
3477
  audioTimestamp?: boolean;
3436
- /** Optional. Number of candidates to generate. */
3478
+ /** Optional. The number of candidate responses to generate. A higher `candidate_count` can provide more options to choose from, but it also consumes more resources. This can be useful for generating a variety of responses and selecting the best one. */
3437
3479
  candidateCount?: number;
3438
- /** Optional. If enabled, the model will detect emotions and adapt its responses accordingly. */
3480
+ /** Optional. If enabled, the model will detect emotions and adapt its responses accordingly. For example, if the model detects that the user is frustrated, it may provide a more empathetic response. */
3439
3481
  enableAffectiveDialog?: boolean;
3440
- /** Optional. Frequency penalties. */
3482
+ /** Optional. Penalizes tokens based on their frequency in the generated text. A positive value helps to reduce the repetition of words and phrases. Valid values can range from [-2.0, 2.0]. */
3441
3483
  frequencyPenalty?: number;
3442
3484
  /** Optional. Config for image generation features. */
3443
3485
  imageConfig?: GoogleCloudAiplatformV1ImageConfig;
3444
- /** Optional. Logit probabilities. */
3486
+ /** Optional. The number of top log probabilities to return for each token. This can be used to see which other tokens were considered likely candidates for a given position. A higher value will return more options, but it will also increase the size of the response. */
3445
3487
  logprobs?: number;
3446
- /** Optional. The maximum number of output tokens to generate per message. */
3488
+ /** Optional. The maximum number of tokens to generate in the response. A token is approximately four characters. The default value varies by model. This parameter can be used to control the length of the generated text and prevent overly long responses. */
3447
3489
  maxOutputTokens?: number;
3448
- /** Optional. If specified, the media resolution specified will be used. */
3490
+ /** Optional. The token resolution at which input media content is sampled. This is used to control the trade-off between the quality of the response and the number of tokens used to represent the media. A higher resolution allows the model to perceive more detail, which can lead to a more nuanced response, but it will also use more tokens. This does not affect the image dimensions sent to the model. */
3449
3491
  mediaResolution?: string;
3450
- /** Optional. Positive penalties. */
3492
+ /** Optional. Penalizes tokens that have already appeared in the generated text. A positive value encourages the model to generate more diverse and less repetitive text. Valid values can range from [-2.0, 2.0]. */
3451
3493
  presencePenalty?: number;
3452
- /** Optional. Output schema of the generated response. This is an alternative to `response_schema` that accepts [JSON Schema](https://json-schema.org/). If set, `response_schema` must be omitted, but `response_mime_type` is required. While the full JSON Schema may be sent, not all features are supported. Specifically, only the following properties are supported: - `$id` - `$defs` - `$ref` - `$anchor` - `type` - `format` - `title` - `description` - `enum` (for strings and numbers) - `items` - `prefixItems` - `minItems` - `maxItems` - `minimum` - `maximum` - `anyOf` - `oneOf` (interpreted the same as `anyOf`) - `properties` - `additionalProperties` - `required` The non-standard `propertyOrdering` property may also be set. Cyclic references are unrolled to a limited degree and, as such, may only be used within non-required properties. (Nullable properties are not sufficient.) If `$ref` is set on a sub-schema, no other properties, except for than those starting as a `$`, may be set. */
3494
+ /** Optional. When this field is set, response_schema must be omitted and response_mime_type must be set to `application/json`. */
3453
3495
  responseJsonSchema?: any;
3454
- /** Optional. If true, export the logprobs results in response. */
3496
+ /** Optional. If set to true, the log probabilities of the output tokens are returned. Log probabilities are the logarithm of the probability of a token appearing in the output. A higher log probability means the token is more likely to be generated. This can be useful for analyzing the model's confidence in its own output and for debugging. */
3455
3497
  responseLogprobs?: boolean;
3456
- /** Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. */
3498
+ /** Optional. The IANA standard MIME type of the response. The model will generate output that conforms to this MIME type. Supported values include 'text/plain' (default) and 'application/json'. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. */
3457
3499
  responseMimeType?: string;
3458
- /** Optional. The modalities of the response. */
3500
+ /** Optional. The modalities of the response. The model will generate a response that includes all the specified modalities. For example, if this is set to `[TEXT, IMAGE]`, the response will include both text and an image. */
3459
3501
  responseModalities?: string[];
3460
- /** Optional. The `Schema` object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response. */
3502
+ /** Optional. Lets you to specify a schema for the model's response, ensuring that the output conforms to a particular structure. This is useful for generating structured data such as JSON. The schema is a subset of the [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema) object. When this field is set, you must also set the `response_mime_type` to `application/json`. */
3461
3503
  responseSchema?: GoogleCloudAiplatformV1Schema;
3462
3504
  /** Optional. Routing configuration. */
3463
3505
  routingConfig?: GoogleCloudAiplatformV1GenerationConfigRoutingConfig;
3464
- /** Optional. Seed. */
3506
+ /** Optional. A seed for the random number generator. By setting a seed, you can make the model's output mostly deterministic. For a given prompt and parameters (like temperature, top_p, etc.), the model will produce the same response every time. However, it's not a guaranteed absolute deterministic behavior. This is different from parameters like `temperature`, which control the *level* of randomness. `seed` ensures that the "random" choices the model makes are the same on every run, making it essential for testing and ensuring reproducible results. */
3465
3507
  seed?: number;
3466
3508
  /** Optional. The speech generation config. */
3467
3509
  speechConfig?: GoogleCloudAiplatformV1SpeechConfig;
3468
- /** Optional. Stop sequences. */
3510
+ /** Optional. A list of character sequences that will stop the model from generating further tokens. If a stop sequence is generated, the output will end at that point. This is useful for controlling the length and structure of the output. For example, you can use ["\n", "###"] to stop generation at a new line or a specific marker. */
3469
3511
  stopSequences?: string[];
3470
- /** Optional. Controls the randomness of predictions. */
3512
+ /** Optional. Controls the randomness of the output. A higher temperature results in more creative and diverse responses, while a lower temperature makes the output more predictable and focused. The valid range is (0.0, 2.0]. */
3471
3513
  temperature?: number;
3472
- /** Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking. */
3514
+ /** Optional. Configuration for thinking features. An error will be returned if this field is set for models that don't support thinking. */
3473
3515
  thinkingConfig?: GoogleCloudAiplatformV1GenerationConfigThinkingConfig;
3474
- /** Optional. If specified, top-k sampling will be used. */
3516
+ /** Optional. Specifies the top-k sampling threshold. The model considers only the top k most probable tokens for the next token. This can be useful for generating more coherent and less random text. For example, a `top_k` of 40 means the model will choose the next word from the 40 most likely words. */
3475
3517
  topK?: number;
3476
- /** Optional. If specified, nucleus sampling will be used. */
3518
+ /** Optional. Specifies the nucleus sampling threshold. The model considers only the smallest set of tokens whose cumulative probability is at least `top_p`. This helps generate more diverse and less repetitive responses. For example, a `top_p` of 0.9 means the model considers tokens until the cumulative probability of the tokens to select from reaches 0.9. It's recommended to adjust either temperature or `top_p`, but not both. */
3477
3519
  topP?: number;
3478
3520
  }
3479
3521
  interface GoogleCloudAiplatformV1GenerationConfigRoutingConfig {
3480
- /** Automated routing. */
3522
+ /** In this mode, the model is selected automatically based on the content of the request. */
3481
3523
  autoMode?: GoogleCloudAiplatformV1GenerationConfigRoutingConfigAutoRoutingMode;
3482
- /** Manual routing. */
3524
+ /** In this mode, the model is specified manually. */
3483
3525
  manualMode?: GoogleCloudAiplatformV1GenerationConfigRoutingConfigManualRoutingMode;
3484
3526
  }
3485
3527
  interface GoogleCloudAiplatformV1GenerationConfigRoutingConfigAutoRoutingMode {
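
A minimal sketch of a `GenerationConfig` exercising several of the re-documented fields above; all values are illustrative, and the `responseSchema` field names come from the `Schema` interface defined elsewhere in this file:

```ts
const generationConfig: gapi.client.GoogleCloudAiplatformV1GenerationConfig = {
  temperature: 0.7,            // randomness; valid range (0.0, 2.0]
  topP: 0.9,                   // nucleus sampling; adjust temperature or topP, not both
  seed: 42,                    // mostly deterministic output for a fixed prompt and parameters
  maxOutputTokens: 1024,
  stopSequences: ['###'],
  candidateCount: 2,
  responseLogprobs: true,
  logprobs: 5,                 // return the top 5 alternatives per token
  responseMimeType: 'application/json',
  responseSchema: {
    type: 'OBJECT',
    properties: { answer: { type: 'STRING' } },
    required: ['answer'],
  },
  thinkingConfig: { includeThoughts: true, thinkingBudget: 1024 },
};
```
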
@@ -3487,13 +3529,13 @@ declare namespace gapi.client {
3487
3529
  modelRoutingPreference?: string;
3488
3530
  }
3489
3531
  interface GoogleCloudAiplatformV1GenerationConfigRoutingConfigManualRoutingMode {
3490
- /** The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models). */
3532
+ /** The name of the model to use. Only public LLM models are accepted. */
3491
3533
  modelName?: string;
3492
3534
  }
3493
3535
  interface GoogleCloudAiplatformV1GenerationConfigThinkingConfig {
3494
- /** Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available. */
3536
+ /** Optional. If true, the model will include its thoughts in the response. "Thoughts" are the intermediate steps the model takes to arrive at the final response. They can provide insights into the model's reasoning process and help with debugging. If this is true, thoughts are returned only when available. */
3495
3537
  includeThoughts?: boolean;
3496
- /** Optional. Indicates the thinking budget in tokens. */
3538
+ /** Optional. The token budget for the model's thinking process. The model will make a best effort to stay within this budget. This can be used to control the trade-off between response quality and latency. */
3497
3539
  thinkingBudget?: number;
3498
3540
  }
3499
3541
  interface GoogleCloudAiplatformV1GenericOperationMetadata {
@@ -3551,85 +3593,85 @@ declare namespace gapi.client {
3551
3593
  version?: number;
3552
3594
  }
3553
3595
  interface GoogleCloudAiplatformV1GroundingChunk {
3554
- /** Grounding chunk from Google Maps. */
3596
+ /** A grounding chunk from Google Maps. See the `Maps` message for details. */
3555
3597
  maps?: GoogleCloudAiplatformV1GroundingChunkMaps;
3556
- /** Grounding chunk from context retrieved by the retrieval tools. */
3598
+ /** A grounding chunk from a data source retrieved by a retrieval tool, such as Vertex AI Search. See the `RetrievedContext` message for details */
3557
3599
  retrievedContext?: GoogleCloudAiplatformV1GroundingChunkRetrievedContext;
3558
- /** Grounding chunk from the web. */
3600
+ /** A grounding chunk from a web page, typically from Google Search. See the `Web` message for details. */
3559
3601
  web?: GoogleCloudAiplatformV1GroundingChunkWeb;
3560
3602
  }
3561
3603
  interface GoogleCloudAiplatformV1GroundingChunkMaps {
3562
- /** Sources used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as uris to flag content. */
3604
+ /** The sources that were used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as URIs to flag content. */
3563
3605
  placeAnswerSources?: GoogleCloudAiplatformV1GroundingChunkMapsPlaceAnswerSources;
3564
- /** This Place's resource name, in `places/{place_id}` format. Can be used to look up the Place. */
3606
+ /** This Place's resource name, in `places/{place_id}` format. This can be used to look up the place in the Google Maps API. */
3565
3607
  placeId?: string;
3566
- /** Text of the place answer. */
3608
+ /** The text of the place answer. */
3567
3609
  text?: string;
3568
- /** Title of the place. */
3610
+ /** The title of the place. */
3569
3611
  title?: string;
3570
- /** URI reference of the place. */
3612
+ /** The URI of the place. */
3571
3613
  uri?: string;
3572
3614
  }
3573
3615
  interface GoogleCloudAiplatformV1GroundingChunkMapsPlaceAnswerSources {
3574
- /** Snippets of reviews that are used to generate the answer. */
3616
+ /** Snippets of reviews that were used to generate the answer. */
3575
3617
  reviewSnippets?: GoogleCloudAiplatformV1GroundingChunkMapsPlaceAnswerSourcesReviewSnippet[];
3576
3618
  }
3577
3619
  interface GoogleCloudAiplatformV1GroundingChunkMapsPlaceAnswerSourcesReviewSnippet {
3578
3620
  /** A link to show the review on Google Maps. */
3579
3621
  googleMapsUri?: string;
3580
- /** Id of the review referencing the place. */
3622
+ /** The ID of the review that is being referenced. */
3581
3623
  reviewId?: string;
3582
- /** Title of the review. */
3624
+ /** The title of the review. */
3583
3625
  title?: string;
3584
3626
  }
3585
3627
  interface GoogleCloudAiplatformV1GroundingChunkRetrievedContext {
3586
- /** Output only. The full document name for the referenced Vertex AI Search document. */
3628
+ /** Output only. The full resource name of the referenced Vertex AI Search document. This is used to identify the specific document that was retrieved. The format is `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document}`. */
3587
3629
  documentName?: string;
3588
- /** Additional context for the RAG retrieval result. This is only populated when using the RAG retrieval tool. */
3630
+ /** Additional context for a Retrieval-Augmented Generation (RAG) retrieval result. This is populated only when the RAG retrieval tool is used. */
3589
3631
  ragChunk?: GoogleCloudAiplatformV1RagChunk;
3590
- /** Text of the attribution. */
3632
+ /** The content of the retrieved data source. */
3591
3633
  text?: string;
3592
- /** Title of the attribution. */
3634
+ /** The title of the retrieved data source. */
3593
3635
  title?: string;
3594
- /** URI reference of the attribution. */
3636
+ /** The URI of the retrieved data source. */
3595
3637
  uri?: string;
3596
3638
  }
3597
3639
  interface GoogleCloudAiplatformV1GroundingChunkWeb {
3598
- /** Domain of the (original) URI. */
3640
+ /** The domain of the web page that contains the evidence. This can be used to filter out low-quality sources. */
3599
3641
  domain?: string;
3600
- /** Title of the chunk. */
3642
+ /** The title of the web page that contains the evidence. */
3601
3643
  title?: string;
3602
- /** URI reference of the chunk. */
3644
+ /** The URI of the web page that contains the evidence. */
3603
3645
  uri?: string;
3604
3646
  }
3605
3647
  interface GoogleCloudAiplatformV1GroundingMetadata {
3606
- /** Optional. Output only. Resource name of the Google Maps widget context token to be used with the PlacesContextElement widget to render contextual data. This is populated only for Google Maps grounding. */
3648
+ /** Optional. Output only. A token that can be used to render a Google Maps widget with the contextual data. This field is populated only when the grounding source is Google Maps. */
3607
3649
  googleMapsWidgetContextToken?: string;
3608
- /** List of supporting references retrieved from specified grounding source. */
3650
+ /** A list of supporting references retrieved from the grounding source. This field is populated when the grounding source is Google Search, Vertex AI Search, or Google Maps. */
3609
3651
  groundingChunks?: GoogleCloudAiplatformV1GroundingChunk[];
3610
- /** Optional. List of grounding support. */
3652
+ /** Optional. A list of grounding supports that connect the generated content to the grounding chunks. This field is populated when the grounding source is Google Search or Vertex AI Search. */
3611
3653
  groundingSupports?: GoogleCloudAiplatformV1GroundingSupport[];
3612
- /** Optional. Output only. Retrieval metadata. */
3654
+ /** Optional. Output only. Metadata related to the retrieval grounding source. */
3613
3655
  retrievalMetadata?: GoogleCloudAiplatformV1RetrievalMetadata;
3614
- /** Optional. Google search entry for the following-up web searches. */
3656
+ /** Optional. A web search entry point that can be used to display search results. This field is populated only when the grounding source is Google Search. */
3615
3657
  searchEntryPoint?: GoogleCloudAiplatformV1SearchEntryPoint;
3616
- /** Optional. Output only. List of source flagging uris. This is currently populated only for Google Maps grounding. */
3658
+ /** Optional. Output only. A list of URIs that can be used to flag a place or review for inappropriate content. This field is populated only when the grounding source is Google Maps. */
3617
3659
  sourceFlaggingUris?: GoogleCloudAiplatformV1GroundingMetadataSourceFlaggingUri[];
3618
- /** Optional. Web search queries for the following-up web search. */
3660
+ /** Optional. The web search queries that were used to generate the content. This field is populated only when the grounding source is Google Search. */
3619
3661
  webSearchQueries?: string[];
3620
3662
  }
3621
3663
  interface GoogleCloudAiplatformV1GroundingMetadataSourceFlaggingUri {
3622
- /** A link where users can flag a problem with the source (place or review). */
3664
+ /** The URI that can be used to flag the content. */
3623
3665
  flagContentUri?: string;
3624
- /** Id of the place or review. */
3666
+ /** The ID of the place or review. */
3625
3667
  sourceId?: string;
3626
3668
  }
3627
3669
  interface GoogleCloudAiplatformV1GroundingSupport {
3628
- /** Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. For Gemini 2.0 and before, this list must have the same size as the grounding_chunk_indices. For Gemini 2.5 and after, this list will be empty and should be ignored. */
3670
+ /** The confidence scores for the support references. This list is parallel to the `grounding_chunk_indices` list. A score is a value between 0.0 and 1.0, with a higher score indicating a higher confidence that the reference supports the claim. For Gemini 2.0 and before, this list has the same size as `grounding_chunk_indices`. For Gemini 2.5 and later, this list is empty and should be ignored. */
3629
3671
  confidenceScores?: number[];
3630
- /** A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim. */
3672
+ /** A list of indices into the `grounding_chunks` field of the `GroundingMetadata` message. These indices specify which grounding chunks support the claim made in the content segment. For example, if this field has the values `[1, 3]`, it means that `grounding_chunks[1]` and `grounding_chunks[3]` are the sources for the claim in the content segment. */
3631
3673
  groundingChunkIndices?: number[];
3632
- /** Segment of the content this support belongs to. */
3674
+ /** The content segment that this support message applies to. */
3633
3675
  segment?: GoogleCloudAiplatformV1Segment;
3634
3676
  }
3635
3677
  interface GoogleCloudAiplatformV1HyperparameterTuningJob {
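
A minimal sketch of resolving each grounding support back to its source chunks via `groundingChunkIndices`, as described above:

```ts
type GroundingMetadata = gapi.client.GoogleCloudAiplatformV1GroundingMetadata;

function listGroundingSources(metadata: GroundingMetadata): void {
  const chunks = metadata.groundingChunks ?? [];
  for (const support of metadata.groundingSupports ?? []) {
    const claim = support.segment?.text ?? '(no segment text)';
    const sources = (support.groundingChunkIndices ?? []).map((i) => {
      const chunk = chunks[i];
      // Each chunk is populated from one source: web, retrievedContext, or maps.
      return chunk?.web?.uri ?? chunk?.retrievedContext?.uri ?? chunk?.maps?.uri ?? 'unknown source';
    });
    console.log(`"${claim}" is supported by: ${sources.join(', ')}`);
  }
}
```
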
@@ -4345,21 +4387,21 @@ declare namespace gapi.client {
4345
4387
  systemInstruction?: string;
4346
4388
  }
4347
4389
  interface GoogleCloudAiplatformV1LogprobsResult {
4348
- /** Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates. */
4390
+ /** A list of the chosen candidate tokens at each decoding step. The length of this list is equal to the total number of decoding steps. Note that the chosen candidate might not be in `top_candidates`. */
4349
4391
  chosenCandidates?: GoogleCloudAiplatformV1LogprobsResultCandidate[];
4350
- /** Length = total number of decoding steps. */
4392
+ /** A list of the top candidate tokens at each decoding step. The length of this list is equal to the total number of decoding steps. */
4351
4393
  topCandidates?: GoogleCloudAiplatformV1LogprobsResultTopCandidates[];
4352
4394
  }
4353
4395
  interface GoogleCloudAiplatformV1LogprobsResultCandidate {
4354
- /** The candidate's log probability. */
4396
+ /** The log probability of this token. A higher value indicates that the model was more confident in this token. The log probability can be used to assess the relative likelihood of different tokens and to identify when the model is uncertain. */
4355
4397
  logProbability?: number;
4356
- /** The candidate's token string value. */
4398
+ /** The token's string representation. */
4357
4399
  token?: string;
4358
- /** The candidate's token id value. */
4400
+ /** The token's numerical ID. While the `token` field provides the string representation of the token, the `token_id` is the numerical representation that the model uses internally. This can be useful for developers who want to build custom logic based on the model's vocabulary. */
4359
4401
  tokenId?: number;
4360
4402
  }
4361
4403
  interface GoogleCloudAiplatformV1LogprobsResultTopCandidates {
4362
- /** Sorted by log probability in descending order. */
4404
+ /** The list of candidate tokens, sorted by log probability in descending order. */
4363
4405
  candidates?: GoogleCloudAiplatformV1LogprobsResultCandidate[];
4364
4406
  }
4365
4407
  interface GoogleCloudAiplatformV1LookupStudyRequest {
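
A minimal sketch of walking the `LogprobsResult` lists above, printing the chosen token and its top alternatives at each decoding step:

```ts
type LogprobsResult = gapi.client.GoogleCloudAiplatformV1LogprobsResult;

function dumpLogprobs(result: LogprobsResult): void {
  const chosen = result.chosenCandidates ?? [];
  const top = result.topCandidates ?? [];
  chosen.forEach((candidate, step) => {
    console.log(`step ${step}: "${candidate.token}" (logprob ${candidate.logProbability})`);
    // Alternatives are already sorted by log probability, descending.
    for (const alt of top[step]?.candidates ?? []) {
      console.log(`    alt: "${alt.token}" (${alt.logProbability})`);
    }
  });
}
```
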
@@ -4453,6 +4495,8 @@ declare namespace gapi.client {
4453
4495
  aggregationMetrics?: string[];
4454
4496
  /** Spec for bleu metric. */
4455
4497
  bleuSpec?: GoogleCloudAiplatformV1BleuSpec;
4498
+ /** Spec for Custom Code Execution metric. */
4499
+ customCodeExecutionSpec?: GoogleCloudAiplatformV1CustomCodeExecutionSpec;
4456
4500
  /** Spec for exact match metric. */
4457
4501
  exactMatchSpec?: any;
4458
4502
  /** Spec for an LLM based metric. */
@@ -4599,9 +4643,9 @@ declare namespace gapi.client {
4599
4643
  model?: string;
4600
4644
  }
4601
4645
  interface GoogleCloudAiplatformV1ModalityTokenCount {
4602
- /** The modality associated with this token count. */
4646
+ /** The modality that this token count applies to. */
4603
4647
  modality?: string;
4604
- /** Number of tokens. */
4648
+ /** The number of tokens counted for this modality. */
4605
4649
  tokenCount?: number;
4606
4650
  }
4607
4651
  interface GoogleCloudAiplatformV1Model {
@@ -4677,9 +4721,9 @@ declare namespace gapi.client {
4677
4721
  versionUpdateTime?: string;
4678
4722
  }
4679
4723
  interface GoogleCloudAiplatformV1ModelArmorConfig {
4680
- /** Optional. The name of the Model Armor template to use for prompt sanitization. */
4724
+ /** Optional. The resource name of the Model Armor template to use for prompt screening. A Model Armor template is a set of customized filters and thresholds that define how Model Armor screens content. If specified, Model Armor will use this template to check the user's prompt for safety and security risks before it is sent to the model. The name must be in the format `projects/{project}/locations/{location}/templates/{template}`. */
4681
4725
  promptTemplateName?: string;
4682
- /** Optional. The name of the Model Armor template to use for response sanitization. */
4726
+ /** Optional. The resource name of the Model Armor template to use for response screening. A Model Armor template is a set of customized filters and thresholds that define how Model Armor screens content. If specified, Model Armor will use this template to check the model's response for safety and security risks before it is returned to the user. The name must be in the format `projects/{project}/locations/{location}/templates/{template}`. */
4683
4727
  responseTemplateName?: string;
4684
4728
  }
4685
4729
  interface GoogleCloudAiplatformV1ModelBaseModelSource {
@@ -5570,21 +5614,21 @@ declare namespace gapi.client {
5570
5614
  version?: number;
5571
5615
  }
5572
5616
  interface GoogleCloudAiplatformV1Part {
5573
- /** Optional. Result of executing the [ExecutableCode]. */
5617
+ /** Optional. The result of executing the ExecutableCode. */
5574
5618
  codeExecutionResult?: GoogleCloudAiplatformV1CodeExecutionResult;
5575
- /** Optional. Code generated by the model that is meant to be executed. */
5619
+ /** Optional. Code generated by the model that is intended to be executed. */
5576
5620
  executableCode?: GoogleCloudAiplatformV1ExecutableCode;
5577
- /** Optional. URI based data. */
5621
+ /** Optional. The URI-based data of the part. This can be used to include files from Google Cloud Storage. */
5578
5622
  fileData?: GoogleCloudAiplatformV1FileData;
5579
- /** Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. */
5623
+ /** Optional. A predicted function call returned from the model. This contains the name of the function to call and the arguments to pass to the function. */
5580
5624
  functionCall?: GoogleCloudAiplatformV1FunctionCall;
5581
- /** Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. */
5625
+ /** Optional. The result of a function call. This is used to provide the model with the result of a function call that it predicted. */
5582
5626
  functionResponse?: GoogleCloudAiplatformV1FunctionResponse;
5583
- /** Optional. Inlined bytes data. */
5627
+ /** Optional. The inline data content of the part. This can be used to include images, audio, or video in a request. */
5584
5628
  inlineData?: GoogleCloudAiplatformV1Blob;
5585
- /** Optional. Text part (can be code). */
5629
+ /** Optional. The text content of the part. */
5586
5630
  text?: string;
5587
- /** Optional. Indicates if the part is thought from the model. */
5631
+ /** Optional. Indicates whether the `part` represents the model's thought process or reasoning. */
5588
5632
  thought?: boolean;
5589
5633
  /** Optional. An opaque signature for the thought so it can be reused in subsequent requests. */
5590
5634
  thoughtSignature?: string;
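
A minimal sketch of the function-calling round trip implied by the `Part` fields above. The tool name and its result are hypothetical, and the `FunctionCall`/`FunctionResponse` field names (`name`, `response`) come from elsewhere in these typings:

```ts
type Part = gapi.client.GoogleCloudAiplatformV1Part;

function answerFunctionCall(modelPart: Part): Part | undefined {
  const call = modelPart.functionCall;
  if (!call || call.name !== 'get_weather') return undefined;
  // In a real application, the arguments on the call would drive a lookup;
  // here the payload sent back to the model is hard-coded.
  return {
    functionResponse: {
      name: call.name,
      response: { temperatureC: 21, condition: 'sunny' },
    },
  };
}
```
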
@@ -5818,7 +5862,7 @@ declare namespace gapi.client {
5818
5862
  postStartupScriptUrl?: string;
5819
5863
  }
5820
5864
  interface GoogleCloudAiplatformV1PrebuiltVoiceConfig {
5821
- /** The name of the preset voice to use. */
5865
+ /** The name of the prebuilt voice to use. */
5822
5866
  voiceName?: string;
5823
5867
  }
5824
5868
  interface GoogleCloudAiplatformV1PredefinedMetricSpec {
@@ -5875,6 +5919,48 @@ declare namespace gapi.client {
5875
5919
  /** Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via PredictResponse.predictions, ExplainResponse.explanations, and BatchPredictionJob.output_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */
5876
5920
  predictionSchemaUri?: string;
5877
5921
  }
5922
+ interface GoogleCloudAiplatformV1PreferenceOptimizationDataStats {
5923
+ /** Output only. A partial sample of the indices (starting from 1) of the dropped examples. */
5924
+ droppedExampleIndices?: string[];
5925
+ /** Output only. For each index in `dropped_example_indices`, the user-facing reason why the example was dropped. */
5926
+ droppedExampleReasons?: string[];
5927
+ /** Output only. Dataset distributions for scores. */
5928
+ scoresDistribution?: GoogleCloudAiplatformV1DatasetDistribution;
5929
+ /** Output only. Dataset distributions for scores variance per example. */
5930
+ scoreVariancePerExampleDistribution?: GoogleCloudAiplatformV1DatasetDistribution;
5931
+ /** Output only. Number of billable tokens in the tuning dataset. */
5932
+ totalBillableTokenCount?: string;
5933
+ /** Output only. Number of examples in the tuning dataset. */
5934
+ tuningDatasetExampleCount?: string;
5935
+ /** Output only. Number of tuning steps for this Tuning Job. */
5936
+ tuningStepCount?: string;
5937
+ /** Output only. Sample user examples in the training dataset. */
5938
+ userDatasetExamples?: GoogleCloudAiplatformV1GeminiPreferenceExample[];
5939
+ /** Output only. Dataset distributions for the user input tokens. */
5940
+ userInputTokenDistribution?: GoogleCloudAiplatformV1DatasetDistribution;
5941
+ /** Output only. Dataset distributions for the user output tokens. */
5942
+ userOutputTokenDistribution?: GoogleCloudAiplatformV1DatasetDistribution;
5943
+ }
5944
+ interface GoogleCloudAiplatformV1PreferenceOptimizationHyperParameters {
5945
+ /** Optional. Adapter size for preference optimization. */
5946
+ adapterSize?: string;
5947
+ /** Optional. Weight for KL Divergence regularization. */
5948
+ beta?: number;
5949
+ /** Optional. Number of complete passes the model makes over the entire training dataset during training. */
5950
+ epochCount?: string;
5951
+ /** Optional. Multiplier for adjusting the default learning rate. */
5952
+ learningRateMultiplier?: number;
5953
+ }
5954
+ interface GoogleCloudAiplatformV1PreferenceOptimizationSpec {
5955
+ /** Optional. If set to true, disable intermediate checkpoints for Preference Optimization and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints for Preference Optimization. Default is false. */
5956
+ exportLastCheckpointOnly?: boolean;
5957
+ /** Optional. Hyperparameters for Preference Optimization. */
5958
+ hyperParameters?: GoogleCloudAiplatformV1PreferenceOptimizationHyperParameters;
5959
+ /** Required. Cloud Storage path to file containing training dataset for preference optimization tuning. The dataset must be formatted as a JSONL file. */
5960
+ trainingDatasetUri?: string;
5961
+ /** Optional. Cloud Storage path to file containing validation dataset for preference optimization tuning. The dataset must be formatted as a JSONL file. */
5962
+ validationDatasetUri?: string;
5963
+ }
5878
5964
  interface GoogleCloudAiplatformV1Presets {
5879
5965
  /** The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. */
5880
5966
  modality?: string;
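
A minimal sketch of the new `PreferenceOptimizationSpec`; the Cloud Storage paths and hyperparameter values are placeholders, and the adapter size string is assumed to follow the `ADAPTER_SIZE_*` enum used by the other tuning specs:

```ts
const preferenceSpec: gapi.client.GoogleCloudAiplatformV1PreferenceOptimizationSpec = {
  trainingDatasetUri: 'gs://example-bucket/preference/train.jsonl',
  validationDatasetUri: 'gs://example-bucket/preference/validation.jsonl',
  exportLastCheckpointOnly: true,
  hyperParameters: {
    adapterSize: 'ADAPTER_SIZE_FOUR', // assumed enum value
    beta: 0.1,                        // weight for KL divergence regularization
    epochCount: '3',                  // int64 fields are strings in these typings
    learningRateMultiplier: 1.0,
  },
};
```

Later in this diff, `TuningJob` gains a `preferenceOptimizationSpec` field and `TuningDataStats` gains `preferenceOptimizationDataStats`, which is where an object like this would be attached and its statistics reported.
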
@@ -6370,6 +6456,10 @@ declare namespace gapi.client {
6370
6456
  encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec;
6371
6457
  /** Output only. The resource name of the RagCorpus. */
6372
6458
  name?: string;
6459
+ /** Output only. Reserved for future use. */
6460
+ satisfiesPzi?: boolean;
6461
+ /** Output only. Reserved for future use. */
6462
+ satisfiesPzs?: boolean;
6373
6463
  /** Output only. Timestamp when this RagCorpus was last updated. */
6374
6464
  updateTime?: string;
6375
6465
  /** Optional. Immutable. The config for the Vector DBs. */
@@ -6694,7 +6784,7 @@ declare namespace gapi.client {
6694
6784
  dependencyFilesGcsUri?: string;
6695
6785
  /** Optional. The Cloud Storage URI of the pickled python object. */
6696
6786
  pickleObjectGcsUri?: string;
6697
- /** Optional. The Python version. Currently support 3.8, 3.9, 3.10, 3.11. If not specified, default value is 3.10. */
6787
+ /** Optional. The Python version. Supported values are 3.9, 3.10, 3.11, 3.12, 3.13. If not specified, the default value is 3.10. */
6698
6788
  pythonVersion?: string;
6699
6789
  /** Optional. The Cloud Storage URI of the `requirements.txt` file */
6700
6790
  requirementsGcsUri?: string;
@@ -6814,7 +6904,7 @@ declare namespace gapi.client {
6814
6904
  latLng?: GoogleTypeLatLng;
6815
6905
  }
6816
6906
  interface GoogleCloudAiplatformV1RetrievalMetadata {
6817
- /** Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search. */
6907
+ /** Optional. A score indicating how likely it is that a Google Search query could help answer the prompt. The score is in the range of `[0, 1]`. A score of 1 means the model is confident that a search will be helpful, and 0 means it is not. This score is populated only when Google Search grounding and dynamic retrieval are enabled. The score is used to determine whether to trigger a search. */
6818
6908
  googleSearchDynamicRetrievalScore?: number;
6819
6909
  }
6820
6910
  interface GoogleCloudAiplatformV1RetrieveContextsRequest {
@@ -6945,19 +7035,19 @@ declare namespace gapi.client {
6945
7035
  prediction?: string;
6946
7036
  }
6947
7037
  interface GoogleCloudAiplatformV1SafetyRating {
6948
- /** Output only. Indicates whether the content was filtered out because of this rating. */
7038
+ /** Output only. Indicates whether the content was blocked because of this rating. */
6949
7039
  blocked?: boolean;
6950
- /** Output only. Harm category. */
7040
+ /** Output only. The harm category of this rating. */
6951
7041
  category?: string;
6952
7042
  /** Output only. The overwritten threshold for the safety category of Gemini 2.0 image out. If minors are detected in the output image, the threshold of each safety category will be overwritten if user sets a lower threshold. */
6953
7043
  overwrittenThreshold?: string;
6954
- /** Output only. Harm probability levels in the content. */
7044
+ /** Output only. The probability of harm for this category. */
6955
7045
  probability?: string;
6956
- /** Output only. Harm probability score. */
7046
+ /** Output only. The probability score of harm for this category. */
6957
7047
  probabilityScore?: number;
6958
- /** Output only. Harm severity levels in the content. */
7048
+ /** Output only. The severity of harm for this category. */
6959
7049
  severity?: string;
6960
- /** Output only. Harm severity score. */
7050
+ /** Output only. The severity score of harm for this category. */
6961
7051
  severityScore?: number;
6962
7052
  }
6963
7053
  interface GoogleCloudAiplatformV1SafetyResult {
@@ -6969,11 +7059,11 @@ declare namespace gapi.client {
6969
7059
  score?: number;
6970
7060
  }
6971
7061
  interface GoogleCloudAiplatformV1SafetySetting {
6972
- /** Required. Harm category. */
7062
+ /** Required. The harm category to be blocked. */
6973
7063
  category?: string;
6974
- /** Optional. Specify if the threshold is used for probability or severity score. If not specified, the threshold is used for probability score. */
7064
+ /** Optional. The method for blocking content. If not specified, the default behavior is to use the probability score. */
6975
7065
  method?: string;
6976
- /** Required. The harm block threshold. */
7066
+ /** Required. The threshold for blocking content. If the harm probability exceeds this threshold, the content will be blocked. */
6977
7067
  threshold?: string;
6978
7068
  }
6979
7069
  interface GoogleCloudAiplatformV1SafetySpec {
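
A minimal sketch of a `SafetySetting` list and a helper that checks the returned `SafetyRating`s. The `HARM_CATEGORY_*`, `BLOCK_*`, and `SEVERITY` enum strings are the API's usual values and are assumed here, not defined in this diff:

```ts
const safetySettings: gapi.client.GoogleCloudAiplatformV1SafetySetting[] = [
  {
    category: 'HARM_CATEGORY_HARASSMENT',
    threshold: 'BLOCK_LOW_AND_ABOVE',
    method: 'SEVERITY', // apply the threshold to the severity score rather than the probability score
  },
];

function wasBlocked(ratings: gapi.client.GoogleCloudAiplatformV1SafetyRating[]): boolean {
  return ratings.some((rating) => rating.blocked === true);
}
```
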
@@ -8576,9 +8666,9 @@ declare namespace gapi.client {
8576
8666
  nextPageToken?: string;
8577
8667
  }
8578
8668
  interface GoogleCloudAiplatformV1SearchEntryPoint {
8579
- /** Optional. Web content snippet that can be embedded in a web page or an app webview. */
8669
+ /** Optional. An HTML snippet that can be embedded in a web page or an application's webview. This snippet displays a search result, including the title, URL, and a brief description of the search result. */
8580
8670
  renderedContent?: string;
8581
- /** Optional. Base64 encoded JSON representing array of tuple. */
8671
+ /** Optional. A base64-encoded JSON object that contains a list of search queries and their corresponding search URLs. This information can be used to build a custom search UI. */
8582
8672
  sdkBlob?: string;
8583
8673
  }
8584
8674
  interface GoogleCloudAiplatformV1SearchFeaturesResponse {
@@ -8651,13 +8741,13 @@ declare namespace gapi.client {
8651
8741
  version?: string;
8652
8742
  }
8653
8743
  interface GoogleCloudAiplatformV1Segment {
8654
- /** Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero. */
8744
+ /** Output only. The end index of the segment in the `Part`, measured in bytes. This marks the end of the segment and is exclusive, meaning the segment includes content up to, but not including, the byte at this index. */
8655
8745
  endIndex?: number;
8656
- /** Output only. The index of a Part object within its parent Content object. */
8746
+ /** Output only. The index of the `Part` object that this segment belongs to. This is useful for associating the segment with a specific part of the content. */
8657
8747
  partIndex?: number;
8658
- /** Output only. Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero. */
8748
+ /** Output only. The start index of the segment in the `Part`, measured in bytes. This marks the beginning of the segment and is inclusive, meaning the byte at this index is the first byte of the segment. */
8659
8749
  startIndex?: number;
8660
- /** Output only. The text corresponding to the segment from the response. */
8750
+ /** Output only. The text of the segment. */
8661
8751
  text?: string;
8662
8752
  }
8663
8753
  interface GoogleCloudAiplatformV1ServiceAccountSpec {
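
Because the `Segment` indices above are byte offsets (start inclusive, end exclusive), slicing the part text requires byte-aware handling rather than string indexing. A minimal sketch:

```ts
function sliceSegment(
  partText: string,
  segment: gapi.client.GoogleCloudAiplatformV1Segment,
): string {
  const bytes = new TextEncoder().encode(partText);
  return new TextDecoder().decode(bytes.slice(segment.startIndex ?? 0, segment.endIndex));
}
```
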
@@ -8757,11 +8847,11 @@ declare namespace gapi.client {
8757
8847
  ngramSize?: number;
8758
8848
  }
8759
8849
  interface GoogleCloudAiplatformV1SpeechConfig {
8760
- /** Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization. */
8850
+ /** Optional. The language code (ISO 639-1) for the speech synthesis. */
8761
8851
  languageCode?: string;
8762
8852
  /** The configuration for a multi-speaker text-to-speech request. This field is mutually exclusive with `voice_config`. */
8763
8853
  multiSpeakerVoiceConfig?: GoogleCloudAiplatformV1MultiSpeakerVoiceConfig;
8764
- /** The configuration for the speaker to use. */
8854
+ /** The configuration for the voice to use. */
8765
8855
  voiceConfig?: GoogleCloudAiplatformV1VoiceConfig;
8766
8856
  }
8767
8857
  interface GoogleCloudAiplatformV1StartNotebookRuntimeOperationMetadata {
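
A minimal sketch of a `SpeechConfig` using a single prebuilt voice; the voice name is a placeholder, since the available voices are model-specific:

```ts
const speechConfig: gapi.client.GoogleCloudAiplatformV1SpeechConfig = {
  languageCode: 'en-US',
  voiceConfig: {
    prebuiltVoiceConfig: { voiceName: 'Aoede' }, // placeholder voice name
  },
};
```
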
@@ -9748,6 +9838,8 @@ declare namespace gapi.client {
9748
9838
  tuningJob?: string;
9749
9839
  }
9750
9840
  interface GoogleCloudAiplatformV1TuningDataStats {
9841
+ /** Output only. Statistics for preference optimization. */
9842
+ preferenceOptimizationDataStats?: GoogleCloudAiplatformV1PreferenceOptimizationDataStats;
9751
9843
  /** The SFT Tuning data stats. */
9752
9844
  supervisedTuningDataStats?: GoogleCloudAiplatformV1SupervisedTuningDataStats;
9753
9845
  }
@@ -9770,6 +9862,8 @@ declare namespace gapi.client {
9770
9862
  labels?: {[P in string]: string};
9771
9863
  /** Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}` */
9772
9864
  name?: string;
9865
+ /** Tuning Spec for Preference Optimization. */
9866
+ preferenceOptimizationSpec?: GoogleCloudAiplatformV1PreferenceOptimizationSpec;
9773
9867
  /** The pre-tuned model for continuous tuning. */
9774
9868
  preTunedModel?: GoogleCloudAiplatformV1PreTunedModel;
9775
9869
  /** The service account that the tuningJob workload runs as. If not specified, the Vertex AI Secure Fine-Tuned Service Agent in the project will be used. See https://cloud.google.com/iam/docs/service-agents#vertex-ai-secure-fine-tuning-service-agent Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account. */
@@ -9932,13 +10026,13 @@ declare namespace gapi.client {
9932
10026
  interface GoogleCloudAiplatformV1UpsertDatapointsResponse {}
9933
10027
  interface GoogleCloudAiplatformV1UrlContext {}
9934
10028
  interface GoogleCloudAiplatformV1UrlContextMetadata {
9935
- /** Output only. List of url context. */
10029
+ /** Output only. A list of URL metadata, with one entry for each URL retrieved by the tool. */
9936
10030
  urlMetadata?: GoogleCloudAiplatformV1UrlMetadata[];
9937
10031
  }
9938
10032
  interface GoogleCloudAiplatformV1UrlMetadata {
9939
- /** Retrieved url by the tool. */
10033
+ /** The URL retrieved by the tool. */
9940
10034
  retrievedUrl?: string;
9941
- /** Status of the url retrieval. */
10035
+ /** The status of the URL retrieval. */
9942
10036
  urlRetrievalStatus?: string;
9943
10037
  }
9944
10038
  interface GoogleCloudAiplatformV1UsageMetadata {
@@ -10022,13 +10116,13 @@ declare namespace gapi.client {
10022
10116
  interface GoogleCloudAiplatformV1VideoMetadata {
10023
10117
  /** Optional. The end offset of the video. */
10024
10118
  endOffset?: string;
10025
- /** Optional. The frame rate of the video sent to the model. If not specified, the default value will be 1.0. The fps range is (0.0, 24.0]. */
10119
+ /** Optional. The frame rate of the video sent to the model. If not specified, the default value is 1.0. The valid range is (0.0, 24.0]. */
10026
10120
  fps?: number;
10027
10121
  /** Optional. The start offset of the video. */
10028
10122
  startOffset?: string;
10029
10123
  }
10030
10124
  interface GoogleCloudAiplatformV1VoiceConfig {
10031
- /** The configuration for the prebuilt voice to use. */
10125
+ /** The configuration for a prebuilt voice. */
10032
10126
  prebuiltVoiceConfig?: GoogleCloudAiplatformV1PrebuiltVoiceConfig;
10033
10127
  }
10034
10128
  interface GoogleCloudAiplatformV1WorkerPoolSpec {
@@ -40030,7 +40124,7 @@ declare namespace gapi.client {
40030
40124
  alt?: string;
40031
40125
  /** JSONP */
40032
40126
  callback?: string;
40033
- /** Optional. Unless explicitly documented otherwise, don't use this unsupported field which is primarily intended for internal usage. */
40127
+ /** Optional. Do not use this field. It is unsupported and is ignored unless explicitly documented otherwise. This is primarily for internal usage. */
40034
40128
  extraLocationTypes?: string | string[];
40035
40129
  /** Selector specifying which fields to include in a partial response. */
40036
40130
  fields?: string;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@maxim_mazurok/gapi.client.aiplatform-v1",
3
- "version": "0.3.20251028",
3
+ "version": "0.4.20251028",
4
4
  "description": "TypeScript typings for Vertex AI API v1",
5
5
  "repository": {
6
6
  "type": "git",