google-cloud-ai_platform-v1 0.16.0 → 0.18.0

Files changed (101)
  1. checksums.yaml +4 -4
  2. data/lib/google/cloud/ai_platform/v1/dataset_service/client.rb +10 -7
  3. data/lib/google/cloud/ai_platform/v1/dataset_service.rb +1 -1
  4. data/lib/google/cloud/ai_platform/v1/endpoint_service/client.rb +28 -21
  5. data/lib/google/cloud/ai_platform/v1/endpoint_service.rb +1 -1
  6. data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service/client.rb +10 -9
  7. data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service.rb +1 -1
  8. data/lib/google/cloud/ai_platform/v1/featurestore_service/client.rb +53 -50
  9. data/lib/google/cloud/ai_platform/v1/featurestore_service.rb +1 -1
  10. data/lib/google/cloud/ai_platform/v1/index_endpoint_service/client.rb +18 -14
  11. data/lib/google/cloud/ai_platform/v1/index_endpoint_service.rb +1 -1
  12. data/lib/google/cloud/ai_platform/v1/index_service/client.rb +8 -4
  13. data/lib/google/cloud/ai_platform/v1/index_service.rb +1 -1
  14. data/lib/google/cloud/ai_platform/v1/job_service/client.rb +53 -36
  15. data/lib/google/cloud/ai_platform/v1/job_service.rb +1 -1
  16. data/lib/google/cloud/ai_platform/v1/metadata_service/client.rb +33 -36
  17. data/lib/google/cloud/ai_platform/v1/metadata_service.rb +1 -1
  18. data/lib/google/cloud/ai_platform/v1/migration_service/client.rb +3 -4
  19. data/lib/google/cloud/ai_platform/v1/migration_service.rb +1 -1
  20. data/lib/google/cloud/ai_platform/v1/model_service/client.rb +44 -22
  21. data/lib/google/cloud/ai_platform/v1/model_service.rb +1 -1
  22. data/lib/google/cloud/ai_platform/v1/pipeline_service/client.rb +28 -17
  23. data/lib/google/cloud/ai_platform/v1/pipeline_service.rb +1 -1
  24. data/lib/google/cloud/ai_platform/v1/prediction_service/client.rb +28 -19
  25. data/lib/google/cloud/ai_platform/v1/prediction_service.rb +1 -1
  26. data/lib/google/cloud/ai_platform/v1/specialist_pool_service/client.rb +5 -3
  27. data/lib/google/cloud/ai_platform/v1/specialist_pool_service.rb +1 -1
  28. data/lib/google/cloud/ai_platform/v1/tensorboard_service/client.rb +165 -71
  29. data/lib/google/cloud/ai_platform/v1/tensorboard_service.rb +1 -1
  30. data/lib/google/cloud/ai_platform/v1/version.rb +1 -1
  31. data/lib/google/cloud/ai_platform/v1/vizier_service.rb +1 -1
  32. data/lib/google/cloud/ai_platform/v1.rb +2 -2
  33. data/lib/google/cloud/aiplatform/v1/batch_prediction_job_pb.rb +8 -0
  34. data/lib/google/cloud/aiplatform/v1/custom_job_pb.rb +1 -0
  35. data/lib/google/cloud/aiplatform/v1/dataset_pb.rb +2 -0
  36. data/lib/google/cloud/aiplatform/v1/index_service_services_pb.rb +2 -1
  37. data/lib/google/cloud/aiplatform/v1/job_service_services_pb.rb +24 -14
  38. data/lib/google/cloud/aiplatform/v1/model_service_pb.rb +1 -0
  39. data/lib/google/cloud/aiplatform/v1/model_service_services_pb.rb +10 -5
  40. data/lib/google/cloud/aiplatform/v1/pipeline_service_services_pb.rb +16 -9
  41. data/lib/google/cloud/aiplatform/v1/prediction_service_services_pb.rb +10 -6
  42. data/lib/google/cloud/aiplatform/v1/study_pb.rb +1 -0
  43. data/lib/google/cloud/aiplatform/v1/tensorboard_service_pb.rb +17 -0
  44. data/lib/google/cloud/aiplatform/v1/tensorboard_service_services_pb.rb +8 -8
  45. data/proto_docs/google/cloud/aiplatform/v1/annotation.rb +9 -6
  46. data/proto_docs/google/cloud/aiplatform/v1/annotation_spec.rb +2 -2
  47. data/proto_docs/google/cloud/aiplatform/v1/batch_prediction_job.rb +183 -50
  48. data/proto_docs/google/cloud/aiplatform/v1/completion_stats.rb +8 -7
  49. data/proto_docs/google/cloud/aiplatform/v1/context.rb +2 -2
  50. data/proto_docs/google/cloud/aiplatform/v1/custom_job.rb +34 -17
  51. data/proto_docs/google/cloud/aiplatform/v1/data_item.rb +6 -5
  52. data/proto_docs/google/cloud/aiplatform/v1/data_labeling_job.rb +16 -16
  53. data/proto_docs/google/cloud/aiplatform/v1/dataset.rb +33 -17
  54. data/proto_docs/google/cloud/aiplatform/v1/dataset_service.rb +59 -32
  55. data/proto_docs/google/cloud/aiplatform/v1/encryption_spec.rb +2 -2
  56. data/proto_docs/google/cloud/aiplatform/v1/endpoint.rb +38 -23
  57. data/proto_docs/google/cloud/aiplatform/v1/endpoint_service.rb +57 -35
  58. data/proto_docs/google/cloud/aiplatform/v1/entity_type.rb +8 -6
  59. data/proto_docs/google/cloud/aiplatform/v1/explanation.rb +111 -75
  60. data/proto_docs/google/cloud/aiplatform/v1/explanation_metadata.rb +58 -38
  61. data/proto_docs/google/cloud/aiplatform/v1/feature.rb +9 -6
  62. data/proto_docs/google/cloud/aiplatform/v1/feature_monitoring_stats.rb +2 -1
  63. data/proto_docs/google/cloud/aiplatform/v1/featurestore.rb +12 -11
  64. data/proto_docs/google/cloud/aiplatform/v1/featurestore_monitoring.rb +6 -2
  65. data/proto_docs/google/cloud/aiplatform/v1/featurestore_online_service.rb +27 -19
  66. data/proto_docs/google/cloud/aiplatform/v1/featurestore_service.rb +138 -99
  67. data/proto_docs/google/cloud/aiplatform/v1/hyperparameter_tuning_job.rb +9 -7
  68. data/proto_docs/google/cloud/aiplatform/v1/index.rb +18 -17
  69. data/proto_docs/google/cloud/aiplatform/v1/index_endpoint.rb +36 -24
  70. data/proto_docs/google/cloud/aiplatform/v1/index_endpoint_service.rb +49 -29
  71. data/proto_docs/google/cloud/aiplatform/v1/index_service.rb +35 -17
  72. data/proto_docs/google/cloud/aiplatform/v1/io.rb +2 -1
  73. data/proto_docs/google/cloud/aiplatform/v1/job_service.rb +94 -57
  74. data/proto_docs/google/cloud/aiplatform/v1/machine_resources.rb +53 -43
  75. data/proto_docs/google/cloud/aiplatform/v1/manual_batch_tuning_parameters.rb +5 -5
  76. data/proto_docs/google/cloud/aiplatform/v1/metadata_schema.rb +3 -3
  77. data/proto_docs/google/cloud/aiplatform/v1/metadata_service.rb +141 -91
  78. data/proto_docs/google/cloud/aiplatform/v1/migratable_resource.rb +3 -3
  79. data/proto_docs/google/cloud/aiplatform/v1/migration_service.rb +18 -15
  80. data/proto_docs/google/cloud/aiplatform/v1/model.rb +179 -132
  81. data/proto_docs/google/cloud/aiplatform/v1/model_deployment_monitoring_job.rb +21 -17
  82. data/proto_docs/google/cloud/aiplatform/v1/model_evaluation.rb +7 -6
  83. data/proto_docs/google/cloud/aiplatform/v1/model_evaluation_slice.rb +11 -7
  84. data/proto_docs/google/cloud/aiplatform/v1/model_monitoring.rb +4 -2
  85. data/proto_docs/google/cloud/aiplatform/v1/model_service.rb +110 -54
  86. data/proto_docs/google/cloud/aiplatform/v1/pipeline_job.rb +56 -42
  87. data/proto_docs/google/cloud/aiplatform/v1/pipeline_service.rb +42 -22
  88. data/proto_docs/google/cloud/aiplatform/v1/prediction_service.rb +42 -28
  89. data/proto_docs/google/cloud/aiplatform/v1/saved_query.rb +2 -2
  90. data/proto_docs/google/cloud/aiplatform/v1/specialist_pool_service.rb +19 -11
  91. data/proto_docs/google/cloud/aiplatform/v1/study.rb +42 -25
  92. data/proto_docs/google/cloud/aiplatform/v1/tensorboard.rb +2 -2
  93. data/proto_docs/google/cloud/aiplatform/v1/tensorboard_data.rb +8 -7
  94. data/proto_docs/google/cloud/aiplatform/v1/tensorboard_experiment.rb +2 -1
  95. data/proto_docs/google/cloud/aiplatform/v1/tensorboard_service.rb +203 -109
  96. data/proto_docs/google/cloud/aiplatform/v1/tensorboard_time_series.rb +8 -6
  97. data/proto_docs/google/cloud/aiplatform/v1/training_pipeline.rb +84 -59
  98. data/proto_docs/google/cloud/aiplatform/v1/unmanaged_container_model.rb +2 -2
  99. data/proto_docs/google/cloud/aiplatform/v1/vizier_service.rb +40 -20
  100. data/proto_docs/google/rpc/status.rb +4 -2
  101. metadata +9 -9
data/lib/google/cloud/aiplatform/v1/model_service_services_pb.rb
@@ -45,21 +45,26 @@ module Google
   rpc :UpdateModel, ::Google::Cloud::AIPlatform::V1::UpdateModelRequest, ::Google::Cloud::AIPlatform::V1::Model
   # Deletes a Model.
   #
- # A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
- # [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the model in its
- # [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] field.
+ # A model cannot be deleted if any
+ # [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
+ # [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the
+ # model in its
+ # [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ # field.
   rpc :DeleteModel, ::Google::Cloud::AIPlatform::V1::DeleteModelRequest, ::Google::Longrunning::Operation
   # Deletes a Model version.
   #
   # Model version can only be deleted if there are no [DeployedModels][]
   # created from it. Deleting the only version in the Model is not allowed. Use
- # [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] for deleting the Model instead.
+ # [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] for
+ # deleting the Model instead.
   rpc :DeleteModelVersion, ::Google::Cloud::AIPlatform::V1::DeleteModelVersionRequest, ::Google::Longrunning::Operation
   # Merges a set of aliases for a Model version.
   rpc :MergeVersionAliases, ::Google::Cloud::AIPlatform::V1::MergeVersionAliasesRequest, ::Google::Cloud::AIPlatform::V1::Model
   # Exports a trained, exportable Model to a location specified by the
   # user. A Model is considered to be exportable if it has at least one
- # [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+ # [supported export
+ # format][google.cloud.aiplatform.v1.Model.supported_export_formats].
   rpc :ExportModel, ::Google::Cloud::AIPlatform::V1::ExportModelRequest, ::Google::Longrunning::Operation
   # Imports an externally generated ModelEvaluation.
   rpc :ImportModelEvaluation, ::Google::Cloud::AIPlatform::V1::ImportModelEvaluationRequest, ::Google::Cloud::AIPlatform::V1::ModelEvaluation
data/lib/google/cloud/aiplatform/v1/pipeline_service_services_pb.rb
@@ -47,13 +47,17 @@ module Google
   # Cancels a TrainingPipeline.
   # Starts asynchronous cancellation on the TrainingPipeline. The server
   # makes a best effort to cancel the pipeline, but success is not
- # guaranteed. Clients can use [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] or
- # other methods to check whether the cancellation succeeded or whether the
+ # guaranteed. Clients can use
+ # [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]
+ # or other methods to check whether the cancellation succeeded or whether the
   # pipeline completed despite cancellation. On successful cancellation,
   # the TrainingPipeline is not deleted; instead it becomes a pipeline with
- # a [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- # corresponding to `Code.CANCELLED`, and [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to
- # `CANCELLED`.
+ # a
+ # [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error]
+ # value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ # corresponding to `Code.CANCELLED`, and
+ # [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state]
+ # is set to `CANCELLED`.
   rpc :CancelTrainingPipeline, ::Google::Cloud::AIPlatform::V1::CancelTrainingPipelineRequest, ::Google::Protobuf::Empty
   # Creates a PipelineJob. A PipelineJob will run immediately when created.
   rpc :CreatePipelineJob, ::Google::Cloud::AIPlatform::V1::CreatePipelineJobRequest, ::Google::Cloud::AIPlatform::V1::PipelineJob
@@ -66,12 +70,15 @@ module Google
   # Cancels a PipelineJob.
   # Starts asynchronous cancellation on the PipelineJob. The server
   # makes a best effort to cancel the pipeline, but success is not
- # guaranteed. Clients can use [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] or
- # other methods to check whether the cancellation succeeded or whether the
+ # guaranteed. Clients can use
+ # [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]
+ # or other methods to check whether the cancellation succeeded or whether the
   # pipeline completed despite cancellation. On successful cancellation,
   # the PipelineJob is not deleted; instead it becomes a pipeline with
- # a [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- # corresponding to `Code.CANCELLED`, and [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] is set to
+ # a [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] value
+ # with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding
+ # to `Code.CANCELLED`, and
+ # [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] is set to
   # `CANCELLED`.
   rpc :CancelPipelineJob, ::Google::Cloud::AIPlatform::V1::CancelPipelineJobRequest, ::Google::Protobuf::Empty
   end
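
Both cancellation RPCs above are asynchronous: the call returns immediately, and on success the pipeline is not deleted but ends up in `CANCELLED` with an error code of 1. A minimal sketch of observing that from the generated Ruby client (the project and job IDs are placeholders, not values taken from this release):

    require "google/cloud/ai_platform/v1"

    # Placeholder resource name, for illustration only.
    job_name = "projects/my-project/locations/us-central1/pipelineJobs/my-job"

    client = ::Google::Cloud::AIPlatform::V1::PipelineService::Client.new

    # This only *starts* cancellation; it does not wait for it.
    client.cancel_pipeline_job name: job_name

    # Check the outcome via GetPipelineJob, as the comment above suggests.
    job = client.get_pipeline_job name: job_name
    if job.state == :PIPELINE_STATE_CANCELLED
      # On successful cancellation, error.code is 1 (Code.CANCELLED).
      puts "cancelled: #{job.error.code} #{job.error.message}"
    else
      puts "state is #{job.state}; the job may still finish despite the request"
    end
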
data/lib/google/cloud/aiplatform/v1/prediction_service_services_pb.rb
@@ -39,18 +39,22 @@ module Google
   #
   # The response includes the following HTTP headers:
   #
- # * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
+ # * `X-Vertex-AI-Endpoint-Id`: ID of the
+ # [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
   # prediction.
   #
- # * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
- # that served this prediction.
+ # * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
+ # [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that served this
+ # prediction.
   rpc :RawPredict, ::Google::Cloud::AIPlatform::V1::RawPredictRequest, ::Google::Api::HttpBody
   # Perform an online explanation.
   #
- # If [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] is specified,
- # the corresponding DeployModel must have
+ # If
+ # [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
+ # is specified, the corresponding DeployModel must have
   # [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec]
- # populated. If [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
+ # populated. If
+ # [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
   # is not specified, all DeployedModels must have
   # [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec]
   # populated. Only deployed AutoML tabular Models have
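
For the Explain contract described above, `deployed_model_id` picks which DeployedModel answers (and that model must have `explanation_spec` populated); leaving it unset requires every DeployedModel on the Endpoint to have one. A hedged sketch with the Ruby client, where the endpoint name, instance payload, and deployed model ID are made-up placeholders:

    require "google/cloud/ai_platform/v1"

    client = ::Google::Cloud::AIPlatform::V1::PredictionService::Client.new

    endpoint = "projects/my-project/locations/us-central1/endpoints/my-endpoint" # placeholder
    instance = ::Google::Protobuf::Value.new(
      struct_value: ::Google::Protobuf::Struct.new(
        fields: { "feature_a" => ::Google::Protobuf::Value.new(number_value: 1.0) }
      )
    )

    # Target one DeployedModel; omit deployed_model_id to fall back to the
    # "all DeployedModels must have explanation_spec" case described above.
    response = client.explain endpoint: endpoint,
                              instances: [instance],
                              deployed_model_id: "1234567890" # placeholder
    response.explanations.each { |explanation| p explanation.attributions }
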
data/lib/google/cloud/aiplatform/v1/study_pb.rb
@@ -136,6 +136,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
   optional :min_measurement_count, :int64, 3
   optional :learning_rate_parameter_name, :string, 4
   optional :use_elapsed_duration, :bool, 5
+ proto3_optional :update_all_stopped_trials, :bool, 6
   end
   add_enum "google.cloud.aiplatform.v1.StudySpec.Algorithm" do
   value :ALGORITHM_UNSPECIFIED, 0
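
Because the new field is declared `proto3_optional`, the generated Ruby message tracks presence, so "unset" is distinguishable from `false`. A small sketch of what that usually looks like, assuming this block generates `StudySpec::ConvexAutomatedStoppingSpec` (the message name is inferred from the surrounding fields, not stated in this diff):

    require "google/cloud/ai_platform/v1"

    # Assumption: the message shown above is StudySpec::ConvexAutomatedStoppingSpec.
    spec = ::Google::Cloud::AIPlatform::V1::StudySpec::ConvexAutomatedStoppingSpec.new

    spec.has_update_all_stopped_trials? # => false: the field is unset
    spec.update_all_stopped_trials      # => false: proto3 default value

    spec.update_all_stopped_trials = true
    spec.has_update_all_stopped_trials? # => true
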
data/lib/google/cloud/aiplatform/v1/tensorboard_service_pb.rb
@@ -25,6 +25,19 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
   add_message "google.cloud.aiplatform.v1.GetTensorboardRequest" do
   optional :name, :string, 1
   end
+ add_message "google.cloud.aiplatform.v1.ReadTensorboardUsageRequest" do
+ optional :tensorboard, :string, 1
+ end
+ add_message "google.cloud.aiplatform.v1.ReadTensorboardUsageResponse" do
+ map :monthly_usage_data, :string, :message, 1, "google.cloud.aiplatform.v1.ReadTensorboardUsageResponse.PerMonthUsageData"
+ end
+ add_message "google.cloud.aiplatform.v1.ReadTensorboardUsageResponse.PerUserUsageData" do
+ optional :username, :string, 1
+ optional :view_count, :int64, 2
+ end
+ add_message "google.cloud.aiplatform.v1.ReadTensorboardUsageResponse.PerMonthUsageData" do
+ repeated :user_usage_data, :message, 1, "google.cloud.aiplatform.v1.ReadTensorboardUsageResponse.PerUserUsageData"
+ end
   add_message "google.cloud.aiplatform.v1.ListTensorboardsRequest" do
   optional :parent, :string, 1
   optional :filter, :string, 2
@@ -199,6 +212,10 @@ module Google
   module V1
   CreateTensorboardRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.CreateTensorboardRequest").msgclass
   GetTensorboardRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.GetTensorboardRequest").msgclass
+ ReadTensorboardUsageRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ReadTensorboardUsageRequest").msgclass
+ ReadTensorboardUsageResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ReadTensorboardUsageResponse").msgclass
+ ReadTensorboardUsageResponse::PerUserUsageData = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ReadTensorboardUsageResponse.PerUserUsageData").msgclass
+ ReadTensorboardUsageResponse::PerMonthUsageData = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ReadTensorboardUsageResponse.PerMonthUsageData").msgclass
   ListTensorboardsRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ListTensorboardsRequest").msgclass
   ListTensorboardsResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ListTensorboardsResponse").msgclass
   UpdateTensorboardRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.UpdateTensorboardRequest").msgclass
data/lib/google/cloud/aiplatform/v1/tensorboard_service_services_pb.rb
@@ -37,6 +37,8 @@ module Google
   rpc :CreateTensorboard, ::Google::Cloud::AIPlatform::V1::CreateTensorboardRequest, ::Google::Longrunning::Operation
   # Gets a Tensorboard.
   rpc :GetTensorboard, ::Google::Cloud::AIPlatform::V1::GetTensorboardRequest, ::Google::Cloud::AIPlatform::V1::Tensorboard
+ # Returns a list of monthly active users for a given TensorBoard instance.
+ rpc :ReadTensorboardUsage, ::Google::Cloud::AIPlatform::V1::ReadTensorboardUsageRequest, ::Google::Cloud::AIPlatform::V1::ReadTensorboardUsageResponse
   # Updates a Tensorboard.
   rpc :UpdateTensorboard, ::Google::Cloud::AIPlatform::V1::UpdateTensorboardRequest, ::Google::Longrunning::Operation
   # Lists Tensorboards in a Location.
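
The new `ReadTensorboardUsage` RPC reports monthly active users per Tensorboard. A minimal sketch of calling it through the regenerated Ruby client — the convenience method name follows the gem's usual snake_case mapping, and the Tensorboard resource name is a placeholder:

    require "google/cloud/ai_platform/v1"

    client = ::Google::Cloud::AIPlatform::V1::TensorboardService::Client.new

    tensorboard = "projects/my-project/locations/us-central1/tensorboards/my-tb" # placeholder

    response = client.read_tensorboard_usage tensorboard: tensorboard

    # monthly_usage_data maps a month string to PerMonthUsageData.
    response.monthly_usage_data.each do |month, usage|
      usage.user_usage_data.each do |user|
        puts "#{month}: #{user.username} opened the instance #{user.view_count} times"
      end
    end
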
@@ -79,13 +81,13 @@ module Google
   rpc :DeleteTensorboardTimeSeries, ::Google::Cloud::AIPlatform::V1::DeleteTensorboardTimeSeriesRequest, ::Google::Longrunning::Operation
   # Reads multiple TensorboardTimeSeries' data. The data point number limit is
   # 1000 for scalars, 100 for tensors and blob references. If the number of
- # data points stored is less than the limit, all data will be returned.
- # Otherwise, that limit number of data points will be randomly selected from
+ # data points stored is less than the limit, all data is returned.
+ # Otherwise, the number limit of data points is randomly selected from
   # this time series and returned.
   rpc :BatchReadTensorboardTimeSeriesData, ::Google::Cloud::AIPlatform::V1::BatchReadTensorboardTimeSeriesDataRequest, ::Google::Cloud::AIPlatform::V1::BatchReadTensorboardTimeSeriesDataResponse
   # Reads a TensorboardTimeSeries' data. By default, if the number of data
- # points stored is less than 1000, all data will be returned. Otherwise, 1000
- # data points will be randomly selected from this time series and returned.
+ # points stored is less than 1000, all data is returned. Otherwise, 1000
+ # data points is randomly selected from this time series and returned.
   # This value can be changed by changing max_data_points, which can't be
   # greater than 10k.
   rpc :ReadTensorboardTimeSeriesData, ::Google::Cloud::AIPlatform::V1::ReadTensorboardTimeSeriesDataRequest, ::Google::Cloud::AIPlatform::V1::ReadTensorboardTimeSeriesDataResponse
@@ -95,12 +97,10 @@ module Google
   # permission.
   rpc :ReadTensorboardBlobData, ::Google::Cloud::AIPlatform::V1::ReadTensorboardBlobDataRequest, stream(::Google::Cloud::AIPlatform::V1::ReadTensorboardBlobDataResponse)
   # Write time series data points of multiple TensorboardTimeSeries in multiple
- # TensorboardRun's. If any data fail to be ingested, an error will be
- # returned.
+ # TensorboardRun's. If any data fail to be ingested, an error is returned.
   rpc :WriteTensorboardExperimentData, ::Google::Cloud::AIPlatform::V1::WriteTensorboardExperimentDataRequest, ::Google::Cloud::AIPlatform::V1::WriteTensorboardExperimentDataResponse
   # Write time series data points into multiple TensorboardTimeSeries under
- # a TensorboardRun. If any data fail to be ingested, an error will be
- # returned.
+ # a TensorboardRun. If any data fail to be ingested, an error is returned.
   rpc :WriteTensorboardRunData, ::Google::Cloud::AIPlatform::V1::WriteTensorboardRunDataRequest, ::Google::Cloud::AIPlatform::V1::WriteTensorboardRunDataResponse
   # Exports a TensorboardTimeSeries' data. Data is returned in paginated
   # responses.
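
Per the comment above, `ReadTensorboardTimeSeriesData` samples 1000 points by default and honors `max_data_points` up to 10k. A hedged sketch of raising the limit from Ruby (the time series resource name is a placeholder):

    require "google/cloud/ai_platform/v1"

    client = ::Google::Cloud::AIPlatform::V1::TensorboardService::Client.new

    # Placeholder resource name of a TensorboardTimeSeries.
    series = "projects/my-project/locations/us-central1/tensorboards/my-tb/" \
             "experiments/my-exp/runs/my-run/timeSeries/my-series"

    response = client.read_tensorboard_time_series_data(
      tensorboard_time_series: series,
      max_data_points: 5_000 # capped at 10k by the service
    )
    response.time_series_data.values.each { |point| p point }
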
data/proto_docs/google/cloud/aiplatform/v1/annotation.rb
@@ -28,8 +28,9 @@ module Google
   # Output only. Resource name of the Annotation.
   # @!attribute [rw] payload_schema_uri
   # @return [::String]
- # Required. Google Cloud Storage URI points to a YAML file describing {::Google::Cloud::AIPlatform::V1::Annotation#payload payload}. The
- # schema is defined as an [OpenAPI 3.0.2 Schema
+ # Required. Google Cloud Storage URI points to a YAML file describing
+ # {::Google::Cloud::AIPlatform::V1::Annotation#payload payload}. The schema is
+ # defined as an [OpenAPI 3.0.2 Schema
   # Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
   # The schema files that can be used here are found in
   # gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -47,14 +48,15 @@ module Google
   # Output only. Timestamp when this Annotation was last updated.
   # @!attribute [rw] etag
   # @return [::String]
- # Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- # "overwrite" update happens.
+ # Optional. Used to perform consistent read-modify-write updates. If not set,
+ # a blind "overwrite" update happens.
   # @!attribute [r] annotation_source
   # @return [::Google::Cloud::AIPlatform::V1::UserActionReference]
   # Output only. The source of the Annotation.
   # @!attribute [rw] labels
   # @return [::Google::Protobuf::Map{::String => ::String}]
- # Optional. The labels with user-defined metadata to organize your Annotations.
+ # Optional. The labels with user-defined metadata to organize your
+ # Annotations.
   #
   # Label keys and values can be no longer than 64 characters
   # (Unicode codepoints), can only contain lowercase letters, numeric
@@ -71,7 +73,8 @@ module Google
   # If not set, the Annotation is not visible in the UI.
   #
   # * "aiplatform.googleapis.com/payload_schema":
- # output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ # output only, its value is the
+ # [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
   # title.
   class Annotation
   include ::Google::Protobuf::MessageExts
data/proto_docs/google/cloud/aiplatform/v1/annotation_spec.rb
@@ -38,8 +38,8 @@ module Google
   # Output only. Timestamp when AnnotationSpec was last updated.
   # @!attribute [rw] etag
   # @return [::String]
- # Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- # "overwrite" update happens.
+ # Optional. Used to perform consistent read-modify-write updates. If not set,
+ # a blind "overwrite" update happens.
   class AnnotationSpec
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
data/proto_docs/google/cloud/aiplatform/v1/batch_prediction_job.rb
@@ -21,8 +21,10 @@ module Google
   module Cloud
   module AIPlatform
   module V1
- # A job that uses a {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#model Model} to produce predictions
- # on multiple {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#input_config input instances}. If
+ # A job that uses a
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#model Model} to produce
+ # predictions on multiple [input
+ # instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. If
   # predictions for significant portion of the instances fail, the job may finish
   # without attempting predictions for all remaining instances.
   # @!attribute [r] name
@@ -43,7 +45,8 @@ module Google
   # the version, if no version is specified, the default version will be used.
   # @!attribute [r] model_version_id
   # @return [::String]
- # Output only. The version ID of the Model that produces the predictions via this job.
+ # Output only. The version ID of the Model that produces the predictions via
+ # this job.
   # @!attribute [rw] unmanaged_container_model
   # @return [::Google::Cloud::AIPlatform::V1::UnmanagedContainerModel]
   # Contains model information necessary to perform batch prediction without
@@ -51,15 +54,20 @@ module Google
   # Exactly one of model and unmanaged_container_model must be set.
   # @!attribute [rw] input_config
   # @return [::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InputConfig]
- # Required. Input configuration of the instances on which predictions are performed.
- # The schema of any single instance may be specified via
- # the [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
+ # Required. Input configuration of the instances on which predictions are
+ # performed. The schema of any single instance may be specified via the
+ # [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
   # [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
   # {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri instance_schema_uri}.
+ # @!attribute [rw] instance_config
+ # @return [::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig]
+ # Configuration for how to convert batch prediction input instances to the
+ # prediction instances that are sent to the Model.
   # @!attribute [rw] model_parameters
   # @return [::Google::Protobuf::Value]
   # The parameters that govern the predictions. The schema of the parameters
- # may be specified via the [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
+ # may be specified via the
+ # [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
   # [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
   # {::Google::Cloud::AIPlatform::V1::PredictSchemata#parameters_schema_uri parameters_schema_uri}.
   # @!attribute [rw] output_config
@@ -75,7 +83,8 @@ module Google
   # @!attribute [rw] dedicated_resources
   # @return [::Google::Cloud::AIPlatform::V1::BatchDedicatedResources]
   # The config of resources used by the Model during the batch prediction. If
- # the Model {::Google::Cloud::AIPlatform::V1::Model#supported_deployment_resources_types supports}
+ # the Model
+ # {::Google::Cloud::AIPlatform::V1::Model#supported_deployment_resources_types supports}
   # DEDICATED_RESOURCES this config may be provided (and the job will use these
   # resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config
   # must be provided.
@@ -90,35 +99,49 @@ module Google
   # permission on this service account.
   # @!attribute [rw] manual_batch_tuning_parameters
   # @return [::Google::Cloud::AIPlatform::V1::ManualBatchTuningParameters]
- # Immutable. Parameters configuring the batch behavior. Currently only applicable when
- # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#dedicated_resources dedicated_resources} are used (in other cases Vertex AI does
- # the tuning itself).
+ # Immutable. Parameters configuring the batch behavior. Currently only
+ # applicable when
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#dedicated_resources dedicated_resources}
+ # are used (in other cases Vertex AI does the tuning itself).
   # @!attribute [rw] generate_explanation
   # @return [::Boolean]
   # Generate explanation with the batch prediction results.
   #
   # When set to `true`, the batch prediction output changes based on the
   # `predictions_format` field of the
- # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#output_config BatchPredictionJob.output_config} object:
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#output_config BatchPredictionJob.output_config}
+ # object:
   #
   # * `bigquery`: output includes a column named `explanation`. The value
- # is a struct that conforms to the {::Google::Cloud::AIPlatform::V1::Explanation Explanation} object.
+ # is a struct that conforms to the
+ # {::Google::Cloud::AIPlatform::V1::Explanation Explanation} object.
   # * `jsonl`: The JSON objects on each line include an additional entry
   # keyed `explanation`. The value of the entry is a JSON object that
- # conforms to the {::Google::Cloud::AIPlatform::V1::Explanation Explanation} object.
+ # conforms to the {::Google::Cloud::AIPlatform::V1::Explanation Explanation}
+ # object.
   # * `csv`: Generating explanations for CSV format is not supported.
   #
- # If this field is set to true, either the {::Google::Cloud::AIPlatform::V1::Model#explanation_spec Model.explanation_spec} or
- # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#explanation_spec explanation_spec} must be populated.
+ # If this field is set to true, either the
+ # {::Google::Cloud::AIPlatform::V1::Model#explanation_spec Model.explanation_spec}
+ # or
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#explanation_spec explanation_spec}
+ # must be populated.
   # @!attribute [rw] explanation_spec
   # @return [::Google::Cloud::AIPlatform::V1::ExplanationSpec]
   # Explanation configuration for this BatchPredictionJob. Can be
- # specified only if {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#generate_explanation generate_explanation} is set to `true`.
+ # specified only if
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#generate_explanation generate_explanation}
+ # is set to `true`.
   #
- # This value overrides the value of {::Google::Cloud::AIPlatform::V1::Model#explanation_spec Model.explanation_spec}. All fields of
- # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#explanation_spec explanation_spec} are optional in the request. If a field of the
- # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#explanation_spec explanation_spec} object is not populated, the corresponding field of
- # the {::Google::Cloud::AIPlatform::V1::Model#explanation_spec Model.explanation_spec} object is inherited.
+ # This value overrides the value of
+ # {::Google::Cloud::AIPlatform::V1::Model#explanation_spec Model.explanation_spec}.
+ # All fields of
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#explanation_spec explanation_spec}
+ # are optional in the request. If a field of the
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#explanation_spec explanation_spec}
+ # object is not populated, the corresponding field of the
+ # {::Google::Cloud::AIPlatform::V1::Model#explanation_spec Model.explanation_spec}
+ # object is inherited.
   # @!attribute [r] output_info
   # @return [::Google::Cloud::AIPlatform::V1::BatchPredictionJob::OutputInfo]
   # Output only. Information further describing the output of this job.
@@ -137,8 +160,8 @@ module Google
   # Status details fields contain standard Google Cloud error details.
   # @!attribute [r] resources_consumed
   # @return [::Google::Cloud::AIPlatform::V1::ResourcesConsumed]
- # Output only. Information about resources that had been consumed by this job.
- # Provided in real time at best effort basis, as well as a final value
+ # Output only. Information about resources that had been consumed by this
+ # job. Provided in real time at best effort basis, as well as a final value
   # once the job completes.
   #
   # Note: This field currently may be not populated for batch predictions that
@@ -151,12 +174,12 @@ module Google
   # Output only. Time when the BatchPredictionJob was created.
   # @!attribute [r] start_time
   # @return [::Google::Protobuf::Timestamp]
- # Output only. Time when the BatchPredictionJob for the first time entered the
- # `JOB_STATE_RUNNING` state.
+ # Output only. Time when the BatchPredictionJob for the first time entered
+ # the `JOB_STATE_RUNNING` state.
   # @!attribute [r] end_time
   # @return [::Google::Protobuf::Timestamp]
- # Output only. Time when the BatchPredictionJob entered any of the following states:
- # `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
+ # Output only. Time when the BatchPredictionJob entered any of the following
+ # states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
   # @!attribute [r] update_time
   # @return [::Google::Protobuf::Timestamp]
   # Output only. Time when the BatchPredictionJob was most recently updated.
@@ -178,9 +201,11 @@ module Google
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods

- # Configures the input to {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}.
- # See {::Google::Cloud::AIPlatform::V1::Model#supported_input_storage_formats Model.supported_input_storage_formats} for Model's supported input
- # formats, and how instances should be expressed via any of them.
+ # Configures the input to
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}. See
+ # {::Google::Cloud::AIPlatform::V1::Model#supported_input_storage_formats Model.supported_input_storage_formats}
+ # for Model's supported input formats, and how instances should be expressed
+ # via any of them.
   # @!attribute [rw] gcs_source
   # @return [::Google::Cloud::AIPlatform::V1::GcsSource]
   # The Cloud Storage location for the input instances.
@@ -201,9 +226,112 @@ module Google
   extend ::Google::Protobuf::MessageExts::ClassMethods
   end

- # Configures the output of {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}.
- # See {::Google::Cloud::AIPlatform::V1::Model#supported_output_storage_formats Model.supported_output_storage_formats} for supported output
- # formats, and how predictions are expressed via any of them.
+ # Configuration defining how to transform batch prediction input instances to
+ # the instances that the Model accepts.
+ # @!attribute [rw] instance_type
+ # @return [::String]
+ # The format of the instance that the Model accepts. Vertex AI will
+ # convert compatible
+ # [batch prediction input instance
+ # formats][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.instances_format]
+ # to the specified format.
+ #
+ # Supported values are:
+ #
+ # * `object`: Each input is converted to JSON object format.
+ # * For `bigquery`, each row is converted to an object.
+ # * For `jsonl`, each line of the JSONL input must be an object.
+ # * Does not apply to `csv`, `file-list`, `tf-record`, or
+ # `tf-record-gzip`.
+ #
+ # * `array`: Each input is converted to JSON array format.
+ # * For `bigquery`, each row is converted to an array. The order
+ # of columns is determined by the BigQuery column order, unless
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig#included_fields included_fields}
+ # is populated.
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig#included_fields included_fields}
+ # must be populated for specifying field orders.
+ # * For `jsonl`, if each line of the JSONL input is an object,
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig#included_fields included_fields}
+ # must be populated for specifying field orders.
+ # * Does not apply to `csv`, `file-list`, `tf-record`, or
+ # `tf-record-gzip`.
+ #
+ # If not specified, Vertex AI converts the batch prediction input as
+ # follows:
+ #
+ # * For `bigquery` and `csv`, the behavior is the same as `array`. The
+ # order of columns is the same as defined in the file or table, unless
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig#included_fields included_fields}
+ # is populated.
+ # * For `jsonl`, the prediction instance format is determined by
+ # each line of the input.
+ # * For `tf-record`/`tf-record-gzip`, each record will be converted to
+ # an object in the format of `{"b64": <value>}`, where `<value>` is
+ # the Base64-encoded string of the content of the record.
+ # * For `file-list`, each file in the list will be converted to an
+ # object in the format of `{"b64": <value>}`, where `<value>` is
+ # the Base64-encoded string of the content of the file.
+ # @!attribute [rw] key_field
+ # @return [::String]
+ # The name of the field that is considered as a key.
+ #
+ # The values identified by the key field is not included in the transformed
+ # instances that is sent to the Model. This is similar to
+ # specifying this name of the field in
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig#excluded_fields excluded_fields}.
+ # In addition, the batch prediction output will not include the instances.
+ # Instead the output will only include the value of the key field, in a
+ # field named `key` in the output:
+ #
+ # * For `jsonl` output format, the output will have a `key` field
+ # instead of the `instance` field.
+ # * For `csv`/`bigquery` output format, the output will have have a `key`
+ # column instead of the instance feature columns.
+ #
+ # The input must be JSONL with objects at each line, CSV, BigQuery
+ # or TfRecord.
+ # @!attribute [rw] included_fields
+ # @return [::Array<::String>]
+ # Fields that will be included in the prediction instance that is
+ # sent to the Model.
+ #
+ # If
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig#instance_type instance_type}
+ # is `array`, the order of field names in included_fields also determines
+ # the order of the values in the array.
+ #
+ # When included_fields is populated,
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig#excluded_fields excluded_fields}
+ # must be empty.
+ #
+ # The input must be JSONL with objects at each line, CSV, BigQuery
+ # or TfRecord.
+ # @!attribute [rw] excluded_fields
+ # @return [::Array<::String>]
+ # Fields that will be excluded in the prediction instance that is
+ # sent to the Model.
+ #
+ # Excluded will be attached to the batch prediction output if
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig#key_field key_field}
+ # is not specified.
+ #
+ # When excluded_fields is populated,
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InstanceConfig#included_fields included_fields}
+ # must be empty.
+ #
+ # The input must be JSONL with objects at each line, CSV, BigQuery
+ # or TfRecord.
+ class InstanceConfig
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Configures the output of
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}. See
+ # {::Google::Cloud::AIPlatform::V1::Model#supported_output_storage_formats Model.supported_output_storage_formats}
+ # for supported output formats, and how predictions are expressed via any of
+ # them.
   # @!attribute [rw] gcs_destination
   # @return [::Google::Cloud::AIPlatform::V1::GcsDestination]
   # The Cloud Storage location of the directory where the output is
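
The `InstanceConfig` message introduced above controls how each input row is reshaped into the instance actually sent to the Model. A minimal, hypothetical sketch of attaching it to a BatchPredictionJob with the Ruby client — the project, model, and BigQuery URIs are placeholders, and `key_field` is only noted as the alternative for echoing an ID column back:

    require "google/cloud/ai_platform/v1"

    job = ::Google::Cloud::AIPlatform::V1::BatchPredictionJob.new(
      display_name: "demo-batch-job",
      model: "projects/my-project/locations/us-central1/models/my-model", # placeholder
      input_config: {
        instances_format: "bigquery",
        bigquery_source: { input_uri: "bq://my-project.my_dataset.inputs" } # placeholder
      },
      # New in this gem update: send each BigQuery row as a JSON array containing
      # only these columns, in this order. Setting key_field instead would echo
      # that column back as `key` in the output.
      instance_config: {
        instance_type: "array",
        included_fields: ["feature_a", "feature_b"]
      },
      output_config: {
        predictions_format: "bigquery",
        bigquery_destination: { output_uri: "bq://my-project.my_dataset.outputs" } # placeholder
      }
    )

    client = ::Google::Cloud::AIPlatform::V1::JobService::Client.new
    client.create_batch_prediction_job(
      parent: "projects/my-project/locations/us-central1",
      batch_prediction_job: job
    )
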
@@ -213,11 +341,13 @@ module Google
   # Inside of it files `predictions_0001.<extension>`,
   # `predictions_0002.<extension>`, ..., `predictions_N.<extension>`
   # are created where `<extension>` depends on chosen
- # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::OutputConfig#predictions_format predictions_format}, and N may equal 0001 and depends on the total
- # number of successfully predicted instances.
- # If the Model has both {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri instance}
- # and {::Google::Cloud::AIPlatform::V1::PredictSchemata#parameters_schema_uri prediction} schemata
- # defined then each such file contains predictions as per the
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::OutputConfig#predictions_format predictions_format},
+ # and N may equal 0001 and depends on the total number of successfully
+ # predicted instances. If the Model has both
+ # {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri instance}
+ # and
+ # {::Google::Cloud::AIPlatform::V1::PredictSchemata#parameters_schema_uri prediction}
+ # schemata defined then each such file contains predictions as per the
   # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::OutputConfig#predictions_format predictions_format}.
   # If prediction for any instance failed (partially or completely), then
   # an additional `errors_0001.<extension>`, `errors_0002.<extension>`,...,
@@ -236,20 +366,22 @@ module Google
   # become underscores), and timestamp is in
   # YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
   # two tables will be created, `predictions`, and `errors`.
- # If the Model has both {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri instance}
- # and {::Google::Cloud::AIPlatform::V1::PredictSchemata#parameters_schema_uri prediction} schemata
- # defined then the tables have columns as follows: The `predictions`
- # table contains instances for which the prediction succeeded, it
- # has columns as per a concatenation of the Model's instance and
- # prediction schemata. The `errors` table contains rows for which the
- # prediction has failed, it has instance columns, as per the
+ # If the Model has both
+ # {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri instance}
+ # and
+ # {::Google::Cloud::AIPlatform::V1::PredictSchemata#parameters_schema_uri prediction}
+ # schemata defined then the tables have columns as follows: The
+ # `predictions` table contains instances for which the prediction
+ # succeeded, it has columns as per a concatenation of the Model's
+ # instance and prediction schemata. The `errors` table contains rows for
+ # which the prediction has failed, it has instance columns, as per the
   # instance schema, followed by a single "errors" column, which as values
   # has {::Google::Rpc::Status google.rpc.Status}
   # represented as a STRUCT, and containing only `code` and `message`.
   # @!attribute [rw] predictions_format
   # @return [::String]
- # Required. The format in which Vertex AI gives the predictions, must be one of the
- # [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
+ # Required. The format in which Vertex AI gives the predictions, must be
+ # one of the [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
   # {::Google::Cloud::AIPlatform::V1::Model#supported_output_storage_formats supported_output_storage_formats}.
   class OutputConfig
   include ::Google::Protobuf::MessageExts
@@ -257,11 +389,12 @@ module Google
   end

   # Further describes this job's output.
- # Supplements {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#output_config output_config}.
+ # Supplements
+ # {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#output_config output_config}.
   # @!attribute [r] gcs_output_directory
   # @return [::String]
- # Output only. The full path of the Cloud Storage directory created, into which
- # the prediction output is written.
+ # Output only. The full path of the Cloud Storage directory created, into
+ # which the prediction output is written.
   # @!attribute [r] bigquery_output_dataset
   # @return [::String]
   # Output only. The path of the BigQuery dataset created, in
data/proto_docs/google/cloud/aiplatform/v1/completion_stats.rb
@@ -31,15 +31,16 @@ module Google
   # Output only. The number of entities for which any error was encountered.
   # @!attribute [r] incomplete_count
   # @return [::Integer]
- # Output only. In cases when enough errors are encountered a job, pipeline, or operation
- # may be failed as a whole. Below is the number of entities for which the
- # processing had not been finished (either in successful or failed state).
- # Set to -1 if the number is unknown (for example, the operation failed
- # before the total entity number could be collected).
+ # Output only. In cases when enough errors are encountered a job, pipeline,
+ # or operation may be failed as a whole. Below is the number of entities for
+ # which the processing had not been finished (either in successful or failed
+ # state). Set to -1 if the number is unknown (for example, the operation
+ # failed before the total entity number could be collected).
   # @!attribute [r] successful_forecast_point_count
   # @return [::Integer]
- # Output only. The number of the successful forecast points that are generated by the
- # forecasting model. This is ONLY used by the forecasting batch prediction.
+ # Output only. The number of the successful forecast points that are
+ # generated by the forecasting model. This is ONLY used by the forecasting
+ # batch prediction.
   class CompletionStats
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
data/proto_docs/google/cloud/aiplatform/v1/context.rb
@@ -50,8 +50,8 @@ module Google
   # Output only. Timestamp when this Context was last updated.
   # @!attribute [r] parent_contexts
   # @return [::Array<::String>]
- # Output only. A list of resource names of Contexts that are parents of this Context.
- # A Context may have at most 10 parent_contexts.
+ # Output only. A list of resource names of Contexts that are parents of this
+ # Context. A Context may have at most 10 parent_contexts.
   # @!attribute [rw] schema_title
   # @return [::String]
   # The title of the schema describing the metadata.