google-cloud-ai_platform-v1 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.yardopts +12 -0
- data/AUTHENTICATION.md +149 -0
- data/LICENSE.md +201 -0
- data/README.md +139 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service/client.rb +1364 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service/paths.rb +111 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service.rb +51 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service/client.rb +1076 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service/paths.rb +124 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service.rb +50 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service/client.rb +508 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service/paths.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service.rb +49 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service/client.rb +2707 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service/paths.rb +113 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service.rb +50 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/client.rb +1146 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/paths.rb +88 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service.rb +50 -0
- data/lib/google/cloud/ai_platform/v1/index_service/client.rb +823 -0
- data/lib/google/cloud/ai_platform/v1/index_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/index_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/index_service/paths.rb +88 -0
- data/lib/google/cloud/ai_platform/v1/index_service.rb +50 -0
- data/lib/google/cloud/ai_platform/v1/job_service/client.rb +3236 -0
- data/lib/google/cloud/ai_platform/v1/job_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/job_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/job_service/paths.rb +259 -0
- data/lib/google/cloud/ai_platform/v1/job_service.rb +50 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service/client.rb +3654 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service/paths.rb +153 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service.rb +50 -0
- data/lib/google/cloud/ai_platform/v1/migration_service/client.rb +538 -0
- data/lib/google/cloud/ai_platform/v1/migration_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/migration_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/migration_service/paths.rb +148 -0
- data/lib/google/cloud/ai_platform/v1/migration_service.rb +51 -0
- data/lib/google/cloud/ai_platform/v1/model_service/client.rb +1355 -0
- data/lib/google/cloud/ai_platform/v1/model_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/model_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/model_service/paths.rb +151 -0
- data/lib/google/cloud/ai_platform/v1/model_service.rb +50 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service/client.rb +1384 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service/paths.rb +225 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service.rb +52 -0
- data/lib/google/cloud/ai_platform/v1/prediction_service/client.rb +650 -0
- data/lib/google/cloud/ai_platform/v1/prediction_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/prediction_service/paths.rb +52 -0
- data/lib/google/cloud/ai_platform/v1/prediction_service.rb +49 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/client.rb +826 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/paths.rb +69 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service.rb +55 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/client.rb +3224 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/credentials.rb +48 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/paths.rb +138 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service.rb +50 -0
- data/lib/google/cloud/ai_platform/v1/version.rb +28 -0
- data/lib/google/cloud/ai_platform/v1/vizier_service/client.rb +1793 -0
- data/lib/google/cloud/ai_platform/v1/vizier_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/vizier_service/operations.rb +767 -0
- data/lib/google/cloud/ai_platform/v1/vizier_service/paths.rb +109 -0
- data/lib/google/cloud/ai_platform/v1/vizier_service.rb +54 -0
- data/lib/google/cloud/ai_platform/v1.rb +52 -0
- data/lib/google/cloud/aiplatform/v1/accelerator_type_pb.rb +29 -0
- data/lib/google/cloud/aiplatform/v1/annotation_pb.rb +35 -0
- data/lib/google/cloud/aiplatform/v1/annotation_spec_pb.rb +30 -0
- data/lib/google/cloud/aiplatform/v1/artifact_pb.rb +45 -0
- data/lib/google/cloud/aiplatform/v1/batch_prediction_job_pb.rb +83 -0
- data/lib/google/cloud/aiplatform/v1/completion_stats_pb.rb +27 -0
- data/lib/google/cloud/aiplatform/v1/context_pb.rb +38 -0
- data/lib/google/cloud/aiplatform/v1/custom_job_pb.rb +84 -0
- data/lib/google/cloud/aiplatform/v1/data_item_pb.rb +32 -0
- data/lib/google/cloud/aiplatform/v1/data_labeling_job_pb.rb +78 -0
- data/lib/google/cloud/aiplatform/v1/dataset_pb.rb +53 -0
- data/lib/google/cloud/aiplatform/v1/dataset_service_pb.rb +126 -0
- data/lib/google/cloud/aiplatform/v1/dataset_service_services_pb.rb +64 -0
- data/lib/google/cloud/aiplatform/v1/deployed_index_ref_pb.rb +26 -0
- data/lib/google/cloud/aiplatform/v1/deployed_model_ref_pb.rb +26 -0
- data/lib/google/cloud/aiplatform/v1/encryption_spec_pb.rb +24 -0
- data/lib/google/cloud/aiplatform/v1/endpoint_pb.rb +67 -0
- data/lib/google/cloud/aiplatform/v1/endpoint_service_pb.rb +90 -0
- data/lib/google/cloud/aiplatform/v1/endpoint_service_services_pb.rb +58 -0
- data/lib/google/cloud/aiplatform/v1/entity_type_pb.rb +32 -0
- data/lib/google/cloud/aiplatform/v1/env_var_pb.rb +25 -0
- data/lib/google/cloud/aiplatform/v1/event_pb.rb +36 -0
- data/lib/google/cloud/aiplatform/v1/execution_pb.rb +48 -0
- data/lib/google/cloud/aiplatform/v1/explanation_metadata_pb.rb +107 -0
- data/lib/google/cloud/aiplatform/v1/explanation_pb.rb +106 -0
- data/lib/google/cloud/aiplatform/v1/feature_monitoring_stats_pb.rb +30 -0
- data/lib/google/cloud/aiplatform/v1/feature_pb.rb +46 -0
- data/lib/google/cloud/aiplatform/v1/feature_selector_pb.rb +28 -0
- data/lib/google/cloud/aiplatform/v1/featurestore_online_service_pb.rb +86 -0
- data/lib/google/cloud/aiplatform/v1/featurestore_online_service_services_pb.rb +51 -0
- data/lib/google/cloud/aiplatform/v1/featurestore_pb.rb +44 -0
- data/lib/google/cloud/aiplatform/v1/featurestore_service_pb.rb +280 -0
- data/lib/google/cloud/aiplatform/v1/featurestore_service_services_pb.rb +109 -0
- data/lib/google/cloud/aiplatform/v1/hyperparameter_tuning_job_pb.rb +46 -0
- data/lib/google/cloud/aiplatform/v1/index_endpoint_pb.rb +66 -0
- data/lib/google/cloud/aiplatform/v1/index_endpoint_service_pb.rb +101 -0
- data/lib/google/cloud/aiplatform/v1/index_endpoint_service_services_pb.rb +62 -0
- data/lib/google/cloud/aiplatform/v1/index_pb.rb +38 -0
- data/lib/google/cloud/aiplatform/v1/index_service_pb.rb +98 -0
- data/lib/google/cloud/aiplatform/v1/index_service_services_pb.rb +55 -0
- data/lib/google/cloud/aiplatform/v1/io_pb.rb +56 -0
- data/lib/google/cloud/aiplatform/v1/job_service_pb.rb +217 -0
- data/lib/google/cloud/aiplatform/v1/job_service_services_pb.rb +134 -0
- data/lib/google/cloud/aiplatform/v1/job_state_pb.rb +32 -0
- data/lib/google/cloud/aiplatform/v1/lineage_subgraph_pb.rb +28 -0
- data/lib/google/cloud/aiplatform/v1/machine_resources_pb.rb +59 -0
- data/lib/google/cloud/aiplatform/v1/manual_batch_tuning_parameters_pb.rb +24 -0
- data/lib/google/cloud/aiplatform/v1/metadata_schema_pb.rb +38 -0
- data/lib/google/cloud/aiplatform/v1/metadata_service_pb.rb +272 -0
- data/lib/google/cloud/aiplatform/v1/metadata_service_services_pb.rb +119 -0
- data/lib/google/cloud/aiplatform/v1/metadata_store_pb.rb +36 -0
- data/lib/google/cloud/aiplatform/v1/migratable_resource_pb.rb +59 -0
- data/lib/google/cloud/aiplatform/v1/migration_service_pb.rb +106 -0
- data/lib/google/cloud/aiplatform/v1/migration_service_services_pb.rb +51 -0
- data/lib/google/cloud/aiplatform/v1/model_deployment_monitoring_job_pb.rb +111 -0
- data/lib/google/cloud/aiplatform/v1/model_evaluation_pb.rb +33 -0
- data/lib/google/cloud/aiplatform/v1/model_evaluation_slice_pb.rb +36 -0
- data/lib/google/cloud/aiplatform/v1/model_monitoring_pb.rb +93 -0
- data/lib/google/cloud/aiplatform/v1/model_pb.rb +88 -0
- data/lib/google/cloud/aiplatform/v1/model_service_pb.rb +129 -0
- data/lib/google/cloud/aiplatform/v1/model_service_services_pb.rb +69 -0
- data/lib/google/cloud/aiplatform/v1/operation_pb.rb +32 -0
- data/lib/google/cloud/aiplatform/v1/pipeline_job_pb.rb +115 -0
- data/lib/google/cloud/aiplatform/v1/pipeline_service_pb.rb +88 -0
- data/lib/google/cloud/aiplatform/v1/pipeline_service_services_pb.rb +84 -0
- data/lib/google/cloud/aiplatform/v1/pipeline_state_pb.rb +31 -0
- data/lib/google/cloud/aiplatform/v1/prediction_service_pb.rb +57 -0
- data/lib/google/cloud/aiplatform/v1/prediction_service_services_pb.rb +66 -0
- data/lib/google/cloud/aiplatform/v1/specialist_pool_pb.rb +30 -0
- data/lib/google/cloud/aiplatform/v1/specialist_pool_service_pb.rb +66 -0
- data/lib/google/cloud/aiplatform/v1/specialist_pool_service_services_pb.rb +58 -0
- data/lib/google/cloud/aiplatform/v1/study_pb.rb +191 -0
- data/lib/google/cloud/aiplatform/v1/tensorboard_data_pb.rb +56 -0
- data/lib/google/cloud/aiplatform/v1/tensorboard_experiment_pb.rb +33 -0
- data/lib/google/cloud/aiplatform/v1/tensorboard_pb.rb +36 -0
- data/lib/google/cloud/aiplatform/v1/tensorboard_run_pb.rb +32 -0
- data/lib/google/cloud/aiplatform/v1/tensorboard_service_pb.rb +244 -0
- data/lib/google/cloud/aiplatform/v1/tensorboard_service_services_pb.rb +115 -0
- data/lib/google/cloud/aiplatform/v1/tensorboard_time_series_pb.rb +48 -0
- data/lib/google/cloud/aiplatform/v1/training_pipeline_pb.rb +95 -0
- data/lib/google/cloud/aiplatform/v1/types_pb.rb +35 -0
- data/lib/google/cloud/aiplatform/v1/unmanaged_container_model_pb.rb +27 -0
- data/lib/google/cloud/aiplatform/v1/user_action_reference_pb.rb +27 -0
- data/lib/google/cloud/aiplatform/v1/value_pb.rb +27 -0
- data/lib/google/cloud/aiplatform/v1/vizier_service_pb.rb +136 -0
- data/lib/google/cloud/aiplatform/v1/vizier_service_services_pb.rb +90 -0
- data/lib/google-cloud-ai_platform-v1.rb +21 -0
- data/proto_docs/README.md +4 -0
- data/proto_docs/google/api/field_behavior.rb +71 -0
- data/proto_docs/google/api/httpbody.rb +80 -0
- data/proto_docs/google/api/resource.rb +222 -0
- data/proto_docs/google/cloud/aiplatform/v1/accelerator_type.rb +50 -0
- data/proto_docs/google/cloud/aiplatform/v1/annotation.rb +92 -0
- data/proto_docs/google/cloud/aiplatform/v1/annotation_spec.rb +50 -0
- data/proto_docs/google/cloud/aiplatform/v1/artifact.rb +112 -0
- data/proto_docs/google/cloud/aiplatform/v1/batch_prediction_job.rb +278 -0
- data/proto_docs/google/cloud/aiplatform/v1/completion_stats.rb +46 -0
- data/proto_docs/google/cloud/aiplatform/v1/context.rb +92 -0
- data/proto_docs/google/cloud/aiplatform/v1/custom_job.rb +272 -0
- data/proto_docs/google/cloud/aiplatform/v1/data_item.rb +73 -0
- data/proto_docs/google/cloud/aiplatform/v1/data_labeling_job.rb +207 -0
- data/proto_docs/google/cloud/aiplatform/v1/dataset.rb +154 -0
- data/proto_docs/google/cloud/aiplatform/v1/dataset_service.rb +301 -0
- data/proto_docs/google/cloud/aiplatform/v1/deployed_index_ref.rb +38 -0
- data/proto_docs/google/cloud/aiplatform/v1/deployed_model_ref.rb +38 -0
- data/proto_docs/google/cloud/aiplatform/v1/encryption_spec.rb +40 -0
- data/proto_docs/google/cloud/aiplatform/v1/endpoint.rb +227 -0
- data/proto_docs/google/cloud/aiplatform/v1/endpoint_service.rb +258 -0
- data/proto_docs/google/cloud/aiplatform/v1/entity_type.rb +79 -0
- data/proto_docs/google/cloud/aiplatform/v1/env_var.rb +44 -0
- data/proto_docs/google/cloud/aiplatform/v1/event.rb +79 -0
- data/proto_docs/google/cloud/aiplatform/v1/execution.rb +118 -0
- data/proto_docs/google/cloud/aiplatform/v1/explanation.rb +445 -0
- data/proto_docs/google/cloud/aiplatform/v1/explanation_metadata.rb +419 -0
- data/proto_docs/google/cloud/aiplatform/v1/feature.rb +115 -0
- data/proto_docs/google/cloud/aiplatform/v1/feature_monitoring_stats.rb +88 -0
- data/proto_docs/google/cloud/aiplatform/v1/feature_selector.rb +49 -0
- data/proto_docs/google/cloud/aiplatform/v1/featurestore.rb +115 -0
- data/proto_docs/google/cloud/aiplatform/v1/featurestore_online_service.rb +203 -0
- data/proto_docs/google/cloud/aiplatform/v1/featurestore_service.rb +978 -0
- data/proto_docs/google/cloud/aiplatform/v1/hyperparameter_tuning_job.rb +109 -0
- data/proto_docs/google/cloud/aiplatform/v1/index.rb +98 -0
- data/proto_docs/google/cloud/aiplatform/v1/index_endpoint.rb +252 -0
- data/proto_docs/google/cloud/aiplatform/v1/index_endpoint_service.rb +240 -0
- data/proto_docs/google/cloud/aiplatform/v1/index_service.rb +220 -0
- data/proto_docs/google/cloud/aiplatform/v1/io.rb +134 -0
- data/proto_docs/google/cloud/aiplatform/v1/job_service.rb +660 -0
- data/proto_docs/google/cloud/aiplatform/v1/job_state.rb +60 -0
- data/proto_docs/google/cloud/aiplatform/v1/lineage_subgraph.rb +42 -0
- data/proto_docs/google/cloud/aiplatform/v1/machine_resources.rb +194 -0
- data/proto_docs/google/cloud/aiplatform/v1/manual_batch_tuning_parameters.rb +41 -0
- data/proto_docs/google/cloud/aiplatform/v1/metadata_schema.rb +74 -0
- data/proto_docs/google/cloud/aiplatform/v1/metadata_service.rb +912 -0
- data/proto_docs/google/cloud/aiplatform/v1/metadata_store.rb +62 -0
- data/proto_docs/google/cloud/aiplatform/v1/migratable_resource.rb +133 -0
- data/proto_docs/google/cloud/aiplatform/v1/migration_service.rb +260 -0
- data/proto_docs/google/cloud/aiplatform/v1/model.rb +562 -0
- data/proto_docs/google/cloud/aiplatform/v1/model_deployment_monitoring_job.rb +293 -0
- data/proto_docs/google/cloud/aiplatform/v1/model_evaluation.rb +60 -0
- data/proto_docs/google/cloud/aiplatform/v1/model_evaluation_slice.rb +68 -0
- data/proto_docs/google/cloud/aiplatform/v1/model_monitoring.rb +257 -0
- data/proto_docs/google/cloud/aiplatform/v1/model_service.rb +329 -0
- data/proto_docs/google/cloud/aiplatform/v1/operation.rb +55 -0
- data/proto_docs/google/cloud/aiplatform/v1/pipeline_job.rb +347 -0
- data/proto_docs/google/cloud/aiplatform/v1/pipeline_service.rb +258 -0
- data/proto_docs/google/cloud/aiplatform/v1/pipeline_state.rb +59 -0
- data/proto_docs/google/cloud/aiplatform/v1/prediction_service.rb +165 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/instance/image_classification.rb +52 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/instance/image_object_detection.rb +52 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/instance/image_segmentation.rb +47 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/instance/text_classification.rb +46 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/instance/text_extraction.rb +53 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/instance/text_sentiment.rb +46 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/instance/video_action_recognition.rb +59 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/instance/video_classification.rb +59 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/instance/video_object_tracking.rb +59 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/params/image_classification.rb +47 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/params/image_object_detection.rb +47 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/params/image_segmentation.rb +44 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/params/video_action_recognition.rb +47 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/params/video_classification.rb +72 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/params/video_object_tracking.rb +51 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/classification.rb +49 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/image_object_detection.rb +58 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/image_segmentation.rb +53 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/tabular_classification.rb +47 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/tabular_regression.rb +47 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/text_extraction.rb +60 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/text_sentiment.rb +45 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/video_action_recognition.rb +60 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/video_classification.rb +73 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/predict/prediction/video_object_tracking.rb +91 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_image_classification.rb +142 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_image_object_detection.rb +134 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_image_segmentation.rb +120 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_tables.rb +315 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_text_classification.rb +48 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_text_extraction.rb +46 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_text_sentiment.rb +55 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_video_action_recognition.rb +73 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_video_classification.rb +67 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/automl_video_object_tracking.rb +78 -0
- data/proto_docs/google/cloud/aiplatform/v1/schema/trainingjob/definition/export_evaluated_data_items_config.rb +51 -0
- data/proto_docs/google/cloud/aiplatform/v1/specialist_pool.rb +58 -0
- data/proto_docs/google/cloud/aiplatform/v1/specialist_pool_service.rb +136 -0
- data/proto_docs/google/cloud/aiplatform/v1/study.rb +543 -0
- data/proto_docs/google/cloud/aiplatform/v1/tensorboard.rb +89 -0
- data/proto_docs/google/cloud/aiplatform/v1/tensorboard_data.rb +110 -0
- data/proto_docs/google/cloud/aiplatform/v1/tensorboard_experiment.rb +82 -0
- data/proto_docs/google/cloud/aiplatform/v1/tensorboard_run.rb +85 -0
- data/proto_docs/google/cloud/aiplatform/v1/tensorboard_service.rb +706 -0
- data/proto_docs/google/cloud/aiplatform/v1/tensorboard_time_series.rb +101 -0
- data/proto_docs/google/cloud/aiplatform/v1/training_pipeline.rb +381 -0
- data/proto_docs/google/cloud/aiplatform/v1/types.rb +62 -0
- data/proto_docs/google/cloud/aiplatform/v1/unmanaged_container_model.rb +44 -0
- data/proto_docs/google/cloud/aiplatform/v1/user_action_reference.rb +49 -0
- data/proto_docs/google/cloud/aiplatform/v1/value.rb +41 -0
- data/proto_docs/google/cloud/aiplatform/v1/vizier_service.rb +332 -0
- data/proto_docs/google/longrunning/operations.rb +164 -0
- data/proto_docs/google/protobuf/any.rb +141 -0
- data/proto_docs/google/protobuf/duration.rb +98 -0
- data/proto_docs/google/protobuf/empty.rb +36 -0
- data/proto_docs/google/protobuf/field_mask.rb +229 -0
- data/proto_docs/google/protobuf/struct.rb +96 -0
- data/proto_docs/google/protobuf/timestamp.rb +129 -0
- data/proto_docs/google/protobuf/wrappers.rb +121 -0
- data/proto_docs/google/rpc/status.rb +46 -0
- data/proto_docs/google/type/money.rb +43 -0
- metadata +479 -0
|
@@ -0,0 +1,562 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
# Copyright 2022 Google LLC
|
|
4
|
+
#
|
|
5
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
6
|
+
# you may not use this file except in compliance with the License.
|
|
7
|
+
# You may obtain a copy of the License at
|
|
8
|
+
#
|
|
9
|
+
# https://www.apache.org/licenses/LICENSE-2.0
|
|
10
|
+
#
|
|
11
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
12
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
13
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
14
|
+
# See the License for the specific language governing permissions and
|
|
15
|
+
# limitations under the License.
|
|
16
|
+
|
|
17
|
+
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
module Google
|
|
21
|
+
module Cloud
|
|
22
|
+
module AIPlatform
|
|
23
|
+
module V1
|
|
24
|
+
# A trained machine learning Model.
|
|
25
|
+
# @!attribute [rw] name
|
|
26
|
+
# @return [::String]
|
|
27
|
+
# The resource name of the Model.
|
|
28
|
+
# @!attribute [rw] display_name
|
|
29
|
+
# @return [::String]
|
|
30
|
+
# Required. The display name of the Model.
|
|
31
|
+
# The name can be up to 128 characters long and can be consist of any UTF-8
|
|
32
|
+
# characters.
|
|
33
|
+
# @!attribute [rw] description
|
|
34
|
+
# @return [::String]
|
|
35
|
+
# The description of the Model.
|
|
36
|
+
# @!attribute [rw] predict_schemata
|
|
37
|
+
# @return [::Google::Cloud::AIPlatform::V1::PredictSchemata]
|
|
38
|
+
# The schemata that describe formats of the Model's predictions and
|
|
39
|
+
# explanations as given and returned via
|
|
40
|
+
# {::Google::Cloud::AIPlatform::V1::PredictionService::Client#predict PredictionService.Predict} and {::Google::Cloud::AIPlatform::V1::PredictionService::Client#explain PredictionService.Explain}.
|
|
41
|
+
# @!attribute [rw] metadata_schema_uri
|
|
42
|
+
# @return [::String]
|
|
43
|
+
# Immutable. Points to a YAML file stored on Google Cloud Storage describing additional
|
|
44
|
+
# information about the Model, that is specific to it. Unset if the Model
|
|
45
|
+
# does not have any additional information.
|
|
46
|
+
# The schema is defined as an OpenAPI 3.0.2 [Schema
|
|
47
|
+
# Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
|
|
48
|
+
# AutoML Models always have this field populated by Vertex AI, if no
|
|
49
|
+
# additional metadata is needed, this field is set to an empty string.
|
|
50
|
+
# Note: The URI given on output will be immutable and probably different,
|
|
51
|
+
# including the URI scheme, than the one given on input. The output URI will
|
|
52
|
+
# point to a location where the user only has a read access.
|
|
53
|
+
# @!attribute [rw] metadata
|
|
54
|
+
# @return [::Google::Protobuf::Value]
|
|
55
|
+
# Immutable. An additional information about the Model; the schema of the metadata can
|
|
56
|
+
# be found in {::Google::Cloud::AIPlatform::V1::Model#metadata_schema_uri metadata_schema}.
|
|
57
|
+
# Unset if the Model does not have any additional information.
|
|
58
|
+
# @!attribute [r] supported_export_formats
|
|
59
|
+
# @return [::Array<::Google::Cloud::AIPlatform::V1::Model::ExportFormat>]
|
|
60
|
+
# Output only. The formats in which this Model may be exported. If empty, this Model is
|
|
61
|
+
# not available for export.
|
|
62
|
+
# @!attribute [r] training_pipeline
|
|
63
|
+
# @return [::String]
|
|
64
|
+
# Output only. The resource name of the TrainingPipeline that uploaded this Model, if
|
|
65
|
+
# any.
|
|
66
|
+
# @!attribute [rw] container_spec
|
|
67
|
+
# @return [::Google::Cloud::AIPlatform::V1::ModelContainerSpec]
|
|
68
|
+
# Input only. The specification of the container that is to be used when deploying
|
|
69
|
+
# this Model. The specification is ingested upon
|
|
70
|
+
# {::Google::Cloud::AIPlatform::V1::ModelService::Client#upload_model ModelService.UploadModel}, and all binaries it contains are copied
|
|
71
|
+
# and stored internally by Vertex AI.
|
|
72
|
+
# Not present for AutoML Models.
|
|
73
|
+
# @!attribute [rw] artifact_uri
|
|
74
|
+
# @return [::String]
|
|
75
|
+
# Immutable. The path to the directory containing the Model artifact and any of its
|
|
76
|
+
# supporting files.
|
|
77
|
+
# Not present for AutoML Models.
|
|
78
|
+
# @!attribute [r] supported_deployment_resources_types
|
|
79
|
+
# @return [::Array<::Google::Cloud::AIPlatform::V1::Model::DeploymentResourcesType>]
|
|
80
|
+
# Output only. When this Model is deployed, its prediction resources are described by the
|
|
81
|
+
# `prediction_resources` field of the {::Google::Cloud::AIPlatform::V1::Endpoint#deployed_models Endpoint.deployed_models} object.
|
|
82
|
+
# Because not all Models support all resource configuration types, the
|
|
83
|
+
# configuration types this Model supports are listed here. If no
|
|
84
|
+
# configuration types are listed, the Model cannot be deployed to an
|
|
85
|
+
# {::Google::Cloud::AIPlatform::V1::Endpoint Endpoint} and does not support
|
|
86
|
+
# online predictions ({::Google::Cloud::AIPlatform::V1::PredictionService::Client#predict PredictionService.Predict} or
|
|
87
|
+
# {::Google::Cloud::AIPlatform::V1::PredictionService::Client#explain PredictionService.Explain}). Such a Model can serve predictions by
|
|
88
|
+
# using a {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}, if it has at least one entry each in
|
|
89
|
+
# {::Google::Cloud::AIPlatform::V1::Model#supported_input_storage_formats supported_input_storage_formats} and
|
|
90
|
+
# {::Google::Cloud::AIPlatform::V1::Model#supported_output_storage_formats supported_output_storage_formats}.
|
|
91
|
+
# @!attribute [r] supported_input_storage_formats
|
|
92
|
+
# @return [::Array<::String>]
|
|
93
|
+
# Output only. The formats this Model supports in
|
|
94
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#input_config BatchPredictionJob.input_config}. If
|
|
95
|
+
# {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri PredictSchemata.instance_schema_uri} exists, the instances
|
|
96
|
+
# should be given as per that schema.
|
|
97
|
+
#
|
|
98
|
+
# The possible formats are:
|
|
99
|
+
#
|
|
100
|
+
# * `jsonl`
|
|
101
|
+
# The JSON Lines format, where each instance is a single line. Uses
|
|
102
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InputConfig#gcs_source GcsSource}.
|
|
103
|
+
#
|
|
104
|
+
# * `csv`
|
|
105
|
+
# The CSV format, where each instance is a single comma-separated line.
|
|
106
|
+
# The first line in the file is the header, containing comma-separated field
|
|
107
|
+
# names. Uses {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InputConfig#gcs_source GcsSource}.
|
|
108
|
+
#
|
|
109
|
+
# * `tf-record`
|
|
110
|
+
# The TFRecord format, where each instance is a single record in tfrecord
|
|
111
|
+
# syntax. Uses {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InputConfig#gcs_source GcsSource}.
|
|
112
|
+
#
|
|
113
|
+
# * `tf-record-gzip`
|
|
114
|
+
# Similar to `tf-record`, but the file is gzipped. Uses
|
|
115
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InputConfig#gcs_source GcsSource}.
|
|
116
|
+
#
|
|
117
|
+
# * `bigquery`
|
|
118
|
+
# Each instance is a single row in BigQuery. Uses
|
|
119
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InputConfig#bigquery_source BigQuerySource}.
|
|
120
|
+
#
|
|
121
|
+
# * `file-list`
|
|
122
|
+
# Each line of the file is the location of an instance to process, uses
|
|
123
|
+
# `gcs_source` field of the
|
|
124
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::InputConfig InputConfig} object.
|
|
125
|
+
#
|
|
126
|
+
#
|
|
127
|
+
# If this Model doesn't support any of these formats it means it cannot be
|
|
128
|
+
# used with a {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}. However, if it has
|
|
129
|
+
# {::Google::Cloud::AIPlatform::V1::Model#supported_deployment_resources_types supported_deployment_resources_types}, it could serve online
|
|
130
|
+
# predictions by using {::Google::Cloud::AIPlatform::V1::PredictionService::Client#predict PredictionService.Predict} or
|
|
131
|
+
# {::Google::Cloud::AIPlatform::V1::PredictionService::Client#explain PredictionService.Explain}.
|
|
132
|
+
# @!attribute [r] supported_output_storage_formats
|
|
133
|
+
# @return [::Array<::String>]
|
|
134
|
+
# Output only. The formats this Model supports in
|
|
135
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#output_config BatchPredictionJob.output_config}. If both
|
|
136
|
+
# {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri PredictSchemata.instance_schema_uri} and
|
|
137
|
+
# {::Google::Cloud::AIPlatform::V1::PredictSchemata#prediction_schema_uri PredictSchemata.prediction_schema_uri} exist, the predictions
|
|
138
|
+
# are returned together with their instances. In other words, the
|
|
139
|
+
# prediction has the original instance data first, followed
|
|
140
|
+
# by the actual prediction content (as per the schema).
|
|
141
|
+
#
|
|
142
|
+
# The possible formats are:
|
|
143
|
+
#
|
|
144
|
+
# * `jsonl`
|
|
145
|
+
# The JSON Lines format, where each prediction is a single line. Uses
|
|
146
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::OutputConfig#gcs_destination GcsDestination}.
|
|
147
|
+
#
|
|
148
|
+
# * `csv`
|
|
149
|
+
# The CSV format, where each prediction is a single comma-separated line.
|
|
150
|
+
# The first line in the file is the header, containing comma-separated field
|
|
151
|
+
# names. Uses
|
|
152
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::OutputConfig#gcs_destination GcsDestination}.
|
|
153
|
+
#
|
|
154
|
+
# * `bigquery`
|
|
155
|
+
# Each prediction is a single row in a BigQuery table, uses
|
|
156
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob::OutputConfig#bigquery_destination BigQueryDestination}
|
|
157
|
+
# .
|
|
158
|
+
#
|
|
159
|
+
#
|
|
160
|
+
# If this Model doesn't support any of these formats it means it cannot be
|
|
161
|
+
# used with a {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}. However, if it has
|
|
162
|
+
# {::Google::Cloud::AIPlatform::V1::Model#supported_deployment_resources_types supported_deployment_resources_types}, it could serve online
|
|
163
|
+
# predictions by using {::Google::Cloud::AIPlatform::V1::PredictionService::Client#predict PredictionService.Predict} or
|
|
164
|
+
# {::Google::Cloud::AIPlatform::V1::PredictionService::Client#explain PredictionService.Explain}.
|
|
165
|
+
# @!attribute [r] create_time
|
|
166
|
+
# @return [::Google::Protobuf::Timestamp]
|
|
167
|
+
# Output only. Timestamp when this Model was uploaded into Vertex AI.
|
|
168
|
+
# @!attribute [r] update_time
|
|
169
|
+
# @return [::Google::Protobuf::Timestamp]
|
|
170
|
+
# Output only. Timestamp when this Model was most recently updated.
|
|
171
|
+
# @!attribute [r] deployed_models
|
|
172
|
+
# @return [::Array<::Google::Cloud::AIPlatform::V1::DeployedModelRef>]
|
|
173
|
+
# Output only. The pointers to DeployedModels created from this Model. Note that
|
|
174
|
+
# Model could have been deployed to Endpoints in different Locations.
|
|
175
|
+
# @!attribute [rw] explanation_spec
|
|
176
|
+
# @return [::Google::Cloud::AIPlatform::V1::ExplanationSpec]
|
|
177
|
+
# The default explanation specification for this Model.
|
|
178
|
+
#
|
|
179
|
+
# The Model can be used for [requesting
|
|
180
|
+
# explanation][PredictionService.Explain] after being
|
|
181
|
+
# {::Google::Cloud::AIPlatform::V1::EndpointService::Client#deploy_model deployed} if it is populated.
|
|
182
|
+
# The Model can be used for [batch
|
|
183
|
+
# explanation][BatchPredictionJob.generate_explanation] if it is populated.
|
|
184
|
+
#
|
|
185
|
+
# All fields of the explanation_spec can be overridden by
|
|
186
|
+
# {::Google::Cloud::AIPlatform::V1::DeployedModel#explanation_spec explanation_spec} of
|
|
187
|
+
# {::Google::Cloud::AIPlatform::V1::DeployModelRequest#deployed_model DeployModelRequest.deployed_model}, or
|
|
188
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#explanation_spec explanation_spec} of
|
|
189
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}.
|
|
190
|
+
#
|
|
191
|
+
# If the default explanation specification is not set for this Model, this
|
|
192
|
+
# Model can still be used for [requesting
|
|
193
|
+
# explanation][PredictionService.Explain] by setting
|
|
194
|
+
# {::Google::Cloud::AIPlatform::V1::DeployedModel#explanation_spec explanation_spec} of
|
|
195
|
+
# {::Google::Cloud::AIPlatform::V1::DeployModelRequest#deployed_model DeployModelRequest.deployed_model} and for [batch
|
|
196
|
+
# explanation][BatchPredictionJob.generate_explanation] by setting
|
|
197
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#explanation_spec explanation_spec} of
|
|
198
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}.
|
|
199
|
+
# @!attribute [rw] etag
|
|
200
|
+
# @return [::String]
|
|
201
|
+
# Used to perform consistent read-modify-write updates. If not set, a blind
|
|
202
|
+
# "overwrite" update happens.
|
|
203
|
+
# @!attribute [rw] labels
|
|
204
|
+
# @return [::Google::Protobuf::Map{::String => ::String}]
|
|
205
|
+
# The labels with user-defined metadata to organize your Models.
|
|
206
|
+
#
|
|
207
|
+
# Label keys and values can be no longer than 64 characters
|
|
208
|
+
# (Unicode codepoints), can only contain lowercase letters, numeric
|
|
209
|
+
# characters, underscores and dashes. International characters are allowed.
|
|
210
|
+
#
|
|
211
|
+
# See https://goo.gl/xmQnxf for more information and examples of labels.
|
|
212
|
+
# @!attribute [rw] encryption_spec
|
|
213
|
+
# @return [::Google::Cloud::AIPlatform::V1::EncryptionSpec]
|
|
214
|
+
# Customer-managed encryption key spec for a Model. If set, this
|
|
215
|
+
# Model and all sub-resources of this Model will be secured by this key.
|
|
216
|
+
# A trained machine learning Model. (Field-level documentation for this
# message is carried by the @!attribute directives that precede this class.)
class Model
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Represents export format supported by the Model.
  # All formats export to Google Cloud Storage.
  # @!attribute [r] id
  #   @return [::String]
  #     Output only. The ID of the export format.
  #     The possible format IDs are:
  #
  #     * `tflite`
  #     Used for Android mobile devices.
  #
  #     * `edgetpu-tflite`
  #     Used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices.
  #
  #     * `tf-saved-model`
  #     A tensorflow model in SavedModel format.
  #
  #     * `tf-js`
  #     A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used
  #     in the browser and in Node.js using JavaScript.
  #
  #     * `core-ml`
  #     Used for iOS mobile devices.
  #
  #     * `custom-trained`
  #     A Model that was uploaded or trained by custom code.
  # @!attribute [r] exportable_contents
  #   @return [::Array<::Google::Cloud::AIPlatform::V1::Model::ExportFormat::ExportableContent>]
  #     Output only. The content of this Model that may be exported.
  class ExportFormat
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # The Model content that can be exported.
    module ExportableContent
      # Should not be used.
      EXPORTABLE_CONTENT_UNSPECIFIED = 0

      # Model artifact and any of its supported files. Will be exported to the
      # location specified by the `artifactDestination` field of the
      # {::Google::Cloud::AIPlatform::V1::ExportModelRequest#output_config ExportModelRequest.output_config} object.
      ARTIFACT = 1

      # The container image that is to be used when deploying this Model. Will
      # be exported to the location specified by the `imageDestination` field
      # of the {::Google::Cloud::AIPlatform::V1::ExportModelRequest#output_config ExportModelRequest.output_config} object.
      IMAGE = 2
    end
  end

  # Map entry type backing the `labels` field (protobuf map<string, string>).
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Identifies a type of Model's prediction resources.
  module DeploymentResourcesType
    # Should not be used.
    DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0

    # Resources that are dedicated to the {::Google::Cloud::AIPlatform::V1::DeployedModel DeployedModel}, and that need a
    # higher degree of manual configuration.
    DEDICATED_RESOURCES = 1

    # Resources that to a large degree are decided by Vertex AI, and require
    # only a modest additional configuration.
    AUTOMATIC_RESOURCES = 2
  end
end
|
|
292
|
+
|
|
293
|
+
# Contains the schemata used in Model's predictions and explanations via
|
|
294
|
+
# {::Google::Cloud::AIPlatform::V1::PredictionService::Client#predict PredictionService.Predict}, {::Google::Cloud::AIPlatform::V1::PredictionService::Client#explain PredictionService.Explain} and
|
|
295
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob BatchPredictionJob}.
|
|
296
|
+
# @!attribute [rw] instance_schema_uri
|
|
297
|
+
# @return [::String]
|
|
298
|
+
# Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
|
|
299
|
+
# of a single instance, which are used in {::Google::Cloud::AIPlatform::V1::PredictRequest#instances PredictRequest.instances},
|
|
300
|
+
# {::Google::Cloud::AIPlatform::V1::ExplainRequest#instances ExplainRequest.instances} and
|
|
301
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#input_config BatchPredictionJob.input_config}.
|
|
302
|
+
# The schema is defined as an OpenAPI 3.0.2 [Schema
|
|
303
|
+
# Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
|
|
304
|
+
# AutoML Models always have this field populated by Vertex AI.
|
|
305
|
+
# Note: The URI given on output will be immutable and probably different,
|
|
306
|
+
# including the URI scheme, than the one given on input. The output URI will
|
|
307
|
+
# point to a location where the user only has a read access.
|
|
308
|
+
# @!attribute [rw] parameters_schema_uri
|
|
309
|
+
# @return [::String]
|
|
310
|
+
# Immutable. Points to a YAML file stored on Google Cloud Storage describing the
|
|
311
|
+
# parameters of prediction and explanation via
|
|
312
|
+
# {::Google::Cloud::AIPlatform::V1::PredictRequest#parameters PredictRequest.parameters}, {::Google::Cloud::AIPlatform::V1::ExplainRequest#parameters ExplainRequest.parameters} and
|
|
313
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#model_parameters BatchPredictionJob.model_parameters}.
|
|
314
|
+
# The schema is defined as an OpenAPI 3.0.2 [Schema
|
|
315
|
+
# Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
|
|
316
|
+
# AutoML Models always have this field populated by Vertex AI, if no
|
|
317
|
+
# parameters are supported, then it is set to an empty string.
|
|
318
|
+
# Note: The URI given on output will be immutable and probably different,
|
|
319
|
+
# including the URI scheme, than the one given on input. The output URI will
|
|
320
|
+
# point to a location where the user only has a read access.
|
|
321
|
+
# @!attribute [rw] prediction_schema_uri
|
|
322
|
+
# @return [::String]
|
|
323
|
+
# Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
|
|
324
|
+
# of a single prediction produced by this Model, which are returned via
|
|
325
|
+
# {::Google::Cloud::AIPlatform::V1::PredictResponse#predictions PredictResponse.predictions}, {::Google::Cloud::AIPlatform::V1::ExplainResponse#explanations ExplainResponse.explanations}, and
|
|
326
|
+
# {::Google::Cloud::AIPlatform::V1::BatchPredictionJob#output_config BatchPredictionJob.output_config}.
|
|
327
|
+
# The schema is defined as an OpenAPI 3.0.2 [Schema
|
|
328
|
+
# Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
|
|
329
|
+
# AutoML Models always have this field populated by Vertex AI.
|
|
330
|
+
# Note: The URI given on output will be immutable and probably different,
|
|
331
|
+
# including the URI scheme, than the one given on input. The output URI will
|
|
332
|
+
# point to a location where the user only has a read access.
|
|
333
|
+
class PredictSchemata
  # Generated, field-less message body: the `instance_schema_uri`,
  # `parameters_schema_uri` and `prediction_schema_uri` fields are declared
  # by the @!attribute directives immediately above this class.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
|
337
|
+
|
|
338
|
+
# Specification of a container for serving predictions. Some fields in this
|
|
339
|
+
# message correspond to fields in the [Kubernetes Container v1 core
|
|
340
|
+
# specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
|
|
341
|
+
# @!attribute [rw] image_uri
|
|
342
|
+
# @return [::String]
|
|
343
|
+
# Required. Immutable. URI of the Docker image to be used as the custom container for serving
|
|
344
|
+
# predictions. This URI must identify an image in Artifact Registry or
|
|
345
|
+
# Container Registry. Learn more about the [container publishing
|
|
346
|
+
# requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing),
|
|
347
|
+
# including permissions requirements for the Vertex AI Service Agent.
|
|
348
|
+
#
|
|
349
|
+
# The container image is ingested upon {::Google::Cloud::AIPlatform::V1::ModelService::Client#upload_model ModelService.UploadModel}, stored
|
|
350
|
+
# internally, and this original path is afterwards not used.
|
|
351
|
+
#
|
|
352
|
+
# To learn about the requirements for the Docker image itself, see
|
|
353
|
+
# [Custom container
|
|
354
|
+
# requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#).
|
|
355
|
+
#
|
|
356
|
+
# You can use the URI to one of Vertex AI's [pre-built container images for
|
|
357
|
+
# prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
|
|
358
|
+
# in this field.
|
|
359
|
+
# @!attribute [rw] command
|
|
360
|
+
# @return [::Array<::String>]
|
|
361
|
+
# Immutable. Specifies the command that runs when the container starts. This overrides
|
|
362
|
+
# the container's
|
|
363
|
+
# [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint).
|
|
364
|
+
# Specify this field as an array of executable and arguments, similar to a
|
|
365
|
+
# Docker `ENTRYPOINT`'s "exec" form, not its "shell" form.
|
|
366
|
+
#
|
|
367
|
+
# If you do not specify this field, then the container's `ENTRYPOINT` runs,
|
|
368
|
+
# in conjunction with the {::Google::Cloud::AIPlatform::V1::ModelContainerSpec#args args} field or the
|
|
369
|
+
# container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd),
|
|
370
|
+
# if either exists. If this field is not specified and the container does not
|
|
371
|
+
# have an `ENTRYPOINT`, then refer to the Docker documentation about [how
|
|
372
|
+
# `CMD` and `ENTRYPOINT`
|
|
373
|
+
# interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
|
|
374
|
+
#
|
|
375
|
+
# If you specify this field, then you can also specify the `args` field to
|
|
376
|
+
# provide additional arguments for this command. However, if you specify this
|
|
377
|
+
# field, then the container's `CMD` is ignored. See the
|
|
378
|
+
# [Kubernetes documentation about how the
|
|
379
|
+
# `command` and `args` fields interact with a container's `ENTRYPOINT` and
|
|
380
|
+
# `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
|
|
381
|
+
#
|
|
382
|
+
# In this field, you can reference [environment variables set by Vertex
|
|
383
|
+
# AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
|
|
384
|
+
# and environment variables set in the {::Google::Cloud::AIPlatform::V1::ModelContainerSpec#env env} field.
|
|
385
|
+
# You cannot reference environment variables set in the Docker image. In
|
|
386
|
+
# order for environment variables to be expanded, reference them by using the
|
|
387
|
+
# following syntax:
|
|
388
|
+
# <code>$(<var>VARIABLE_NAME</var>)</code>
|
|
389
|
+
# Note that this differs from Bash variable expansion, which does not use
|
|
390
|
+
# parentheses. If a variable cannot be resolved, the reference in the input
|
|
391
|
+
# string is used unchanged. To avoid variable expansion, you can escape this
|
|
392
|
+
# syntax with `$$`; for example:
|
|
393
|
+
# <code>$$(<var>VARIABLE_NAME</var>)</code>
|
|
394
|
+
# This field corresponds to the `command` field of the Kubernetes Containers
|
|
395
|
+
# [v1 core
|
|
396
|
+
# API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
|
|
397
|
+
# @!attribute [rw] args
|
|
398
|
+
# @return [::Array<::String>]
|
|
399
|
+
# Immutable. Specifies arguments for the command that runs when the container starts.
|
|
400
|
+
# This overrides the container's
|
|
401
|
+
# [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
|
|
402
|
+
# this field as an array of executable and arguments, similar to a Docker
|
|
403
|
+
# `CMD`'s "default parameters" form.
|
|
404
|
+
#
|
|
405
|
+
# If you don't specify this field but do specify the
|
|
406
|
+
# {::Google::Cloud::AIPlatform::V1::ModelContainerSpec#command command} field, then the command from the
|
|
407
|
+
# `command` field runs without any additional arguments. See the
|
|
408
|
+
# [Kubernetes documentation about how the
|
|
409
|
+
# `command` and `args` fields interact with a container's `ENTRYPOINT` and
|
|
410
|
+
# `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
|
|
411
|
+
#
|
|
412
|
+
# If you don't specify this field and don't specify the `command` field,
|
|
413
|
+
# then the container's
|
|
414
|
+
# [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and
|
|
415
|
+
# `CMD` determine what runs based on their default behavior. See the Docker
|
|
416
|
+
# documentation about [how `CMD` and `ENTRYPOINT`
|
|
417
|
+
# interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
|
|
418
|
+
#
|
|
419
|
+
# In this field, you can reference [environment variables
|
|
420
|
+
# set by Vertex
|
|
421
|
+
# AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
|
|
422
|
+
# and environment variables set in the {::Google::Cloud::AIPlatform::V1::ModelContainerSpec#env env} field.
|
|
423
|
+
# You cannot reference environment variables set in the Docker image. In
|
|
424
|
+
# order for environment variables to be expanded, reference them by using the
|
|
425
|
+
# following syntax:
|
|
426
|
+
# <code>$(<var>VARIABLE_NAME</var>)</code>
|
|
427
|
+
# Note that this differs from Bash variable expansion, which does not use
|
|
428
|
+
# parentheses. If a variable cannot be resolved, the reference in the input
|
|
429
|
+
# string is used unchanged. To avoid variable expansion, you can escape this
|
|
430
|
+
# syntax with `$$`; for example:
|
|
431
|
+
# <code>$$(<var>VARIABLE_NAME</var>)</code>
|
|
432
|
+
# This field corresponds to the `args` field of the Kubernetes Containers
|
|
433
|
+
# [v1 core
|
|
434
|
+
# API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
|
|
435
|
+
# @!attribute [rw] env
|
|
436
|
+
# @return [::Array<::Google::Cloud::AIPlatform::V1::EnvVar>]
|
|
437
|
+
# Immutable. List of environment variables to set in the container. After the container
|
|
438
|
+
# starts running, code running in the container can read these environment
|
|
439
|
+
# variables.
|
|
440
|
+
#
|
|
441
|
+
# Additionally, the {::Google::Cloud::AIPlatform::V1::ModelContainerSpec#command command} and
|
|
442
|
+
# {::Google::Cloud::AIPlatform::V1::ModelContainerSpec#args args} fields can reference these variables. Later
|
|
443
|
+
# entries in this list can also reference earlier entries. For example, the
|
|
444
|
+
# following example sets the variable `VAR_2` to have the value `foo bar`:
|
|
445
|
+
#
|
|
446
|
+
# ```json
|
|
447
|
+
# [
|
|
448
|
+
# {
|
|
449
|
+
# "name": "VAR_1",
|
|
450
|
+
# "value": "foo"
|
|
451
|
+
# },
|
|
452
|
+
# {
|
|
453
|
+
# "name": "VAR_2",
|
|
454
|
+
# "value": "$(VAR_1) bar"
|
|
455
|
+
# }
|
|
456
|
+
# ]
|
|
457
|
+
# ```
|
|
458
|
+
#
|
|
459
|
+
# If you switch the order of the variables in the example, then the expansion
|
|
460
|
+
# does not occur.
|
|
461
|
+
#
|
|
462
|
+
# This field corresponds to the `env` field of the Kubernetes Containers
|
|
463
|
+
# [v1 core
|
|
464
|
+
# API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
|
|
465
|
+
# @!attribute [rw] ports
|
|
466
|
+
# @return [::Array<::Google::Cloud::AIPlatform::V1::Port>]
|
|
467
|
+
# Immutable. List of ports to expose from the container. Vertex AI sends any
|
|
468
|
+
# prediction requests that it receives to the first port on this list. Vertex
|
|
469
|
+
# AI also sends
|
|
470
|
+
# [liveness and health
|
|
471
|
+
# checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness)
|
|
472
|
+
# to this port.
|
|
473
|
+
#
|
|
474
|
+
# If you do not specify this field, it defaults to the following value:
|
|
475
|
+
#
|
|
476
|
+
# ```json
|
|
477
|
+
# [
|
|
478
|
+
# {
|
|
479
|
+
# "containerPort": 8080
|
|
480
|
+
# }
|
|
481
|
+
# ]
|
|
482
|
+
# ```
|
|
483
|
+
#
|
|
484
|
+
# Vertex AI does not use ports other than the first one listed. This field
|
|
485
|
+
# corresponds to the `ports` field of the Kubernetes Containers
|
|
486
|
+
# [v1 core
|
|
487
|
+
# API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
|
|
488
|
+
# @!attribute [rw] predict_route
|
|
489
|
+
# @return [::String]
|
|
490
|
+
# Immutable. HTTP path on the container to send prediction requests to. Vertex AI
|
|
491
|
+
# forwards requests sent using
|
|
492
|
+
# {::Google::Cloud::AIPlatform::V1::PredictionService::Client#predict projects.locations.endpoints.predict} to this
|
|
493
|
+
# path on the container's IP address and port. Vertex AI then returns the
|
|
494
|
+
# container's response in the API response.
|
|
495
|
+
#
|
|
496
|
+
# For example, if you set this field to `/foo`, then when Vertex AI
|
|
497
|
+
# receives a prediction request, it forwards the request body in a POST
|
|
498
|
+
# request to the `/foo` path on the port of your container specified by the
|
|
499
|
+
# first value of this `ModelContainerSpec`'s
|
|
500
|
+
# {::Google::Cloud::AIPlatform::V1::ModelContainerSpec#ports ports} field.
|
|
501
|
+
#
|
|
502
|
+
# If you don't specify this field, it defaults to the following value when
|
|
503
|
+
# you {::Google::Cloud::AIPlatform::V1::EndpointService::Client#deploy_model deploy this Model to an Endpoint}:
|
|
504
|
+
# <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
|
|
505
|
+
# The placeholders in this value are replaced as follows:
|
|
506
|
+
#
|
|
507
|
+
# * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
|
|
508
|
+
# `Endpoint.name` field of the Endpoint where this Model has been
|
|
509
|
+
# deployed. (Vertex AI makes this value available to your container code
|
|
510
|
+
# as the [`AIP_ENDPOINT_ID` environment
|
|
511
|
+
# variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
|
|
512
|
+
#
|
|
513
|
+
# * <var>DEPLOYED_MODEL</var>: {::Google::Cloud::AIPlatform::V1::DeployedModel#id DeployedModel.id} of the `DeployedModel`.
|
|
514
|
+
# (Vertex AI makes this value available to your container code
|
|
515
|
+
# as the [`AIP_DEPLOYED_MODEL_ID` environment
|
|
516
|
+
# variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
|
|
517
|
+
# @!attribute [rw] health_route
|
|
518
|
+
# @return [::String]
|
|
519
|
+
# Immutable. HTTP path on the container to send health checks to. Vertex AI
|
|
520
|
+
# intermittently sends GET requests to this path on the container's IP
|
|
521
|
+
# address and port to check that the container is healthy. Read more about
|
|
522
|
+
# [health
|
|
523
|
+
# checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health).
|
|
524
|
+
#
|
|
525
|
+
# For example, if you set this field to `/bar`, then Vertex AI
|
|
526
|
+
# intermittently sends a GET request to the `/bar` path on the port of your
|
|
527
|
+
# container specified by the first value of this `ModelContainerSpec`'s
|
|
528
|
+
# {::Google::Cloud::AIPlatform::V1::ModelContainerSpec#ports ports} field.
|
|
529
|
+
#
|
|
530
|
+
# If you don't specify this field, it defaults to the following value when
|
|
531
|
+
# you {::Google::Cloud::AIPlatform::V1::EndpointService::Client#deploy_model deploy this Model to an Endpoint}:
|
|
532
|
+
# <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
|
|
533
|
+
# The placeholders in this value are replaced as follows:
|
|
534
|
+
#
|
|
535
|
+
# * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
|
|
536
|
+
# `Endpoint.name` field of the Endpoint where this Model has been
|
|
537
|
+
# deployed. (Vertex AI makes this value available to your container code
|
|
538
|
+
# as the [`AIP_ENDPOINT_ID` environment
|
|
539
|
+
# variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
|
|
540
|
+
#
|
|
541
|
+
# * <var>DEPLOYED_MODEL</var>: {::Google::Cloud::AIPlatform::V1::DeployedModel#id DeployedModel.id} of the `DeployedModel`.
|
|
542
|
+
# (Vertex AI makes this value available to your container code as the
|
|
543
|
+
# [`AIP_DEPLOYED_MODEL_ID` environment
|
|
544
|
+
# variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
|
|
545
|
+
class ModelContainerSpec
  # Generated, field-less message body: the container fields (`image_uri`,
  # `command`, `args`, `env`, `ports`, `predict_route`, `health_route`) are
  # declared by the @!attribute directives immediately above this class.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
|
549
|
+
|
|
550
|
+
# Represents a network port in a container.
|
|
551
|
+
# @!attribute [rw] container_port
|
|
552
|
+
# @return [::Integer]
|
|
553
|
+
# The number of the port to expose on the pod's IP address.
|
|
554
|
+
# Must be a valid port number, between 1 and 65535 inclusive.
|
|
555
|
+
class Port
  # Generated, field-less message body: the `container_port` field is
  # declared by the @!attribute directive immediately above this class.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
|
559
|
+
end
|
|
560
|
+
end
|
|
561
|
+
end
|
|
562
|
+
end
|