google-cloud-ai_platform-v1 0.38.0 → 0.40.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/google/cloud/ai_platform/v1/bindings_override.rb +399 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service/client.rb +39 -18
- data/lib/google/cloud/ai_platform/v1/dataset_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/dataset_service/rest/client.rb +2188 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service/rest/service_stub.rb +1136 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/dataset_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/deployment_resource_pool_service/client.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/deployment_resource_pool_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/deployment_resource_pool_service/rest/client.rb +851 -0
- data/lib/google/cloud/ai_platform/v1/deployment_resource_pool_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/deployment_resource_pool_service/rest/service_stub.rb +365 -0
- data/lib/google/cloud/ai_platform/v1/deployment_resource_pool_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/deployment_resource_pool_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service/client.rb +21 -11
- data/lib/google/cloud/ai_platform/v1/endpoint_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/endpoint_service/rest/client.rb +1215 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service/rest/service_stub.rb +546 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/endpoint_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/feature_online_store_admin_service/client.rb +29 -13
- data/lib/google/cloud/ai_platform/v1/feature_online_store_admin_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/feature_online_store_admin_service/rest/client.rb +1732 -0
- data/lib/google/cloud/ai_platform/v1/feature_online_store_admin_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/feature_online_store_admin_service/rest/service_stub.rb +841 -0
- data/lib/google/cloud/ai_platform/v1/feature_online_store_admin_service/rest.rb +55 -0
- data/lib/google/cloud/ai_platform/v1/feature_online_store_admin_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/feature_online_store_service/client.rb +7 -2
- data/lib/google/cloud/ai_platform/v1/feature_online_store_service/rest/client.rb +534 -0
- data/lib/google/cloud/ai_platform/v1/feature_online_store_service/rest/service_stub.rb +189 -0
- data/lib/google/cloud/ai_platform/v1/feature_online_store_service/rest.rb +53 -0
- data/lib/google/cloud/ai_platform/v1/feature_online_store_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/feature_registry_service/client.rb +24 -11
- data/lib/google/cloud/ai_platform/v1/feature_registry_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/feature_registry_service/rest/client.rb +1450 -0
- data/lib/google/cloud/ai_platform/v1/feature_registry_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/feature_registry_service/rest/service_stub.rb +663 -0
- data/lib/google/cloud/ai_platform/v1/feature_registry_service/rest.rb +55 -0
- data/lib/google/cloud/ai_platform/v1/feature_registry_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service/client.rb +9 -3
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service/rest/client.rb +645 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service/rest/service_stub.rb +244 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service/rest.rb +53 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service/client.rb +45 -21
- data/lib/google/cloud/ai_platform/v1/featurestore_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/featurestore_service/rest/client.rb +2765 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service/rest/service_stub.rb +1319 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/featurestore_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/gen_ai_tuning_service/client.rb +765 -0
- data/lib/google/cloud/ai_platform/v1/gen_ai_tuning_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/gen_ai_tuning_service/paths.rb +154 -0
- data/lib/google/cloud/ai_platform/v1/gen_ai_tuning_service/rest/client.rb +720 -0
- data/lib/google/cloud/ai_platform/v1/gen_ai_tuning_service/rest/service_stub.rb +307 -0
- data/lib/google/cloud/ai_platform/v1/gen_ai_tuning_service/rest.rb +53 -0
- data/lib/google/cloud/ai_platform/v1/gen_ai_tuning_service.rb +55 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/client.rb +19 -8
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/rest/client.rb +1151 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/rest/service_stub.rb +546 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/index_service/client.rb +17 -7
- data/lib/google/cloud/ai_platform/v1/index_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/index_service/rest/client.rb +1034 -0
- data/lib/google/cloud/ai_platform/v1/index_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/index_service/rest/service_stub.rb +486 -0
- data/lib/google/cloud/ai_platform/v1/index_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/index_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/job_service/client.rb +73 -35
- data/lib/google/cloud/ai_platform/v1/job_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/job_service/paths.rb +19 -0
- data/lib/google/cloud/ai_platform/v1/job_service/rest/client.rb +3841 -0
- data/lib/google/cloud/ai_platform/v1/job_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/job_service/rest/service_stub.rb +2149 -0
- data/lib/google/cloud/ai_platform/v1/job_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/job_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/client.rb +7 -2
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/rest/client.rb +533 -0
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/rest/service_stub.rb +205 -0
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/rest.rb +53 -0
- data/lib/google/cloud/ai_platform/v1/llm_utility_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/match_service/client.rb +7 -2
- data/lib/google/cloud/ai_platform/v1/match_service/rest/client.rb +541 -0
- data/lib/google/cloud/ai_platform/v1/match_service/rest/service_stub.rb +189 -0
- data/lib/google/cloud/ai_platform/v1/match_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/match_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service/client.rb +67 -32
- data/lib/google/cloud/ai_platform/v1/metadata_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/metadata_service/rest/client.rb +3634 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service/rest/service_stub.rb +1972 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/metadata_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/migration_service/client.rb +7 -2
- data/lib/google/cloud/ai_platform/v1/migration_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/migration_service/rest/client.rb +574 -0
- data/lib/google/cloud/ai_platform/v1/migration_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/migration_service/rest/service_stub.rb +189 -0
- data/lib/google/cloud/ai_platform/v1/migration_service/rest.rb +55 -0
- data/lib/google/cloud/ai_platform/v1/migration_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/model_garden_service/client.rb +5 -1
- data/lib/google/cloud/ai_platform/v1/model_garden_service/rest/client.rb +438 -0
- data/lib/google/cloud/ai_platform/v1/model_garden_service/rest/service_stub.rb +128 -0
- data/lib/google/cloud/ai_platform/v1/model_garden_service/rest.rb +53 -0
- data/lib/google/cloud/ai_platform/v1/model_garden_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/model_service/client.rb +39 -18
- data/lib/google/cloud/ai_platform/v1/model_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/model_service/rest/client.rb +2213 -0
- data/lib/google/cloud/ai_platform/v1/model_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/model_service/rest/service_stub.rb +1140 -0
- data/lib/google/cloud/ai_platform/v1/model_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/model_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/notebook_service/client.rb +1497 -0
- data/lib/google/cloud/ai_platform/v1/notebook_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/notebook_service/operations.rb +817 -0
- data/lib/google/cloud/ai_platform/v1/notebook_service/paths.rb +124 -0
- data/lib/google/cloud/ai_platform/v1/notebook_service/rest/client.rb +1410 -0
- data/lib/google/cloud/ai_platform/v1/notebook_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/notebook_service/rest/service_stub.rb +663 -0
- data/lib/google/cloud/ai_platform/v1/notebook_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/notebook_service.rb +56 -0
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service/client.rb +1001 -0
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service/operations.rb +817 -0
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service/paths.rb +86 -0
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service/rest/client.rb +942 -0
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service/rest/service_stub.rb +426 -0
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service.rb +56 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service/client.rb +27 -12
- data/lib/google/cloud/ai_platform/v1/pipeline_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/pipeline_service/rest/client.rb +1600 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service/rest/service_stub.rb +783 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service/rest.rb +56 -0
- data/lib/google/cloud/ai_platform/v1/pipeline_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/prediction_service/client.rb +39 -15
- data/lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb +1297 -0
- data/lib/google/cloud/ai_platform/v1/prediction_service/rest/service_stub.rb +642 -0
- data/lib/google/cloud/ai_platform/v1/prediction_service/rest.rb +53 -0
- data/lib/google/cloud/ai_platform/v1/prediction_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/rest.rb +63 -0
- data/lib/google/cloud/ai_platform/v1/schedule_service/client.rb +17 -7
- data/lib/google/cloud/ai_platform/v1/schedule_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/schedule_service/rest/client.rb +1076 -0
- data/lib/google/cloud/ai_platform/v1/schedule_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/schedule_service/rest/service_stub.rb +486 -0
- data/lib/google/cloud/ai_platform/v1/schedule_service/rest.rb +55 -0
- data/lib/google/cloud/ai_platform/v1/schedule_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/client.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/rest/client.rb +847 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/rest/service_stub.rb +366 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/rest.rb +59 -0
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/client.rb +63 -30
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/rest/client.rb +3274 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/rest/service_stub.rb +1847 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/rest.rb +54 -0
- data/lib/google/cloud/ai_platform/v1/tensorboard_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1/version.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/vizier_service/client.rb +33 -15
- data/lib/google/cloud/ai_platform/v1/vizier_service/operations.rb +13 -5
- data/lib/google/cloud/ai_platform/v1/vizier_service/rest/client.rb +1788 -0
- data/lib/google/cloud/ai_platform/v1/vizier_service/rest/operations.rb +3901 -0
- data/lib/google/cloud/ai_platform/v1/vizier_service/rest/service_stub.rb +963 -0
- data/lib/google/cloud/ai_platform/v1/vizier_service/rest.rb +58 -0
- data/lib/google/cloud/ai_platform/v1/vizier_service.rb +6 -0
- data/lib/google/cloud/ai_platform/v1.rb +9 -1
- data/lib/google/cloud/aiplatform/v1/accelerator_type_pb.rb +1 -1
- data/lib/google/cloud/aiplatform/v1/content_pb.rb +3 -1
- data/lib/google/cloud/aiplatform/v1/custom_job_pb.rb +1 -1
- data/lib/google/cloud/aiplatform/v1/endpoint_pb.rb +3 -1
- data/lib/google/cloud/aiplatform/v1/feature_online_store_pb.rb +3 -1
- data/lib/google/cloud/aiplatform/v1/feature_online_store_service_pb.rb +1 -1
- data/lib/google/cloud/aiplatform/v1/feature_view_pb.rb +5 -1
- data/lib/google/cloud/aiplatform/v1/genai_tuning_service_pb.rb +54 -0
- data/lib/google/cloud/aiplatform/v1/genai_tuning_service_services_pb.rb +63 -0
- data/lib/google/cloud/aiplatform/v1/index_service_pb.rb +1 -1
- data/lib/google/cloud/aiplatform/v1/machine_resources_pb.rb +2 -1
- data/lib/google/cloud/aiplatform/v1/network_spec_pb.rb +44 -0
- data/lib/google/cloud/aiplatform/v1/notebook_euc_config_pb.rb +44 -0
- data/lib/google/cloud/aiplatform/v1/notebook_idle_shutdown_config_pb.rb +46 -0
- data/lib/google/cloud/aiplatform/v1/notebook_runtime_pb.rb +61 -0
- data/lib/google/cloud/aiplatform/v1/notebook_runtime_template_ref_pb.rb +45 -0
- data/lib/google/cloud/aiplatform/v1/notebook_service_pb.rb +72 -0
- data/lib/google/cloud/aiplatform/v1/notebook_service_services_pb.rb +64 -0
- data/lib/google/cloud/aiplatform/v1/openapi_pb.rb +1 -1
- data/lib/google/cloud/aiplatform/v1/persistent_resource_pb.rb +60 -0
- data/lib/google/cloud/aiplatform/v1/persistent_resource_service_pb.rb +64 -0
- data/lib/google/cloud/aiplatform/v1/persistent_resource_service_services_pb.rb +55 -0
- data/lib/google/cloud/aiplatform/v1/prediction_service_pb.rb +1 -1
- data/lib/google/cloud/aiplatform/v1/publisher_model_pb.rb +1 -1
- data/lib/google/cloud/aiplatform/v1/tuning_job_pb.rb +60 -0
- data/proto_docs/google/cloud/aiplatform/v1/accelerator_type.rb +3 -0
- data/proto_docs/google/cloud/aiplatform/v1/content.rb +52 -3
- data/proto_docs/google/cloud/aiplatform/v1/custom_job.rb +9 -0
- data/proto_docs/google/cloud/aiplatform/v1/endpoint.rb +14 -0
- data/proto_docs/google/cloud/aiplatform/v1/endpoint_service.rb +2 -3
- data/proto_docs/google/cloud/aiplatform/v1/feature_online_store.rb +29 -0
- data/proto_docs/google/cloud/aiplatform/v1/feature_online_store_service.rb +5 -0
- data/proto_docs/google/cloud/aiplatform/v1/feature_registry_service.rb +1 -1
- data/proto_docs/google/cloud/aiplatform/v1/feature_view.rb +86 -0
- data/proto_docs/google/cloud/aiplatform/v1/genai_tuning_service.rb +100 -0
- data/proto_docs/google/cloud/aiplatform/v1/index_service.rb +3 -0
- data/proto_docs/google/cloud/aiplatform/v1/machine_resources.rb +17 -0
- data/proto_docs/google/cloud/aiplatform/v1/network_spec.rb +44 -0
- data/proto_docs/google/cloud/aiplatform/v1/notebook_euc_config.rb +46 -0
- data/proto_docs/google/cloud/aiplatform/v1/notebook_idle_shutdown_config.rb +41 -0
- data/proto_docs/google/cloud/aiplatform/v1/notebook_runtime.rb +263 -0
- data/proto_docs/google/cloud/aiplatform/v1/notebook_runtime_template_ref.rb +35 -0
- data/proto_docs/google/cloud/aiplatform/v1/notebook_service.rb +371 -0
- data/proto_docs/google/cloud/aiplatform/v1/openapi.rb +43 -4
- data/proto_docs/google/cloud/aiplatform/v1/persistent_resource.rb +248 -0
- data/proto_docs/google/cloud/aiplatform/v1/persistent_resource_service.rb +170 -0
- data/proto_docs/google/cloud/aiplatform/v1/prediction_service.rb +12 -0
- data/proto_docs/google/cloud/aiplatform/v1/publisher_model.rb +4 -0
- data/proto_docs/google/cloud/aiplatform/v1/tool.rb +7 -5
- data/proto_docs/google/cloud/aiplatform/v1/tuning_job.rb +262 -0
- metadata +138 -2
@@ -0,0 +1,1297 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
# Copyright 2024 Google LLC
|
4
|
+
#
|
5
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6
|
+
# you may not use this file except in compliance with the License.
|
7
|
+
# You may obtain a copy of the License at
|
8
|
+
#
|
9
|
+
# https://www.apache.org/licenses/LICENSE-2.0
|
10
|
+
#
|
11
|
+
# Unless required by applicable law or agreed to in writing, software
|
12
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14
|
+
# See the License for the specific language governing permissions and
|
15
|
+
# limitations under the License.
|
16
|
+
|
17
|
+
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
|
18
|
+
|
19
|
+
require "google/cloud/errors"
|
20
|
+
require "google/cloud/aiplatform/v1/prediction_service_pb"
|
21
|
+
require "google/cloud/ai_platform/v1/prediction_service/rest/service_stub"
|
22
|
+
require "google/cloud/location/rest"
|
23
|
+
require "google/iam/v1/rest"
|
24
|
+
|
25
|
+
module Google
|
26
|
+
module Cloud
|
27
|
+
module AIPlatform
|
28
|
+
module V1
|
29
|
+
module PredictionService
|
30
|
+
module Rest
|
31
|
+
##
|
32
|
+
# REST client for the PredictionService service.
|
33
|
+
#
|
34
|
+
# A service for online predictions and explanations.
|
35
|
+
#
|
36
|
+
class Client
|
37
|
+
# @private
|
38
|
+
API_VERSION = ""
|
39
|
+
|
40
|
+
# @private
|
41
|
+
DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.$UNIVERSE_DOMAIN$"
|
42
|
+
|
43
|
+
include Paths
|
44
|
+
|
45
|
+
# @private
|
46
|
+
attr_reader :prediction_service_stub
|
47
|
+
|
48
|
+
##
|
49
|
+
# Configure the PredictionService Client class.
|
50
|
+
#
|
51
|
+
# See {::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client::Configuration}
|
52
|
+
# for a description of the configuration fields.
|
53
|
+
#
|
54
|
+
# @example
|
55
|
+
#
|
56
|
+
# # Modify the configuration for all PredictionService clients
|
57
|
+
# ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.configure do |config|
|
58
|
+
# config.timeout = 10.0
|
59
|
+
# end
|
60
|
+
#
|
61
|
+
# @yield [config] Configure the Client client.
|
62
|
+
# @yieldparam config [Client::Configuration]
|
63
|
+
#
|
64
|
+
# @return [Client::Configuration]
|
65
|
+
#
|
66
|
+
def self.configure
|
67
|
+
@configure ||= begin
|
68
|
+
namespace = ["Google", "Cloud", "AIPlatform", "V1"]
|
69
|
+
parent_config = while namespace.any?
|
70
|
+
parent_name = namespace.join "::"
|
71
|
+
parent_const = const_get parent_name
|
72
|
+
break parent_const.configure if parent_const.respond_to? :configure
|
73
|
+
namespace.pop
|
74
|
+
end
|
75
|
+
default_config = Client::Configuration.new parent_config
|
76
|
+
|
77
|
+
default_config
|
78
|
+
end
|
79
|
+
yield @configure if block_given?
|
80
|
+
@configure
|
81
|
+
end
|
82
|
+
|
83
|
+
##
|
84
|
+
# Configure the PredictionService Client instance.
|
85
|
+
#
|
86
|
+
# The configuration is set to the derived mode, meaning that values can be changed,
|
87
|
+
# but structural changes (adding new fields, etc.) are not allowed. Structural changes
|
88
|
+
# should be made on {Client.configure}.
|
89
|
+
#
|
90
|
+
# See {::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client::Configuration}
|
91
|
+
# for a description of the configuration fields.
|
92
|
+
#
|
93
|
+
# @yield [config] Configure the Client client.
|
94
|
+
# @yieldparam config [Client::Configuration]
|
95
|
+
#
|
96
|
+
# @return [Client::Configuration]
|
97
|
+
#
|
98
|
+
def configure
|
99
|
+
yield @config if block_given?
|
100
|
+
@config
|
101
|
+
end
|
102
|
+
|
103
|
+
##
|
104
|
+
# The effective universe domain
|
105
|
+
#
|
106
|
+
# @return [String]
|
107
|
+
#
|
108
|
+
def universe_domain
|
109
|
+
@prediction_service_stub.universe_domain
|
110
|
+
end
|
111
|
+
|
112
|
+
##
|
113
|
+
# Create a new PredictionService REST client object.
|
114
|
+
#
|
115
|
+
# @example
|
116
|
+
#
|
117
|
+
# # Create a client using the default configuration
|
118
|
+
# client = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
119
|
+
#
|
120
|
+
# # Create a client using a custom configuration
|
121
|
+
# client = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new do |config|
|
122
|
+
# config.timeout = 10.0
|
123
|
+
# end
|
124
|
+
#
|
125
|
+
# @yield [config] Configure the PredictionService client.
|
126
|
+
# @yieldparam config [Client::Configuration]
|
127
|
+
#
|
128
|
+
def initialize
|
129
|
+
# Create the configuration object
|
130
|
+
@config = Configuration.new Client.configure
|
131
|
+
|
132
|
+
# Yield the configuration if needed
|
133
|
+
yield @config if block_given?
|
134
|
+
|
135
|
+
# Create credentials
|
136
|
+
credentials = @config.credentials
|
137
|
+
# Use self-signed JWT if the endpoint is unchanged from default,
|
138
|
+
# but only if the default endpoint does not have a region prefix.
|
139
|
+
enable_self_signed_jwt = @config.endpoint.nil? ||
|
140
|
+
(@config.endpoint == Configuration::DEFAULT_ENDPOINT &&
|
141
|
+
!@config.endpoint.split(".").first.include?("-"))
|
142
|
+
credentials ||= Credentials.default scope: @config.scope,
|
143
|
+
enable_self_signed_jwt: enable_self_signed_jwt
|
144
|
+
if credentials.is_a?(::String) || credentials.is_a?(::Hash)
|
145
|
+
credentials = Credentials.new credentials, scope: @config.scope
|
146
|
+
end
|
147
|
+
|
148
|
+
@quota_project_id = @config.quota_project
|
149
|
+
@quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id
|
150
|
+
|
151
|
+
@prediction_service_stub = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::ServiceStub.new(
|
152
|
+
endpoint: @config.endpoint,
|
153
|
+
endpoint_template: DEFAULT_ENDPOINT_TEMPLATE,
|
154
|
+
universe_domain: @config.universe_domain,
|
155
|
+
credentials: credentials
|
156
|
+
)
|
157
|
+
|
158
|
+
@location_client = Google::Cloud::Location::Locations::Rest::Client.new do |config|
|
159
|
+
config.credentials = credentials
|
160
|
+
config.quota_project = @quota_project_id
|
161
|
+
config.endpoint = @prediction_service_stub.endpoint
|
162
|
+
config.universe_domain = @prediction_service_stub.universe_domain
|
163
|
+
config.bindings_override = @config.bindings_override
|
164
|
+
end
|
165
|
+
|
166
|
+
@iam_policy_client = Google::Iam::V1::IAMPolicy::Rest::Client.new do |config|
|
167
|
+
config.credentials = credentials
|
168
|
+
config.quota_project = @quota_project_id
|
169
|
+
config.endpoint = @prediction_service_stub.endpoint
|
170
|
+
config.universe_domain = @prediction_service_stub.universe_domain
|
171
|
+
config.bindings_override = @config.bindings_override
|
172
|
+
end
|
173
|
+
end
|
174
|
+
|
175
|
+
##
|
176
|
+
# Get the associated client for mix-in of the Locations.
|
177
|
+
#
|
178
|
+
# @return [Google::Cloud::Location::Locations::Rest::Client]
|
179
|
+
#
|
180
|
+
attr_reader :location_client
|
181
|
+
|
182
|
+
##
|
183
|
+
# Get the associated client for mix-in of the IAMPolicy.
|
184
|
+
#
|
185
|
+
# @return [Google::Iam::V1::IAMPolicy::Rest::Client]
|
186
|
+
#
|
187
|
+
attr_reader :iam_policy_client
|
188
|
+
|
189
|
+
# Service calls
|
190
|
+
|
191
|
+
##
|
192
|
+
# Perform an online prediction.
|
193
|
+
#
|
194
|
+
# @overload predict(request, options = nil)
|
195
|
+
# Pass arguments to `predict` via a request object, either of type
|
196
|
+
# {::Google::Cloud::AIPlatform::V1::PredictRequest} or an equivalent Hash.
|
197
|
+
#
|
198
|
+
# @param request [::Google::Cloud::AIPlatform::V1::PredictRequest, ::Hash]
|
199
|
+
# A request object representing the call parameters. Required. To specify no
|
200
|
+
# parameters, or to keep all the default parameter values, pass an empty Hash.
|
201
|
+
# @param options [::Gapic::CallOptions, ::Hash]
|
202
|
+
# Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
|
203
|
+
#
|
204
|
+
# @overload predict(endpoint: nil, instances: nil, parameters: nil)
|
205
|
+
# Pass arguments to `predict` via keyword arguments. Note that at
|
206
|
+
# least one keyword argument is required. To specify no parameters, or to keep all
|
207
|
+
# the default parameter values, pass an empty Hash as a request object (see above).
|
208
|
+
#
|
209
|
+
# @param endpoint [::String]
|
210
|
+
# Required. The name of the Endpoint requested to serve the prediction.
|
211
|
+
# Format:
|
212
|
+
# `projects/{project}/locations/{location}/endpoints/{endpoint}`
|
213
|
+
# @param instances [::Array<::Google::Protobuf::Value, ::Hash>]
|
214
|
+
# Required. The instances that are the input to the prediction call.
|
215
|
+
# A DeployedModel may have an upper limit on the number of instances it
|
216
|
+
# supports per request, and when it is exceeded the prediction call errors
|
217
|
+
# in case of AutoML Models, or, in case of customer created Models, the
|
218
|
+
# behaviour is as documented by that Model.
|
219
|
+
# The schema of any single instance may be specified via Endpoint's
|
220
|
+
# DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
|
221
|
+
# [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
|
222
|
+
# {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri instance_schema_uri}.
|
223
|
+
# @param parameters [::Google::Protobuf::Value, ::Hash]
|
224
|
+
# The parameters that govern the prediction. The schema of the parameters may
|
225
|
+
# be specified via Endpoint's DeployedModels' [Model's
|
226
|
+
# ][google.cloud.aiplatform.v1.DeployedModel.model]
|
227
|
+
# [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
|
228
|
+
# {::Google::Cloud::AIPlatform::V1::PredictSchemata#parameters_schema_uri parameters_schema_uri}.
|
229
|
+
# @yield [result, operation] Access the result along with the TransportOperation object
|
230
|
+
# @yieldparam result [::Google::Cloud::AIPlatform::V1::PredictResponse]
|
231
|
+
# @yieldparam operation [::Gapic::Rest::TransportOperation]
|
232
|
+
#
|
233
|
+
# @return [::Google::Cloud::AIPlatform::V1::PredictResponse]
|
234
|
+
#
|
235
|
+
# @raise [::Google::Cloud::Error] if the REST call is aborted.
|
236
|
+
#
|
237
|
+
# @example Basic example
|
238
|
+
# require "google/cloud/ai_platform/v1"
|
239
|
+
#
|
240
|
+
# # Create a client object. The client can be reused for multiple calls.
|
241
|
+
# client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
242
|
+
#
|
243
|
+
# # Create a request. To set request fields, pass in keyword arguments.
|
244
|
+
# request = Google::Cloud::AIPlatform::V1::PredictRequest.new
|
245
|
+
#
|
246
|
+
# # Call the predict method.
|
247
|
+
# result = client.predict request
|
248
|
+
#
|
249
|
+
# # The returned object is of type Google::Cloud::AIPlatform::V1::PredictResponse.
|
250
|
+
# p result
|
251
|
+
#
|
252
|
+
def predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::PredictRequest

  # Normalize a Hash (or nil) into a CallOptions instance.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to?(:to_h)

  # Start from the RPC-level metadata configured for this call.
  headers = @config.rpcs.predict.metadata.to_h

  # Attach the standard x-goog-* headers expected by Google APIs.
  headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]
  )
  headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
  headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?

  # Apply RPC-specific defaults first, then client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.predict.timeout,
                         metadata: headers,
                         retry_policy: @config.rpcs.predict.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.predict request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
|
287
|
+
|
288
|
+
##
|
289
|
+
# Perform an online prediction with an arbitrary HTTP payload.
|
290
|
+
#
|
291
|
+
# The response includes the following HTTP headers:
|
292
|
+
#
|
293
|
+
# * `X-Vertex-AI-Endpoint-Id`: ID of the
|
294
|
+
# {::Google::Cloud::AIPlatform::V1::Endpoint Endpoint} that served this
|
295
|
+
# prediction.
|
296
|
+
#
|
297
|
+
# * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
|
298
|
+
# {::Google::Cloud::AIPlatform::V1::DeployedModel DeployedModel} that served this
|
299
|
+
# prediction.
|
300
|
+
#
|
301
|
+
# @overload raw_predict(request, options = nil)
|
302
|
+
# Pass arguments to `raw_predict` via a request object, either of type
|
303
|
+
# {::Google::Cloud::AIPlatform::V1::RawPredictRequest} or an equivalent Hash.
|
304
|
+
#
|
305
|
+
# @param request [::Google::Cloud::AIPlatform::V1::RawPredictRequest, ::Hash]
|
306
|
+
# A request object representing the call parameters. Required. To specify no
|
307
|
+
# parameters, or to keep all the default parameter values, pass an empty Hash.
|
308
|
+
# @param options [::Gapic::CallOptions, ::Hash]
|
309
|
+
# Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
|
310
|
+
#
|
311
|
+
# @overload raw_predict(endpoint: nil, http_body: nil)
|
312
|
+
# Pass arguments to `raw_predict` via keyword arguments. Note that at
|
313
|
+
# least one keyword argument is required. To specify no parameters, or to keep all
|
314
|
+
# the default parameter values, pass an empty Hash as a request object (see above).
|
315
|
+
#
|
316
|
+
# @param endpoint [::String]
|
317
|
+
# Required. The name of the Endpoint requested to serve the prediction.
|
318
|
+
# Format:
|
319
|
+
# `projects/{project}/locations/{location}/endpoints/{endpoint}`
|
320
|
+
# @param http_body [::Google::Api::HttpBody, ::Hash]
|
321
|
+
# The prediction input. Supports HTTP headers and arbitrary data payload.
|
322
|
+
#
|
323
|
+
# A {::Google::Cloud::AIPlatform::V1::DeployedModel DeployedModel} may have an
|
324
|
+
# upper limit on the number of instances it supports per request. When this
|
325
|
+
# limit it is exceeded for an AutoML model, the
|
326
|
+
# {::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client#raw_predict RawPredict}
|
327
|
+
# method returns an error. When this limit is exceeded for a custom-trained
|
328
|
+
# model, the behavior varies depending on the model.
|
329
|
+
#
|
330
|
+
# You can specify the schema for each instance in the
|
331
|
+
# {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri predict_schemata.instance_schema_uri}
|
332
|
+
# field when you create a {::Google::Cloud::AIPlatform::V1::Model Model}. This
|
333
|
+
# schema applies when you deploy the `Model` as a `DeployedModel` to an
|
334
|
+
# {::Google::Cloud::AIPlatform::V1::Endpoint Endpoint} and use the `RawPredict`
|
335
|
+
# method.
|
336
|
+
# @yield [result, operation] Access the result along with the TransportOperation object
|
337
|
+
# @yieldparam result [::Google::Api::HttpBody]
|
338
|
+
# @yieldparam operation [::Gapic::Rest::TransportOperation]
|
339
|
+
#
|
340
|
+
# @return [::Google::Api::HttpBody]
|
341
|
+
#
|
342
|
+
# @raise [::Google::Cloud::Error] if the REST call is aborted.
|
343
|
+
#
|
344
|
+
# @example Basic example
|
345
|
+
# require "google/cloud/ai_platform/v1"
|
346
|
+
#
|
347
|
+
# # Create a client object. The client can be reused for multiple calls.
|
348
|
+
# client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
349
|
+
#
|
350
|
+
# # Create a request. To set request fields, pass in keyword arguments.
|
351
|
+
# request = Google::Cloud::AIPlatform::V1::RawPredictRequest.new
|
352
|
+
#
|
353
|
+
# # Call the raw_predict method.
|
354
|
+
# result = client.raw_predict request
|
355
|
+
#
|
356
|
+
# # The returned object is of type Google::Api::HttpBody.
|
357
|
+
# p result
|
358
|
+
#
|
359
|
+
def raw_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::RawPredictRequest

  # Normalize a Hash (or nil) into a CallOptions instance.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to?(:to_h)

  # Start from the RPC-level metadata configured for this call.
  headers = @config.rpcs.raw_predict.metadata.to_h

  # Attach the standard x-goog-* headers expected by Google APIs.
  headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]
  )
  headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
  headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?

  # Apply RPC-specific defaults first, then client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.raw_predict.timeout,
                         metadata: headers,
                         retry_policy: @config.rpcs.raw_predict.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.raw_predict request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
|
394
|
+
|
395
|
+
##
|
396
|
+
# Perform a streaming online prediction with an arbitrary HTTP payload.
|
397
|
+
#
|
398
|
+
# @overload stream_raw_predict(request, options = nil)
|
399
|
+
# Pass arguments to `stream_raw_predict` via a request object, either of type
|
400
|
+
# {::Google::Cloud::AIPlatform::V1::StreamRawPredictRequest} or an equivalent Hash.
|
401
|
+
#
|
402
|
+
# @param request [::Google::Cloud::AIPlatform::V1::StreamRawPredictRequest, ::Hash]
|
403
|
+
# A request object representing the call parameters. Required. To specify no
|
404
|
+
# parameters, or to keep all the default parameter values, pass an empty Hash.
|
405
|
+
# @param options [::Gapic::CallOptions, ::Hash]
|
406
|
+
# Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
|
407
|
+
#
|
408
|
+
# @overload stream_raw_predict(endpoint: nil, http_body: nil)
|
409
|
+
# Pass arguments to `stream_raw_predict` via keyword arguments. Note that at
|
410
|
+
# least one keyword argument is required. To specify no parameters, or to keep all
|
411
|
+
# the default parameter values, pass an empty Hash as a request object (see above).
|
412
|
+
#
|
413
|
+
# @param endpoint [::String]
|
414
|
+
# Required. The name of the Endpoint requested to serve the prediction.
|
415
|
+
# Format:
|
416
|
+
# `projects/{project}/locations/{location}/endpoints/{endpoint}`
|
417
|
+
# @param http_body [::Google::Api::HttpBody, ::Hash]
|
418
|
+
# The prediction input. Supports HTTP headers and arbitrary data payload.
|
419
|
+
# @return [::Enumerable<::Google::Api::HttpBody>]
|
420
|
+
#
|
421
|
+
# @raise [::Google::Cloud::Error] if the REST call is aborted.
|
422
|
+
#
|
423
|
+
# @example Basic example
|
424
|
+
# require "google/cloud/ai_platform/v1"
|
425
|
+
#
|
426
|
+
# # Create a client object. The client can be reused for multiple calls.
|
427
|
+
# client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
428
|
+
#
|
429
|
+
# # Create a request. To set request fields, pass in keyword arguments.
|
430
|
+
# request = Google::Cloud::AIPlatform::V1::StreamRawPredictRequest.new
|
431
|
+
#
|
432
|
+
# # Call the stream_raw_predict method to start streaming.
|
433
|
+
# output = client.stream_raw_predict request
|
434
|
+
#
|
435
|
+
# # The returned object is a streamed enumerable yielding elements of type
|
436
|
+
# # ::Google::Api::HttpBody
|
437
|
+
# output.each do |current_response|
|
438
|
+
# p current_response
|
439
|
+
# end
|
440
|
+
#
|
441
|
+
def stream_raw_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::StreamRawPredictRequest

  # Normalize a Hash (or nil) into a CallOptions instance.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to?(:to_h)

  # Start from the RPC-level metadata configured for this call.
  headers = @config.rpcs.stream_raw_predict.metadata.to_h

  # Attach the standard x-goog-* headers expected by Google APIs.
  headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]
  )
  headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
  headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?

  # Apply RPC-specific defaults first, then client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.stream_raw_predict.timeout,
                         metadata: headers,
                         retry_policy: @config.rpcs.stream_raw_predict.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  # Bridge the stub's chunk callback into a lazy, consumer-driven enumerator:
  # each chunk is handed over only after the consumer requests it via in_q.
  chunk_enum = ::Gapic::Rest::ThreadedEnumerator.new do |in_q, out_q|
    @prediction_service_stub.stream_raw_predict request, options do |chunk|
      in_q.deq
      out_q.enq chunk
    end
  end
  ::Gapic::Rest::ServerStream.new ::Google::Api::HttpBody, chunk_enum
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
|
481
|
+
|
482
|
+
##
|
483
|
+
# Perform an unary online prediction request to a gRPC model server for
|
484
|
+
# Vertex first-party products and frameworks.
|
485
|
+
#
|
486
|
+
# @overload direct_predict(request, options = nil)
|
487
|
+
# Pass arguments to `direct_predict` via a request object, either of type
|
488
|
+
# {::Google::Cloud::AIPlatform::V1::DirectPredictRequest} or an equivalent Hash.
|
489
|
+
#
|
490
|
+
# @param request [::Google::Cloud::AIPlatform::V1::DirectPredictRequest, ::Hash]
|
491
|
+
# A request object representing the call parameters. Required. To specify no
|
492
|
+
# parameters, or to keep all the default parameter values, pass an empty Hash.
|
493
|
+
# @param options [::Gapic::CallOptions, ::Hash]
|
494
|
+
# Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
|
495
|
+
#
|
496
|
+
# @overload direct_predict(endpoint: nil, inputs: nil, parameters: nil)
|
497
|
+
# Pass arguments to `direct_predict` via keyword arguments. Note that at
|
498
|
+
# least one keyword argument is required. To specify no parameters, or to keep all
|
499
|
+
# the default parameter values, pass an empty Hash as a request object (see above).
|
500
|
+
#
|
501
|
+
# @param endpoint [::String]
|
502
|
+
# Required. The name of the Endpoint requested to serve the prediction.
|
503
|
+
# Format:
|
504
|
+
# `projects/{project}/locations/{location}/endpoints/{endpoint}`
|
505
|
+
# @param inputs [::Array<::Google::Cloud::AIPlatform::V1::Tensor, ::Hash>]
|
506
|
+
# The prediction input.
|
507
|
+
# @param parameters [::Google::Cloud::AIPlatform::V1::Tensor, ::Hash]
|
508
|
+
# The parameters that govern the prediction.
|
509
|
+
# @yield [result, operation] Access the result along with the TransportOperation object
|
510
|
+
# @yieldparam result [::Google::Cloud::AIPlatform::V1::DirectPredictResponse]
|
511
|
+
# @yieldparam operation [::Gapic::Rest::TransportOperation]
|
512
|
+
#
|
513
|
+
# @return [::Google::Cloud::AIPlatform::V1::DirectPredictResponse]
|
514
|
+
#
|
515
|
+
# @raise [::Google::Cloud::Error] if the REST call is aborted.
|
516
|
+
#
|
517
|
+
# @example Basic example
|
518
|
+
# require "google/cloud/ai_platform/v1"
|
519
|
+
#
|
520
|
+
# # Create a client object. The client can be reused for multiple calls.
|
521
|
+
# client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
522
|
+
#
|
523
|
+
# # Create a request. To set request fields, pass in keyword arguments.
|
524
|
+
# request = Google::Cloud::AIPlatform::V1::DirectPredictRequest.new
|
525
|
+
#
|
526
|
+
# # Call the direct_predict method.
|
527
|
+
# result = client.direct_predict request
|
528
|
+
#
|
529
|
+
# # The returned object is of type Google::Cloud::AIPlatform::V1::DirectPredictResponse.
|
530
|
+
# p result
|
531
|
+
#
|
532
|
+
def direct_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::DirectPredictRequest

  # Normalize a Hash (or nil) into a CallOptions instance.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to?(:to_h)

  # Start from the RPC-level metadata configured for this call.
  headers = @config.rpcs.direct_predict.metadata.to_h

  # Attach the standard x-goog-* headers expected by Google APIs.
  headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]
  )
  headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
  headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?

  # Apply RPC-specific defaults first, then client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.direct_predict.timeout,
                         metadata: headers,
                         retry_policy: @config.rpcs.direct_predict.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.direct_predict request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
|
567
|
+
|
568
|
+
##
|
569
|
+
# Perform an unary online prediction request to a gRPC model server for
|
570
|
+
# custom containers.
|
571
|
+
#
|
572
|
+
# @overload direct_raw_predict(request, options = nil)
|
573
|
+
# Pass arguments to `direct_raw_predict` via a request object, either of type
|
574
|
+
# {::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest} or an equivalent Hash.
|
575
|
+
#
|
576
|
+
# @param request [::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest, ::Hash]
|
577
|
+
# A request object representing the call parameters. Required. To specify no
|
578
|
+
# parameters, or to keep all the default parameter values, pass an empty Hash.
|
579
|
+
# @param options [::Gapic::CallOptions, ::Hash]
|
580
|
+
# Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
|
581
|
+
#
|
582
|
+
# @overload direct_raw_predict(endpoint: nil, method_name: nil, input: nil)
|
583
|
+
# Pass arguments to `direct_raw_predict` via keyword arguments. Note that at
|
584
|
+
# least one keyword argument is required. To specify no parameters, or to keep all
|
585
|
+
# the default parameter values, pass an empty Hash as a request object (see above).
|
586
|
+
#
|
587
|
+
# @param endpoint [::String]
|
588
|
+
# Required. The name of the Endpoint requested to serve the prediction.
|
589
|
+
# Format:
|
590
|
+
# `projects/{project}/locations/{location}/endpoints/{endpoint}`
|
591
|
+
# @param method_name [::String]
|
592
|
+
# Fully qualified name of the API method being invoked to perform
|
593
|
+
# predictions.
|
594
|
+
#
|
595
|
+
# Format:
|
596
|
+
# `/namespace.Service/Method/`
|
597
|
+
# Example:
|
598
|
+
# `/tensorflow.serving.PredictionService/Predict`
|
599
|
+
# @param input [::String]
|
600
|
+
# The prediction input.
|
601
|
+
# @yield [result, operation] Access the result along with the TransportOperation object
|
602
|
+
# @yieldparam result [::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse]
|
603
|
+
# @yieldparam operation [::Gapic::Rest::TransportOperation]
|
604
|
+
#
|
605
|
+
# @return [::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse]
|
606
|
+
#
|
607
|
+
# @raise [::Google::Cloud::Error] if the REST call is aborted.
|
608
|
+
#
|
609
|
+
# @example Basic example
|
610
|
+
# require "google/cloud/ai_platform/v1"
|
611
|
+
#
|
612
|
+
# # Create a client object. The client can be reused for multiple calls.
|
613
|
+
# client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
614
|
+
#
|
615
|
+
# # Create a request. To set request fields, pass in keyword arguments.
|
616
|
+
# request = Google::Cloud::AIPlatform::V1::DirectRawPredictRequest.new
|
617
|
+
#
|
618
|
+
# # Call the direct_raw_predict method.
|
619
|
+
# result = client.direct_raw_predict request
|
620
|
+
#
|
621
|
+
# # The returned object is of type Google::Cloud::AIPlatform::V1::DirectRawPredictResponse.
|
622
|
+
# p result
|
623
|
+
#
|
624
|
+
def direct_raw_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest

  # Normalize a Hash (or nil) into a CallOptions instance.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to?(:to_h)

  # Start from the RPC-level metadata configured for this call.
  headers = @config.rpcs.direct_raw_predict.metadata.to_h

  # Attach the standard x-goog-* headers expected by Google APIs.
  headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]
  )
  headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
  headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?

  # Apply RPC-specific defaults first, then client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.direct_raw_predict.timeout,
                         metadata: headers,
                         retry_policy: @config.rpcs.direct_raw_predict.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.direct_raw_predict request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
|
659
|
+
|
660
|
+
##
|
661
|
+
# Perform a server-side streaming online prediction request for Vertex
|
662
|
+
# LLM streaming.
|
663
|
+
#
|
664
|
+
# @overload server_streaming_predict(request, options = nil)
|
665
|
+
# Pass arguments to `server_streaming_predict` via a request object, either of type
|
666
|
+
# {::Google::Cloud::AIPlatform::V1::StreamingPredictRequest} or an equivalent Hash.
|
667
|
+
#
|
668
|
+
# @param request [::Google::Cloud::AIPlatform::V1::StreamingPredictRequest, ::Hash]
|
669
|
+
# A request object representing the call parameters. Required. To specify no
|
670
|
+
# parameters, or to keep all the default parameter values, pass an empty Hash.
|
671
|
+
# @param options [::Gapic::CallOptions, ::Hash]
|
672
|
+
# Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
|
673
|
+
#
|
674
|
+
# @overload server_streaming_predict(endpoint: nil, inputs: nil, parameters: nil)
|
675
|
+
# Pass arguments to `server_streaming_predict` via keyword arguments. Note that at
|
676
|
+
# least one keyword argument is required. To specify no parameters, or to keep all
|
677
|
+
# the default parameter values, pass an empty Hash as a request object (see above).
|
678
|
+
#
|
679
|
+
# @param endpoint [::String]
|
680
|
+
# Required. The name of the Endpoint requested to serve the prediction.
|
681
|
+
# Format:
|
682
|
+
# `projects/{project}/locations/{location}/endpoints/{endpoint}`
|
683
|
+
# @param inputs [::Array<::Google::Cloud::AIPlatform::V1::Tensor, ::Hash>]
|
684
|
+
# The prediction input.
|
685
|
+
# @param parameters [::Google::Cloud::AIPlatform::V1::Tensor, ::Hash]
|
686
|
+
# The parameters that govern the prediction.
|
687
|
+
# @return [::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingPredictResponse>]
|
688
|
+
#
|
689
|
+
# @raise [::Google::Cloud::Error] if the REST call is aborted.
|
690
|
+
#
|
691
|
+
# @example Basic example
|
692
|
+
# require "google/cloud/ai_platform/v1"
|
693
|
+
#
|
694
|
+
# # Create a client object. The client can be reused for multiple calls.
|
695
|
+
# client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
696
|
+
#
|
697
|
+
# # Create a request. To set request fields, pass in keyword arguments.
|
698
|
+
# request = Google::Cloud::AIPlatform::V1::StreamingPredictRequest.new
|
699
|
+
#
|
700
|
+
# # Call the server_streaming_predict method to start streaming.
|
701
|
+
# output = client.server_streaming_predict request
|
702
|
+
#
|
703
|
+
# # The returned object is a streamed enumerable yielding elements of type
|
704
|
+
# # ::Google::Cloud::AIPlatform::V1::StreamingPredictResponse
|
705
|
+
# output.each do |current_response|
|
706
|
+
# p current_response
|
707
|
+
# end
|
708
|
+
#
|
709
|
+
def server_streaming_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::StreamingPredictRequest

  # Normalize a Hash (or nil) into a CallOptions instance.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to?(:to_h)

  # Start from the RPC-level metadata configured for this call.
  headers = @config.rpcs.server_streaming_predict.metadata.to_h

  # Attach the standard x-goog-* headers expected by Google APIs.
  headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]
  )
  headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
  headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?

  # Apply RPC-specific defaults first, then client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.server_streaming_predict.timeout,
                         metadata: headers,
                         retry_policy: @config.rpcs.server_streaming_predict.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  # Bridge the stub's chunk callback into a lazy, consumer-driven enumerator:
  # each chunk is handed over only after the consumer requests it via in_q.
  chunk_enum = ::Gapic::Rest::ThreadedEnumerator.new do |in_q, out_q|
    @prediction_service_stub.server_streaming_predict request, options do |chunk|
      in_q.deq
      out_q.enq chunk
    end
  end
  ::Gapic::Rest::ServerStream.new ::Google::Cloud::AIPlatform::V1::StreamingPredictResponse, chunk_enum
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
|
749
|
+
|
750
|
+
##
|
751
|
+
# Perform an online explanation.
|
752
|
+
#
|
753
|
+
# If
|
754
|
+
# {::Google::Cloud::AIPlatform::V1::ExplainRequest#deployed_model_id deployed_model_id}
|
755
|
+
# is specified, the corresponding DeployModel must have
|
756
|
+
# {::Google::Cloud::AIPlatform::V1::DeployedModel#explanation_spec explanation_spec}
|
757
|
+
# populated. If
|
758
|
+
# {::Google::Cloud::AIPlatform::V1::ExplainRequest#deployed_model_id deployed_model_id}
|
759
|
+
# is not specified, all DeployedModels must have
|
760
|
+
# {::Google::Cloud::AIPlatform::V1::DeployedModel#explanation_spec explanation_spec}
|
761
|
+
# populated.
|
762
|
+
#
|
763
|
+
# @overload explain(request, options = nil)
|
764
|
+
# Pass arguments to `explain` via a request object, either of type
|
765
|
+
# {::Google::Cloud::AIPlatform::V1::ExplainRequest} or an equivalent Hash.
|
766
|
+
#
|
767
|
+
# @param request [::Google::Cloud::AIPlatform::V1::ExplainRequest, ::Hash]
|
768
|
+
# A request object representing the call parameters. Required. To specify no
|
769
|
+
# parameters, or to keep all the default parameter values, pass an empty Hash.
|
770
|
+
# @param options [::Gapic::CallOptions, ::Hash]
|
771
|
+
# Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
|
772
|
+
#
|
773
|
+
# @overload explain(endpoint: nil, instances: nil, parameters: nil, explanation_spec_override: nil, deployed_model_id: nil)
|
774
|
+
# Pass arguments to `explain` via keyword arguments. Note that at
|
775
|
+
# least one keyword argument is required. To specify no parameters, or to keep all
|
776
|
+
# the default parameter values, pass an empty Hash as a request object (see above).
|
777
|
+
#
|
778
|
+
# @param endpoint [::String]
|
779
|
+
# Required. The name of the Endpoint requested to serve the explanation.
|
780
|
+
# Format:
|
781
|
+
# `projects/{project}/locations/{location}/endpoints/{endpoint}`
|
782
|
+
# @param instances [::Array<::Google::Protobuf::Value, ::Hash>]
|
783
|
+
# Required. The instances that are the input to the explanation call.
|
784
|
+
# A DeployedModel may have an upper limit on the number of instances it
|
785
|
+
# supports per request, and when it is exceeded the explanation call errors
|
786
|
+
# in case of AutoML Models, or, in case of customer created Models, the
|
787
|
+
# behaviour is as documented by that Model.
|
788
|
+
# The schema of any single instance may be specified via Endpoint's
|
789
|
+
# DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
|
790
|
+
# [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
|
791
|
+
# {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri instance_schema_uri}.
|
792
|
+
# @param parameters [::Google::Protobuf::Value, ::Hash]
|
793
|
+
# The parameters that govern the prediction. The schema of the parameters may
|
794
|
+
# be specified via Endpoint's DeployedModels' [Model's
|
795
|
+
# ][google.cloud.aiplatform.v1.DeployedModel.model]
|
796
|
+
# [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
|
797
|
+
# {::Google::Cloud::AIPlatform::V1::PredictSchemata#parameters_schema_uri parameters_schema_uri}.
|
798
|
+
# @param explanation_spec_override [::Google::Cloud::AIPlatform::V1::ExplanationSpecOverride, ::Hash]
|
799
|
+
# If specified, overrides the
|
800
|
+
# {::Google::Cloud::AIPlatform::V1::DeployedModel#explanation_spec explanation_spec}
|
801
|
+
# of the DeployedModel. Can be used for explaining prediction results with
|
802
|
+
# different configurations, such as:
|
803
|
+
# - Explaining top-5 predictions results as opposed to top-1;
|
804
|
+
# - Increasing path count or step count of the attribution methods to reduce
|
805
|
+
# approximate errors;
|
806
|
+
# - Using different baselines for explaining the prediction results.
|
807
|
+
# @param deployed_model_id [::String]
|
808
|
+
# If specified, this ExplainRequest will be served by the chosen
|
809
|
+
# DeployedModel, overriding
|
810
|
+
# {::Google::Cloud::AIPlatform::V1::Endpoint#traffic_split Endpoint.traffic_split}.
|
811
|
+
# @yield [result, operation] Access the result along with the TransportOperation object
|
812
|
+
# @yieldparam result [::Google::Cloud::AIPlatform::V1::ExplainResponse]
|
813
|
+
# @yieldparam operation [::Gapic::Rest::TransportOperation]
|
814
|
+
#
|
815
|
+
# @return [::Google::Cloud::AIPlatform::V1::ExplainResponse]
|
816
|
+
#
|
817
|
+
# @raise [::Google::Cloud::Error] if the REST call is aborted.
|
818
|
+
#
|
819
|
+
# @example Basic example
|
820
|
+
# require "google/cloud/ai_platform/v1"
|
821
|
+
#
|
822
|
+
# # Create a client object. The client can be reused for multiple calls.
|
823
|
+
# client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
824
|
+
#
|
825
|
+
# # Create a request. To set request fields, pass in keyword arguments.
|
826
|
+
# request = Google::Cloud::AIPlatform::V1::ExplainRequest.new
|
827
|
+
#
|
828
|
+
# # Call the explain method.
|
829
|
+
# result = client.explain request
|
830
|
+
#
|
831
|
+
# # The returned object is of type Google::Cloud::AIPlatform::V1::ExplainResponse.
|
832
|
+
# p result
|
833
|
+
#
|
834
|
+
def explain request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::ExplainRequest

  # Normalize a Hash (or nil) into a CallOptions instance.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to?(:to_h)

  # Start from the RPC-level metadata configured for this call.
  headers = @config.rpcs.explain.metadata.to_h

  # Attach the standard x-goog-* headers expected by Google APIs.
  headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]
  )
  headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
  headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?

  # Apply RPC-specific defaults first, then client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.explain.timeout,
                         metadata: headers,
                         retry_policy: @config.rpcs.explain.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.explain request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
|
869
|
+
|
870
|
+
##
|
871
|
+
# Generate content with multimodal inputs.
|
872
|
+
#
|
873
|
+
# @overload generate_content(request, options = nil)
|
874
|
+
# Pass arguments to `generate_content` via a request object, either of type
|
875
|
+
# {::Google::Cloud::AIPlatform::V1::GenerateContentRequest} or an equivalent Hash.
|
876
|
+
#
|
877
|
+
# @param request [::Google::Cloud::AIPlatform::V1::GenerateContentRequest, ::Hash]
|
878
|
+
# A request object representing the call parameters. Required. To specify no
|
879
|
+
# parameters, or to keep all the default parameter values, pass an empty Hash.
|
880
|
+
# @param options [::Gapic::CallOptions, ::Hash]
|
881
|
+
# Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
|
882
|
+
#
|
883
|
+
# @overload generate_content(model: nil, contents: nil, system_instruction: nil, tools: nil, safety_settings: nil, generation_config: nil)
|
884
|
+
# Pass arguments to `generate_content` via keyword arguments. Note that at
|
885
|
+
# least one keyword argument is required. To specify no parameters, or to keep all
|
886
|
+
# the default parameter values, pass an empty Hash as a request object (see above).
|
887
|
+
#
|
888
|
+
# @param model [::String]
|
889
|
+
# Required. The name of the publisher model requested to serve the
|
890
|
+
# prediction. Format:
|
891
|
+
# `projects/{project}/locations/{location}/publishers/*/models/*`
|
892
|
+
# @param contents [::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>]
|
893
|
+
# Required. The content of the current conversation with the model.
|
894
|
+
#
|
895
|
+
# For single-turn queries, this is a single instance. For multi-turn queries,
|
896
|
+
# this is a repeated field that contains conversation history + latest
|
897
|
+
# request.
|
898
|
+
# @param system_instruction [::Google::Cloud::AIPlatform::V1::Content, ::Hash]
|
899
|
+
# Optional. The user provided system instructions for the model.
|
900
|
+
# Note: only text should be used in parts and content in each part will be in
|
901
|
+
# a separate paragraph.
|
902
|
+
# @param tools [::Array<::Google::Cloud::AIPlatform::V1::Tool, ::Hash>]
|
903
|
+
# Optional. A list of `Tools` the model may use to generate the next
|
904
|
+
# response.
|
905
|
+
#
|
906
|
+
# A `Tool` is a piece of code that enables the system to interact with
|
907
|
+
# external systems to perform an action, or set of actions, outside of
|
908
|
+
# knowledge and scope of the model.
|
909
|
+
# @param safety_settings [::Array<::Google::Cloud::AIPlatform::V1::SafetySetting, ::Hash>]
|
910
|
+
# Optional. Per request settings for blocking unsafe content.
|
911
|
+
# Enforced on GenerateContentResponse.candidates.
|
912
|
+
# @param generation_config [::Google::Cloud::AIPlatform::V1::GenerationConfig, ::Hash]
|
913
|
+
# Optional. Generation config.
|
914
|
+
# @yield [result, operation] Access the result along with the TransportOperation object
|
915
|
+
# @yieldparam result [::Google::Cloud::AIPlatform::V1::GenerateContentResponse]
|
916
|
+
# @yieldparam operation [::Gapic::Rest::TransportOperation]
|
917
|
+
#
|
918
|
+
# @return [::Google::Cloud::AIPlatform::V1::GenerateContentResponse]
|
919
|
+
#
|
920
|
+
# @raise [::Google::Cloud::Error] if the REST call is aborted.
|
921
|
+
#
|
922
|
+
# @example Basic example
|
923
|
+
# require "google/cloud/ai_platform/v1"
|
924
|
+
#
|
925
|
+
# # Create a client object. The client can be reused for multiple calls.
|
926
|
+
# client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
927
|
+
#
|
928
|
+
# # Create a request. To set request fields, pass in keyword arguments.
|
929
|
+
# request = Google::Cloud::AIPlatform::V1::GenerateContentRequest.new
|
930
|
+
#
|
931
|
+
# # Call the generate_content method.
|
932
|
+
# result = client.generate_content request
|
933
|
+
#
|
934
|
+
# # The returned object is of type Google::Cloud::AIPlatform::V1::GenerateContentResponse.
|
935
|
+
# p result
|
936
|
+
#
|
937
|
+
def generate_content request, options = nil
  # A request object is mandatory; reject nil up front.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::GenerateContentRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Per-RPC configuration for `generate_content`, referenced several
  # times below.
  rpc_config = @config.rpcs.generate_content

  # Build the call metadata, attaching the x-goog-api-client,
  # x-goog-api-version and x-goog-user-project headers.
  metadata = rpc_config.metadata.to_h
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]
  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Layer defaults: RPC-specific settings take precedence; client-wide
  # settings fill whatever remains unset.
  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  # Delegate to the REST stub; yield result and transport operation to
  # the caller's block when one is given.
  @prediction_service_stub.generate_content request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  # Surface transport-level failures as the public Google Cloud error type.
  raise ::Google::Cloud::Error.from_error(e)
end
|
972
|
+
|
973
|
+
##
|
974
|
+
# Generate content with multimodal inputs with streaming support.
|
975
|
+
#
|
976
|
+
# @overload stream_generate_content(request, options = nil)
|
977
|
+
# Pass arguments to `stream_generate_content` via a request object, either of type
|
978
|
+
# {::Google::Cloud::AIPlatform::V1::GenerateContentRequest} or an equivalent Hash.
|
979
|
+
#
|
980
|
+
# @param request [::Google::Cloud::AIPlatform::V1::GenerateContentRequest, ::Hash]
|
981
|
+
# A request object representing the call parameters. Required. To specify no
|
982
|
+
# parameters, or to keep all the default parameter values, pass an empty Hash.
|
983
|
+
# @param options [::Gapic::CallOptions, ::Hash]
|
984
|
+
# Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
|
985
|
+
#
|
986
|
+
# @overload stream_generate_content(model: nil, contents: nil, system_instruction: nil, tools: nil, safety_settings: nil, generation_config: nil)
|
987
|
+
# Pass arguments to `stream_generate_content` via keyword arguments. Note that at
|
988
|
+
# least one keyword argument is required. To specify no parameters, or to keep all
|
989
|
+
# the default parameter values, pass an empty Hash as a request object (see above).
|
990
|
+
#
|
991
|
+
# @param model [::String]
|
992
|
+
# Required. The name of the publisher model requested to serve the
|
993
|
+
# prediction. Format:
|
994
|
+
# `projects/{project}/locations/{location}/publishers/*/models/*`
|
995
|
+
# @param contents [::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>]
|
996
|
+
# Required. The content of the current conversation with the model.
|
997
|
+
#
|
998
|
+
# For single-turn queries, this is a single instance. For multi-turn queries,
|
999
|
+
# this is a repeated field that contains conversation history + latest
|
1000
|
+
# request.
|
1001
|
+
# @param system_instruction [::Google::Cloud::AIPlatform::V1::Content, ::Hash]
|
1002
|
+
# Optional. The user provided system instructions for the model.
|
1003
|
+
# Note: only text should be used in parts and content in each part will be in
|
1004
|
+
# a separate paragraph.
|
1005
|
+
# @param tools [::Array<::Google::Cloud::AIPlatform::V1::Tool, ::Hash>]
|
1006
|
+
# Optional. A list of `Tools` the model may use to generate the next
|
1007
|
+
# response.
|
1008
|
+
#
|
1009
|
+
# A `Tool` is a piece of code that enables the system to interact with
|
1010
|
+
# external systems to perform an action, or set of actions, outside of
|
1011
|
+
# knowledge and scope of the model.
|
1012
|
+
# @param safety_settings [::Array<::Google::Cloud::AIPlatform::V1::SafetySetting, ::Hash>]
|
1013
|
+
# Optional. Per request settings for blocking unsafe content.
|
1014
|
+
# Enforced on GenerateContentResponse.candidates.
|
1015
|
+
# @param generation_config [::Google::Cloud::AIPlatform::V1::GenerationConfig, ::Hash]
|
1016
|
+
# Optional. Generation config.
|
1017
|
+
# @return [::Enumerable<::Google::Cloud::AIPlatform::V1::GenerateContentResponse>]
|
1018
|
+
#
|
1019
|
+
# @raise [::Google::Cloud::Error] if the REST call is aborted.
|
1020
|
+
#
|
1021
|
+
# @example Basic example
|
1022
|
+
# require "google/cloud/ai_platform/v1"
|
1023
|
+
#
|
1024
|
+
# # Create a client object. The client can be reused for multiple calls.
|
1025
|
+
# client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
|
1026
|
+
#
|
1027
|
+
# # Create a request. To set request fields, pass in keyword arguments.
|
1028
|
+
# request = Google::Cloud::AIPlatform::V1::GenerateContentRequest.new
|
1029
|
+
#
|
1030
|
+
# # Call the stream_generate_content method to start streaming.
|
1031
|
+
# output = client.stream_generate_content request
|
1032
|
+
#
|
1033
|
+
# # The returned object is a streamed enumerable yielding elements of type
|
1034
|
+
# # ::Google::Cloud::AIPlatform::V1::GenerateContentResponse
|
1035
|
+
# output.each do |current_response|
|
1036
|
+
# p current_response
|
1037
|
+
# end
|
1038
|
+
#
|
1039
|
+
def stream_generate_content request, options = nil
  # A request object is mandatory; reject nil before any coercion.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::GenerateContentRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  call_metadata = @config.rpcs.stream_generate_content.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence; client-wide defaults fill the rest.
  options.apply_defaults timeout: @config.rpcs.stream_generate_content.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.stream_generate_content.retry_policy

  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  # Wrap the raw chunk stream in a lazy ServerStream enumerable of
  # GenerateContentResponse messages. Inside the ThreadedEnumerator,
  # `in_q.deq` blocks until the consumer requests the next element,
  # and only then is the chunk handed over via `out_q.enq` — so the
  # deq-before-enq order implements one-chunk-at-a-time backpressure.
  ::Gapic::Rest::ServerStream.new(
    ::Google::Cloud::AIPlatform::V1::GenerateContentResponse,
    ::Gapic::Rest::ThreadedEnumerator.new do |in_q, out_q|
      @prediction_service_stub.stream_generate_content request, options do |chunk|
        in_q.deq
        out_q.enq chunk
      end
    end
  )
rescue ::Gapic::Rest::Error => e
  # Surface transport-level failures as the public Google Cloud error type.
  raise ::Google::Cloud::Error.from_error(e)
end
|
1079
|
+
|
1080
|
+
##
|
1081
|
+
# Configuration class for the PredictionService REST API.
|
1082
|
+
#
|
1083
|
+
# This class represents the configuration for PredictionService REST,
|
1084
|
+
# providing control over timeouts, retry behavior, logging, transport
|
1085
|
+
# parameters, and other low-level controls. Certain parameters can also be
|
1086
|
+
# applied individually to specific RPCs. See
|
1087
|
+
# {::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client::Configuration::Rpcs}
|
1088
|
+
# for a list of RPCs that can be configured independently.
|
1089
|
+
#
|
1090
|
+
# Configuration can be applied globally to all clients, or to a single client
|
1091
|
+
# on construction.
|
1092
|
+
#
|
1093
|
+
# @example
|
1094
|
+
#
|
1095
|
+
# # Modify the global config, setting the timeout for
|
1096
|
+
# # predict to 20 seconds,
|
1097
|
+
# # and all remaining timeouts to 10 seconds.
|
1098
|
+
# ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.configure do |config|
|
1099
|
+
# config.timeout = 10.0
|
1100
|
+
# config.rpcs.predict.timeout = 20.0
|
1101
|
+
# end
|
1102
|
+
#
|
1103
|
+
# # Apply the above configuration only to a new client.
|
1104
|
+
# client = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new do |config|
|
1105
|
+
# config.timeout = 10.0
|
1106
|
+
# config.rpcs.predict.timeout = 20.0
|
1107
|
+
# end
|
1108
|
+
#
|
1109
|
+
# @!attribute [rw] endpoint
|
1110
|
+
# A custom service endpoint, as a hostname or hostname:port. The default is
|
1111
|
+
# nil, indicating to use the default endpoint in the current universe domain.
|
1112
|
+
# @return [::String,nil]
|
1113
|
+
# @!attribute [rw] credentials
|
1114
|
+
# Credentials to send with calls. You may provide any of the following types:
|
1115
|
+
# * (`String`) The path to a service account key file in JSON format
|
1116
|
+
# * (`Hash`) A service account key as a Hash
|
1117
|
+
# * (`Google::Auth::Credentials`) A googleauth credentials object
|
1118
|
+
# (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
|
1119
|
+
# * (`Signet::OAuth2::Client`) A signet oauth2 client object
|
1120
|
+
# (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
|
1121
|
+
# * (`nil`) indicating no credentials
|
1122
|
+
# @return [::Object]
|
1123
|
+
# @!attribute [rw] scope
|
1124
|
+
# The OAuth scopes
|
1125
|
+
# @return [::Array<::String>]
|
1126
|
+
# @!attribute [rw] lib_name
|
1127
|
+
# The library name as recorded in instrumentation and logging
|
1128
|
+
# @return [::String]
|
1129
|
+
# @!attribute [rw] lib_version
|
1130
|
+
# The library version as recorded in instrumentation and logging
|
1131
|
+
# @return [::String]
|
1132
|
+
# @!attribute [rw] timeout
|
1133
|
+
# The call timeout in seconds.
|
1134
|
+
# @return [::Numeric]
|
1135
|
+
# @!attribute [rw] metadata
|
1136
|
+
# Additional headers to be sent with the call.
|
1137
|
+
# @return [::Hash{::Symbol=>::String}]
|
1138
|
+
# @!attribute [rw] retry_policy
|
1139
|
+
# The retry policy. The value is a hash with the following keys:
|
1140
|
+
# * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
|
1141
|
+
# * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
|
1142
|
+
# * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
|
1143
|
+
# * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
|
1144
|
+
# trigger a retry.
|
1145
|
+
# @return [::Hash]
|
1146
|
+
# @!attribute [rw] quota_project
|
1147
|
+
# A separate project against which to charge quota.
|
1148
|
+
# @return [::String]
|
1149
|
+
# @!attribute [rw] universe_domain
|
1150
|
+
# The universe domain within which to make requests. This determines the
|
1151
|
+
# default endpoint URL. The default value of nil uses the environment
|
1152
|
+
# universe (usually the default "googleapis.com" universe).
|
1153
|
+
# @return [::String,nil]
|
1154
|
+
#
|
1155
|
+
class Configuration
  extend ::Gapic::Config

  # @private
  # The endpoint specific to the default "googleapis.com" universe. Deprecated.
  DEFAULT_ENDPOINT = "aiplatform.googleapis.com"

  config_attr :endpoint, nil, ::String, nil
  # Credentials accept several forms; the validator block below admits
  # exactly the documented types (String path, Hash key, Proc, Symbol,
  # googleauth credentials, signet client, or nil for no credentials).
  config_attr :credentials, nil do |value|
    allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
    allowed.any? { |klass| klass === value }
  end
  config_attr :scope, nil, ::String, ::Array, nil
  config_attr :lib_name, nil, ::String, nil
  config_attr :lib_version, nil, ::String, nil
  config_attr :timeout, nil, ::Numeric, nil
  config_attr :metadata, nil, ::Hash, nil
  config_attr :retry_policy, nil, ::Hash, ::Proc, nil
  config_attr :quota_project, nil, ::String, nil
  config_attr :universe_domain, nil, ::String, nil

  # @private
  # Overrides for http bindings for the RPCs of this service
  # are only used when this service is used as mixin, and only
  # by the host service.
  # @return [::Hash{::Symbol=>::Array<::Gapic::Rest::GrpcTranscoder::HttpBinding>}]
  config_attr :bindings_override, {}, ::Hash, nil

  # @private
  # Chains this configuration to an optional parent so values unset here
  # fall through to the parent (see ::Gapic::Config).
  def initialize parent_config = nil
    @parent_config = parent_config unless parent_config.nil?

    yield self if block_given?
  end

  ##
  # Configurations for individual RPCs
  # @return [Rpcs]
  #
  def rpcs
    # Memoized; the parent's rpcs (when present) seed the per-RPC defaults.
    @rpcs ||= begin
      parent_rpcs = nil
      parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config.respond_to?(:rpcs)
      Rpcs.new parent_rpcs
    end
  end

  ##
  # Configuration RPC class for the PredictionService API.
  #
  # Includes fields providing the configuration for each RPC in this service.
  # Each configuration object is of type `Gapic::Config::Method` and includes
  # the following configuration fields:
  #
  # * `timeout` (*type:* `Numeric`) - The call timeout in seconds
  # * `metadata` (*type:* `Hash{Symbol=>String}`) - Additional headers
  # * `retry_policy` (*type:* `Hash`) - The retry policy. The policy fields
  #   include the following keys:
  #     * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
  #     * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
  #     * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
  #     * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
  #       trigger a retry.
  #
  class Rpcs
    ##
    # RPC-specific configuration for `predict`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :predict
    ##
    # RPC-specific configuration for `raw_predict`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :raw_predict
    ##
    # RPC-specific configuration for `stream_raw_predict`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :stream_raw_predict
    ##
    # RPC-specific configuration for `direct_predict`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :direct_predict
    ##
    # RPC-specific configuration for `direct_raw_predict`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :direct_raw_predict
    ##
    # RPC-specific configuration for `server_streaming_predict`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :server_streaming_predict
    ##
    # RPC-specific configuration for `explain`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :explain
    ##
    # RPC-specific configuration for `generate_content`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :generate_content
    ##
    # RPC-specific configuration for `stream_generate_content`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :stream_generate_content

    # @private
    # Builds one Gapic::Config::Method per RPC, each seeded from the
    # corresponding parent RPC config when the parent responds to it.
    def initialize parent_rpcs = nil
      predict_config = parent_rpcs.predict if parent_rpcs.respond_to? :predict
      @predict = ::Gapic::Config::Method.new predict_config
      raw_predict_config = parent_rpcs.raw_predict if parent_rpcs.respond_to? :raw_predict
      @raw_predict = ::Gapic::Config::Method.new raw_predict_config
      stream_raw_predict_config = parent_rpcs.stream_raw_predict if parent_rpcs.respond_to? :stream_raw_predict
      @stream_raw_predict = ::Gapic::Config::Method.new stream_raw_predict_config
      direct_predict_config = parent_rpcs.direct_predict if parent_rpcs.respond_to? :direct_predict
      @direct_predict = ::Gapic::Config::Method.new direct_predict_config
      direct_raw_predict_config = parent_rpcs.direct_raw_predict if parent_rpcs.respond_to? :direct_raw_predict
      @direct_raw_predict = ::Gapic::Config::Method.new direct_raw_predict_config
      server_streaming_predict_config = parent_rpcs.server_streaming_predict if parent_rpcs.respond_to? :server_streaming_predict
      @server_streaming_predict = ::Gapic::Config::Method.new server_streaming_predict_config
      explain_config = parent_rpcs.explain if parent_rpcs.respond_to? :explain
      @explain = ::Gapic::Config::Method.new explain_config
      generate_content_config = parent_rpcs.generate_content if parent_rpcs.respond_to? :generate_content
      @generate_content = ::Gapic::Config::Method.new generate_content_config
      stream_generate_content_config = parent_rpcs.stream_generate_content if parent_rpcs.respond_to? :stream_generate_content
      @stream_generate_content = ::Gapic::Config::Method.new stream_generate_content_config

      yield self if block_given?
    end
  end
end
|
1291
|
+
end
|
1292
|
+
end
|
1293
|
+
end
|
1294
|
+
end
|
1295
|
+
end
|
1296
|
+
end
|
1297
|
+
end
|