google-cloud-ai_platform-v1 0.34.0 → 0.35.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/client.rb +528 -0
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/credentials.rb +47 -0
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/paths.rb +78 -0
- data/lib/google/cloud/ai_platform/v1/llm_utility_service.rb +49 -0
- data/lib/google/cloud/ai_platform/v1/prediction_service/client.rb +374 -0
- data/lib/google/cloud/ai_platform/v1/version.rb +1 -1
- data/lib/google/cloud/ai_platform/v1.rb +2 -1
- data/lib/google/cloud/aiplatform/v1/llm_utility_service_pb.rb +52 -0
- data/lib/google/cloud/aiplatform/v1/llm_utility_service_services_pb.rb +47 -0
- data/lib/google/cloud/aiplatform/v1/model_pb.rb +5 -1
- data/lib/google/cloud/aiplatform/v1/prediction_service_pb.rb +7 -1
- data/lib/google/cloud/aiplatform/v1/prediction_service_services_pb.rb +10 -0
- data/proto_docs/google/api/client.rb +13 -0
- data/proto_docs/google/cloud/aiplatform/v1/llm_utility_service.rb +64 -0
- data/proto_docs/google/cloud/aiplatform/v1/model.rb +61 -0
- data/proto_docs/google/cloud/aiplatform/v1/prediction_service.rb +112 -2
- metadata +9 -2
data/lib/google/cloud/ai_platform/v1/llm_utility_service.rb

@@ -0,0 +1,49 @@
+# frozen_string_literal: true
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+require "gapic/common"
+require "gapic/config"
+require "gapic/config/method"
+
+require "google/cloud/ai_platform/v1/version"
+
+require "google/cloud/ai_platform/v1/llm_utility_service/credentials"
+require "google/cloud/ai_platform/v1/llm_utility_service/paths"
+require "google/cloud/ai_platform/v1/llm_utility_service/client"
+
+module Google
+  module Cloud
+    module AIPlatform
+      module V1
+        ##
+        # Service for LLM related utility functions.
+        #
+        # @example Load this service and instantiate a gRPC client
+        #
+        #     require "google/cloud/ai_platform/v1/llm_utility_service"
+        #     client = ::Google::Cloud::AIPlatform::V1::LlmUtilityService::Client.new
+        #
+        module LlmUtilityService
+        end
+      end
+    end
+  end
+end
+
+helper_path = ::File.join __dir__, "llm_utility_service", "helpers.rb"
+require "google/cloud/ai_platform/v1/llm_utility_service/helpers" if ::File.file? helper_path
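Taken together with the generated client in `llm_utility_service/client.rb` (also added in this release, not shown here), the new service can be exercised roughly as follows. This is a sketch, not code from the diff: the project, location, and model names are hypothetical placeholders, and `count_tokens` is the snake_case GAPIC method for the `CountTokens` RPC declared later in this diff.

```ruby
require "google/cloud/ai_platform/v1/llm_utility_service"

client = ::Google::Cloud::AIPlatform::V1::LlmUtilityService::Client.new

# CountTokens accepts an endpoint or publisher model resource name plus
# repeated google.protobuf.Value instances, per the method signature in
# llm_utility_service_pb.rb below. Resource names here are placeholders.
response = client.count_tokens(
  endpoint:  "projects/my-project/locations/us-central1/publishers/google/models/text-bison",
  instances: [::Google::Protobuf::Value.new(string_value: "Hello world")]
)
p response
```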
data/lib/google/cloud/ai_platform/v1/prediction_service/client.rb

@@ -389,6 +389,274 @@ module Google
               raise ::Google::Cloud::Error.from_error(e)
             end

+            ##
+            # Perform an unary online prediction request for Vertex first-party products
+            # and frameworks.
+            #
+            # @overload direct_predict(request, options = nil)
+            #   Pass arguments to `direct_predict` via a request object, either of type
+            #   {::Google::Cloud::AIPlatform::V1::DirectPredictRequest} or an equivalent Hash.
+            #
+            #   @param request [::Google::Cloud::AIPlatform::V1::DirectPredictRequest, ::Hash]
+            #     A request object representing the call parameters. Required. To specify no
+            #     parameters, or to keep all the default parameter values, pass an empty Hash.
+            #   @param options [::Gapic::CallOptions, ::Hash]
+            #     Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
+            #
+            # @overload direct_predict(endpoint: nil, inputs: nil, parameters: nil)
+            #   Pass arguments to `direct_predict` via keyword arguments. Note that at
+            #   least one keyword argument is required. To specify no parameters, or to keep all
+            #   the default parameter values, pass an empty Hash as a request object (see above).
+            #
+            #   @param endpoint [::String]
+            #     Required. The name of the Endpoint requested to serve the prediction.
+            #     Format:
+            #     `projects/{project}/locations/{location}/endpoints/{endpoint}`
+            #   @param inputs [::Array<::Google::Cloud::AIPlatform::V1::Tensor, ::Hash>]
+            #     The prediction input.
+            #   @param parameters [::Google::Cloud::AIPlatform::V1::Tensor, ::Hash]
+            #     The parameters that govern the prediction.
+            #
+            # @yield [response, operation] Access the result along with the RPC operation
+            # @yieldparam response [::Google::Cloud::AIPlatform::V1::DirectPredictResponse]
+            # @yieldparam operation [::GRPC::ActiveCall::Operation]
+            #
+            # @return [::Google::Cloud::AIPlatform::V1::DirectPredictResponse]
+            #
+            # @raise [::Google::Cloud::Error] if the RPC is aborted.
+            #
+            # @example Basic example
+            #   require "google/cloud/ai_platform/v1"
+            #
+            #   # Create a client object. The client can be reused for multiple calls.
+            #   client = Google::Cloud::AIPlatform::V1::PredictionService::Client.new
+            #
+            #   # Create a request. To set request fields, pass in keyword arguments.
+            #   request = Google::Cloud::AIPlatform::V1::DirectPredictRequest.new
+            #
+            #   # Call the direct_predict method.
+            #   result = client.direct_predict request
+            #
+            #   # The returned object is of type Google::Cloud::AIPlatform::V1::DirectPredictResponse.
+            #   p result
+            #
+            def direct_predict request, options = nil
+              raise ::ArgumentError, "request must be provided" if request.nil?
+
+              request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::DirectPredictRequest
+
+              # Converts hash and nil to an options object
+              options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
+
+              # Customize the options with defaults
+              metadata = @config.rpcs.direct_predict.metadata.to_h
+
+              # Set x-goog-api-client and x-goog-user-project headers
+              metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
+                lib_name: @config.lib_name, lib_version: @config.lib_version,
+                gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION
+              metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
+
+              header_params = {}
+              if request.endpoint
+                header_params["endpoint"] = request.endpoint
+              end
+
+              request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
+              metadata[:"x-goog-request-params"] ||= request_params_header
+
+              options.apply_defaults timeout: @config.rpcs.direct_predict.timeout,
+                                     metadata: metadata,
+                                     retry_policy: @config.rpcs.direct_predict.retry_policy
+
+              options.apply_defaults timeout: @config.timeout,
+                                     metadata: @config.metadata,
+                                     retry_policy: @config.retry_policy
+
+              @prediction_service_stub.call_rpc :direct_predict, request, options: options do |response, operation|
+                yield response, operation if block_given?
+                return response
+              end
+            rescue ::GRPC::BadStatus => e
+              raise ::Google::Cloud::Error.from_error(e)
+            end
+
+            ##
+            # Perform an online prediction request through gRPC.
+            #
+            # @overload direct_raw_predict(request, options = nil)
+            #   Pass arguments to `direct_raw_predict` via a request object, either of type
+            #   {::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest} or an equivalent Hash.
+            #
+            #   @param request [::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest, ::Hash]
+            #     A request object representing the call parameters. Required. To specify no
+            #     parameters, or to keep all the default parameter values, pass an empty Hash.
+            #   @param options [::Gapic::CallOptions, ::Hash]
+            #     Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
+            #
+            # @overload direct_raw_predict(endpoint: nil, method_name: nil, input: nil)
+            #   Pass arguments to `direct_raw_predict` via keyword arguments. Note that at
+            #   least one keyword argument is required. To specify no parameters, or to keep all
+            #   the default parameter values, pass an empty Hash as a request object (see above).
+            #
+            #   @param endpoint [::String]
+            #     Required. The name of the Endpoint requested to serve the prediction.
+            #     Format:
+            #     `projects/{project}/locations/{location}/endpoints/{endpoint}`
+            #   @param method_name [::String]
+            #     Fully qualified name of the API method being invoked to perform
+            #     predictions.
+            #
+            #     Format:
+            #     `/namespace.Service/Method/`
+            #     Example:
+            #     `/tensorflow.serving.PredictionService/Predict`
+            #   @param input [::String]
+            #     The prediction input.
+            #
+            # @yield [response, operation] Access the result along with the RPC operation
+            # @yieldparam response [::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse]
+            # @yieldparam operation [::GRPC::ActiveCall::Operation]
+            #
+            # @return [::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse]
+            #
+            # @raise [::Google::Cloud::Error] if the RPC is aborted.
+            #
+            # @example Basic example
+            #   require "google/cloud/ai_platform/v1"
+            #
+            #   # Create a client object. The client can be reused for multiple calls.
+            #   client = Google::Cloud::AIPlatform::V1::PredictionService::Client.new
+            #
+            #   # Create a request. To set request fields, pass in keyword arguments.
+            #   request = Google::Cloud::AIPlatform::V1::DirectRawPredictRequest.new
+            #
+            #   # Call the direct_raw_predict method.
+            #   result = client.direct_raw_predict request
+            #
+            #   # The returned object is of type Google::Cloud::AIPlatform::V1::DirectRawPredictResponse.
+            #   p result
+            #
+            def direct_raw_predict request, options = nil
+              raise ::ArgumentError, "request must be provided" if request.nil?
+
+              request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest
+
+              # Converts hash and nil to an options object
+              options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
+
+              # Customize the options with defaults
+              metadata = @config.rpcs.direct_raw_predict.metadata.to_h
+
+              # Set x-goog-api-client and x-goog-user-project headers
+              metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
+                lib_name: @config.lib_name, lib_version: @config.lib_version,
+                gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION
+              metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
+
+              header_params = {}
+              if request.endpoint
+                header_params["endpoint"] = request.endpoint
+              end
+
+              request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
+              metadata[:"x-goog-request-params"] ||= request_params_header
+
+              options.apply_defaults timeout: @config.rpcs.direct_raw_predict.timeout,
+                                     metadata: metadata,
+                                     retry_policy: @config.rpcs.direct_raw_predict.retry_policy
+
+              options.apply_defaults timeout: @config.timeout,
+                                     metadata: @config.metadata,
+                                     retry_policy: @config.retry_policy
+
+              @prediction_service_stub.call_rpc :direct_raw_predict, request, options: options do |response, operation|
+                yield response, operation if block_given?
+                return response
+              end
+            rescue ::GRPC::BadStatus => e
+              raise ::Google::Cloud::Error.from_error(e)
+            end
+
+            ##
+            # Perform a streaming online prediction request for Vertex first-party
+            # products and frameworks.
+            #
+            # @param request [::Gapic::StreamInput, ::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingPredictRequest, ::Hash>]
+            #   An enumerable of {::Google::Cloud::AIPlatform::V1::StreamingPredictRequest} instances.
+            # @param options [::Gapic::CallOptions, ::Hash]
+            #   Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
+            #
+            # @yield [response, operation] Access the result along with the RPC operation
+            # @yieldparam response [::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingPredictResponse>]
+            # @yieldparam operation [::GRPC::ActiveCall::Operation]
+            #
+            # @return [::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingPredictResponse>]
+            #
+            # @raise [::Google::Cloud::Error] if the RPC is aborted.
+            #
+            # @example Basic example
+            #   require "google/cloud/ai_platform/v1"
+            #
+            #   # Create a client object. The client can be reused for multiple calls.
+            #   client = Google::Cloud::AIPlatform::V1::PredictionService::Client.new
+            #
+            #   # Create an input stream.
+            #   input = Gapic::StreamInput.new
+            #
+            #   # Call the streaming_predict method to start streaming.
+            #   output = client.streaming_predict input
+            #
+            #   # Send requests on the stream. For each request object, set fields by
+            #   # passing keyword arguments. Be sure to close the stream when done.
+            #   input << Google::Cloud::AIPlatform::V1::StreamingPredictRequest.new
+            #   input << Google::Cloud::AIPlatform::V1::StreamingPredictRequest.new
+            #   input.close
+            #
+            #   # The returned object is a streamed enumerable yielding elements of type
+            #   # ::Google::Cloud::AIPlatform::V1::StreamingPredictResponse
+            #   output.each do |current_response|
+            #     p current_response
+            #   end
+            #
+            def streaming_predict request, options = nil
+              unless request.is_a? ::Enumerable
+                raise ::ArgumentError, "request must be an Enumerable" unless request.respond_to? :to_enum
+                request = request.to_enum
+              end
+
+              request = request.lazy.map do |req|
+                ::Gapic::Protobuf.coerce req, to: ::Google::Cloud::AIPlatform::V1::StreamingPredictRequest
+              end
+
+              # Converts hash and nil to an options object
+              options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
+
+              # Customize the options with defaults
+              metadata = @config.rpcs.streaming_predict.metadata.to_h
+
+              # Set x-goog-api-client and x-goog-user-project headers
+              metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
+                lib_name: @config.lib_name, lib_version: @config.lib_version,
+                gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION
+              metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
+
+              options.apply_defaults timeout: @config.rpcs.streaming_predict.timeout,
+                                     metadata: metadata,
+                                     retry_policy: @config.rpcs.streaming_predict.retry_policy
+
+              options.apply_defaults timeout: @config.timeout,
+                                     metadata: @config.metadata,
+                                     retry_policy: @config.retry_policy
+
+              @prediction_service_stub.call_rpc :streaming_predict, request, options: options do |response, operation|
+                yield response, operation if block_given?
+                return response
+              end
+            rescue ::GRPC::BadStatus => e
+              raise ::Google::Cloud::Error.from_error(e)
+            end
+
             ##
             # Perform a server-side streaming online prediction request for Vertex
             # LLM streaming.
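A hedged sketch of the keyword-argument form documented above. The endpoint and tensor values are illustrative placeholders, and the `Tensor` field names follow the pre-existing v1 `Tensor` message, which is not part of this diff:

```ruby
require "google/cloud/ai_platform/v1"

client = ::Google::Cloud::AIPlatform::V1::PredictionService::Client.new

# Keyword form of the new unary direct_predict RPC. The endpoint path and
# tensor contents below are placeholders, not values from this release.
response = client.direct_predict(
  endpoint: "projects/my-project/locations/us-central1/endpoints/my-endpoint",
  inputs:   [::Google::Cloud::AIPlatform::V1::Tensor.new(dtype: :FLOAT, shape: [2], float_val: [1.0, 2.0])]
)
p response
```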
@@ -484,6 +752,84 @@ module Google
               raise ::Google::Cloud::Error.from_error(e)
             end

+            ##
+            # Perform a streaming online prediction request through gRPC.
+            #
+            # @param request [::Gapic::StreamInput, ::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingRawPredictRequest, ::Hash>]
+            #   An enumerable of {::Google::Cloud::AIPlatform::V1::StreamingRawPredictRequest} instances.
+            # @param options [::Gapic::CallOptions, ::Hash]
+            #   Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
+            #
+            # @yield [response, operation] Access the result along with the RPC operation
+            # @yieldparam response [::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingRawPredictResponse>]
+            # @yieldparam operation [::GRPC::ActiveCall::Operation]
+            #
+            # @return [::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingRawPredictResponse>]
+            #
+            # @raise [::Google::Cloud::Error] if the RPC is aborted.
+            #
+            # @example Basic example
+            #   require "google/cloud/ai_platform/v1"
+            #
+            #   # Create a client object. The client can be reused for multiple calls.
+            #   client = Google::Cloud::AIPlatform::V1::PredictionService::Client.new
+            #
+            #   # Create an input stream.
+            #   input = Gapic::StreamInput.new
+            #
+            #   # Call the streaming_raw_predict method to start streaming.
+            #   output = client.streaming_raw_predict input
+            #
+            #   # Send requests on the stream. For each request object, set fields by
+            #   # passing keyword arguments. Be sure to close the stream when done.
+            #   input << Google::Cloud::AIPlatform::V1::StreamingRawPredictRequest.new
+            #   input << Google::Cloud::AIPlatform::V1::StreamingRawPredictRequest.new
+            #   input.close
+            #
+            #   # The returned object is a streamed enumerable yielding elements of type
+            #   # ::Google::Cloud::AIPlatform::V1::StreamingRawPredictResponse
+            #   output.each do |current_response|
+            #     p current_response
+            #   end
+            #
+            def streaming_raw_predict request, options = nil
+              unless request.is_a? ::Enumerable
+                raise ::ArgumentError, "request must be an Enumerable" unless request.respond_to? :to_enum
+                request = request.to_enum
+              end
+
+              request = request.lazy.map do |req|
+                ::Gapic::Protobuf.coerce req, to: ::Google::Cloud::AIPlatform::V1::StreamingRawPredictRequest
+              end
+
+              # Converts hash and nil to an options object
+              options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
+
+              # Customize the options with defaults
+              metadata = @config.rpcs.streaming_raw_predict.metadata.to_h
+
+              # Set x-goog-api-client and x-goog-user-project headers
+              metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
+                lib_name: @config.lib_name, lib_version: @config.lib_version,
+                gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION
+              metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
+
+              options.apply_defaults timeout: @config.rpcs.streaming_raw_predict.timeout,
+                                     metadata: metadata,
+                                     retry_policy: @config.rpcs.streaming_raw_predict.retry_policy
+
+              options.apply_defaults timeout: @config.timeout,
+                                     metadata: @config.metadata,
+                                     retry_policy: @config.retry_policy
+
+              @prediction_service_stub.call_rpc :streaming_raw_predict, request, options: options do |response, operation|
+                yield response, operation if block_given?
+                return response
+              end
+            rescue ::GRPC::BadStatus => e
+              raise ::Google::Cloud::Error.from_error(e)
+            end
+
             ##
             # Perform an online explanation.
             #
@@ -766,11 +1112,31 @@ module Google
                 #
                 attr_reader :raw_predict
                 ##
+                # RPC-specific configuration for `direct_predict`
+                # @return [::Gapic::Config::Method]
+                #
+                attr_reader :direct_predict
+                ##
+                # RPC-specific configuration for `direct_raw_predict`
+                # @return [::Gapic::Config::Method]
+                #
+                attr_reader :direct_raw_predict
+                ##
+                # RPC-specific configuration for `streaming_predict`
+                # @return [::Gapic::Config::Method]
+                #
+                attr_reader :streaming_predict
+                ##
                 # RPC-specific configuration for `server_streaming_predict`
                 # @return [::Gapic::Config::Method]
                 #
                 attr_reader :server_streaming_predict
                 ##
+                # RPC-specific configuration for `streaming_raw_predict`
+                # @return [::Gapic::Config::Method]
+                #
+                attr_reader :streaming_raw_predict
+                ##
                 # RPC-specific configuration for `explain`
                 # @return [::Gapic::Config::Method]
                 #
@@ -782,8 +1148,16 @@ module Google
                   @predict = ::Gapic::Config::Method.new predict_config
                   raw_predict_config = parent_rpcs.raw_predict if parent_rpcs.respond_to? :raw_predict
                   @raw_predict = ::Gapic::Config::Method.new raw_predict_config
+                  direct_predict_config = parent_rpcs.direct_predict if parent_rpcs.respond_to? :direct_predict
+                  @direct_predict = ::Gapic::Config::Method.new direct_predict_config
+                  direct_raw_predict_config = parent_rpcs.direct_raw_predict if parent_rpcs.respond_to? :direct_raw_predict
+                  @direct_raw_predict = ::Gapic::Config::Method.new direct_raw_predict_config
+                  streaming_predict_config = parent_rpcs.streaming_predict if parent_rpcs.respond_to? :streaming_predict
+                  @streaming_predict = ::Gapic::Config::Method.new streaming_predict_config
                   server_streaming_predict_config = parent_rpcs.server_streaming_predict if parent_rpcs.respond_to? :server_streaming_predict
                   @server_streaming_predict = ::Gapic::Config::Method.new server_streaming_predict_config
+                  streaming_raw_predict_config = parent_rpcs.streaming_raw_predict if parent_rpcs.respond_to? :streaming_raw_predict
+                  @streaming_raw_predict = ::Gapic::Config::Method.new streaming_raw_predict_config
                   explain_config = parent_rpcs.explain if parent_rpcs.respond_to? :explain
                   @explain = ::Gapic::Config::Method.new explain_config

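Because each new RPC gets its own `::Gapic::Config::Method` entry, call-level defaults can now be tuned per RPC at client construction time via the standard GAPIC configuration block. A sketch; the timeout and retry values are illustrative, not defaults shipped in this release:

```ruby
require "google/cloud/ai_platform/v1"

# Override timeout and retry policy for direct_predict only; the other
# RPCs keep their generated defaults. Values here are illustrative.
client = ::Google::Cloud::AIPlatform::V1::PredictionService::Client.new do |config|
  config.rpcs.direct_predict.timeout = 300.0
  config.rpcs.direct_predict.retry_policy = {
    initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14] # 14 = UNAVAILABLE
  }
end
```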
@@ -26,13 +26,14 @@ require "google/cloud/ai_platform/v1/feature_registry_service"
|
|
26
26
|
require "google/cloud/ai_platform/v1/index_endpoint_service"
|
27
27
|
require "google/cloud/ai_platform/v1/index_service"
|
28
28
|
require "google/cloud/ai_platform/v1/job_service"
|
29
|
+
require "google/cloud/ai_platform/v1/prediction_service"
|
30
|
+
require "google/cloud/ai_platform/v1/llm_utility_service"
|
29
31
|
require "google/cloud/ai_platform/v1/match_service"
|
30
32
|
require "google/cloud/ai_platform/v1/metadata_service"
|
31
33
|
require "google/cloud/ai_platform/v1/migration_service"
|
32
34
|
require "google/cloud/ai_platform/v1/model_service"
|
33
35
|
require "google/cloud/ai_platform/v1/model_garden_service"
|
34
36
|
require "google/cloud/ai_platform/v1/pipeline_service"
|
35
|
-
require "google/cloud/ai_platform/v1/prediction_service"
|
36
37
|
require "google/cloud/ai_platform/v1/schedule_service"
|
37
38
|
require "google/cloud/ai_platform/v1/specialist_pool_service"
|
38
39
|
require "google/cloud/ai_platform/v1/tensorboard_service"
|
data/lib/google/cloud/aiplatform/v1/llm_utility_service_pb.rb

@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/cloud/aiplatform/v1/llm_utility_service.proto
+
+require 'google/protobuf'
+
+require 'google/api/annotations_pb'
+require 'google/api/client_pb'
+require 'google/api/field_behavior_pb'
+require 'google/api/resource_pb'
+require 'google/cloud/aiplatform/v1/prediction_service_pb'
+require 'google/protobuf/struct_pb'
+
+
+descriptor_data = "\n4google/cloud/aiplatform/v1/llm_utility_service.proto\x12\x1agoogle.cloud.aiplatform.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/aiplatform/v1/prediction_service.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x84\x01\n\x14\x43omputeTokensRequest\x12<\n\x08\x65ndpoint\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"aiplatform.googleapis.com/Endpoint\x12.\n\tinstances\x18\x02 \x03(\x0b\x32\x16.google.protobuf.ValueB\x03\xe0\x41\x02\"/\n\nTokensInfo\x12\x0e\n\x06tokens\x18\x01 \x03(\x0c\x12\x11\n\ttoken_ids\x18\x02 \x03(\x03\"T\n\x15\x43omputeTokensResponse\x12;\n\x0btokens_info\x18\x01 \x03(\x0b\x32&.google.cloud.aiplatform.v1.TokensInfo2\xac\x05\n\x11LlmUtilityService\x12\x9d\x02\n\x0b\x43ountTokens\x12..google.cloud.aiplatform.v1.CountTokensRequest\x1a/.google.cloud.aiplatform.v1.CountTokensResponse\"\xac\x01\x82\xd3\xe4\x93\x02\x90\x01\"=/v1/{endpoint=projects/*/locations/*/endpoints/*}:countTokens:\x01*ZL\"G/v1/{endpoint=projects/*/locations/*/publishers/*/models/*}:countTokens:\x01*\xda\x41\x12\x65ndpoint,instances\x12\xa7\x02\n\rComputeTokens\x12\x30.google.cloud.aiplatform.v1.ComputeTokensRequest\x1a\x31.google.cloud.aiplatform.v1.ComputeTokensResponse\"\xb0\x01\x82\xd3\xe4\x93\x02\x94\x01\"?/v1/{endpoint=projects/*/locations/*/endpoints/*}:computeTokens:\x01*ZN\"I/v1/{endpoint=projects/*/locations/*/publishers/*/models/*}:computeTokens:\x01*\xda\x41\x12\x65ndpoint,instances\x1aM\xca\x41\x19\x61iplatform.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xd4\x01\n\x1e\x63om.google.cloud.aiplatform.v1B\x16LlmUtilityServiceProtoP\x01Z>cloud.google.com/go/aiplatform/apiv1/aiplatformpb;aiplatformpb\xaa\x02\x1aGoogle.Cloud.AIPlatform.V1\xca\x02\x1aGoogle\\Cloud\\AIPlatform\\V1\xea\x02\x1dGoogle::Cloud::AIPlatform::V1b\x06proto3"
+
+pool = Google::Protobuf::DescriptorPool.generated_pool
+
+begin
+  pool.add_serialized_file(descriptor_data)
+rescue TypeError => e
+  # Compatibility code: will be removed in the next major version.
+  require 'google/protobuf/descriptor_pb'
+  parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
+  parsed.clear_dependency
+  serialized = parsed.class.encode(parsed)
+  file = pool.add_serialized_file(serialized)
+  warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
+  imports = [
+    ["google.protobuf.Value", "google/protobuf/struct.proto"],
+  ]
+  imports.each do |type_name, expected_filename|
+    import_file = pool.lookup(type_name).file_descriptor
+    if import_file.name != expected_filename
+      warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
+    end
+  end
+  warn "Each proto file must use a consistent fully-qualified name."
+  warn "This will become an error in the next major version."
+end
+
+module Google
+  module Cloud
+    module AIPlatform
+      module V1
+        ComputeTokensRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ComputeTokensRequest").msgclass
+        TokensInfo = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.TokensInfo").msgclass
+        ComputeTokensResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ComputeTokensResponse").msgclass
+      end
+    end
+  end
+end
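The descriptor above registers three message classes. A sketch of constructing them directly; the endpoint value is a placeholder, and note that per the descriptor `TokensInfo` carries parallel `tokens` (bytes) and `token_ids` (int64) arrays:

```ruby
require "google/cloud/aiplatform/v1/llm_utility_service_pb"

# Build a ComputeTokensRequest as defined by the descriptor: a required
# endpoint plus repeated google.protobuf.Value instances. The endpoint
# below is a placeholder, not a value from this diff.
request = ::Google::Cloud::AIPlatform::V1::ComputeTokensRequest.new(
  endpoint:  "projects/my-project/locations/us-central1/endpoints/my-endpoint",
  instances: [::Google::Protobuf::Value.new(string_value: "Tokenize me")]
)

# A ComputeTokensResponse holds repeated TokensInfo, each with parallel
# tokens (bytes) and token_ids (int64) arrays.
info = ::Google::Cloud::AIPlatform::V1::TokensInfo.new(tokens: ["He".b], token_ids: [42])
```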
data/lib/google/cloud/aiplatform/v1/llm_utility_service_services_pb.rb

@@ -0,0 +1,47 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# Source: google/cloud/aiplatform/v1/llm_utility_service.proto for package 'Google.Cloud.AIPlatform.V1'
+# Original file comments:
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'grpc'
+require 'google/cloud/aiplatform/v1/llm_utility_service_pb'
+
+module Google
+  module Cloud
+    module AIPlatform
+      module V1
+        module LlmUtilityService
+          # Service for LLM related utility functions.
+          class Service
+
+            include ::GRPC::GenericService
+
+            self.marshal_class_method = :encode
+            self.unmarshal_class_method = :decode
+            self.service_name = 'google.cloud.aiplatform.v1.LlmUtilityService'
+
+            # Perform a token counting.
+            rpc :CountTokens, ::Google::Cloud::AIPlatform::V1::CountTokensRequest, ::Google::Cloud::AIPlatform::V1::CountTokensResponse
+            # Return a list of tokens based on the input text.
+            rpc :ComputeTokens, ::Google::Cloud::AIPlatform::V1::ComputeTokensRequest, ::Google::Cloud::AIPlatform::V1::ComputeTokensResponse
+          end
+
+          Stub = Service.rpc_stub_class
+        end
+      end
+    end
+  end
+end
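For completeness, the generated `Stub` can be used without the GAPIC wrapper. A sketch only: the host follows the usual regional Vertex AI endpoint pattern, and real calls additionally need OAuth2 call credentials on the channel, which this omits:

```ruby
require "grpc"
require "google/cloud/aiplatform/v1/llm_utility_service_services_pb"

# Low-level gRPC stub for the new service. grpc generates snake_case
# methods (count_tokens, compute_tokens) from the rpc declarations above.
# TLS-only channel credentials shown; production calls also need OAuth2.
stub = ::Google::Cloud::AIPlatform::V1::LlmUtilityService::Stub.new(
  "us-central1-aiplatform.googleapis.com:443",
  ::GRPC::Core::ChannelCredentials.new
)

request = ::Google::Cloud::AIPlatform::V1::ComputeTokensRequest.new(
  endpoint: "projects/my-project/locations/us-central1/endpoints/my-endpoint" # placeholder
)
response = stub.compute_tokens(request)
```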
@@ -10,11 +10,12 @@ require 'google/cloud/aiplatform/v1/deployed_model_ref_pb'
|
|
10
10
|
require 'google/cloud/aiplatform/v1/encryption_spec_pb'
|
11
11
|
require 'google/cloud/aiplatform/v1/env_var_pb'
|
12
12
|
require 'google/cloud/aiplatform/v1/explanation_pb'
|
13
|
+
require 'google/protobuf/duration_pb'
|
13
14
|
require 'google/protobuf/struct_pb'
|
14
15
|
require 'google/protobuf/timestamp_pb'
|
15
16
|
|
16
17
|
|
17
|
-
descriptor_data = "\n&google/cloud/aiplatform/v1/model.proto\x12\x1agoogle.cloud.aiplatform.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/aiplatform/v1/deployed_model_ref.proto\x1a\x30google/cloud/aiplatform/v1/encryption_spec.proto\x1a(google/cloud/aiplatform/v1/env_var.proto\x1a,google/cloud/aiplatform/v1/explanation.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xdf\x10\n\x05Model\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1a\n\nversion_id\x18\x1c \x01(\tB\x06\xe0\x41\x05\xe0\x41\x03\x12\x17\n\x0fversion_aliases\x18\x1d \x03(\t\x12<\n\x13version_create_time\x18\x1f \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12<\n\x13version_update_time\x18 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0c\x64isplay_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x1b\n\x13version_description\x18\x1e \x01(\t\x12\x45\n\x10predict_schemata\x18\x04 \x01(\x0b\x32+.google.cloud.aiplatform.v1.PredictSchemata\x12 \n\x13metadata_schema_uri\x18\x05 \x01(\tB\x03\xe0\x41\x05\x12-\n\x08metadata\x18\x06 \x01(\x0b\x32\x16.google.protobuf.ValueB\x03\xe0\x41\x05\x12U\n\x18supported_export_formats\x18\x14 \x03(\x0b\x32..google.cloud.aiplatform.v1.Model.ExportFormatB\x03\xe0\x41\x03\x12M\n\x11training_pipeline\x18\x07 \x01(\tB2\xe0\x41\x03\xfa\x41,\n*aiplatform.googleapis.com/TrainingPipeline\x12\x43\n\x0cpipeline_job\x18/ \x01(\tB-\xe0\x41\x01\xfa\x41\'\n%aiplatform.googleapis.com/PipelineJob\x12K\n\x0e\x63ontainer_spec\x18\t \x01(\x0b\x32..google.cloud.aiplatform.v1.ModelContainerSpecB\x03\xe0\x41\x04\x12\x19\n\x0c\x61rtifact_uri\x18\x1a \x01(\tB\x03\xe0\x41\x05\x12l\n$supported_deployment_resources_types\x18\n \x03(\x0e\x32\x39.google.cloud.aiplatform.v1.Model.DeploymentResourcesTypeB\x03\xe0\x41\x03\x12,\n\x1fsupported_input_storage_formats\x18\x0b \x03(\tB\x03\xe0\x41\x03\x12-\n supported_output_storage_formats\x18\x0c \x03(\tB\x03\xe0\x41\x03\x12\x34\n\x0b\x63reate_time\x18\r \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x34\n\x0bupdate_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12J\n\x0f\x64\x65ployed_models\x18\x0f \x03(\x0b\x32,.google.cloud.aiplatform.v1.DeployedModelRefB\x03\xe0\x41\x03\x12\x45\n\x10\x65xplanation_spec\x18\x17 \x01(\x0b\x32+.google.cloud.aiplatform.v1.ExplanationSpec\x12\x0c\n\x04\x65tag\x18\x10 \x01(\t\x12=\n\x06labels\x18\x11 \x03(\x0b\x32-.google.cloud.aiplatform.v1.Model.LabelsEntry\x12\x43\n\x0f\x65ncryption_spec\x18\x18 \x01(\x0b\x32*.google.cloud.aiplatform.v1.EncryptionSpec\x12K\n\x11model_source_info\x18& \x01(\x0b\x32+.google.cloud.aiplatform.v1.ModelSourceInfoB\x03\xe0\x41\x03\x12U\n\x13original_model_info\x18\" \x01(\x0b\x32\x33.google.cloud.aiplatform.v1.Model.OriginalModelInfoB\x03\xe0\x41\x03\x12\x1e\n\x11metadata_artifact\x18, \x01(\tB\x03\xe0\x41\x03\x1a\xd5\x01\n\x0c\x45xportFormat\x12\x0f\n\x02id\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x62\n\x13\x65xportable_contents\x18\x02 \x03(\x0e\x32@.google.cloud.aiplatform.v1.Model.ExportFormat.ExportableContentB\x03\xe0\x41\x03\"P\n\x11\x45xportableContent\x12\"\n\x1e\x45XPORTABLE_CONTENT_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x41RTIFACT\x10\x01\x12\t\n\x05IMAGE\x10\x02\x1aK\n\x11OriginalModelInfo\x12\x36\n\x05model\x18\x01 \x01(\tB\'\xe0\x41\x03\xfa\x41!\n\x1f\x61iplatform.googleapis.com/Model\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"\x8c\x01\n\x17\x44\x65ploymentResourcesType\x12)\n%DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED\x10\x00\x12\x17\n\x13\x44\x45\x44ICATED_RESOURCES\x10\x01\x12\x17\n\x13\x41UTOMATIC_RESOURCES\x10\x02\x12\x14\n\x10SHARED_RESOURCES\x10\x03:\\\xea\x41Y\n\x1f\x61iplatform.googleapis.com/Model\x12\x36projects/{project}/locations/{location}/models/{model}\"(\n\x13LargeModelReference\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\"{\n\x0fPredictSchemata\x12 \n\x13instance_schema_uri\x18\x01 \x01(\tB\x03\xe0\x41\x05\x12\"\n\x15parameters_schema_uri\x18\x02 \x01(\tB\x03\xe0\x41\x05\x12\"\n\x15prediction_schema_uri\x18\x03 \x01(\tB\x03\xe0\x41\x05\"\
|
18
|
+
descriptor_data = "\n&google/cloud/aiplatform/v1/model.proto\x12\x1agoogle.cloud.aiplatform.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/aiplatform/v1/deployed_model_ref.proto\x1a\x30google/cloud/aiplatform/v1/encryption_spec.proto\x1a(google/cloud/aiplatform/v1/env_var.proto\x1a,google/cloud/aiplatform/v1/explanation.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xdf\x10\n\x05Model\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1a\n\nversion_id\x18\x1c \x01(\tB\x06\xe0\x41\x05\xe0\x41\x03\x12\x17\n\x0fversion_aliases\x18\x1d \x03(\t\x12<\n\x13version_create_time\x18\x1f \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12<\n\x13version_update_time\x18 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0c\x64isplay_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x1b\n\x13version_description\x18\x1e \x01(\t\x12\x45\n\x10predict_schemata\x18\x04 \x01(\x0b\x32+.google.cloud.aiplatform.v1.PredictSchemata\x12 \n\x13metadata_schema_uri\x18\x05 \x01(\tB\x03\xe0\x41\x05\x12-\n\x08metadata\x18\x06 \x01(\x0b\x32\x16.google.protobuf.ValueB\x03\xe0\x41\x05\x12U\n\x18supported_export_formats\x18\x14 \x03(\x0b\x32..google.cloud.aiplatform.v1.Model.ExportFormatB\x03\xe0\x41\x03\x12M\n\x11training_pipeline\x18\x07 \x01(\tB2\xe0\x41\x03\xfa\x41,\n*aiplatform.googleapis.com/TrainingPipeline\x12\x43\n\x0cpipeline_job\x18/ \x01(\tB-\xe0\x41\x01\xfa\x41\'\n%aiplatform.googleapis.com/PipelineJob\x12K\n\x0e\x63ontainer_spec\x18\t \x01(\x0b\x32..google.cloud.aiplatform.v1.ModelContainerSpecB\x03\xe0\x41\x04\x12\x19\n\x0c\x61rtifact_uri\x18\x1a \x01(\tB\x03\xe0\x41\x05\x12l\n$supported_deployment_resources_types\x18\n \x03(\x0e\x32\x39.google.cloud.aiplatform.v1.Model.DeploymentResourcesTypeB\x03\xe0\x41\x03\x12,\n\x1fsupported_input_storage_formats\x18\x0b \x03(\tB\x03\xe0\x41\x03\x12-\n supported_output_storage_formats\x18\x0c \x03(\tB\x03\xe0\x41\x03\x12\x34\n\x0b\x63reate_time\x18\r \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x34\n\x0bupdate_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12J\n\x0f\x64\x65ployed_models\x18\x0f \x03(\x0b\x32,.google.cloud.aiplatform.v1.DeployedModelRefB\x03\xe0\x41\x03\x12\x45\n\x10\x65xplanation_spec\x18\x17 \x01(\x0b\x32+.google.cloud.aiplatform.v1.ExplanationSpec\x12\x0c\n\x04\x65tag\x18\x10 \x01(\t\x12=\n\x06labels\x18\x11 \x03(\x0b\x32-.google.cloud.aiplatform.v1.Model.LabelsEntry\x12\x43\n\x0f\x65ncryption_spec\x18\x18 \x01(\x0b\x32*.google.cloud.aiplatform.v1.EncryptionSpec\x12K\n\x11model_source_info\x18& \x01(\x0b\x32+.google.cloud.aiplatform.v1.ModelSourceInfoB\x03\xe0\x41\x03\x12U\n\x13original_model_info\x18\" \x01(\x0b\x32\x33.google.cloud.aiplatform.v1.Model.OriginalModelInfoB\x03\xe0\x41\x03\x12\x1e\n\x11metadata_artifact\x18, \x01(\tB\x03\xe0\x41\x03\x1a\xd5\x01\n\x0c\x45xportFormat\x12\x0f\n\x02id\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x62\n\x13\x65xportable_contents\x18\x02 \x03(\x0e\x32@.google.cloud.aiplatform.v1.Model.ExportFormat.ExportableContentB\x03\xe0\x41\x03\"P\n\x11\x45xportableContent\x12\"\n\x1e\x45XPORTABLE_CONTENT_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x41RTIFACT\x10\x01\x12\t\n\x05IMAGE\x10\x02\x1aK\n\x11OriginalModelInfo\x12\x36\n\x05model\x18\x01 \x01(\tB\'\xe0\x41\x03\xfa\x41!\n\x1f\x61iplatform.googleapis.com/Model\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"\x8c\x01\n\x17\x44\x65ploymentResourcesType\x12)\n%DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED\x10\x00\x12\x17\n\x13\x44\x45\x44ICATED_RESOURCES\x10\x01\x12\x17\n\x13\x41UTOMATIC_RESOURCES\x10\x02\x12\x14\n\x10SHARED_RESOURCES\x10\x03:\\\xea\x41Y\n\x1f\x61iplatform.googleapis.com/Model\x12\x36projects/{project}/locations/{location}/models/{model}\"(\n\x13LargeModelReference\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\"{\n\x0fPredictSchemata\x12 \n\x13instance_schema_uri\x18\x01 \x01(\tB\x03\xe0\x41\x05\x12\"\n\x15parameters_schema_uri\x18\x02 \x01(\tB\x03\xe0\x41\x05\x12\"\n\x15prediction_schema_uri\x18\x03 \x01(\tB\x03\xe0\x41\x05\"\x93\x04\n\x12ModelContainerSpec\x12\x19\n\timage_uri\x18\x01 \x01(\tB\x06\xe0\x41\x02\xe0\x41\x05\x12\x14\n\x07\x63ommand\x18\x02 \x03(\tB\x03\xe0\x41\x05\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x05\x12\x34\n\x03\x65nv\x18\x04 \x03(\x0b\x32\".google.cloud.aiplatform.v1.EnvVarB\x03\xe0\x41\x05\x12\x34\n\x05ports\x18\x05 \x03(\x0b\x32 .google.cloud.aiplatform.v1.PortB\x03\xe0\x41\x05\x12\x1a\n\rpredict_route\x18\x06 \x01(\tB\x03\xe0\x41\x05\x12\x19\n\x0chealth_route\x18\x07 \x01(\tB\x03\xe0\x41\x05\x12\x39\n\ngrpc_ports\x18\t \x03(\x0b\x32 .google.cloud.aiplatform.v1.PortB\x03\xe0\x41\x05\x12:\n\x12\x64\x65ployment_timeout\x18\n \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x05\x12\"\n\x15shared_memory_size_mb\x18\x0b \x01(\x03\x42\x03\xe0\x41\x05\x12=\n\rstartup_probe\x18\x0c \x01(\x0b\x32!.google.cloud.aiplatform.v1.ProbeB\x03\xe0\x41\x05\x12<\n\x0chealth_probe\x18\r \x01(\x0b\x32!.google.cloud.aiplatform.v1.ProbeB\x03\xe0\x41\x05\"\x1e\n\x04Port\x12\x16\n\x0e\x63ontainer_port\x18\x03 \x01(\x05\"\xe6\x01\n\x0fModelSourceInfo\x12P\n\x0bsource_type\x18\x01 \x01(\x0e\x32;.google.cloud.aiplatform.v1.ModelSourceInfo.ModelSourceType\x12\x0c\n\x04\x63opy\x18\x02 \x01(\x08\"s\n\x0fModelSourceType\x12!\n\x1dMODEL_SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06\x41UTOML\x10\x01\x12\n\n\x06\x43USTOM\x10\x02\x12\x08\n\x04\x42QML\x10\x03\x12\x10\n\x0cMODEL_GARDEN\x10\x04\x12\t\n\x05GENIE\x10\x05\"\xa3\x01\n\x05Probe\x12<\n\x04\x65xec\x18\x01 \x01(\x0b\x32,.google.cloud.aiplatform.v1.Probe.ExecActionH\x00\x12\x16\n\x0eperiod_seconds\x18\x02 \x01(\x05\x12\x17\n\x0ftimeout_seconds\x18\x03 \x01(\x05\x1a\x1d\n\nExecAction\x12\x0f\n\x07\x63ommand\x18\x01 \x03(\tB\x0c\n\nprobe_typeB\xc8\x01\n\x1e\x63om.google.cloud.aiplatform.v1B\nModelProtoP\x01Z>cloud.google.com/go/aiplatform/apiv1/aiplatformpb;aiplatformpb\xaa\x02\x1aGoogle.Cloud.AIPlatform.V1\xca\x02\x1aGoogle\\Cloud\\AIPlatform\\V1\xea\x02\x1dGoogle::Cloud::AIPlatform::V1b\x06proto3"
|
18
19
|
|
19
20
|
pool = Google::Protobuf::DescriptorPool.generated_pool
|
20
21
|
|
@@ -35,6 +36,7 @@ rescue TypeError => e
|
|
35
36
|
["google.cloud.aiplatform.v1.ExplanationSpec", "google/cloud/aiplatform/v1/explanation.proto"],
|
36
37
|
["google.cloud.aiplatform.v1.EncryptionSpec", "google/cloud/aiplatform/v1/encryption_spec.proto"],
|
37
38
|
["google.cloud.aiplatform.v1.EnvVar", "google/cloud/aiplatform/v1/env_var.proto"],
|
39
|
+
["google.protobuf.Duration", "google/protobuf/duration.proto"],
|
38
40
|
]
|
39
41
|
imports.each do |type_name, expected_filename|
|
40
42
|
import_file = pool.lookup(type_name).file_descriptor
|
@@ -61,6 +63,8 @@ module Google
|
|
61
63
|
Port = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.Port").msgclass
|
62
64
|
ModelSourceInfo = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ModelSourceInfo").msgclass
|
63
65
|
ModelSourceInfo::ModelSourceType = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.ModelSourceInfo.ModelSourceType").enummodule
|
66
|
+
Probe = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.Probe").msgclass
|
67
|
+
Probe::ExecAction = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.aiplatform.v1.Probe.ExecAction").msgclass
|
64
68
|
end
|
65
69
|
end
|
66
70
|
end
|
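The regenerated model.proto descriptor is where the substance of this hunk lives: `ModelContainerSpec` gains `grpc_ports`, `deployment_timeout`, `shared_memory_size_mb`, `startup_probe`, and `health_probe`, backed by the new `Probe` and `Probe::ExecAction` messages registered above. A sketch of populating the new fields; the image URI and probe command are placeholders:

```ruby
require "google/cloud/aiplatform/v1/model_pb"

# Exercise the container fields added by this regeneration. The image
# and probe command values are illustrative placeholders.
spec = ::Google::Cloud::AIPlatform::V1::ModelContainerSpec.new(
  image_uri:             "us-docker.pkg.dev/my-project/my-repo/my-model:latest",
  deployment_timeout:    ::Google::Protobuf::Duration.new(seconds: 1800),
  shared_memory_size_mb: 256,
  startup_probe: ::Google::Cloud::AIPlatform::V1::Probe.new(
    exec:            ::Google::Cloud::AIPlatform::V1::Probe::ExecAction.new(command: ["cat", "/tmp/ready"]),
    period_seconds:  10,
    timeout_seconds: 5
  )
)
```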