google-cloud-ai_platform-v1 0.43.0 → 0.45.0
- checksums.yaml +4 -4
- data/lib/google/cloud/ai_platform/v1/dataset_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/deployment_resource_pool_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/endpoint_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/feature_online_store_admin_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/feature_online_store_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/feature_registry_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/featurestore_online_serving_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/featurestore_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/gen_ai_tuning_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/index_endpoint_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/index_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/job_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/client.rb +8 -2
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/rest/client.rb +8 -2
- data/lib/google/cloud/ai_platform/v1/llm_utility_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/match_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/metadata_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/migration_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/model_garden_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/model_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/notebook_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/persistent_resource_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/pipeline_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/prediction_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/schedule_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/specialist_pool_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/tensorboard_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/version.rb +1 -1
- data/lib/google/cloud/ai_platform/v1/vizier_service/rest/service_stub.rb +1 -1
- data/lib/google/cloud/aiplatform/v1/llm_utility_service_pb.rb +3 -1
- data/proto_docs/google/cloud/aiplatform/v1/llm_utility_service.rb +12 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 20ab2142a54e0bfb88772b77efd9679edcdadefd37f71304cf9f37ecab988521
+  data.tar.gz: f6a8eda7818125751b435d0fea140c3ab827eec254f02dbe5f4d1f04198f3bb3
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4acf620176962649dc317fc90dc18d18c5bf015cda458a0570d00be9f677a14ae72c49f853057e4486be2c3903e8a00e9200252e1abadc15bdf0d91ddbbac62e
+  data.tar.gz: d60171d3a541e24e2791542cb9ab3b08d5993459471beac1596f29dde50e9828f5c700929b4b198313027ca55da3bc6b5b7024b4e3cd4f805be5ccf6dc59c051
data/lib/google/cloud/ai_platform/v1/llm_utility_service/client.rb
CHANGED
@@ -303,7 +303,7 @@ module Google
 # @param options [::Gapic::CallOptions, ::Hash]
 #   Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
 #
-# @overload compute_tokens(endpoint: nil, instances: nil)
+# @overload compute_tokens(endpoint: nil, instances: nil, model: nil, contents: nil)
 #   Pass arguments to `compute_tokens` via keyword arguments. Note that at
 #   least one keyword argument is required. To specify no parameters, or to keep all
 #   the default parameter values, pass an empty Hash as a request object (see above).
@@ -312,9 +312,15 @@ module Google
 #   Required. The name of the Endpoint requested to get lists of tokens and
 #   token ids.
 # @param instances [::Array<::Google::Protobuf::Value, ::Hash>]
-#
+#   Optional. The instances that are the input to token computing API call.
 #   Schema is identical to the prediction schema of the text model, even for
 #   the non-text models, like chat models, or Codey models.
+# @param model [::String]
+#   Optional. The name of the publisher model requested to serve the
+#   prediction. Format:
+#   projects/\\{project}/locations/\\{location}/publishers/*/models/*
+# @param contents [::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>]
+#   Optional. Input content.
 #
 # @yield [response, operation] Access the result along with the RPC operation
 # @yieldparam response [::Google::Cloud::AIPlatform::V1::ComputeTokensResponse]
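The hunk above widens the documented `compute_tokens` overload with the new `model` and `contents` keyword arguments. A minimal sketch of a call using them through the gRPC client follows; the project, location, and publisher model resource names are placeholders, not values taken from this diff.

```ruby
require "google/cloud/ai_platform/v1"

# Placeholder resource name; substitute your own project/location/model.
model_name = "projects/my-project/locations/us-central1/publishers/google/models/gemini-1.0-pro"

client = ::Google::Cloud::AIPlatform::V1::LlmUtilityService::Client.new

# compute_tokens now also accepts model: and contents: alongside
# endpoint: and instances:.
response = client.compute_tokens(
  endpoint: model_name,
  model:    model_name,
  contents: [
    ::Google::Cloud::AIPlatform::V1::Content.new(
      role:  "user",
      parts: [::Google::Cloud::AIPlatform::V1::Part.new(text: "Hello, world")]
    )
  ]
)

response.tokens_info.each { |info| p info.token_ids.to_a }
```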
data/lib/google/cloud/ai_platform/v1/llm_utility_service/rest/client.rb
CHANGED
@@ -291,7 +291,7 @@ module Google
 # @param options [::Gapic::CallOptions, ::Hash]
 #   Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
 #
-# @overload compute_tokens(endpoint: nil, instances: nil)
+# @overload compute_tokens(endpoint: nil, instances: nil, model: nil, contents: nil)
 #   Pass arguments to `compute_tokens` via keyword arguments. Note that at
 #   least one keyword argument is required. To specify no parameters, or to keep all
 #   the default parameter values, pass an empty Hash as a request object (see above).
@@ -300,9 +300,15 @@ module Google
 #   Required. The name of the Endpoint requested to get lists of tokens and
 #   token ids.
 # @param instances [::Array<::Google::Protobuf::Value, ::Hash>]
-#
+#   Optional. The instances that are the input to token computing API call.
 #   Schema is identical to the prediction schema of the text model, even for
 #   the non-text models, like chat models, or Codey models.
+# @param model [::String]
+#   Optional. The name of the publisher model requested to serve the
+#   prediction. Format:
+#   projects/\\{project}/locations/\\{location}/publishers/*/models/*
+# @param contents [::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>]
+#   Optional. Input content.
 # @yield [result, operation] Access the result along with the TransportOperation object
 # @yieldparam result [::Google::Cloud::AIPlatform::V1::ComputeTokensResponse]
 # @yieldparam operation [::Gapic::Rest::TransportOperation]
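The REST client documents the same widened overload. A sketch of the equivalent call through the REST transport, this time passing a request object; the resource names are again placeholders.

```ruby
require "google/cloud/ai_platform/v1"

rest_client = ::Google::Cloud::AIPlatform::V1::LlmUtilityService::Rest::Client.new

# The request-object form is equivalent to the keyword-argument form above.
request = ::Google::Cloud::AIPlatform::V1::ComputeTokensRequest.new(
  endpoint: "projects/my-project/locations/us-central1/publishers/google/models/gemini-1.0-pro",
  contents: [
    ::Google::Cloud::AIPlatform::V1::Content.new(
      role:  "user",
      parts: [::Google::Cloud::AIPlatform::V1::Part.new(text: "How many tokens is this?")]
    )
  ]
)

result = rest_client.compute_tokens request
result.tokens_info.each { |info| puts info.role }
```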
data/lib/google/cloud/aiplatform/v1/llm_utility_service_pb.rb
CHANGED
@@ -8,11 +8,12 @@ require 'google/api/annotations_pb'
 require 'google/api/client_pb'
 require 'google/api/field_behavior_pb'
 require 'google/api/resource_pb'
+require 'google/cloud/aiplatform/v1/content_pb'
 require 'google/cloud/aiplatform/v1/prediction_service_pb'
 require 'google/protobuf/struct_pb'


-descriptor_data = "\n4google/cloud/aiplatform/v1/llm_utility_service.proto\x12\x1agoogle.cloud.aiplatform.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/aiplatform/v1/prediction_service.proto\x1a\x1cgoogle/protobuf/struct.proto\"\
+descriptor_data = "\n4google/cloud/aiplatform/v1/llm_utility_service.proto\x12\x1agoogle.cloud.aiplatform.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a(google/cloud/aiplatform/v1/content.proto\x1a\x33google/cloud/aiplatform/v1/prediction_service.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xd4\x01\n\x14\x43omputeTokensRequest\x12<\n\x08\x65ndpoint\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"aiplatform.googleapis.com/Endpoint\x12.\n\tinstances\x18\x02 \x03(\x0b\x32\x16.google.protobuf.ValueB\x03\xe0\x41\x01\x12\x12\n\x05model\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12:\n\x08\x63ontents\x18\x04 \x03(\x0b\x32#.google.cloud.aiplatform.v1.ContentB\x03\xe0\x41\x01\"B\n\nTokensInfo\x12\x0e\n\x06tokens\x18\x01 \x03(\x0c\x12\x11\n\ttoken_ids\x18\x02 \x03(\x03\x12\x11\n\x04role\x18\x03 \x01(\tB\x03\xe0\x41\x01\"T\n\x15\x43omputeTokensResponse\x12;\n\x0btokens_info\x18\x01 \x03(\x0b\x32&.google.cloud.aiplatform.v1.TokensInfo2\xac\x05\n\x11LlmUtilityService\x12\x9d\x02\n\x0b\x43ountTokens\x12..google.cloud.aiplatform.v1.CountTokensRequest\x1a/.google.cloud.aiplatform.v1.CountTokensResponse\"\xac\x01\xda\x41\x12\x65ndpoint,instances\x82\xd3\xe4\x93\x02\x90\x01\"=/v1/{endpoint=projects/*/locations/*/endpoints/*}:countTokens:\x01*ZL\"G/v1/{endpoint=projects/*/locations/*/publishers/*/models/*}:countTokens:\x01*\x12\xa7\x02\n\rComputeTokens\x12\x30.google.cloud.aiplatform.v1.ComputeTokensRequest\x1a\x31.google.cloud.aiplatform.v1.ComputeTokensResponse\"\xb0\x01\xda\x41\x12\x65ndpoint,instances\x82\xd3\xe4\x93\x02\x94\x01\"?/v1/{endpoint=projects/*/locations/*/endpoints/*}:computeTokens:\x01*ZN\"I/v1/{endpoint=projects/*/locations/*/publishers/*/models/*}:computeTokens:\x01*\x1aM\xca\x41\x19\x61iplatform.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xd4\x01\n\x1e\x63om.google.cloud.aiplatform.v1B\x16LlmUtilityServiceProtoP\x01Z>cloud.google.com/go/aiplatform/apiv1/aiplatformpb;aiplatformpb\xaa\x02\x1aGoogle.Cloud.AIPlatform.V1\xca\x02\x1aGoogle\\Cloud\\AIPlatform\\V1\xea\x02\x1dGoogle::Cloud::AIPlatform::V1b\x06proto3"

 pool = Google::Protobuf::DescriptorPool.generated_pool

@@ -28,6 +29,7 @@ rescue TypeError
 warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
 imports = [
   ["google.protobuf.Value", "google/protobuf/struct.proto"],
+  ["google.cloud.aiplatform.v1.Content", "google/cloud/aiplatform/v1/content.proto"],
 ]
 imports.each do |type_name, expected_filename|
   import_file = pool.lookup(type_name).file_descriptor
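The regenerated `_pb.rb` now requires `content_pb` and encodes the `model`, `contents`, and `role` fields in its descriptor string. A small inspection sketch, assuming the gem is installed, that checks the new fields resolve from the generated descriptor pool:

```ruby
require "google/cloud/aiplatform/v1/llm_utility_service_pb"

pool = Google::Protobuf::DescriptorPool.generated_pool

# The request descriptor should now expose model and contents alongside
# endpoint and instances.
request_descriptor = pool.lookup "google.cloud.aiplatform.v1.ComputeTokensRequest"
%w[endpoint instances model contents].each do |field|
  puts "#{field}: #{request_descriptor.lookup(field) ? 'present' : 'missing'}"
end

# Content should resolve from the newly imported content.proto.
content_file = pool.lookup("google.cloud.aiplatform.v1.Content").file_descriptor
puts content_file.name  # expected: google/cloud/aiplatform/v1/content.proto
```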
data/proto_docs/google/cloud/aiplatform/v1/llm_utility_service.rb
CHANGED
@@ -28,9 +28,17 @@ module Google
 #     token ids.
 # @!attribute [rw] instances
 #   @return [::Array<::Google::Protobuf::Value>]
-#
+#     Optional. The instances that are the input to token computing API call.
 #     Schema is identical to the prediction schema of the text model, even for
 #     the non-text models, like chat models, or Codey models.
+# @!attribute [rw] model
+#   @return [::String]
+#     Optional. The name of the publisher model requested to serve the
+#     prediction. Format:
+#     projects/\\{project}/locations/\\{location}/publishers/*/models/*
+# @!attribute [rw] contents
+#   @return [::Array<::Google::Cloud::AIPlatform::V1::Content>]
+#     Optional. Input content.
 class ComputeTokensRequest
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -43,6 +51,9 @@ module Google
 # @!attribute [rw] token_ids
 #   @return [::Array<::Integer>]
 #     A list of token ids from the input.
+# @!attribute [rw] role
+#   @return [::String]
+#     Optional. Optional fields for the role from the corresponding Content.
 class TokensInfo
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
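The proto_docs update documents the new `ComputeTokensRequest#model`, `ComputeTokensRequest#contents`, and `TokensInfo#role` fields. A sketch that builds the messages locally just to show the new fields; the token bytes and ids below are illustrative, not output from the service.

```ruby
require "google/cloud/ai_platform/v1"

# TokensInfo#role echoes the role of the Content the tokens came from.
info = ::Google::Cloud::AIPlatform::V1::TokensInfo.new(
  tokens:    ["Hel".b, "lo".b],   # repeated bytes; illustrative values
  token_ids: [15496, 1139],       # illustrative ids
  role:      "user"
)

response = ::Google::Cloud::AIPlatform::V1::ComputeTokensResponse.new tokens_info: [info]
response.tokens_info.each do |i|
  puts "role=#{i.role} token_ids=#{i.token_ids.to_a.inspect}"
end
```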
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-ai_platform-v1
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.45.0
 platform: ruby
 authors:
 - Google LLC
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-07-
+date: 2024-07-09 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: gapic-common
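To pick up the 0.45.0 release recorded in the gemspec metadata above, a Gemfile entry along these lines would do:

```ruby
# Gemfile — pin to the 0.45 series to get the new ComputeTokens fields.
gem "google-cloud-ai_platform-v1", "~> 0.45"
```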