google-cloud-ai_platform-v1 0.43.0 → 0.44.0

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: cd3d6857272d2def6e204c5c24e6330d5b4e301be661cd59769242047eca873a
- data.tar.gz: 22bdfaa734640a641e795348b97387f308518be9e8bbc18d06dd360615b8ce4f
+ metadata.gz: dfebb0bd20d17832ff682e18f1c6262d7e56e55ed3162eb0b1b437c482d52706
+ data.tar.gz: 4787c5bde77b648367cd0c05bba4bbe884de059f0077f390091cf0b021d1dc63
  SHA512:
- metadata.gz: 45b421686312408bd49955d1b6b3df10eead6aabb986621f8925524c94a573fc34add89eaea583dd8d8824ab24543a53c7b51124bedd814ad81118552ade479a
- data.tar.gz: 2b10f6561ef6913aea0c2cf5d1989ed7d2cebc82234e3b1bed2eda9b4e878024abad335e3e3a9b12ca00d42c4f1be0c9df0ca3acf6ae178f1fd17ad7b0533e5d
+ metadata.gz: aa2d04b113dc41123fcbb033fbb3bbf5d1de2eec3224cb9c63e260ddda39cd5513bc9b493f5c28cae7941c9d926facae9628fb2a8abbdc81599a40ac034f5250
+ data.tar.gz: 38a39e98c62f3dc416c3c4f58f49756d46b6ef06d84d0ba62cbf92a6836be844ebe362e1c5a534256ca42f81c327e3bb6c63885da0c8fd7f6b679877682dd379
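These checksum updates are the routine result of repackaging the gem for 0.44.0. If you want to confirm a downloaded copy against the published values, a minimal Ruby sketch is shown below; the local gem path is a placeholder, and it simply hashes the metadata.gz and data.tar.gz entries that checksums.yaml covers.

    require "digest"
    require "rubygems/package"

    # Placeholder path to a locally downloaded copy of the gem.
    gem_path = "google-cloud-ai_platform-v1-0.44.0.gem"

    # A .gem file is a tar archive; metadata.gz and data.tar.gz inside it are
    # the files whose SHA256/SHA512 digests appear in checksums.yaml.
    File.open(gem_path, "rb") do |io|
      Gem::Package::TarReader.new(io) do |tar|
        tar.each do |entry|
          next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
          puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
        end
      end
    end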
@@ -303,7 +303,7 @@ module Google
  # @param options [::Gapic::CallOptions, ::Hash]
  # Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
  #
- # @overload compute_tokens(endpoint: nil, instances: nil)
+ # @overload compute_tokens(endpoint: nil, instances: nil, model: nil, contents: nil)
  # Pass arguments to `compute_tokens` via keyword arguments. Note that at
  # least one keyword argument is required. To specify no parameters, or to keep all
  # the default parameter values, pass an empty Hash as a request object (see above).
@@ -312,9 +312,15 @@ module Google
  # Required. The name of the Endpoint requested to get lists of tokens and
  # token ids.
  # @param instances [::Array<::Google::Protobuf::Value, ::Hash>]
- # Required. The instances that are the input to token computing API call.
+ # Optional. The instances that are the input to token computing API call.
  # Schema is identical to the prediction schema of the text model, even for
  # the non-text models, like chat models, or Codey models.
+ # @param model [::String]
+ # Optional. The name of the publisher model requested to serve the
+ # prediction. Format:
+ # projects/\\{project}/locations/\\{location}/publishers/*/models/*
+ # @param contents [::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>]
+ # Optional. Input content.
  #
  # @yield [response, operation] Access the result along with the RPC operation
  # @yieldparam response [::Google::Cloud::AIPlatform::V1::ComputeTokensResponse]
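In practice the new keywords let compute_tokens be called in the Gemini-style shape rather than with raw Protobuf::Value instances. A minimal sketch against the LlmUtilityService client follows; the project, location, and model names are placeholders, and the Part#text usage is an assumption based on the content.proto types this release starts importing.

    require "google/cloud/ai_platform/v1"

    client = Google::Cloud::AIPlatform::V1::LlmUtilityService::Client.new

    # Placeholder publisher model path; substitute your project and location.
    model_path = "projects/my-project/locations/us-central1/publishers/google/models/gemini-pro"

    content = Google::Cloud::AIPlatform::V1::Content.new(
      role:  "user",
      parts: [Google::Cloud::AIPlatform::V1::Part.new(text: "Hello, tokens!")]
    )

    # instances: is now optional; model: and contents: are the new alternative.
    # Whether the backend wants the publisher path in endpoint:, model:, or both
    # is not confirmed by this diff, so treat the routing below as illustrative.
    response = client.compute_tokens endpoint: model_path,
                                     model:    model_path,
                                     contents: [content]

    response.tokens_info.each { |info| puts "#{info.token_ids.size} token ids" }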
@@ -291,7 +291,7 @@ module Google
  # @param options [::Gapic::CallOptions, ::Hash]
  # Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
  #
- # @overload compute_tokens(endpoint: nil, instances: nil)
+ # @overload compute_tokens(endpoint: nil, instances: nil, model: nil, contents: nil)
  # Pass arguments to `compute_tokens` via keyword arguments. Note that at
  # least one keyword argument is required. To specify no parameters, or to keep all
  # the default parameter values, pass an empty Hash as a request object (see above).
@@ -300,9 +300,15 @@ module Google
  # Required. The name of the Endpoint requested to get lists of tokens and
  # token ids.
  # @param instances [::Array<::Google::Protobuf::Value, ::Hash>]
- # Required. The instances that are the input to token computing API call.
+ # Optional. The instances that are the input to token computing API call.
  # Schema is identical to the prediction schema of the text model, even for
  # the non-text models, like chat models, or Codey models.
+ # @param model [::String]
+ # Optional. The name of the publisher model requested to serve the
+ # prediction. Format:
+ # projects/\\{project}/locations/\\{location}/publishers/*/models/*
+ # @param contents [::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>]
+ # Optional. Input content.
  # @yield [result, operation] Access the result along with the TransportOperation object
  # @yieldparam result [::Google::Cloud::AIPlatform::V1::ComputeTokensResponse]
  # @yieldparam operation [::Gapic::Rest::TransportOperation]
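The REST transport client documented in this hunk exposes the same keyword surface, so the sketch above only needs a different client class (again illustrative, reusing the same placeholder names):

    # Same call shape as the gRPC client, served over REST/JSON.
    rest_client = Google::Cloud::AIPlatform::V1::LlmUtilityService::Rest::Client.new

    response = rest_client.compute_tokens endpoint: model_path,
                                          contents: [content]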
@@ -21,7 +21,7 @@ module Google
  module Cloud
  module AIPlatform
  module V1
- VERSION = "0.43.0"
+ VERSION = "0.44.0"
  end
  end
  end
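To pick up the new fields, an application only needs to require at least this release; a Gemfile line such as the following (the constraint shown is simply the obvious one for this bump) is enough:

    # Gemfile
    gem "google-cloud-ai_platform-v1", "~> 0.44"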
@@ -8,11 +8,12 @@ require 'google/api/annotations_pb'
  require 'google/api/client_pb'
  require 'google/api/field_behavior_pb'
  require 'google/api/resource_pb'
+ require 'google/cloud/aiplatform/v1/content_pb'
  require 'google/cloud/aiplatform/v1/prediction_service_pb'
  require 'google/protobuf/struct_pb'

- descriptor_data = "\n4google/cloud/aiplatform/v1/llm_utility_service.proto\x12\x1agoogle.cloud.aiplatform.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/aiplatform/v1/prediction_service.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x84\x01\n\x14\x43omputeTokensRequest\x12<\n\x08\x65ndpoint\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"aiplatform.googleapis.com/Endpoint\x12.\n\tinstances\x18\x02 \x03(\x0b\x32\x16.google.protobuf.ValueB\x03\xe0\x41\x02\"/\n\nTokensInfo\x12\x0e\n\x06tokens\x18\x01 \x03(\x0c\x12\x11\n\ttoken_ids\x18\x02 \x03(\x03\"T\n\x15\x43omputeTokensResponse\x12;\n\x0btokens_info\x18\x01 \x03(\x0b\x32&.google.cloud.aiplatform.v1.TokensInfo2\xac\x05\n\x11LlmUtilityService\x12\x9d\x02\n\x0b\x43ountTokens\x12..google.cloud.aiplatform.v1.CountTokensRequest\x1a/.google.cloud.aiplatform.v1.CountTokensResponse\"\xac\x01\xda\x41\x12\x65ndpoint,instances\x82\xd3\xe4\x93\x02\x90\x01\"=/v1/{endpoint=projects/*/locations/*/endpoints/*}:countTokens:\x01*ZL\"G/v1/{endpoint=projects/*/locations/*/publishers/*/models/*}:countTokens:\x01*\x12\xa7\x02\n\rComputeTokens\x12\x30.google.cloud.aiplatform.v1.ComputeTokensRequest\x1a\x31.google.cloud.aiplatform.v1.ComputeTokensResponse\"\xb0\x01\xda\x41\x12\x65ndpoint,instances\x82\xd3\xe4\x93\x02\x94\x01\"?/v1/{endpoint=projects/*/locations/*/endpoints/*}:computeTokens:\x01*ZN\"I/v1/{endpoint=projects/*/locations/*/publishers/*/models/*}:computeTokens:\x01*\x1aM\xca\x41\x19\x61iplatform.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xd4\x01\n\x1e\x63om.google.cloud.aiplatform.v1B\x16LlmUtilityServiceProtoP\x01Z>cloud.google.com/go/aiplatform/apiv1/aiplatformpb;aiplatformpb\xaa\x02\x1aGoogle.Cloud.AIPlatform.V1\xca\x02\x1aGoogle\\Cloud\\AIPlatform\\V1\xea\x02\x1dGoogle::Cloud::AIPlatform::V1b\x06proto3"
+ descriptor_data = "\n4google/cloud/aiplatform/v1/llm_utility_service.proto\x12\x1agoogle.cloud.aiplatform.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a(google/cloud/aiplatform/v1/content.proto\x1a\x33google/cloud/aiplatform/v1/prediction_service.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xd4\x01\n\x14\x43omputeTokensRequest\x12<\n\x08\x65ndpoint\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"aiplatform.googleapis.com/Endpoint\x12.\n\tinstances\x18\x02 \x03(\x0b\x32\x16.google.protobuf.ValueB\x03\xe0\x41\x01\x12\x12\n\x05model\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12:\n\x08\x63ontents\x18\x04 \x03(\x0b\x32#.google.cloud.aiplatform.v1.ContentB\x03\xe0\x41\x01\"B\n\nTokensInfo\x12\x0e\n\x06tokens\x18\x01 \x03(\x0c\x12\x11\n\ttoken_ids\x18\x02 \x03(\x03\x12\x11\n\x04role\x18\x03 \x01(\tB\x03\xe0\x41\x01\"T\n\x15\x43omputeTokensResponse\x12;\n\x0btokens_info\x18\x01 \x03(\x0b\x32&.google.cloud.aiplatform.v1.TokensInfo2\xac\x05\n\x11LlmUtilityService\x12\x9d\x02\n\x0b\x43ountTokens\x12..google.cloud.aiplatform.v1.CountTokensRequest\x1a/.google.cloud.aiplatform.v1.CountTokensResponse\"\xac\x01\xda\x41\x12\x65ndpoint,instances\x82\xd3\xe4\x93\x02\x90\x01\"=/v1/{endpoint=projects/*/locations/*/endpoints/*}:countTokens:\x01*ZL\"G/v1/{endpoint=projects/*/locations/*/publishers/*/models/*}:countTokens:\x01*\x12\xa7\x02\n\rComputeTokens\x12\x30.google.cloud.aiplatform.v1.ComputeTokensRequest\x1a\x31.google.cloud.aiplatform.v1.ComputeTokensResponse\"\xb0\x01\xda\x41\x12\x65ndpoint,instances\x82\xd3\xe4\x93\x02\x94\x01\"?/v1/{endpoint=projects/*/locations/*/endpoints/*}:computeTokens:\x01*ZN\"I/v1/{endpoint=projects/*/locations/*/publishers/*/models/*}:computeTokens:\x01*\x1aM\xca\x41\x19\x61iplatform.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xd4\x01\n\x1e\x63om.google.cloud.aiplatform.v1B\x16LlmUtilityServiceProtoP\x01Z>cloud.google.com/go/aiplatform/apiv1/aiplatformpb;aiplatformpb\xaa\x02\x1aGoogle.Cloud.AIPlatform.V1\xca\x02\x1aGoogle\\Cloud\\AIPlatform\\V1\xea\x02\x1dGoogle::Cloud::AIPlatform::V1b\x06proto3"

  pool = Google::Protobuf::DescriptorPool.generated_pool

@@ -28,6 +29,7 @@ rescue TypeError
  warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
  imports = [
  ["google.protobuf.Value", "google/protobuf/struct.proto"],
+ ["google.cloud.aiplatform.v1.Content", "google/cloud/aiplatform/v1/content.proto"],
  ]
  imports.each do |type_name, expected_filename|
  import_file = pool.lookup(type_name).file_descriptor
@@ -28,9 +28,17 @@ module Google
  # token ids.
  # @!attribute [rw] instances
  # @return [::Array<::Google::Protobuf::Value>]
- # Required. The instances that are the input to token computing API call.
+ # Optional. The instances that are the input to token computing API call.
  # Schema is identical to the prediction schema of the text model, even for
  # the non-text models, like chat models, or Codey models.
+ # @!attribute [rw] model
+ # @return [::String]
+ # Optional. The name of the publisher model requested to serve the
+ # prediction. Format:
+ # projects/\\{project}/locations/\\{location}/publishers/*/models/*
+ # @!attribute [rw] contents
+ # @return [::Array<::Google::Cloud::AIPlatform::V1::Content>]
+ # Optional. Input content.
  class ComputeTokensRequest
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
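Because model and contents are ordinary message fields on ComputeTokensRequest, the request can also be built explicitly and passed as a single positional argument; a sketch reusing the placeholder names from the earlier example:

    request = Google::Cloud::AIPlatform::V1::ComputeTokensRequest.new(
      endpoint: model_path,
      model:    model_path,   # optional publisher model path
      contents: [content]     # optional alternative to instances
    )

    response = client.compute_tokens request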
@@ -43,6 +51,9 @@ module Google
  # @!attribute [rw] token_ids
  # @return [::Array<::Integer>]
  # A list of token ids from the input.
+ # @!attribute [rw] role
+ # @return [::String]
+ # Optional. Optional fields for the role from the corresponding Content.
  class TokensInfo
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
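The new role field on TokensInfo lets a caller tie each entry in the response back to the Content it was computed from when several contents are sent in one request; a small sketch of reading it:

    response.tokens_info.each do |info|
      # role mirrors the role of the corresponding Content ("user", "model", ...).
      puts format("role=%-6s tokens=%d first_ids=%s",
                  info.role, info.token_ids.size, info.token_ids.to_a.first(5).inspect)
    end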
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: google-cloud-ai_platform-v1
  version: !ruby/object:Gem::Version
- version: 0.43.0
+ version: 0.44.0
  platform: ruby
  authors:
  - Google LLC