omniai-google 1.6.3 → 1.8.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: b2626eef567a04e92b73eaf64a39dc274c2651b479810cd62a94be1a920cf25f
4
- data.tar.gz: 8c968e5ca2328ce77e604f91c36607c22ef6572bf2e7633505316fdd6bab93db
3
+ metadata.gz: 07e400994624881af7790d3a209eb28030237a4684d319edc11ebb8dc7052f2c
4
+ data.tar.gz: e9ea6d986267343627c9cc8d061aaea673f4cbaba35a6fffdd113e49fe674278
5
5
  SHA512:
6
- metadata.gz: c104b92ca09ae4c29285b2ac35e7badd23d2c54e4cb7189b0f1fb2fa1d0ad0cac4770e6c8ceeaaf5468225db52913458e6d4b6b94a0054f5c58735b01a78911d
7
- data.tar.gz: a1037ddc9ccc58f84dfba6a094d0c7e1481dbba103e4e4d9f16dfbfb21def59fee61e4c463e96e3807c0763db4920f84934133b66090c9a8d575ac20c4225913
6
+ metadata.gz: cc5d0209e6024d8860f8aa619e8bdc76469863f314d66810d5b525674c417d4a79d57f5ad0ac404401214cf29e7606ee8a2669026609780f563a173351048b5a
7
+ data.tar.gz: c0958efb3190c4df770d49862fdf3370584c4533dc8925350fb194490f3da5cefd5de58c7e47fda14bf76803575e438505fdaf13d00f6693e363c6cc2ed42ef7
data/README.md CHANGED
@@ -34,36 +34,27 @@ Global configuration is supported for the following options:
34
34
  OmniAI::Google.configure do |config|
35
35
  config.api_key = 'sk-...' # default: ENV['GOOGLE_API_KEY']
36
36
  config.host = '...' # default: 'https://generativelanguage.googleapis.com'
37
- config.version = 'v1beta' # default: 'v1'
37
+ config.version = OmniAI::Google::Config::Version::BETA # either 'v1' or 'v1beta'
38
38
  end
39
39
  ```
40
40
 
41
41
  ### Chat
42
42
 
43
- A chat completion is generated by passing in prompts using any a variety of formats:
43
+ A chat completion is generated by passing in a simple text prompt:
44
44
 
45
45
  ```ruby
46
46
  completion = client.chat('Tell me a joke!')
47
- completion.choice.message.content # 'Why did the chicken cross the road? To get to the other side.'
47
+ completion.text # 'Why did the chicken cross the road? To get to the other side.'
48
48
  ```
49
49
 
50
- ```ruby
51
- completion = client.chat({
52
- role: OmniAI::Chat::Role::USER,
53
- content: 'Is it wise to jump off a bridge?'
54
- })
55
- completion.choice.message.content # 'No.'
56
- ```
50
+ A chat completion may also be generated by using the prompt builder:
57
51
 
58
52
  ```ruby
59
- completion = client.chat([
60
- {
61
- role: OmniAI::Chat::Role::USER,
62
- content: 'You are a helpful assistant.'
63
- },
64
- 'What is the capital of Canada?',
65
- ])
66
- completion.choice.message.content # 'The capital of Canada is Ottawa.'
53
+ completion = client.chat do |prompt|
54
+ prompt.system('You are an expert in geography.')
55
+ prompt.user('What is the capital of Canada?')
56
+ end
57
+ completion.text # 'The capital of Canada is Ottawa.'
67
58
  ```
68
59
 
69
60
  #### Model
@@ -72,7 +63,7 @@ completion.choice.message.content # 'The capital of Canada is Ottawa.'
72
63
 
73
64
  ```ruby
74
65
  completion = client.chat('How fast is a cheetah?', model: OmniAI::Google::Chat::Model::GEMINI_FLASH)
75
- completion.choice.message.content # 'A cheetah can reach speeds over 100 km/h.'
66
+ completion.text # 'A cheetah can reach speeds over 100 km/h.'
76
67
  ```
77
68
 
78
69
  [Google API Reference `model`](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versioning#gemini-model-versions)
@@ -83,7 +74,7 @@ completion.choice.message.content # 'A cheetah can reach speeds over 100 km/h.'
83
74
 
84
75
  ```ruby
85
76
  completion = client.chat('Pick a number between 1 and 5', temperature: 2.0)
86
- completion.choice.message.content # '3'
77
+ completion.text # '3'
87
78
  ```
88
79
 
89
80
  [Google API Reference `temperature`](https://ai.google.dev/api/rest/v1/GenerationConfig)
@@ -94,7 +85,7 @@ completion.choice.message.content # '3'
94
85
 
95
86
  ```ruby
96
87
  stream = proc do |chunk|
97
- print(chunk.choice.delta.content) # 'Better', 'three', 'hours', ...
88
+ print(chunk.text) # 'Better', 'three', 'hours', ...
98
89
  end
99
90
  client.chat('Be poetic.', stream:)
100
91
  ```
@@ -0,0 +1,26 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides choice serialize / deserialize.
7
+ module ChoiceSerializer
8
+ # @param choice [OmniAI::Chat::Choice]
9
+ # @param context [Context]
10
+ # @return [Hash]
11
+ def self.serialize(choice, context:)
12
+ content = choice.message.serialize(context:)
13
+ { content: }
14
+ end
15
+
16
+ # @param data [Hash]
17
+ # @param context [Context]
18
+ # @return [OmniAI::Chat::Choice]
19
+ def self.deserialize(data, context:)
20
+ message = OmniAI::Chat::Message.deserialize(data['content'], context:)
21
+ OmniAI::Chat::Choice.new(message:)
22
+ end
23
+ end
24
+ end
25
+ end
26
+ end
@@ -0,0 +1,20 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides content serialize / deserialize.
7
+ module ContentSerializer
8
+ # @param data [Hash]
9
+ # @param context [Context]
10
+ # @return [OmniAI::Chat::Text, OmniAI::Chat::ToolCall]
11
+ def self.deserialize(data, context:)
12
+ case
13
+ when data['text'] then data['text']
14
+ when data['functionCall'] then OmniAI::Chat::ToolCall.deserialize(data, context:)
15
+ end
16
+ end
17
+ end
18
+ end
19
+ end
20
+ end
@@ -0,0 +1,27 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides function serialize / deserialize.
7
+ module FunctionSerializer
8
+ # @param function [OmniAI::Chat::Function]
9
+ # @return [Hash]
10
+ def self.serialize(function, *)
11
+ {
12
+ name: function.name,
13
+ args: function.arguments,
14
+ }
15
+ end
16
+
17
+ # @param data [Hash]
18
+ # @return [OmniAI::Chat::Function]
19
+ def self.deserialize(data, *)
20
+ name = data['name']
21
+ arguments = data['args']
22
+ OmniAI::Chat::Function.new(name:, arguments:)
23
+ end
24
+ end
25
+ end
26
+ end
27
+ end
@@ -0,0 +1,21 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides media serialize / deserialize.
7
+ module MediaSerializer
8
+ # @param media [OmniAI::Chat::Media]
9
+ # @return [Hash]
10
+ def self.serialize(media, *)
11
+ {
12
+ inlineData: {
13
+ mimeType: media.type,
14
+ data: media.data,
15
+ },
16
+ }
17
+ end
18
+ end
19
+ end
20
+ end
21
+ end
@@ -0,0 +1,52 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides message serialize / deserialize.
7
+ module MessageSerializer
8
+ # @param message [OmniAI::Chat::Message]
9
+ # @param context [OmniAI::Context]
10
+ # @return [Hash]
11
+ def self.serialize(message, context:)
12
+ role = message.role
13
+ parts = (arrayify(message.content) + arrayify(message.tool_call_list)).map do |part|
14
+ case part
15
+ when String then { text: part }
16
+ else part.serialize(context:)
17
+ end
18
+ end
19
+
20
+ { role:, parts: }
21
+ end
22
+
23
+ # @param data [Hash]
24
+ # @param context [OmniAI::Context]
25
+ # @return [OmniAI::Chat::Message]
26
+ def self.deserialize(data, context:)
27
+ role = data['role']
28
+ parts = arrayify(data['parts']).map do |part|
29
+ case
30
+ when part['text'] then OmniAI::Chat::Text.deserialize(part, context:)
31
+ when part['functionCall'] then OmniAI::Chat::ToolCall.deserialize(part, context:)
32
+ when part['functionResponse'] then OmniAI::Chat::ToolCallResult.deserialize(part, context:)
33
+ end
34
+ end
35
+
36
+ tool_call_list = parts.select { |part| part.is_a?(OmniAI::Chat::ToolCall) }
37
+ content = parts.reject { |part| part.is_a?(OmniAI::Chat::ToolCall) }
38
+
39
+ OmniAI::Chat::Message.new(content:, role:, tool_call_list:)
40
+ end
41
+
42
+ # @param content [Object]
43
+ # @return [Array<Object>]
44
+ def self.arrayify(content)
45
+ return [] if content.nil?
46
+
47
+ content.is_a?(Array) ? content : [content]
48
+ end
49
+ end
50
+ end
51
+ end
52
+ end
@@ -0,0 +1,33 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides payload serialize / deserialize.
7
+ module PayloadSerializer
8
+ # @param payload [OmniAI::Chat::Payload]
9
+ # @param context [OmniAI::Context]
10
+ # @return [Hash]
11
+ def self.serialize(payload, context:)
12
+ candidates = payload.choices.map { |choice| choice.serialize(context:) }
13
+ usage_metadata = payload.usage&.serialize(context:)
14
+
15
+ {
16
+ candidates:,
17
+ usage_metadata:,
18
+ }
19
+ end
20
+
21
+ # @param data [Hash]
22
+ # @param context [OmniAI::Context]
23
+ # @return [OmniAI::Chat::Payload]
24
+ def self.deserialize(data, context:)
25
+ choices = data['candidates'].map { |candidate| OmniAI::Chat::Choice.deserialize(candidate, context:) }
26
+ usage = OmniAI::Chat::Usage.deserialize(data['usage_metadata'], context:) if data['usage_metadata']
27
+
28
+ OmniAI::Chat::Payload.new(choices:, usage:)
29
+ end
30
+ end
31
+ end
32
+ end
33
+ end
@@ -0,0 +1,22 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides text serialize / deserialize.
7
+ module TextSerializer
8
+ # @param text [OmniAI::Chat::Text]
9
+ # @return [Hash]
10
+ def self.serialize(text, *)
11
+ { text: text.text }
12
+ end
13
+
14
+ # @param data [Hash]
15
+ # @return [OmniAI::Chat::Text]
16
+ def self.deserialize(data, *)
17
+ OmniAI::Chat::Text.new(data['text'])
18
+ end
19
+ end
20
+ end
21
+ end
22
+ end
@@ -0,0 +1,32 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides tool-call result serialize / deserialize.
7
+ module ToolCallResultSerializer
8
+ # @param tool_call_response [OmniAI::Chat::ToolCallResult]
9
+ # @return [Hash]
10
+ def self.serialize(tool_call_response, *)
11
+ {
12
+ functionResponse: {
13
+ name: tool_call_response.tool_call_id,
14
+ response: {
15
+ name: tool_call_response.tool_call_id,
16
+ content: tool_call_response.content,
17
+ },
18
+ },
19
+ }
20
+ end
21
+
22
+ # @param data [Hash]
23
+ # @return [ToolCallResult]
24
+ def self.deserialize(data, *)
25
+ tool_call_id = data['functionResponse']['name']
26
+ content = data['functionResponse']['response']['content']
27
+ OmniAI::Chat::ToolCallResult.new(content:, tool_call_id:)
28
+ end
29
+ end
30
+ end
31
+ end
32
+ end
@@ -0,0 +1,25 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides tool-call serialize / deserialize.
7
+ module ToolCallSerializer
8
+ # @param tool_call [OmniAI::Chat::ToolCall]
9
+ # @param context [OmniAI::Context]
10
+ # @return [Hash]
11
+ def self.serialize(tool_call, context:)
12
+ { functionCall: tool_call.function.serialize(context:) }
13
+ end
14
+
15
+ # @param data [Hash]
16
+ # @param context [OmniAI::Context]
17
+ # @return [OmniAI::Chat::ToolCall]
18
+ def self.deserialize(data, context:)
19
+ function = OmniAI::Chat::Function.deserialize(data['functionCall'], context:)
20
+ OmniAI::Chat::ToolCall.new(id: function.name, function:)
21
+ end
22
+ end
23
+ end
24
+ end
25
+ end
@@ -0,0 +1,19 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides tool serialize / deserialize.
7
+ module ToolSerializer
8
+ # @param tool [OmniAI::Tool]
9
+ def self.serialize(tool, *)
10
+ {
11
+ name: tool.name,
12
+ description: tool.description,
13
+ parameters: tool.parameters&.prepare,
14
+ }
15
+ end
16
+ end
17
+ end
18
+ end
19
+ end
@@ -0,0 +1,29 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OmniAI
4
+ module Google
5
+ class Chat
6
+ # Overrides usage serialize / deserialize.
7
+ module UsageSerializer
8
+ # @param usage [OmniAI::Chat::Usage]
9
+ # @return [Hash]
10
+ def self.serialize(usage, *)
11
+ {
12
+ prompt_token_count: usage.input_tokens,
13
+ candidates_token_count: usage.output_tokens,
14
+ total_token_count: usage.total_tokens,
15
+ }
16
+ end
17
+
18
+ # @param data [Hash]
19
+ # @return [OmniAI::Chat::Usage]
20
+ def self.deserialize(data, *)
21
+ input_tokens = data['prompt_token_count']
22
+ output_tokens = data['candidates_token_count']
23
+ total_tokens = data['total_token_count']
24
+ OmniAI::Chat::Usage.new(input_tokens:, output_tokens:, total_tokens:)
25
+ end
26
+ end
27
+ end
28
+ end
29
+ end
@@ -25,49 +25,47 @@ module OmniAI
25
25
 
26
26
  DEFAULT_MODEL = Model::GEMINI_PRO
27
27
 
28
- TEXT_SERIALIZER = lambda do |content, *|
29
- { text: content.text }
30
- end
28
+ # @return [Context]
29
+ CONTEXT = Context.build do |context|
30
+ context.serializers[:text] = TextSerializer.method(:serialize)
31
+ context.deserializers[:text] = TextSerializer.method(:deserialize)
31
32
 
32
- # @param [Message]
33
- # @return [Hash]
34
- # @example
35
- # message = Message.new(...)
36
- # MESSAGE_SERIALIZER.call(message)
37
- MESSAGE_SERIALIZER = lambda do |message, context:|
38
- parts = message.content.is_a?(String) ? [Text.new(message.content)] : message.content
39
- role = message.system? ? Role::USER : message.role
40
-
41
- {
42
- role:,
43
- parts: parts.map { |part| part.serialize(context:) },
44
- }
45
- end
33
+ context.serializers[:file] = MediaSerializer.method(:serialize)
34
+ context.serializers[:url] = MediaSerializer.method(:serialize)
46
35
 
47
- # @param [Media]
48
- # @return [Hash]
49
- # @example
50
- # media = Media.new(...)
51
- # MEDIA_SERIALIZER.call(media)
52
- MEDIA_SERIALIZER = lambda do |media, *|
53
- {
54
- inlineData: {
55
- mimeType: media.type,
56
- data: media.data,
57
- },
58
- }
59
- end
36
+ context.serializers[:tool_call] = ToolCallSerializer.method(:serialize)
37
+ context.deserializers[:tool_call] = ToolCallSerializer.method(:deserialize)
60
38
 
61
- # @return [Context]
62
- CONTEXT = Context.build do |context|
63
- context.serializers[:message] = MESSAGE_SERIALIZER
64
- context.serializers[:text] = TEXT_SERIALIZER
65
- context.serializers[:file] = MEDIA_SERIALIZER
66
- context.serializers[:url] = MEDIA_SERIALIZER
39
+ context.serializers[:tool_call_result] = ToolCallResultSerializer.method(:serialize)
40
+ context.deserializers[:tool_call_result] = ToolCallResultSerializer.method(:deserialize)
41
+
42
+ context.serializers[:function] = FunctionSerializer.method(:serialize)
43
+ context.deserializers[:function] = FunctionSerializer.method(:deserialize)
44
+
45
+ context.serializers[:usage] = UsageSerializer.method(:serialize)
46
+ context.deserializers[:usage] = UsageSerializer.method(:deserialize)
47
+
48
+ context.serializers[:payload] = PayloadSerializer.method(:serialize)
49
+ context.deserializers[:payload] = PayloadSerializer.method(:deserialize)
50
+
51
+ context.serializers[:choice] = ChoiceSerializer.method(:serialize)
52
+ context.deserializers[:choice] = ChoiceSerializer.method(:deserialize)
53
+
54
+ context.serializers[:message] = MessageSerializer.method(:serialize)
55
+ context.deserializers[:message] = MessageSerializer.method(:deserialize)
56
+
57
+ context.deserializers[:content] = ContentSerializer.method(:deserialize)
58
+
59
+ context.serializers[:tool] = ToolSerializer.method(:serialize)
67
60
  end
68
61
 
69
62
  protected
70
63
 
64
+ # @return [Context]
65
+ def context
66
+ CONTEXT
67
+ end
68
+
71
69
  # @return [HTTP::Response]
72
70
  def request!
73
71
  @client
@@ -82,7 +80,8 @@ module OmniAI
82
80
  # @return [Hash]
83
81
  def payload
84
82
  OmniAI::Google.config.chat_options.merge({
85
- contents:,
83
+ system_instruction: @prompt.messages.find(&:system?)&.serialize(context:),
84
+ contents: @prompt.messages.reject(&:system?).map { |message| message.serialize(context:) },
86
85
  tools:,
87
86
  generationConfig: generation_config,
88
87
  }).compact
@@ -93,7 +92,7 @@ module OmniAI
93
92
  return unless @tools
94
93
 
95
94
  [
96
- function_declarations: @tools&.map(&:prepare),
95
+ function_declarations: @tools.map { |tool| tool.serialize(context:) },
97
96
  ]
98
97
  end
99
98
 
@@ -104,15 +103,6 @@ module OmniAI
104
103
  { temperature: @temperature }.compact
105
104
  end
106
105
 
107
- # Example:
108
- #
109
- # [{ role: 'user', parts: [{ text: '...' }] }]
110
- #
111
- # @return [Array<Hash>]
112
- def contents
113
- @prompt.serialize(context: CONTEXT)
114
- end
115
-
116
106
  # @return [String]
117
107
  def path
118
108
  "/#{@client.version}/models/#{@model}:#{operation}"
@@ -122,6 +112,15 @@ module OmniAI
122
112
  def operation
123
113
  @stream ? 'streamGenerateContent' : 'generateContent'
124
114
  end
115
+
116
+ # @return [Array<Message>]
117
+ def build_tool_call_messages(tool_call_list)
118
+ content = tool_call_list.map do |tool_call|
119
+ ToolCallResult.new(tool_call_id: tool_call.id, content: execute_tool_call(tool_call))
120
+ end
121
+
122
+ [Message.new(role: 'function', content:)]
123
+ end
125
124
  end
126
125
  end
127
126
  end
@@ -4,8 +4,13 @@ module OmniAI
4
4
  module Google
5
5
  # Configuration for Google.
6
6
  class Config < OmniAI::Config
7
+ module Version
8
+ STABLE = 'v1'
9
+ BETA = 'v1beta'
10
+ end
11
+
7
12
  DEFAULT_HOST = 'https://generativelanguage.googleapis.com'
8
- DEFAULT_VERSION = 'v1'
13
+ DEFAULT_VERSION = Version::STABLE
9
14
 
10
15
  # @!attribute [rw] version
11
16
  # @return [String, nil]
@@ -2,6 +2,6 @@
2
2
 
3
3
  module OmniAI
4
4
  module Google
5
- VERSION = '1.6.3'
5
+ VERSION = '1.8.0'
6
6
  end
7
7
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: omniai-google
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.6.3
4
+ version: 1.8.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Kevin Sylvestre
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2024-07-18 00:00:00.000000000 Z
11
+ date: 2024-08-16 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: event_stream_parser
@@ -63,9 +63,17 @@ files:
63
63
  - README.md
64
64
  - lib/omniai/google.rb
65
65
  - lib/omniai/google/chat.rb
66
- - lib/omniai/google/chat/response/chunk.rb
67
- - lib/omniai/google/chat/response/completion.rb
68
- - lib/omniai/google/chat/response/stream.rb
66
+ - lib/omniai/google/chat/choice_serializer.rb
67
+ - lib/omniai/google/chat/content_serializer.rb
68
+ - lib/omniai/google/chat/function_serializer.rb
69
+ - lib/omniai/google/chat/media_serializer.rb
70
+ - lib/omniai/google/chat/message_serializer.rb
71
+ - lib/omniai/google/chat/payload_serializer.rb
72
+ - lib/omniai/google/chat/text_serializer.rb
73
+ - lib/omniai/google/chat/tool_call_result_serializer.rb
74
+ - lib/omniai/google/chat/tool_call_serializer.rb
75
+ - lib/omniai/google/chat/tool_serializer.rb
76
+ - lib/omniai/google/chat/usage_serializer.rb
69
77
  - lib/omniai/google/client.rb
70
78
  - lib/omniai/google/config.rb
71
79
  - lib/omniai/google/version.rb
@@ -1,29 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module OmniAI
4
- module Google
5
- class Chat
6
- module Response
7
- # A chunk given when streaming.
8
- class Chunk < OmniAI::Chat::Response::Chunk
9
- # @return [Array<OmniAI::Chat::Choice>]
10
- def choices
11
- @choices ||= [].tap do |choices|
12
- @data['candidates'].each do |candidate|
13
- candidate['content']['parts'].each do |part|
14
- choices << OmniAI::Chat::Response::DeltaChoice.new(data: {
15
- 'index' => candidate['index'],
16
- 'delta' => {
17
- 'role' => candidate['content']['role'],
18
- 'content' => part['text'],
19
- },
20
- })
21
- end
22
- end
23
- end
24
- end
25
- end
26
- end
27
- end
28
- end
29
- end
@@ -1,29 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module OmniAI
4
- module Google
5
- class Chat
6
- module Response
7
- # A completion returned by the API.
8
- class Completion < OmniAI::Chat::Response::Completion
9
- # @return [Array<OmniAI::Chat::Choice>]
10
- def choices
11
- @choices ||= [].tap do |entries|
12
- @data['candidates'].each do |candidate|
13
- candidate['content']['parts'].each do |part|
14
- entries << OmniAI::Chat::Response::MessageChoice.new(data: {
15
- 'index' => candidate['index'],
16
- 'message' => {
17
- 'role' => candidate['content']['role'],
18
- 'content' => part['text'],
19
- },
20
- })
21
- end
22
- end
23
- end
24
- end
25
- end
26
- end
27
- end
28
- end
29
- end
@@ -1,21 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module OmniAI
4
- module Google
5
- class Chat
6
- module Response
7
- # A stream given when streaming.
8
- class Stream < OmniAI::Chat::Response::Stream
9
- # @yield [OmniAI::Chat::Chunk]
10
- def stream!(&)
11
- @response.body.each do |chunk|
12
- @parser.feed(chunk) do |_, data|
13
- yield(Chunk.new(data: JSON.parse(data)))
14
- end
15
- end
16
- end
17
- end
18
- end
19
- end
20
- end
21
- end