omniai-google 3.5.0 → 3.5.2
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +26 -11
- data/lib/omniai/google/chat/message_serializer.rb +1 -2
- data/lib/omniai/google/chat/stream.rb +12 -16
- data/lib/omniai/google/chat.rb +2 -2
- data/lib/omniai/google/credentials.rb +1 -1
- data/lib/omniai/google/version.rb +1 -1
- metadata +1 -1
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 75cf7d6dde07411b0b22958ecc3ea72600956f7bd3a7d68c0bb034b0230e2b5f
+  data.tar.gz: 32d419138aa379822c21fac7fc94402c3423ff9b7cc127bb382d1be24f3c18a1
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 61b0c47b210b5610c331d31af784c249d854e40a8f1246dd39cf68d7b6be032d224cc52455b63917cad748ffb6b575ba0dc5b3e92b9b9a254d53363a5f6eb4ea
+  data.tar.gz: 505c2c82358dfcbcf83b10a90ebb87b1d76c00e46e7318708c465c8c1eb68e4f8ba0a5bb71c0767c694701e98669d3c9fe6e5fec2ce747a6aea59ee0e35fa7c7
data/README.md
CHANGED

@@ -87,7 +87,7 @@ completion.text # 'The capital of Canada is Ottawa.'
 
 #### Model
 
-`model` takes an optional string (default is `gemini-
+`model` takes an optional string (default is `gemini-3-flash-preview`):
 
 ```ruby
 completion = client.chat('How fast is a cheetah?', model: OmniAI::Google::Chat::Model::GEMINI_FLASH)
@@ -120,30 +120,45 @@ client.chat('Be poetic.', stream:)
 
 #### Extended Thinking
 
-
+Gemini models support extended thinking, which shows the model's reasoning process.
 
 ```ruby
-# Enable thinking
-response = client.chat("What is 25 * 25?", model:
+# Enable thinking with model defaults
+response = client.chat("What is 25 * 25?", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: true)
 ```
 
-
+##### Thinking Effort
+
+Control how much the model thinks using the `effort` option (`"low"`, `"medium"`, or `"high"`):
+
+```ruby
+# High effort (more reasoning tokens)
+response = client.chat("Solve this step by step.", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: { effort: "high" })
+
+# Medium effort
+response = client.chat("Explain briefly.", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: { effort: "medium" })
+
+# Low effort (fewer reasoning tokens)
+response = client.chat("What is 1+1?", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: { effort: "low" })
+```
+
+##### Accessing Thinking Content
 
 ```ruby
-response.choices.first.message.
-case
+response.choices.first.message.content.each do |part|
+  case part
   when OmniAI::Chat::Thinking
-puts "Thinking: #{
+    puts "Thinking: #{part.thinking}"
   when OmniAI::Chat::Text
-puts "Response: #{
+    puts "Response: #{part.text}"
   end
 end
 ```
 
-
+##### Streaming with Thinking
 
 ```ruby
-client.chat("What are the prime factors of 1234567?", model:
+client.chat("What are the prime factors of 1234567?", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: true, stream: $stdout)
 ```
 
 [Google API Reference `thinking`](https://ai.google.dev/gemini-api/docs/thinking)
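For orientation, here is a minimal sketch of how the thinking options documented above might be exercised end to end. The `thinking:` option, model constant, and part classes come from the README changes; the client construction (`OmniAI::Google::Client.new` with an API key read from the environment) is an assumption and may differ depending on how you authenticate.

```ruby
require "omniai/google"

# Assumed setup: an API-key client; adjust to your authentication method.
client = OmniAI::Google::Client.new(api_key: ENV.fetch("GOOGLE_API_KEY"))

# Request extended thinking at high effort (per the README section above).
response = client.chat(
  "What are the prime factors of 1234567?",
  model: OmniAI::Google::Chat::Model::GEMINI_PRO,
  thinking: { effort: "high" }
)

# Separate the reasoning parts from the final answer.
response.choices.first.message.content.each do |part|
  case part
  when OmniAI::Chat::Thinking then puts "[thinking] #{part.thinking}"
  when OmniAI::Chat::Text     then puts "[answer] #{part.text}"
  end
end
```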
data/lib/omniai/google/chat/message_serializer.rb
CHANGED

@@ -34,8 +34,7 @@ module OmniAI
 end
 end
 
-tool_call_list = parts.
-content = parts.reject { |part| part.is_a?(OmniAI::Chat::ToolCall) }
+tool_call_list, content = parts.partition { |part| part.is_a?(OmniAI::Chat::ToolCall) }
 
 OmniAI::Chat::Message.new(content:, role:, tool_call_list:)
 end
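The serializer change above folds a select/reject pair into a single `Enumerable#partition` call, which splits an array into matching and non-matching elements in one pass. A self-contained illustration of the idiom, using stand-in structs rather than the gem's real part classes:

```ruby
# Stand-in types purely for illustration; the real code separates
# OmniAI::Chat::ToolCall parts from the remaining message content.
ToolCall = Struct.new(:name)
Text     = Struct.new(:value)

parts = [Text.new("hello"), ToolCall.new("lookup"), Text.new("world")]

# partition returns [matching, rest], replacing the earlier two passes.
tool_call_list, content = parts.partition { |part| part.is_a?(ToolCall) }

tool_call_list # => [#<struct ToolCall name="lookup">]
content        # => [#<struct Text value="hello">, #<struct Text value="world">]
```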
data/lib/omniai/google/chat/stream.rb
CHANGED

@@ -44,7 +44,7 @@ module OmniAI
 @data[key] = value unless key.eql?("candidates")
 end
 
-data["candidates"]
+data["candidates"]&.each_with_index do |candidate, index|
   process_candidate!(candidate:, index:, &block)
 end
 end
@@ -53,12 +53,12 @@ module OmniAI
 # @yieldparam delta [OmniAI::Chat::Delta]
 #
 # @param candidate [Hash]
+# @param index [Integer]
 def process_candidate!(candidate:, index:, &block)
   return unless candidate["content"]
 
-  candidate["content"]["parts"]
+  candidate["content"]["parts"]&.each do |part|
     if part["thought"]
-      # Google uses thought: true as a flag, content is in text
       block&.call(OmniAI::Chat::Delta.new(thinking: part["text"]))
     elsif part["text"]
       block&.call(OmniAI::Chat::Delta.new(text: part["text"]))
@@ -74,27 +74,23 @@ module OmniAI
 if @data["candidates"][index].nil?
   @data["candidates"][index] = candidate
 else
-
+  (candidate["content"]["parts"] || []).each do |part|
+    merge_part!(part:, candidate: @data["candidates"][index])
+  end
 end
 end
 
-# @param parts [Array<Hash>]
-# @param candidate [Hash]
-def merge_parts!(parts:, candidate:)
-  parts.each { |part| merge_part!(part:, candidate:) }
-end
-
 # @param part [Hash]
-# @param
+# @param candidate [Hash]
 def merge_part!(part:, candidate:)
-
+  parts = candidate["content"]["parts"] ||= []
+  last_part = parts.last
 
-  if last_part&.key?("text") && part
+  if (last_part&.key?("text") && part.key?("text")) ||
+     (last_part&.key?("thought") && part.key?("thought"))
     last_part["text"] += part["text"]
-  elsif last_part&.key?("thought") && part["thought"]
-    last_part["thought"] += part["thought"]
   else
-
+    parts << part
   end
 end
 end
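Taken together, the stream changes guard the candidate and part iteration with safe navigation and rework `merge_part!` so each streamed part is appended to the candidate's accumulated parts, with consecutive parts of the same kind concatenated in place. A rough standalone sketch of that merge rule against plain hashes; the sample chunks are invented and only loosely shaped like Gemini streaming parts:

```ruby
# Illustrative copy of the merge rule from the diff above (not the gem's class).
def merge_part!(part:, candidate:)
  parts = candidate["content"]["parts"] ||= []
  last_part = parts.last

  if (last_part&.key?("text") && part.key?("text")) ||
     (last_part&.key?("thought") && part.key?("thought"))
    last_part["text"] += part["text"]
  else
    parts << part
  end
end

candidate = { "content" => { "parts" => [] } }

# Invented chunks: two text fragments followed by a function call part.
[
  { "text" => "The prime factors are " },
  { "text" => "127 and 9721." },
  { "functionCall" => { "name" => "example", "args" => {} } },
].each { |part| merge_part!(part:, candidate:) }

candidate["content"]["parts"]
# => [{"text"=>"The prime factors are 127 and 9721."},
#     {"functionCall"=>{"name"=>"example", "args"=>{}}}]
```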
data/lib/omniai/google/chat.rb
CHANGED

@@ -104,13 +104,13 @@ module OmniAI
 return unless tools?
 
 [
-  function_declarations: custom_tools.map { |tool| tool.serialize(context:) },
+  { function_declarations: custom_tools.map { |tool| tool.serialize(context:) } },
 ].concat(internal_tools.map { |name| { name => {} } })
 end
 
 # @return [Array<OmniAI::Tool>]
 def custom_tools
-  @tools.
+  @tools.grep(OmniAI::Tool)
 end
 
 # @return [Array<Symbol, String>]
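Both chat.rb edits are small Ruby idioms: wrapping `function_declarations` in explicit braces makes the hash element of the tools array explicit, and `custom_tools` now uses `Array#grep`, which selects elements matching its argument via `===` (for a class argument, that means instances of the class). A tiny, self-contained illustration of the `grep` idiom:

```ruby
# Class#=== is an is_a? check, so grep(SomeClass) keeps only instances of it.
mixed = [1, "two", 3.0, "four"]

mixed.grep(String)                         # => ["two", "four"]
mixed.select { |item| item.is_a?(String) } # => ["two", "four"] (the longer spelling)
```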
data/lib/omniai/google/credentials.rb
CHANGED

@@ -23,7 +23,7 @@ module OmniAI
 case value
 when IO, StringIO then ::Google::Auth::ServiceAccountCredentials.make_creds(json_key_io: value, scope: SCOPE)
 when Hash then parse(JSON.generate(value))
-when Pathname then
+when Pathname then File.open(value) { |file| parse(file) }
 when String then parse(StringIO.new(value))
 else value
 end
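The credentials fix completes the `Pathname` branch so a path is opened and its contents parsed like any other IO. A hedged sketch of the value shapes that case expression accepts; the path and environment variable name below are placeholders, not values the gem defines:

```ruby
require "pathname"

# Placeholder inputs illustrating the branches of the case expression above.
from_path   = Pathname.new("/path/to/service-account.json") # opened with File.open and parsed
from_hash   = { "type" => "service_account" }                # re-serialized with JSON.generate, then parsed
from_string = ENV["GOOGLE_CREDENTIALS_JSON"]                 # a raw JSON string, wrapped in StringIO and parsed
```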