omniai-google 3.4.3 → 3.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +26 -11
- data/lib/omniai/google/chat.rb +10 -9
- data/lib/omniai/google/version.rb +1 -1
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 82ee690e391c20d752962b27396514b2c8ed7f402e279b44115ca717622fabc3
+  data.tar.gz: b559de433576791ac3676ae2d5829369f1951a4824091a0c01add03f7e4a2b50
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8ac9197937f962bd597c4a876dfc1b29edcb5802aae8b39e9e2962a118208cf0b17829b3a0dda4849c82091ffb60c12f050643712dc0c0c467cea0880bd8fd0b
+  data.tar.gz: 62600d5597689f129a3412b8718058f7de8c34e9e4e42ad0434b945b52c09cb7f4cd60e4dfe23d688d27026d4694b69e1a16febae76a3aa7e90f9680a2a77cb0
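The digests above cover the two members of the `.gem` archive, `metadata.gz` and `data.tar.gz`. As a minimal verification sketch using Ruby's standard `digest` library, assuming the `.gem` file (itself a tar archive) has already been unpacked so those two files exist locally:

```ruby
require "digest"

# Expected SHA256 values copied from the checksums.yaml diff above.
{
  "metadata.gz" => "82ee690e391c20d752962b27396514b2c8ed7f402e279b44115ca717622fabc3",
  "data.tar.gz" => "b559de433576791ac3676ae2d5829369f1951a4824091a0c01add03f7e4a2b50",
}.each do |file, expected|
  # Digest::SHA256.file streams the file rather than loading it into memory.
  actual = Digest::SHA256.file(file).hexdigest
  puts "#{file}: #{actual == expected ? 'OK' : 'MISMATCH'}"
end
```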
data/README.md
CHANGED
@@ -87,7 +87,7 @@ completion.text # 'The capital of Canada is Ottawa.'
 
 #### Model
 
-`model` takes an optional string (default is `gemini-
+`model` takes an optional string (default is `gemini-3-flash-preview`):
 
 ```ruby
 completion = client.chat('How fast is a cheetah?', model: OmniAI::Google::Chat::Model::GEMINI_FLASH)
@@ -120,30 +120,45 @@ client.chat('Be poetic.', stream:)
 
 #### Extended Thinking
 
-
+Gemini models support extended thinking, which shows the model's reasoning process.
 
 ```ruby
-# Enable thinking
-response = client.chat("What is 25 * 25?", model:
+# Enable thinking with model defaults
+response = client.chat("What is 25 * 25?", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: true)
 ```
 
-
+##### Thinking Effort
+
+Control how much the model thinks using the `effort` option (`"low"`, `"medium"`, or `"high"`):
+
+```ruby
+# High effort (more reasoning tokens)
+response = client.chat("Solve this step by step.", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: { effort: "high" })
+
+# Medium effort
+response = client.chat("Explain briefly.", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: { effort: "medium" })
+
+# Low effort (fewer reasoning tokens)
+response = client.chat("What is 1+1?", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: { effort: "low" })
+```
+
+##### Accessing Thinking Content
 
 ```ruby
-response.choices.first.message.
-case
+response.choices.first.message.content.each do |part|
+  case part
 when OmniAI::Chat::Thinking
-  puts "Thinking: #{
+  puts "Thinking: #{part.thinking}"
 when OmniAI::Chat::Text
-  puts "Response: #{
+  puts "Response: #{part.text}"
 end
 end
 ```
 
-
+##### Streaming with Thinking
 
 ```ruby
-client.chat("What are the prime factors of 1234567?", model:
+client.chat("What are the prime factors of 1234567?", model: OmniAI::Google::Chat::Model::GEMINI_PRO, thinking: true, stream: $stdout)
 ```
 
 [Google API Reference `thinking`](https://ai.google.dev/gemini-api/docs/thinking)
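The new "Accessing Thinking Content" section iterates `message.content` and branches on part type. A minimal sketch (not part of the gem; the helper name is hypothetical) that uses only the classes and accessors documented above to separate the reasoning from the final answer:

```ruby
# Hypothetical helper built on OmniAI::Chat::Thinking#thinking and
# OmniAI::Chat::Text#text, as shown in the README diff above.
def split_thinking(response)
  parts = response.choices.first.message.content
  # Enumerable#grep with a class selects instances of that class.
  thoughts = parts.grep(OmniAI::Chat::Thinking).map(&:thinking)
  answer = parts.grep(OmniAI::Chat::Text).map(&:text).join
  [thoughts, answer]
end

# `response` comes from a `thinking:` chat call as in the section above.
thoughts, answer = split_thinking(response)
thoughts.each { |thought| warn "[thinking] #{thought}" }
puts answer
```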
data/lib/omniai/google/chat.rb
CHANGED
@@ -17,11 +17,12 @@ module OmniAI
       GEMINI_1_5_PRO = "gemini-1.5-pro"
       GEMINI_2_5_PRO = "gemini-2.5-pro"
       GEMINI_3_0_PRO = "gemini-3-pro-preview"
+      GEMINI_3_1_PRO = "gemini-3.1-pro-preview"
       GEMINI_1_5_FLASH = "gemini-1.5-flash"
       GEMINI_2_0_FLASH = "gemini-2.0-flash"
       GEMINI_2_5_FLASH = "gemini-2.5-flash"
       GEMINI_3_FLASH = "gemini-3-flash-preview"
-      GEMINI_PRO =
+      GEMINI_PRO = GEMINI_3_1_PRO
       GEMINI_FLASH = GEMINI_3_FLASH
     end
 
@@ -103,7 +104,7 @@ module OmniAI
         return unless tools?
 
         [
-          function_declarations: custom_tools.map { |tool| tool.serialize(context:) },
+          { function_declarations: custom_tools.map { |tool| tool.serialize(context:) } },
         ].concat(internal_tools.map { |name| { name => {} } })
       end
 
@@ -132,7 +133,7 @@ module OmniAI
         end
 
         data[:temperature] = @temperature if @temperature
-        data[:thinkingConfig] = thinking_config if
+        data[:thinkingConfig] = thinking_config if @options[:thinking]
 
         data = data.compact
         data unless data.empty?
@@ -148,17 +149,17 @@ module OmniAI
         stream? ? "streamGenerateContent" : "generateContent"
       end
 
-      # Translates unified thinking option to Google's thinkingConfig format.
-      # Example: `thinking: true` becomes `{ includeThoughts: true }`
       # @return [Hash, nil]
       def thinking_config
        thinking = @options[:thinking]
        return unless thinking
+        return { includeThoughts: true } unless thinking.is_a?(Hash)
 
-
-
-
-
+        config = { includeThoughts: true }
+        return config.merge(thinking) unless thinking.key?(:effort)
+
+        config[:thinkingLevel] = thinking[:effort].upcase if thinking[:effort]
+        config
       end
 
       # @return [Array<Message>]
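The rewritten `thinking_config` now handles three shapes of the unified `thinking:` option: `true`, a hash with `:effort`, and any other hash merged through unchanged. A standalone sketch mirroring the logic added above, with the expected translations (the `thinkingBudget` key is an assumption, shown only to illustrate the pass-through branch):

```ruby
# Mirrors the thinking_config logic from the chat.rb diff above,
# extracted as a plain method for illustration.
def thinking_config_for(thinking)
  return unless thinking
  # `thinking: true` enables thought summaries with model defaults.
  return { includeThoughts: true } unless thinking.is_a?(Hash)

  config = { includeThoughts: true }
  # Hashes without :effort pass straight through to Google's API.
  return config.merge(thinking) unless thinking.key?(:effort)

  # :effort is translated to Google's uppercase thinkingLevel.
  config[:thinkingLevel] = thinking[:effort].upcase if thinking[:effort]
  config
end

thinking_config_for(true)                     #=> { includeThoughts: true }
thinking_config_for({ effort: "high" })       #=> { includeThoughts: true, thinkingLevel: "HIGH" }
thinking_config_for({ thinkingBudget: 1024 }) #=> { includeThoughts: true, thinkingBudget: 1024 }
```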