omniai-google 3.7.0 → 3.7.1
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/lib/omniai/google/client.rb +2 -2
- data/lib/omniai/google/embed.rb +19 -18
- data/lib/omniai/google/version.rb +1 -1
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 74848884db95fef34b6c7306b2814ecf8d50797963e690a93b77e69c93cc7d3a
+  data.tar.gz: 9fbb92e5fe4fe36d9086182391ae383180607b5e97b36a17e458dc200bef73e1
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c92c79303ec17e46832115d075aa29760ba8d1f52111286d51d370c49c5266bb2e0ec9956e058a133de69bd0bb28c8ea4fa52efab34edea64f9cdc896397069e
+  data.tar.gz: 5fc61a87b13a8bd3ba73bc6b8c150c7da1d77aef938d7c826f9eac541b78ea288fb0698bbaf9a7609d7a19a85607b3d04d97e82cd013a46f64c319c86329ea60
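To verify a downloaded copy of the gem against the new SHA-256 values, here is a minimal sketch using Ruby's standard Digest library; the local file name is illustrative and assumes the gem archive has already been unpacked:

require "digest"

# Hypothetical local path to the unpacked data.tar.gz; adjust as needed.
expected = "9fbb92e5fe4fe36d9086182391ae383180607b5e97b36a17e458dc200bef73e1"
actual = Digest::SHA256.file("data.tar.gz").hexdigest

puts(actual == expected ? "checksum OK" : "checksum mismatch")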
data/lib/omniai/google/client.rb
CHANGED
@@ -86,8 +86,8 @@ module OmniAI
       # @param input [String, Array<String>, Array<Integer>] required
       # @param model [String] optional
       # @param options [Hash] provider-specific options (e.g. task_type: "RETRIEVAL_DOCUMENT")
-      def embed(input, model: Embed::DEFAULT_MODEL, **
-        Embed.process!(input, model:, client: self, **
+      def embed(input, model: Embed::DEFAULT_MODEL, **)
+        Embed.process!(input, model:, client: self, **)
       end

       # @raise [OmniAI::Error]
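The new signature uses Ruby's anonymous keyword-argument forwarding (a bare ** in both the parameter list and the call, available since Ruby 3.2), so provider-specific options pass straight through to Embed.process!. A minimal usage sketch, assuming a client whose credentials are configured elsewhere; the input text here is illustrative:

client = OmniAI::Google::Client.new

# task_type is one of the provider-specific options mentioned in the doc comment above.
response = client.embed("What is the capital of France?", task_type: "RETRIEVAL_DOCUMENT")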
data/lib/omniai/google/embed.rb
CHANGED
@@ -44,7 +44,7 @@ module OmniAI
         prompt_tokens = data.dig("usageMetadata", "promptTokenCount")
         total_tokens = data.dig("usageMetadata", "totalTokenCount")

-        Usage.new(prompt_tokens
+        Usage.new(prompt_tokens:, total_tokens:)
       end

       # @return [Context]
@@ -73,13 +73,14 @@ module OmniAI
       #
       # @return [Symbol] :embed_content, :predict, or :batch_embed_contents
       def endpoint
-        @endpoint ||=
-
-
-
-
-
-
+        @endpoint ||=
+          if @client.vertex? && @model.start_with?("gemini-embedding-2")
+            :embed_content
+          elsif @client.vertex?
+            :predict
+          else
+            :batch_embed_contents
+          end
       end

       # @return [Context]
@@ -106,7 +107,7 @@ module OmniAI
         raise ArgumentError, "embedContent does not support batch input" if @input.is_a?(Array) && @input.length > 1

         text = @input.is_a?(Array) ? @input.first : @input
-        result = { content: { parts: [{ text:
+        result = { content: { parts: [{ text: }] } }
         result[:taskType] = @options[:task_type] if @options[:task_type]
         result
       end
@@ -126,11 +127,11 @@ module OmniAI
           requests: inputs.map do |text|
             request = {
               model: "models/#{@model}",
-              content: { parts: [{ text:
+              content: { parts: [{ text: }] },
             }
             request[:taskType] = @options[:task_type] if @options[:task_type]
             request
-          end
+          end,
         }
       end

@@ -139,15 +140,15 @@ module OmniAI
         { key: (@client.api_key unless @client.credentials?) }.compact
       end

+      PROCEDURES = {
+        embed_content: "embedContent",
+        predict: "predict",
+        batch_embed_contents: "batchEmbedContents",
+      }.freeze
+
       # @return [String]
       def path
-
-        when :embed_content then "embedContent"
-        when :predict then "predict"
-        when :batch_embed_contents then "batchEmbedContents"
-        end
-
-        "/#{@client.path}/models/#{@model}:#{procedure}"
+        "/#{@client.path}/models/#{@model}:#{PROCEDURES[endpoint]}"
       end
     end
   end
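The last hunk replaces an inline case expression in path with a frozen PROCEDURES lookup keyed by the endpoint symbol. Here is a standalone sketch of the resulting routing, written outside the gem; the endpoint_for helper, the client path "v1beta", and the model name "text-embedding-004" are illustrative, not taken from the package:

# A minimal sketch of the new routing; values are illustrative.
PROCEDURES = {
  embed_content: "embedContent",
  predict: "predict",
  batch_embed_contents: "batchEmbedContents",
}.freeze

# Mirrors the branching in Embed#endpoint: Vertex clients with gemini-embedding-2* models
# use embedContent, other Vertex models use predict, and the Gemini API uses batchEmbedContents.
def endpoint_for(vertex:, model:)
  if vertex && model.start_with?("gemini-embedding-2")
    :embed_content
  elsif vertex
    :predict
  else
    :batch_embed_contents
  end
end

client_path = "v1beta"            # hypothetical; the real value comes from Client#path
model = "text-embedding-004"      # hypothetical model name
endpoint = endpoint_for(vertex: false, model: model)

puts "/#{client_path}/models/#{model}:#{PROCEDURES[endpoint]}"
# => /v1beta/models/text-embedding-004:batchEmbedContents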