gemini_cache 0.0.1
- checksums.yaml +7 -0
- data/lib/gemini_cache.rb +125 -0
- data/lib/geminiext/cache.rb +127 -0
- data/lib/geminiext/messages.rb.disabled +34 -0
- data/lib/geminiext/model.rb.disabled +27 -0
- data/lib/geminiext/response_extender.rb.disabled +44 -0
- metadata +61 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: 7bc6c689b1bad80c2f7865816137cd8cbadd3a3c112938d7a0f8982820f54065
  data.tar.gz: c18e33fe4733104d6945cd1e72255066e2e49e339cc18873cfad3276721e7c9a
SHA512:
  metadata.gz: 6f8659b09c15ab666ca8e858389eb39a4e2efabb9368a98685bce34bd45049359cf5a1c0c8e9080ea5bcf5d05f113df17d784f768154d720c9a87f135f0b4de5
  data.tar.gz: eb06c97493baf8a5cf61d6020fbb47094e9b831ec257afae63148ba5127867a9f8c5ba8b3241fe548e6727766afe3e3a8f9137d0def687c1cfa13f9638837b43
data/lib/gemini_cache.rb
ADDED
@@ -0,0 +1,125 @@
require 'faraday'
require 'json'

module GeminiCache
  def self.create(contents:, display_name:, model: 'gemini-1.5-flash-8b', ttl: 600)
    content = {
      model: "models/#{model}",
      display_name:,
      contents:,
      ttl: "#{ttl}s"
    }.to_json

    conn = Faraday.new(
      url: 'https://generativelanguage.googleapis.com',
      headers: { 'Content-Type' => 'application/json' }
    )

    response = conn.post('/v1beta/cachedContents') do |req|
      req.params['key'] = ENV.fetch('GEMINI_API_KEY')
      req.body = content
    end

    return JSON.parse(response.body) if response.status == 200

    raise "Error creating cache: #{response.status} - #{response.body}"
  rescue Faraday::Error => e
    raise "Request error: #{e.message}"
  end

  def self.get(name: nil, display_name: nil)
    raise 'Cache name or display name is required' if name.nil? && display_name.nil?
    raise 'Cache name and display name cannot both be given' if !name.nil? && !display_name.nil?

    return GeminiCache.list.find { |item| item['name'].eql? name } if !name.nil?
    return GeminiCache.list.find { |item| item['displayName'].eql? display_name } if !display_name.nil?
  end

  def self.list
    conn = Faraday.new(
      url: 'https://generativelanguage.googleapis.com',
      headers: { 'Content-Type' => 'application/json' }
    )

    response = conn.get('/v1beta/cachedContents') do |req|
      req.params['key'] = ENV.fetch('GEMINI_API_KEY')
    end

    return [] if JSON.parse(response.body).empty?

    JSON.parse(response.body)['cachedContents'].map do |item|
      def item.delete = GeminiCache.delete(name: self['name'])
      def item.set_ttl(ttl = 120) = GeminiCache.update(name: self['name'], content: { ttl: "#{ttl}s" })

      def item.generate_content(contents:)
        conn = Faraday.new(
          url: 'https://generativelanguage.googleapis.com',
          headers: { 'Content-Type' => 'application/json' }
        ) do |f|
          f.options.timeout = 300      # timeout in seconds for the full request
          f.options.open_timeout = 300 # timeout in seconds to open the connection
        end

        response = conn.post("/v1beta/models/#{self['model'].split('/').last}:generateContent") do |req|
          req.params['key'] = ENV.fetch('GEMINI_API_KEY')
          req.body = {
            cached_content: self['name'],
            contents:
          }.to_json
        end

        if response.status == 200
          resp = JSON.parse(response.body)
          def resp.content = dig('candidates', 0, 'content', 'parts', 0, 'text')
          return resp
        end

        raise "Error generating content: #{response.body}"
      rescue Faraday::Error => e
        raise "Request error: #{e.message}"
      end

      def item.single_prompt(prompt:) = generate_content(contents: [{ parts: [{ text: prompt }], role: 'user' }]).content

      item
    end
  rescue Faraday::Error => e
    raise "Request error: #{e.message}"
  end

  def self.update(name:, content:)
    conn = Faraday.new(
      url: 'https://generativelanguage.googleapis.com',
      headers: { 'Content-Type' => 'application/json' }
    )

    response = conn.patch("/v1beta/#{name}") do |req|
      req.params['key'] = ENV.fetch('GEMINI_API_KEY')
      req.body = content.to_json
    end

    return JSON.parse(response.body) if response.status == 200

    raise "Error updating cache: #{response.body}"
  rescue Faraday::Error => e
    raise "Request error: #{e.message}"
  end

  def self.delete(name:)
    conn = Faraday.new(
      url: 'https://generativelanguage.googleapis.com',
      headers: { 'Content-Type' => 'application/json' }
    )

    response = conn.delete("/v1beta/#{name}") do |req|
      req.params['key'] = ENV.fetch('GEMINI_API_KEY')
    end

    return true if response.status == 200

    raise "Error deleting cache: #{response.body}"
  rescue Faraday::Error => e
    raise "Request error: #{e.message}"
  end
end
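For orientation, a minimal usage sketch of the module above, assuming GEMINI_API_KEY is set in the environment; the display name, file path, and prompt are illustrative only:

require 'gemini_cache'

# Create a cache entry holding a large context (10-minute TTL by default).
GeminiCache.create(
  display_name: 'my-doc',
  contents: [{ parts: [{ text: File.read('big_document.txt') }], role: 'user' }]
)

# Look it up later by display name and query it against the cached context.
item = GeminiCache.get(display_name: 'my-doc')
puts item.single_prompt(prompt: 'Summarize the document in one sentence.')

item.set_ttl(3600)   # extend the TTL to one hour
item.delete          # remove the cached content when done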
data/lib/geminiext/cache.rb
ADDED
@@ -0,0 +1,127 @@
require 'faraday'
require 'json'

module GeminiExt
  module Cache
    def self.create(contents:, display_name:, model: 'gemini-1.5-flash-8b', ttl: 600)
      content = {
        model: "models/#{model}",
        display_name:,
        contents:,
        ttl: "#{ttl}s"
      }.to_json

      conn = Faraday.new(
        url: 'https://generativelanguage.googleapis.com',
        headers: { 'Content-Type' => 'application/json' }
      )

      response = conn.post('/v1beta/cachedContents') do |req|
        req.params['key'] = ENV.fetch('GEMINI_API_KEY')
        req.body = content
      end

      return JSON.parse(response.body) if response.status == 200

      raise "Error creating cache: #{response.status} - #{response.body}"
    rescue Faraday::Error => e
      raise "Request error: #{e.message}"
    end

    def self.get(name: nil, display_name: nil)
      raise 'Cache name or display name is required' if name.nil? && display_name.nil?
      raise 'Cache name and display name cannot both be given' if !name.nil? && !display_name.nil?

      return GeminiExt::Cache.list.find { |item| item['name'].eql? name } if !name.nil?
      return GeminiExt::Cache.list.find { |item| item['displayName'].eql? display_name } if !display_name.nil?
    end

    def self.list
      conn = Faraday.new(
        url: 'https://generativelanguage.googleapis.com',
        headers: { 'Content-Type' => 'application/json' }
      )

      response = conn.get('/v1beta/cachedContents') do |req|
        req.params['key'] = ENV.fetch('GEMINI_API_KEY')
      end

      return [] if JSON.parse(response.body).empty?

      JSON.parse(response.body)['cachedContents'].map do |item|
        def item.delete = GeminiExt::Cache.delete(name: self['name'])
        def item.set_ttl(ttl = 120) = GeminiExt::Cache.update(name: self['name'], content: { ttl: "#{ttl}s" })

        def item.generate_content(contents:)
          conn = Faraday.new(
            url: 'https://generativelanguage.googleapis.com',
            headers: { 'Content-Type' => 'application/json' }
          ) do |f|
            f.options.timeout = 300      # timeout in seconds for the full request
            f.options.open_timeout = 300 # timeout in seconds to open the connection
          end

          response = conn.post("/v1beta/models/#{self['model'].split('/').last}:generateContent") do |req|
            req.params['key'] = ENV.fetch('GEMINI_API_KEY')
            req.body = {
              cached_content: self['name'],
              contents:
            }.to_json
          end

          if response.status == 200
            resp = JSON.parse(response.body)
            def resp.content = dig('candidates', 0, 'content', 'parts', 0, 'text')
            return resp
          end

          raise "Error generating content: #{response.body}"
        rescue Faraday::Error => e
          raise "Request error: #{e.message}"
        end

        def item.single_prompt(prompt:) = generate_content(contents: [{ parts: [{ text: prompt }], role: 'user' }])

        item
      end
    rescue Faraday::Error => e
      raise "Request error: #{e.message}"
    end

    def self.update(name:, content:)
      conn = Faraday.new(
        url: 'https://generativelanguage.googleapis.com',
        headers: { 'Content-Type' => 'application/json' }
      )

      response = conn.patch("/v1beta/#{name}") do |req|
        req.params['key'] = ENV.fetch('GEMINI_API_KEY')
        req.body = content.to_json
      end

      return JSON.parse(response.body) if response.status == 200

      raise "Error updating cache: #{response.body}"
    rescue Faraday::Error => e
      raise "Request error: #{e.message}"
    end

    def self.delete(name:)
      conn = Faraday.new(
        url: 'https://generativelanguage.googleapis.com',
        headers: { 'Content-Type' => 'application/json' }
      )

      response = conn.delete("/v1beta/#{name}") do |req|
        req.params['key'] = ENV.fetch('GEMINI_API_KEY')
      end

      return true if response.status == 200

      raise "Error deleting cache: #{response.body}"
    rescue Faraday::Error => e
      raise "Request error: #{e.message}"
    end
  end
end
data/lib/geminiext/messages.rb.disabled
ADDED
@@ -0,0 +1,34 @@
module GeminiExt
  class Messages < Array
    def initialize(messages = nil)
      super(parse_messages(messages))
    end

    def add(message) = concat(parse_messages(message))

    private

    def parse_messages(messages)
      return [] if messages.nil?

      messages = [messages] unless messages.is_a?(Array)

      # if the first element is already well-formed, do not parse the rest
      return messages if messages.first in { role: String | Symbol, content: String | Array | Hash }

      messages.flat_map do |msg|
        if msg.is_a?(Hash)
          if msg.keys.size == 1
            role, content = msg.first
            { role: role.to_s, content: content }
          elsif msg.key?(:role) && msg.key?(:content)
            { role: msg[:role].to_s, content: msg[:content] }
          else
            msg.map { |role, content| { role: role.to_s, content: content } }
          end
        else
          raise ArgumentError, "Invalid message format: #{msg}"
        end
      end
    end
  end
end
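A brief sketch of how this (currently disabled) Messages class would normalize its input, assuming the file were loaded; the message texts are illustrative:

messages = GeminiExt::Messages.new({ user: 'Hello!' })
# => [{ role: 'user', content: 'Hello!' }]

messages.add(assistant: 'Hi, how can I help?')
# => [{ role: 'user', content: 'Hello!' },
#     { role: 'assistant', content: 'Hi, how can I help?' }]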
data/lib/geminiext/model.rb.disabled
ADDED
@@ -0,0 +1,27 @@
module GeminiExt
  module Model
    GPT_BASIC_MODEL = ENV.fetch('OPENAI_GPT_BASIC_MODEL', 'gpt-4o-mini')
    GPT_ADVANCED_MODEL = ENV.fetch('OPENAI_GPT_ADVANCED_MODEL', 'gpt-4o')
    GPT_ADVANCED_MODEL_LATEST = ENV.fetch('OPENAI_GPT_ADVANCED_MODEL_LATEST', 'chatgpt-4o-latest')

    O1_BASIC_MODEL = ENV.fetch('OPENAI_O1_BASIC_MODEL', 'o1-mini')
    O1_ADVANCED_MODEL = ENV.fetch('OPENAI_O1_ADVANCED_MODEL', 'o1-preview')

    def self.select(model)
      case model
      when :gpt_basic
        GPT_BASIC_MODEL
      when :gpt_advanced
        GPT_ADVANCED_MODEL
      when :gpt_advanced_latest
        GPT_ADVANCED_MODEL_LATEST
      when :o1_basic
        O1_BASIC_MODEL
      when :o1_advanced
        O1_ADVANCED_MODEL
      else
        model
      end
    end
  end
end
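A quick sketch of the intended lookup, assuming the file were enabled and none of the OPENAI_* environment overrides are set:

GeminiExt::Model.select(:gpt_basic)        # => "gpt-4o-mini"
GeminiExt::Model.select(:o1_advanced)      # => "o1-preview"
GeminiExt::Model.select('gemini-1.5-pro')  # unknown values pass through unchanged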
data/lib/geminiext/response_extender.rb.disabled
ADDED
@@ -0,0 +1,44 @@
module ResponseExtender
  def chat_params = self[:chat_params]

  def message = dig('choices', 0, 'message')

  def content = dig('choices', 0, 'message', 'content')
  def content? = !content.nil?

  def tool_calls = dig('choices', 0, 'message', 'tool_calls')
  def tool_calls? = !tool_calls.nil?

  def functions
    return if tool_calls.nil?

    functions = tool_calls.filter { |tool| tool['type'].eql? 'function' }
    return if functions.empty?

    functions_list = []
    functions.map.with_index do |function, function_index|
      function_info = tool_calls.dig(function_index, 'function')
      function_def = { id: function['id'], name: function_info['name'], arguments: Oj.load(function_info['arguments'], symbol_keys: true) }

      def function_def.run(context:)
        {
          tool_call_id: self[:id],
          role: :tool,
          name: self[:name],
          content: context.send(self[:name], **self[:arguments])
        }
      end

      functions_list << function_def
    end

    functions_list
  end

  def functions_run_all(context:)
    raise 'No functions to run' if functions.nil?

    functions.map { |function| function.run(context:) }
  end

  def functions? = !functions.nil?
end
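A minimal sketch of how this (currently disabled) ResponseExtender might be mixed into a parsed chat-completion hash. The response hash, the local_time helper, and the Clock context object are illustrative only, and Oj must be loaded beforehand since functions parses arguments with Oj.load:

require 'oj'

response = {
  'choices' => [
    { 'message' => {
        'content' => nil,
        'tool_calls' => [
          { 'id' => 'call_1', 'type' => 'function',
            'function' => { 'name' => 'local_time', 'arguments' => '{"tz":"UTC"}' } }
        ] } }
  ]
}.extend(ResponseExtender)

# Illustrative context object exposing the function the model asked for.
class Clock
  def local_time(tz:) = "noon in #{tz}"
end

response.tool_calls?  # => true
response.functions_run_all(context: Clock.new)
# => [{ tool_call_id: 'call_1', role: :tool, name: 'local_time', content: 'noon in UTC' }]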
metadata
ADDED
@@ -0,0 +1,61 @@
--- !ruby/object:Gem::Specification
name: gemini_cache
version: !ruby/object:Gem::Version
  version: 0.0.1
platform: ruby
authors:
- Gedean Dias
autorequire:
bindir: bin
cert_chain: []
date: 2024-11-10 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: faraday
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '2'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '2'
description: Ruby's Gemini Context Caching wrapper
email: gedean.dias@gmail.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- lib/gemini_cache.rb
- lib/geminiext/cache.rb
- lib/geminiext/messages.rb.disabled
- lib/geminiext/model.rb.disabled
- lib/geminiext/response_extender.rb.disabled
homepage: https://github.com/gedean/gemini_cache
licenses:
- MIT
metadata: {}
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '3'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubygems_version: 3.5.23
signing_key:
specification_version: 4
summary: Ruby Gemini Context Caching
test_files: []