gen-ai 0.4.1 → 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop.yml +2 -0
- data/gen-ai.gemspec +2 -1
- data/lib/gen_ai/api/format/gemini.rb +18 -1
- data/lib/gen_ai/api/format/open_ai.rb +25 -0
- data/lib/gen_ai/base.rb +6 -0
- data/lib/gen_ai/chat/base.rb +2 -2
- data/lib/gen_ai/chat.rb +1 -1
- data/lib/gen_ai/core/chunk.rb +15 -0
- data/lib/gen_ai/{result.rb → core/result.rb} +3 -4
- data/lib/gen_ai/image/stability_ai.rb +0 -18
- data/lib/gen_ai/language/gemini.rb +35 -9
- data/lib/gen_ai/language/open_ai.rb +44 -12
- data/lib/gen_ai/version.rb +1 -1
- data/lib/gen_ai.rb +1 -0
- metadata +20 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: fe2c9cb2e87385fea4fb417b62fbfeaaf4c825367395bb2b06b1deeddf3defec
+  data.tar.gz: 8c452c2767c8c2ae5a166b26329ba23b7190efa71f19a5d055e66110d2511cde
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 953848790eb404df5294ff6e629643d6735ca6c2506b0129d667f12b89e56a3c5b838fa1cd9bb7257f779c8c5e047ed667702c533d75b301625dcc53434a7632
+  data.tar.gz: 3f6c563be51280f1d539ab72ad65b3f5732180423ae86c726fd91e4111103043a9812fca91cd242bebabdf7839640af8a3878b6d97538e4c797b39bf3733e7ff
data/.rubocop.yml
CHANGED
data/gen-ai.gemspec
CHANGED
@@ -38,8 +38,9 @@ Gem::Specification.new do |spec|
   spec.add_dependency 'faraday-multipart', '~> 1.0'
   spec.add_dependency 'zeitwerk', '~> 2.6'

+  spec.add_development_dependency 'gemini-ai', '~> 3.2'
   spec.add_development_dependency 'google_palm_api', '~> 0.1'
-  spec.add_development_dependency 'ruby-openai', '~>
+  spec.add_development_dependency 'ruby-openai', '~> 6.0'
   # For more information and examples about making a new gem, check out our
   # guide at: https://bundler.io/guides/creating_gem.html
   spec.metadata['rubygems_mfa_required'] = 'true'
data/lib/gen_ai/api/format/gemini.rb
CHANGED
@@ -20,11 +20,28 @@ module GenAI
         end

         def extract_completions(response)
-          response
+          if response.is_a?(Array)
+            response.map { |completion| extract_candidates(completion) }
+          else
+            extract_candidates(response)
+          end
+        end
+
+        def chunk_params_from_streaming(chunk)
+          {
+            model: 'gemini-pro',
+            index: chunk.dig('candidates', 0, 'index'),
+            value: chunk.dig('candidates', 0, 'content', 'parts', 0, 'text'),
+            raw: chunk
+          }
         end

         private

+        def extract_candidates(candidates)
+          candidates['candidates'].map { |candidate| candidate.dig('content', 'parts', 0, 'text') }
+        end
+
         def role_for(message)
           message[:role] == 'user' ? USER_ROLE : ASSISTANT_ROLE
         end
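The reworked extract_completions accepts both shapes Gemini can hand back: a single response Hash from a one-shot call, or an Array of partial responses from a streamed call. A minimal standalone sketch of the two shapes (sample payloads invented for illustration, same digging logic as extract_candidates above):

single   = { 'candidates' => [{ 'content' => { 'parts' => [{ 'text' => 'Hello' }] } }] }
streamed = [single, { 'candidates' => [{ 'content' => { 'parts' => [{ 'text' => ' world' }] } }] }]

extract = ->(r) { r['candidates'].map { |c| c.dig('content', 'parts', 0, 'text') } }

p extract.call(single)                         # => ["Hello"]
p streamed.map { |r| extract.call(r) }.flatten # => ["Hello", " world"]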
data/lib/gen_ai/api/format/open_ai.rb
CHANGED
@@ -11,6 +11,31 @@ module GenAI
         def extract_completions(response)
           response['choices'].map { |choice| choice.dig('message', 'content') }
         end
+
+        def chunk_params_from_streaming(chunk)
+          {
+            model: chunk['model'],
+            index: chunk.dig('choices', 0, 'index'),
+            value: chunk.dig('choices', 0, 'delta', 'content'),
+            raw: chunk
+          }
+        end
+
+        def build_raw_response(chunks)
+          { 'choices' => build_raw_choices(chunks),
+            'usage' => { 'completion_tokens' => chunks.values.map(&:size).sum } }
+        end
+
+        def build_raw_choices(chunks)
+          chunks.map do |key, values|
+            {
+              'index' => key,
+              'logprobs' => nil,
+              'finish_reason' => 'stop',
+              'message' => { 'role' => 'asssistant', 'content' => values.map(&:value).join }
+            }
+          end
+        end
       end
     end
   end
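build_raw_response folds the streamed chunks, grouped by choice index, back into an OpenAI-shaped response hash so the rest of the pipeline need not care about streaming. A standalone sketch with invented data (note the released code spells the role 'asssistant'; the sketch uses the conventional spelling):

Chunk = Struct.new(:value, keyword_init: true) # stand-in for GenAI::Chunk

chunks = { 0 => [Chunk.new(value: 'Hi'), Chunk.new(value: ' there')] }

raw = {
  'choices' => chunks.map { |index, values|
    { 'index' => index, 'finish_reason' => 'stop',
      'message' => { 'role' => 'assistant', 'content' => values.map(&:value).join } }
  },
  'usage' => { 'completion_tokens' => chunks.values.map(&:size).sum }
}

p raw.dig('choices', 0, 'message', 'content') # => "Hi there"
p raw.dig('usage', 'completion_tokens')       # => 2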
data/lib/gen_ai/base.rb
CHANGED
@@ -15,6 +15,8 @@ module GenAI
      raise GenAI::ApiError, "#{api_provider_name} API error: #{response.dig('error', 'message')}" if response['error']

      response
+    rescue Faraday::BadRequestError => e
+      raise GenAI::ApiError, "#{api_provider_name} API error: #{e.response[:body].dig('error', 'message')}"
    end

    def provider_name
@@ -28,5 +30,9 @@ module GenAI
    def build_result(model:, raw:, parsed:)
      GenAI::Result.new(provider: provider_name.to_sym, model: model, raw: raw, values: parsed)
    end
+
+    def build_chunk(options)
+      GenAI::Chunk.new(provider: provider_name.to_sym, **options)
+    end
  end
 end
data/lib/gen_ai/chat/base.rb
CHANGED
@@ -18,14 +18,14 @@ module GenAI
       @history = build_history(history.map(&:deep_symbolize_keys), context, examples.map(&:deep_symbolize_keys))
     end

-    def message(message, options = {})
+    def message(message, options = {}, &block)
       if @history.size == 1 && @history.first[:role] != 'system'
         append_to_message(message)
       else
         append_to_history({ role: USER_ROLE, content: message })
       end

-      response = @model.chat(@history.dup, default_options.merge(options).compact)
+      response = @model.chat(@history.dup, default_options.merge(options).compact, &block)
       append_to_history({ role: ASSISTANT_ROLE, content: response.value })
       response
     end
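With the block now threaded from Chat#message down to the underlying model, a caller can consume a conversation as a stream. A hypothetical usage sketch; the GenAI::Chat.new(provider, token) form follows the gem's README conventions, and the env var is a placeholder:

require 'gen_ai'

chat = GenAI::Chat.new(:open_ai, ENV['OPEN_AI_TOKEN'])
chat.message('Tell me a joke') do |chunk|
  print chunk.value # each GenAI::Chunk is yielded as the provider streams it
end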
data/lib/gen_ai/chat.rb
CHANGED
data/lib/gen_ai/core/chunk.rb
ADDED
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+module GenAI
+  class Chunk
+    attr_reader :raw, :provider, :model, :value, :index
+
+    def initialize(provider:, model:, index:, raw:, value:)
+      @raw = raw
+      @index = index
+      @provider = provider
+      @model = model
+      @value = value
+    end
+  end
+end
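GenAI::Chunk is a plain value object carrying one streamed fragment plus its provenance. A minimal sketch with invented sample values, using only the constructor defined above:

chunk = GenAI::Chunk.new(
  provider: :open_ai,
  model: 'gpt-3.5-turbo',
  index: 0,
  value: 'Hello',
  raw: { 'choices' => [{ 'delta' => { 'content' => 'Hello' } }] }
)
chunk.value # => "Hello"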
data/lib/gen_ai/{result.rb → core/result.rb}
RENAMED
@@ -27,10 +27,9 @@ module GenAI
   end

   def completion_tokens
-    if usage['completion_tokens']
-
-
-    end
+    return usage['completion_tokens'] if usage['completion_tokens']
+
+    total_tokens.to_i - prompt_tokens.to_i if total_tokens && prompt_tokens
   end

   def total_tokens
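The guard-clause rewrite keeps the same fallback: when a provider omits 'completion_tokens', the count is derived from the totals. A standalone restatement with an invented usage hash:

usage = { 'total_tokens' => 42, 'prompt_tokens' => 30 } # no 'completion_tokens' key

completion_tokens =
  usage['completion_tokens'] ||
  (usage['total_tokens'].to_i - usage['prompt_tokens'].to_i if usage['total_tokens'] && usage['prompt_tokens'])

p completion_tokens # => 12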
data/lib/gen_ai/image/stability_ai.rb
CHANGED
@@ -27,24 +27,6 @@ module GenAI
        )
      end

-      def variations(image, options = {})
-        model = options[:model] || DEFAULT_MODEL
-        url = "/v1/generation/stable-diffusion-xl-1024-v1-0/image-to-image"
-
-        response = client.post url, {
-          init_image: File.binread(image),
-          image_strength: 0.3,
-          'text_prompts[0][text]' => "Portrait of old Jim Morrison at age of 60, handsome, color, phot-realistic, 50mm, crisp, high-res, depth of field, composition",
-          'text_prompts[0][weight]' => 0.7
-        }, multipart: true
-
-        build_result(
-          raw: response,
-          model: model,
-          parsed: parse_response_data(response['artifacts'])
-        )
-      end
-
      def edit(image, prompt, options = {})
        model = options[:model] || DEFAULT_MODEL
        url = "/v1/generation/#{model}/image-to-image"
data/lib/gen_ai/language/gemini.rb
CHANGED
@@ -1,34 +1,60 @@
 # frozen_string_literal: true

 require 'faraday'
+require 'pry'

 module GenAI
   class Language
     class Gemini < Base
       include GenAI::Api::Format::Gemini

-
+      COMPLETION_MODEL = 'gemini-pro'

       def initialize(token:, options: {})
-
-
+        depends_on 'gemini-ai'
+
+        @client = ::Gemini.new(
+          credentials: {
+            service: 'generative-language-api',
+            api_key: token
+          },
+          options: { model: model(options) }
+        )
       end

       def complete(prompt, options = {}); end

-      def chat(messages, options = {})
-
+      def chat(messages, options = {}, &block)
+        if block_given?
+          response = @client.stream_generate_content(
+            generate_options(messages, options), server_sent_events: true, &chunk_process_block(block)
+          )
+          build_result(model: model(options), raw: response.first, parsed: extract_completions(response).flatten)
+        else
+          response = @client.generate_content(generate_options(messages, options))
+          build_result(model: model(options), raw: response, parsed: extract_completions(response))
+        end
+      end
+
+      private
+
+      def generate_options(messages, options)
+        {
           contents: format_messages(messages),
           generationConfig: options.except(:model)
         }
+      end

-
+      def model(options)
+        options[:model] || COMPLETION_MODEL
       end

-
+      def chunk_process_block(block)
+        proc do |data|
+          chunk = build_chunk(chunk_params_from_streaming(data))

-
-
+          block.call chunk
+        end
       end
     end
   end
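The Gemini backend now talks to Google through the gemini-ai client gem, streaming via server-sent events. (The added require 'pry' appears to be a debugging leftover that shipped in the release.) A hypothetical usage sketch; the GenAI::Language.new(provider, token) form follows the gem's README conventions, and the env var is a placeholder:

require 'gen_ai'

model = GenAI::Language.new(:gemini, ENV['GEMINI_API_KEY'])

result = model.chat([{ role: 'user', content: 'Hi, how are you?' }]) do |chunk|
  print chunk.value # each streamed fragment, wrapped in a GenAI::Chunk
end
puts result.value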
data/lib/gen_ai/language/open_ai.rb
CHANGED
@@ -23,31 +23,63 @@ module GenAI
       end

       def complete(prompt, options = {})
-
+        chat_request build_completion_options(prompt, options)
+      end

-
+      def chat(messages, options = {}, &block)
+        parameters = build_chat_options(messages, options)

-
+        block_given? ? chat_streaming_request(parameters, block) : chat_request(parameters)
+      end
+
+      private
+
+      def build_chat_options(messages, options)
+        build_options(messages.map(&:deep_symbolize_keys), options)
+      end
+
+      def build_completion_options(prompt, options)
+        build_options([{ role: DEFAULT_ROLE, content: prompt }], options)
       end

-      def
-
-          messages: messages
+      def build_options(messages, options)
+        {
+          messages: messages,
           model: options.delete(:model) || COMPLETION_MODEL
         }.merge(options)
+      end

+      def chat_request(parameters)
         response = handle_errors { client.chat(parameters: parameters) }

         build_result(model: parameters[:model], raw: response, parsed: extract_completions(response))
       end

-
+      def chat_streaming_request(parameters, block)
+        chunks = {}

-
-
-
-
-
+        parameters[:stream] = chunk_process_block(chunks, block)
+
+        client.chat(parameters: parameters)
+
+        build_result(
+          model: parameters[:model],
+          parsed: chunks.values.map { |group| group.map(&:value).join },
+          raw: build_raw_response(chunks)
+        )
+      end
+
+      def chunk_process_block(chunks, block)
+        proc do |data|
+          chunk = build_chunk(chunk_params_from_streaming(data))
+
+          unless chunk.value.nil? || chunk.value.empty?
+            block.call chunk
+
+            chunks[chunk.index] = [] unless chunks[chunk.index]
+            chunks[chunk.index] << chunk
+          end
+        end
       end
     end
   end
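Streaming with ruby-openai works by assigning a proc to parameters[:stream]; as the diff shows, the proc wraps each delta in a GenAI::Chunk, forwards non-empty chunks to the caller's block, and buckets them by choice index so build_raw_response can reassemble a normal-looking response afterward. A hypothetical end-to-end sketch (README-style constructor, token placeholder):

require 'gen_ai'

model = GenAI::Language.new(:open_ai, ENV['OPEN_AI_TOKEN'])

result = model.chat([{ role: 'user', content: 'Count to five' }]) do |chunk|
  print chunk.value # yielded for every non-empty delta
end
puts result.value # full text, re-assembled from the collected chunks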
data/lib/gen_ai/version.rb
CHANGED
data/lib/gen_ai.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: gen-ai
 version: !ruby/object:Gem::Version
-  version: 0.4.1
+  version: 0.4.2
 platform: ruby
 authors:
 - Alex Chaplinsky
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-
+date: 2024-02-29 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -66,6 +66,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '2.6'
+- !ruby/object:Gem::Dependency
+  name: gemini-ai
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.2'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.2'
 - !ruby/object:Gem::Dependency
   name: google_palm_api
   requirement: !ruby/object:Gem::Requirement
@@ -86,14 +100,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
 description: GenAI allows you to easily integrate Generative AI model providers like
   OpenAI, Google Vertex AI, Stability AI, etc
 email:
@@ -123,6 +137,8 @@ files:
 - lib/gen_ai/chat/gemini.rb
 - lib/gen_ai/chat/google_palm.rb
 - lib/gen_ai/chat/open_ai.rb
+- lib/gen_ai/core/chunk.rb
+- lib/gen_ai/core/result.rb
 - lib/gen_ai/dependency.rb
 - lib/gen_ai/image.rb
 - lib/gen_ai/image/base.rb
@@ -134,7 +150,6 @@ files:
 - lib/gen_ai/language/gemini.rb
 - lib/gen_ai/language/google_palm.rb
 - lib/gen_ai/language/open_ai.rb
-- lib/gen_ai/result.rb
 - lib/gen_ai/version.rb
 - sig/gen_ai.rbs
 homepage: https://github.com/alchaplinsky/gen-ai