gen-ai 0.3.2 → 0.4.0.alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/gen-ai.gemspec +1 -0
- data/lib/gen_ai/api/format/gemini.rb +27 -0
- data/lib/gen_ai/api/format/open_ai.rb +17 -0
- data/lib/gen_ai/chat/base.rb +37 -0
- data/lib/gen_ai/chat/gemini.rb +32 -0
- data/lib/gen_ai/chat/google_palm.rb +29 -0
- data/lib/gen_ai/chat/open_ai.rb +31 -0
- data/lib/gen_ai/chat.rb +25 -0
- data/lib/gen_ai/image.rb +4 -18
- data/lib/gen_ai/language/gemini.rb +35 -0
- data/lib/gen_ai/language/google_palm.rb +2 -2
- data/lib/gen_ai/language/open_ai.rb +9 -27
- data/lib/gen_ai/language.rb +6 -26
- data/lib/gen_ai/version.rb +1 -1
- data/lib/gen_ai.rb +1 -0
- metadata +26 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8e1ff584afa06668b219e7131622a406e84a7c608508fdc85d980358cd1b30dd
+  data.tar.gz: 84e22dad10312b99f01fca4b059beceac94af7203a51d2fe31d72b3f3985e7c4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 286c3cd97b77d2ca53da59a54cf1b88499e5d547069fadab29fbb7eb19651962a044945ae9ecef4f88dcd49eef179ccb5a905b360a127fd7f1078eace6d215a6
+  data.tar.gz: ba16f0cc317b209fc1b17f8fcf04b57eaa60cc0247cd6565802a3d77af56ac795ae3121ed8fd89ca70a2cfc0d99e9afda3aa2778a362df0e27e4b2fb385984e3
data/gen-ai.gemspec
CHANGED
@@ -33,6 +33,7 @@ Gem::Specification.new do |spec|
   spec.require_paths = ['lib']
 
   # Uncomment to register a new dependency of your gem
+  spec.add_dependency 'activesupport', '~> 7.1'
   spec.add_dependency 'faraday', '~> 2.7'
   spec.add_dependency 'faraday-multipart', '~> 1.0'
   spec.add_dependency 'zeitwerk', '~> 2.6'
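The new activesupport runtime dependency appears to back the deep_symbolize_keys! calls introduced throughout the new chat code below; for illustration (the hash contents are made up):

require 'active_support/core_ext/hash/keys'

{ 'role' => 'user', 'content' => 'Hello' }.deep_symbolize_keys!
#=> { role: 'user', content: 'Hello' }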
data/lib/gen_ai/api/format/gemini.rb
ADDED
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+module GenAI
+  module Api
+    module Format
+      module Gemini
+        def format_messages(messages)
+          messages.map { |message| transform_message(message) }
+        end
+
+        def transform_message(message)
+          { role: role_for(message), parts: [text: message[:content]] }
+        end
+
+        def extract_completions(response)
+          response['candidates'].map { |candidate| candidate.dig('content', 'parts', 0, 'text') }
+        end
+
+        private
+
+        def role_for(message)
+          message[:role] == 'user' ? self.class::USER_ROLE : self.class::ASSISTANT_ROLE
+        end
+      end
+    end
+  end
+end
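For reference, the transformation these helpers implement, applied to made-up messages (illustrative only; role_for assumes the including class defines USER_ROLE and ASSISTANT_ROLE, as Chat::Gemini does below):

transform_message({ role: 'user', content: 'Hi' })
#=> { role: 'user', parts: [{ text: 'Hi' }] }
transform_message({ role: 'assistant', content: 'Hello!' })
#=> { role: 'model', parts: [{ text: 'Hello!' }] }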
data/lib/gen_ai/api/format/open_ai.rb
ADDED
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+module GenAI
+  module Api
+    module Format
+      module OpenAI
+        def extract_embeddings(response)
+          response['data'].map { |datum| datum['embedding'] }
+        end
+
+        def extract_completions(response)
+          response['choices'].map { |choice| choice.dig('message', 'content') }
+        end
+      end
+    end
+  end
+end
data/lib/gen_ai/chat/base.rb
ADDED
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+module GenAI
+  class Chat
+    class Base < GenAI::Base
+      USER_ROLE = 'user'
+      ASSISTANT_ROLE = 'assistant'
+
+      def initialize(provider:, token:, options: {})
+        @history = []
+        @model = GenAI::Language.new(provider, token, options: options)
+      end
+
+      def start(history: [], context: nil, examples: [])
+        @history = build_history(history.map(&:deep_symbolize_keys!), context, examples.map(&:deep_symbolize_keys!))
+      end
+
+      def message(message, options = {})
+        if @history.size == 1
+          append_to_message(message)
+        else
+          append_to_history({ role: USER_ROLE, content: message })
+        end
+
+        response = @model.chat(@history.dup, options)
+        append_to_history({ role: ASSISTANT_ROLE, content: response.value })
+        response
+      end
+
+      private
+
+      def append_to_history(message)
+        @history << transform_message(message)
+      end
+    end
+  end
+end
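Chat::Base keeps the running conversation in @history and delegates the actual completion call to GenAI::Language. A rough usage sketch (provider, token, and prompts are placeholders; start and message come from the code above):

chat = GenAI::Chat.new(:open_ai, ENV['OPENAI_API_KEY'])
chat.start(context: 'You are a terse assistant.')
result = chat.message('What does Zeitwerk do?')
result.value # reply text, also appended to @history under the assistant role

The @history.size == 1 branch appears to handle the case where start produced only a single combined message (as the Gemini subclass below does): a follow-up sent before any model turn is merged into that first message instead of opening a new turn.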
data/lib/gen_ai/chat/gemini.rb
ADDED
@@ -0,0 +1,32 @@
+# frozen_string_literal: true
+
+module GenAI
+  class Chat
+    class Gemini < Base
+      include GenAI::Api::Format::Gemini
+
+      USER_ROLE = 'user'
+      ASSISTANT_ROLE = 'model'
+
+      private
+
+      def build_history(messages, context, examples)
+        history = format_messages(messages.drop(1))
+        history.prepend({ role: USER_ROLE, parts: [{ text: build_first_message(context, examples, messages.first) }] })
+        history
+      end
+
+      def build_first_message(context, examples, message)
+        chunks = []
+        chunks << context if context
+        chunks << examples.map { |example| "#{example[:role]}: #{example[:content]}" }.join("\n") if examples.any?
+        chunks << message[:content] if message
+        chunks.join("\n")
+      end
+
+      def append_to_message(message)
+        @history.last[:parts][0][:text] << "\n" << message
+      end
+    end
+  end
+end
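Since the Gemini API has no system role, context and examples are folded into the first user turn. Evaluating build_first_message on made-up values shows the folding:

build_first_message('Be brief.', [{ role: 'user', content: 'Hi' }], { content: 'Hello' })
#=> "Be brief.\nuser: Hi\nHello"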
data/lib/gen_ai/chat/google_palm.rb
ADDED
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+module GenAI
+  class Chat
+    class GooglePalm < Base
+      private
+
+      def build_history(messages, context, examples)
+        history = []
+        history << { role: SYSTEM_ROLE, content: context } if context
+        history.concat(examples)
+        history.concat(messages)
+        history
+      end
+
+      def role(message)
+        message[:role]
+      end
+
+      def transform_message(message)
+        message
+      end
+
+      def append_to_message(message)
+        @history.last[:content] = "#{@history.last[:content]}\n#{message}"
+      end
+    end
+  end
+end
data/lib/gen_ai/chat/open_ai.rb
ADDED
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+module GenAI
+  class Chat
+    class OpenAI < Base
+      SYSTEM_ROLE = 'system'
+
+      private
+
+      def build_history(messages, context, examples)
+        history = []
+        history << { role: SYSTEM_ROLE, content: context } if context
+        history.concat(examples)
+        history.concat(messages)
+        history
+      end
+
+      def role(message)
+        message[:role]
+      end
+
+      def transform_message(message)
+        message
+      end
+
+      def append_to_message(message)
+        @history.last[:content] = "#{@history.last[:content]}\n#{message}"
+      end
+    end
+  end
+end
data/lib/gen_ai/chat.rb
ADDED
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+module GenAI
+  class Chat
+    extend Forwardable
+
+    def_delegators :@chat, :start, :message
+
+    def initialize(provider, token, options: {})
+      build_chat(provider, token, options)
+    end
+
+    private
+
+    def build_chat(provider, token, options)
+      klass = GenAI::Chat.constants.find do |const|
+        const.to_s.downcase == provider.to_s.downcase.gsub('_', '')
+      end
+
+      raise UnsupportedProvider, "Unsupported Chat provider '#{provider}'" unless klass
+
+      @chat = GenAI::Chat.const_get(klass).new(provider: provider, token: token, options: options)
+    end
+  end
+end
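Provider resolution is by name-mangling: :open_ai downcases to 'open_ai', drops the underscore to 'openai', and matches the OpenAI constant case-insensitively. A sketch (token is a placeholder):

GenAI::Chat.new(:google_palm, token)  # resolves to GenAI::Chat::GooglePalm
GenAI::Chat.new(:acme, token)         # raises UnsupportedProvider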
data/lib/gen_ai/image.rb
CHANGED
@@ -2,30 +2,16 @@
 
 module GenAI
   class Image
-    def initialize(provider, token, options: {})
-      build_client(provider, token, options)
-    end
+    extend Forwardable
 
-    def generate(prompt, options = {})
-      client.generate(prompt, options)
-    end
-
-    def variations(image, options = {})
-      client.variations(image, options)
-    end
+    def_delegators :@client, :generate, :variations, :edit, :upscale
 
-    def
-
-    end
-
-    def upscale(image, options = {})
-      client.upscale(image, options)
+    def initialize(provider, token, options: {})
+      build_client(provider, token, options)
     end
 
     private
 
-    attr_reader :client
-
     def build_client(provider, token, options)
       klass = GenAI::Image.constants.find do |const|
         const.to_s.downcase == provider.to_s.downcase.gsub('_', '')
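The hand-written wrapper methods give way to Forwardable delegation, so calls pass straight through to the provider client; for callers the behaviour should be unchanged. A sketch (token and prompt are placeholders):

image = GenAI::Image.new(:open_ai, token)
image.generate('a watercolor fox')  # dispatched to @client.generate via def_delegators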
data/lib/gen_ai/language/gemini.rb
ADDED
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+require 'faraday'
+
+module GenAI
+  class Language
+    class Gemini < Base
+      include GenAI::Api::Format::Gemini
+
+      BASE_API_URL = 'https://generativelanguage.googleapis.com'
+
+      def initialize(token:, options: {})
+        @token = token
+        build_client(token)
+      end
+
+      def complete(prompt, options = {}); end
+
+      def chat(messages, options = {})
+        response = client.post "/v1beta/models/gemini-pro:generateContent?key=#{@token}", {
+          contents: messages.map(&:deep_symbolize_keys!),
+          generationConfig: options
+        }
+
+        build_result(model: 'gemini-pro', raw: response, parsed: extract_completions(response))
+      end
+
+      private
+
+      def build_client(token)
+        @client = GenAI::Api::Client.new(url: BASE_API_URL, token: nil)
+      end
+    end
+  end
+end
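The Gemini backend talks to the public generateContent REST endpoint directly instead of going through a client gem. Based on the code above, the request looks roughly like this (field values are made up):

POST https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=<token>
{
  "contents": [{ "role": "user", "parts": [{ "text": "Hi" }] }],
  "generationConfig": { "temperature": 0.5 }
}

Because the key rides in the query string, build_client passes token: nil to GenAI::Api::Client.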
data/lib/gen_ai/language/google_palm.rb
CHANGED
@@ -26,7 +26,7 @@ module GenAI
         )
       end
 
-      def complete(prompt, options
+      def complete(prompt, options = {})
         parameters = build_completion_options(prompt, options)
 
         response = handle_errors { client.generate_text(**parameters) }
@@ -38,7 +38,7 @@ module GenAI
         )
       end
 
-      def chat(message, context: nil, history: [], examples: [], options
+      def chat(message, context: nil, history: [], examples: [], **options)
         parameters = build_chat_options(message, context, history, examples, options)
 
         response = handle_errors { client.generate_chat_message(**parameters) }
data/lib/gen_ai/language/open_ai.rb
CHANGED
@@ -3,8 +3,10 @@
 module GenAI
   class Language
     class OpenAI < Base
+      include GenAI::Api::Format::OpenAI
+
       EMBEDDING_MODEL = 'text-embedding-ada-002'
-      COMPLETION_MODEL = 'gpt-3.5-turbo'
+      COMPLETION_MODEL = 'gpt-3.5-turbo-1106'
 
       def initialize(token:, options: {})
         depends_on 'ruby-openai'
@@ -20,7 +22,7 @@ module GenAI
         build_result(model: parameters[:model], raw: response, parsed: extract_embeddings(response))
       end
 
-      def complete(prompt, options
+      def complete(prompt, options = {})
         parameters = build_completion_options(prompt, options)
 
         response = handle_errors { client.chat(parameters: parameters) }
@@ -28,8 +30,11 @@ module GenAI
         build_result(model: parameters[:model], raw: response, parsed: extract_completions(response))
       end
 
-      def chat(
-        parameters =
+      def chat(messages, options = {})
+        parameters = {
+          messages: messages.map(&:deep_symbolize_keys!),
+          model: options.delete(:model) || COMPLETION_MODEL
+        }.merge(options)
 
         response = handle_errors { client.chat(parameters: parameters) }
 
@@ -38,35 +43,12 @@ module GenAI
 
       private
 
-      def build_chat_options(message, context, history, examples, options)
-        messages = []
-        messages.concat(examples)
-        messages.concat(history)
-
-        messages.prepend({ role: 'system', content: context }) if context
-
-        messages.append({ role: DEFAULT_ROLE, content: message })
-
-        {
-          messages: messages,
-          model: options.delete(:model) || COMPLETION_MODEL
-        }.merge(options)
-      end
-
       def build_completion_options(prompt, options)
         {
           messages: [{ role: DEFAULT_ROLE, content: prompt }],
           model: options.delete(:model) || COMPLETION_MODEL
         }.merge(options)
       end
-
-      def extract_embeddings(response)
-        response['data'].map { |datum| datum['embedding'] }
-      end
-
-      def extract_completions(response)
-        response['choices'].map { |choice| choice.dig('message', 'content') }
-      end
     end
   end
 end
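With build_chat_options removed, chat now expects a fully formed message array from the caller (the new Chat classes above), and the response-parsing helpers move into GenAI::Api::Format::OpenAI. A sketch of the new calling convention (token is a placeholder):

model = GenAI::Language.new(:open_ai, token)
model.chat([{ role: 'user', content: 'Hi' }], model: 'gpt-4')  # model: overrides COMPLETION_MODEL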
data/lib/gen_ai/language.rb
CHANGED
@@ -1,39 +1,19 @@
 # frozen_string_literal: true
 
+require 'forwardable'
+
 module GenAI
   class Language
-    def initialize(provider, token, options: {})
-      build_llm(provider, token, options)
-    end
+    extend Forwardable
 
-    def embed(text, model: nil)
-      llm.embed(text, model: model)
-    end
-
-    def complete(prompt, options = {})
-      llm.complete(prompt, options: options)
-    end
+    def_delegators :@llm, :embed, :complete, :chat
 
-    def
-
+    def initialize(provider, token, options: {})
+      build_llm(provider, token, options)
     end
 
-    # def answer(prompt); end
-
-    # def sentiment(text); end
-
-    # def keywords(text); end
-
-    # def summarization(text); end
-
-    # def translation(text, _target:); end
-
-    # def correction(text); end
-
     private
 
-    attr_reader :llm
-
     def build_llm(provider, token, options)
       klass = GenAI::Language.constants.find do |const|
         const.to_s.downcase == provider.to_s.downcase.gsub('_', '')
data/lib/gen_ai/version.rb
CHANGED
data/lib/gen_ai.rb
CHANGED
metadata
CHANGED
@@ -1,15 +1,29 @@
 --- !ruby/object:Gem::Specification
 name: gen-ai
 version: !ruby/object:Gem::Version
-  version: 0.3.2
+  version: 0.4.0.alpha.2
 platform: ruby
 authors:
 - Alex Chaplinsky
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-01-
+date: 2024-01-03 00:00:00.000000000 Z
 dependencies:
+- !ruby/object:Gem::Dependency
+  name: activesupport
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '7.1'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '7.1'
 - !ruby/object:Gem::Dependency
   name: faraday
   requirement: !ruby/object:Gem::Requirement
@@ -99,7 +113,14 @@ files:
 - lib/gen/ai.rb
 - lib/gen_ai.rb
 - lib/gen_ai/api/client.rb
+- lib/gen_ai/api/format/gemini.rb
+- lib/gen_ai/api/format/open_ai.rb
 - lib/gen_ai/base.rb
+- lib/gen_ai/chat.rb
+- lib/gen_ai/chat/base.rb
+- lib/gen_ai/chat/gemini.rb
+- lib/gen_ai/chat/google_palm.rb
+- lib/gen_ai/chat/open_ai.rb
 - lib/gen_ai/dependency.rb
 - lib/gen_ai/image.rb
 - lib/gen_ai/image/base.rb
@@ -107,6 +128,7 @@ files:
 - lib/gen_ai/image/stability_ai.rb
 - lib/gen_ai/language.rb
 - lib/gen_ai/language/base.rb
+- lib/gen_ai/language/gemini.rb
 - lib/gen_ai/language/google_palm.rb
 - lib/gen_ai/language/open_ai.rb
 - lib/gen_ai/result.rb
@@ -131,9 +153,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
     version: 2.7.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ">="
+  - - ">"
     - !ruby/object:Gem::Version
-      version: '0'
+      version: 1.3.1
 requirements: []
 rubygems_version: 3.3.7
 signing_key: