gen-ai 0.4.1 → 0.4.2

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: aa99531962c9b41b65e33716910d7debc1b847283dcc89a0047d03819f73b016
4
- data.tar.gz: 86803a58994aca6fa3428674def207f318423ea28d04942a7427d6f2ae883a61
3
+ metadata.gz: fe2c9cb2e87385fea4fb417b62fbfeaaf4c825367395bb2b06b1deeddf3defec
4
+ data.tar.gz: 8c452c2767c8c2ae5a166b26329ba23b7190efa71f19a5d055e66110d2511cde
5
5
  SHA512:
6
- metadata.gz: 151cbe9615dfb96bd9fecd96fc87d122c1f13368c1a236180432256336d4c22680c8d1fbc654e53a347b8c9391a99a3e1faef76e5051f1524a0ac28e7129caca
7
- data.tar.gz: d8ae8a92ef41e0e09af567aecaed6d33ed0af6ac80af6cbbe092269b90e974674aa53626eff449ef642657c5cb9c9e7b88bc076339e234b60eeaaf13a9fba356
6
+ metadata.gz: 953848790eb404df5294ff6e629643d6735ca6c2506b0129d667f12b89e56a3c5b838fa1cd9bb7257f779c8c5e047ed667702c533d75b301625dcc53434a7632
7
+ data.tar.gz: 3f6c563be51280f1d539ab72ad65b3f5732180423ae86c726fd91e4111103043a9812fca91cd242bebabdf7839640af8a3878b6d97538e4c797b39bf3733e7ff
data/.rubocop.yml CHANGED
@@ -32,7 +32,9 @@ Layout/ArgumentAlignment:
32
32
  Layout/LineLength:
33
33
  Exclude:
34
34
  - spec/**/*.rb
35
+ - gen-ai.gemspec
35
36
 
36
37
  Metrics/BlockLength:
37
38
  Exclude:
38
39
  - spec/**/*.rb
40
+ - gen-ai.gemspec
data/gen-ai.gemspec CHANGED
@@ -38,8 +38,9 @@ Gem::Specification.new do |spec|
38
38
  spec.add_dependency 'faraday-multipart', '~> 1.0'
39
39
  spec.add_dependency 'zeitwerk', '~> 2.6'
40
40
 
41
+ spec.add_development_dependency 'gemini-ai', '~> 3.2'
41
42
  spec.add_development_dependency 'google_palm_api', '~> 0.1'
42
- spec.add_development_dependency 'ruby-openai', '~> 5.1'
43
+ spec.add_development_dependency 'ruby-openai', '~> 6.0'
43
44
  # For more information and examples about making a new gem, check out our
44
45
  # guide at: https://bundler.io/guides/creating_gem.html
45
46
  spec.metadata['rubygems_mfa_required'] = 'true'
@@ -20,11 +20,28 @@ module GenAI
20
20
  end
21
21
 
22
22
  def extract_completions(response)
23
- response['candidates'].map { |candidate| candidate.dig('content', 'parts', 0, 'text') }
23
+ if response.is_a?(Array)
24
+ response.map { |completion| extract_candidates(completion) }
25
+ else
26
+ extract_candidates(response)
27
+ end
28
+ end
29
+
30
+ def chunk_params_from_streaming(chunk)
31
+ {
32
+ model: 'gemini-pro',
33
+ index: chunk.dig('candidates', 0, 'index'),
34
+ value: chunk.dig('candidates', 0, 'content', 'parts', 0, 'text'),
35
+ raw: chunk
36
+ }
24
37
  end
25
38
 
26
39
  private
27
40
 
41
+ def extract_candidates(candidates)
42
+ candidates['candidates'].map { |candidate| candidate.dig('content', 'parts', 0, 'text') }
43
+ end
44
+
28
45
  def role_for(message)
29
46
  message[:role] == 'user' ? USER_ROLE : ASSISTANT_ROLE
30
47
  end
@@ -11,6 +11,31 @@ module GenAI
11
11
  def extract_completions(response)
12
12
  response['choices'].map { |choice| choice.dig('message', 'content') }
13
13
  end
14
+
15
+ def chunk_params_from_streaming(chunk)
16
+ {
17
+ model: chunk['model'],
18
+ index: chunk.dig('choices', 0, 'index'),
19
+ value: chunk.dig('choices', 0, 'delta', 'content'),
20
+ raw: chunk
21
+ }
22
+ end
23
+
24
+ def build_raw_response(chunks)
25
+ { 'choices' => build_raw_choices(chunks),
26
+ 'usage' => { 'completion_tokens' => chunks.values.map(&:size).sum } }
27
+ end
28
+
29
+ def build_raw_choices(chunks)
30
+ chunks.map do |key, values|
31
+ {
32
+ 'index' => key,
33
+ 'logprobs' => nil,
34
+ 'finish_reason' => 'stop',
35
+ 'message' => { 'role' => 'assistant', 'content' => values.map(&:value).join }
36
+ }
37
+ end
38
+ end
14
39
  end
15
40
  end
16
41
  end
data/lib/gen_ai/base.rb CHANGED
@@ -15,6 +15,8 @@ module GenAI
15
15
  raise GenAI::ApiError, "#{api_provider_name} API error: #{response.dig('error', 'message')}" if response['error']
16
16
 
17
17
  response
18
+ rescue Faraday::BadRequestError => e
19
+ raise GenAI::ApiError, "#{api_provider_name} API error: #{e.response[:body].dig('error', 'message')}"
18
20
  end
19
21
 
20
22
  def provider_name
@@ -28,5 +30,9 @@ module GenAI
28
30
  def build_result(model:, raw:, parsed:)
29
31
  GenAI::Result.new(provider: provider_name.to_sym, model: model, raw: raw, values: parsed)
30
32
  end
33
+
34
+ def build_chunk(options)
35
+ GenAI::Chunk.new(provider: provider_name.to_sym, **options)
36
+ end
31
37
  end
32
38
  end
@@ -18,14 +18,14 @@ module GenAI
18
18
  @history = build_history(history.map(&:deep_symbolize_keys), context, examples.map(&:deep_symbolize_keys))
19
19
  end
20
20
 
21
- def message(message, options = {})
21
+ def message(message, options = {}, &block)
22
22
  if @history.size == 1 && @history.first[:role] != 'system'
23
23
  append_to_message(message)
24
24
  else
25
25
  append_to_history({ role: USER_ROLE, content: message })
26
26
  end
27
27
 
28
- response = @model.chat(@history.dup, default_options.merge(options).compact)
28
+ response = @model.chat(@history.dup, default_options.merge(options).compact, &block)
29
29
  append_to_history({ role: ASSISTANT_ROLE, content: response.value })
30
30
  response
31
31
  end
data/lib/gen_ai/chat.rb CHANGED
@@ -4,7 +4,7 @@ module GenAI
4
4
  class Chat
5
5
  extend Forwardable
6
6
 
7
- def_delegators :@chat, :start, :message
7
+ def_delegators :@chat, :start, :message, :history
8
8
 
9
9
  def initialize(provider, token, options: {})
10
10
  build_chat(provider, token, options)
@@ -0,0 +1,15 @@
1
+ # frozen_string_literal: true
2
+
3
+ module GenAI
4
+ class Chunk
5
+ attr_reader :raw, :provider, :model, :value, :index
6
+
7
+ def initialize(provider:, model:, index:, raw:, value:)
8
+ @raw = raw
9
+ @index = index
10
+ @provider = provider
11
+ @model = model
12
+ @value = value
13
+ end
14
+ end
15
+ end
@@ -27,10 +27,9 @@ module GenAI
27
27
  end
28
28
 
29
29
  def completion_tokens
30
- if usage['completion_tokens'] ||
31
- (total_tokens && prompt_tokens)
32
- total_tokens.to_i - prompt_tokens.to_i
33
- end
30
+ return usage['completion_tokens'] if usage['completion_tokens']
31
+
32
+ total_tokens.to_i - prompt_tokens.to_i if total_tokens && prompt_tokens
34
33
  end
35
34
 
36
35
  def total_tokens
@@ -27,24 +27,6 @@ module GenAI
27
27
  )
28
28
  end
29
29
 
30
- def variations(image, options = {})
31
- model = options[:model] || DEFAULT_MODEL
32
- url = "/v1/generation/stable-diffusion-xl-1024-v1-0/image-to-image"
33
-
34
- response = client.post url, {
35
- init_image: File.binread(image),
36
- image_strength: 0.3,
37
- 'text_prompts[0][text]' => "Portrait of old Jim Morrison at age of 60, handsome, color, phot-realistic, 50mm, crisp, high-res, depth of field, composition",
38
- 'text_prompts[0][weight]' => 0.7
39
- }, multipart: true
40
-
41
- build_result(
42
- raw: response,
43
- model: model,
44
- parsed: parse_response_data(response['artifacts'])
45
- )
46
- end
47
-
48
30
  def edit(image, prompt, options = {})
49
31
  model = options[:model] || DEFAULT_MODEL
50
32
  url = "/v1/generation/#{model}/image-to-image"
@@ -1,34 +1,60 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  require 'faraday'
4
+ require 'pry'
4
5
 
5
6
  module GenAI
6
7
  class Language
7
8
  class Gemini < Base
8
9
  include GenAI::Api::Format::Gemini
9
10
 
10
- BASE_API_URL = 'https://generativelanguage.googleapis.com'
11
+ COMPLETION_MODEL = 'gemini-pro'
11
12
 
12
13
  def initialize(token:, options: {})
13
- @token = token
14
- build_client(token)
14
+ depends_on 'gemini-ai'
15
+
16
+ @client = ::Gemini.new(
17
+ credentials: {
18
+ service: 'generative-language-api',
19
+ api_key: token
20
+ },
21
+ options: { model: model(options) }
22
+ )
15
23
  end
16
24
 
17
25
  def complete(prompt, options = {}); end
18
26
 
19
- def chat(messages, options = {})
20
- response = client.post "/v1beta/models/gemini-pro:generateContent?key=#{@token}", {
27
+ def chat(messages, options = {}, &block)
28
+ if block_given?
29
+ response = @client.stream_generate_content(
30
+ generate_options(messages, options), server_sent_events: true, &chunk_process_block(block)
31
+ )
32
+ build_result(model: model(options), raw: response.first, parsed: extract_completions(response).flatten)
33
+ else
34
+ response = @client.generate_content(generate_options(messages, options))
35
+ build_result(model: model(options), raw: response, parsed: extract_completions(response))
36
+ end
37
+ end
38
+
39
+ private
40
+
41
+ def generate_options(messages, options)
42
+ {
21
43
  contents: format_messages(messages),
22
44
  generationConfig: options.except(:model)
23
45
  }
46
+ end
24
47
 
25
- build_result(model: 'gemini-pro', raw: response, parsed: extract_completions(response))
48
+ def model(options)
49
+ options[:model] || COMPLETION_MODEL
26
50
  end
27
51
 
28
- private
52
+ def chunk_process_block(block)
53
+ proc do |data|
54
+ chunk = build_chunk(chunk_params_from_streaming(data))
29
55
 
30
- def build_client(token)
31
- @client = GenAI::Api::Client.new(url: BASE_API_URL, token: nil)
56
+ block.call chunk
57
+ end
32
58
  end
33
59
  end
34
60
  end
@@ -23,31 +23,63 @@ module GenAI
23
23
  end
24
24
 
25
25
  def complete(prompt, options = {})
26
- parameters = build_completion_options(prompt, options)
26
+ chat_request build_completion_options(prompt, options)
27
+ end
27
28
 
28
- response = handle_errors { client.chat(parameters: parameters) }
29
+ def chat(messages, options = {}, &block)
30
+ parameters = build_chat_options(messages, options)
29
31
 
30
- build_result(model: parameters[:model], raw: response, parsed: extract_completions(response))
32
+ block_given? ? chat_streaming_request(parameters, block) : chat_request(parameters)
33
+ end
34
+
35
+ private
36
+
37
+ def build_chat_options(messages, options)
38
+ build_options(messages.map(&:deep_symbolize_keys), options)
39
+ end
40
+
41
+ def build_completion_options(prompt, options)
42
+ build_options([{ role: DEFAULT_ROLE, content: prompt }], options)
31
43
  end
32
44
 
33
- def chat(messages, options = {})
34
- parameters = {
35
- messages: messages.map(&:deep_symbolize_keys),
45
+ def build_options(messages, options)
46
+ {
47
+ messages: messages,
36
48
  model: options.delete(:model) || COMPLETION_MODEL
37
49
  }.merge(options)
50
+ end
38
51
 
52
+ def chat_request(parameters)
39
53
  response = handle_errors { client.chat(parameters: parameters) }
40
54
 
41
55
  build_result(model: parameters[:model], raw: response, parsed: extract_completions(response))
42
56
  end
43
57
 
44
- private
58
+ def chat_streaming_request(parameters, block)
59
+ chunks = {}
45
60
 
46
- def build_completion_options(prompt, options)
47
- {
48
- messages: [{ role: DEFAULT_ROLE, content: prompt }],
49
- model: options.delete(:model) || COMPLETION_MODEL
50
- }.merge(options)
61
+ parameters[:stream] = chunk_process_block(chunks, block)
62
+
63
+ client.chat(parameters: parameters)
64
+
65
+ build_result(
66
+ model: parameters[:model],
67
+ parsed: chunks.values.map { |group| group.map(&:value).join },
68
+ raw: build_raw_response(chunks)
69
+ )
70
+ end
71
+
72
+ def chunk_process_block(chunks, block)
73
+ proc do |data|
74
+ chunk = build_chunk(chunk_params_from_streaming(data))
75
+
76
+ unless chunk.value.nil? || chunk.value.empty?
77
+ block.call chunk
78
+
79
+ chunks[chunk.index] = [] unless chunks[chunk.index]
80
+ chunks[chunk.index] << chunk
81
+ end
82
+ end
51
83
  end
52
84
  end
53
85
  end
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module GenAI
4
- VERSION = '0.4.1'
4
+ VERSION = '0.4.2'
5
5
  end
data/lib/gen_ai.rb CHANGED
@@ -10,6 +10,7 @@ loader.inflector.inflect(
10
10
  'stability_ai' => 'StabilityAI'
11
11
  )
12
12
  loader.ignore("#{__dir__}/gen")
13
+ loader.collapse("#{__dir__}/gen_ai/core")
13
14
  loader.setup
14
15
 
15
16
  module GenAI
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: gen-ai
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.4.1
4
+ version: 0.4.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Alex Chaplinsky
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2024-01-19 00:00:00.000000000 Z
11
+ date: 2024-02-29 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: activesupport
@@ -66,6 +66,20 @@ dependencies:
66
66
  - - "~>"
67
67
  - !ruby/object:Gem::Version
68
68
  version: '2.6'
69
+ - !ruby/object:Gem::Dependency
70
+ name: gemini-ai
71
+ requirement: !ruby/object:Gem::Requirement
72
+ requirements:
73
+ - - "~>"
74
+ - !ruby/object:Gem::Version
75
+ version: '3.2'
76
+ type: :development
77
+ prerelease: false
78
+ version_requirements: !ruby/object:Gem::Requirement
79
+ requirements:
80
+ - - "~>"
81
+ - !ruby/object:Gem::Version
82
+ version: '3.2'
69
83
  - !ruby/object:Gem::Dependency
70
84
  name: google_palm_api
71
85
  requirement: !ruby/object:Gem::Requirement
@@ -86,14 +100,14 @@ dependencies:
86
100
  requirements:
87
101
  - - "~>"
88
102
  - !ruby/object:Gem::Version
89
- version: '5.1'
103
+ version: '6.0'
90
104
  type: :development
91
105
  prerelease: false
92
106
  version_requirements: !ruby/object:Gem::Requirement
93
107
  requirements:
94
108
  - - "~>"
95
109
  - !ruby/object:Gem::Version
96
- version: '5.1'
110
+ version: '6.0'
97
111
  description: GenAI allows you to easily integrate Generative AI model providers like
98
112
  OpenAI, Google Vertex AI, Stability AI, etc
99
113
  email:
@@ -123,6 +137,8 @@ files:
123
137
  - lib/gen_ai/chat/gemini.rb
124
138
  - lib/gen_ai/chat/google_palm.rb
125
139
  - lib/gen_ai/chat/open_ai.rb
140
+ - lib/gen_ai/core/chunk.rb
141
+ - lib/gen_ai/core/result.rb
126
142
  - lib/gen_ai/dependency.rb
127
143
  - lib/gen_ai/image.rb
128
144
  - lib/gen_ai/image/base.rb
@@ -134,7 +150,6 @@ files:
134
150
  - lib/gen_ai/language/gemini.rb
135
151
  - lib/gen_ai/language/google_palm.rb
136
152
  - lib/gen_ai/language/open_ai.rb
137
- - lib/gen_ai/result.rb
138
153
  - lib/gen_ai/version.rb
139
154
  - sig/gen_ai.rbs
140
155
  homepage: https://github.com/alchaplinsky/gen-ai