llm.rb 2.1.0 → 3.0.0

Files changed (92)
  1. checksums.yaml +4 -4
  2. data/README.md +6 -0
  3. data/lib/llm/bot.rb +4 -4
  4. data/lib/llm/buffer.rb +0 -9
  5. data/lib/llm/contract/completion.rb +57 -0
  6. data/lib/llm/contract.rb +48 -0
  7. data/lib/llm/error.rb +22 -14
  8. data/lib/llm/eventhandler.rb +6 -4
  9. data/lib/llm/eventstream/parser.rb +18 -13
  10. data/lib/llm/function.rb +1 -1
  11. data/lib/llm/json_adapter.rb +109 -0
  12. data/lib/llm/message.rb +7 -28
  13. data/lib/llm/multipart/enumerator_io.rb +86 -0
  14. data/lib/llm/multipart.rb +32 -51
  15. data/lib/llm/object/builder.rb +6 -6
  16. data/lib/llm/object/kernel.rb +2 -2
  17. data/lib/llm/object.rb +23 -8
  18. data/lib/llm/provider.rb +11 -3
  19. data/lib/llm/providers/anthropic/error_handler.rb +1 -1
  20. data/lib/llm/providers/anthropic/files.rb +4 -5
  21. data/lib/llm/providers/anthropic/models.rb +1 -2
  22. data/lib/llm/providers/anthropic/{format/completion_format.rb → request_adapter/completion.rb} +19 -19
  23. data/lib/llm/providers/anthropic/{format.rb → request_adapter.rb} +7 -7
  24. data/lib/llm/providers/anthropic/response_adapter/completion.rb +66 -0
  25. data/lib/llm/providers/anthropic/{response → response_adapter}/enumerable.rb +1 -1
  26. data/lib/llm/providers/anthropic/{response → response_adapter}/file.rb +1 -1
  27. data/lib/llm/providers/anthropic/{response → response_adapter}/web_search.rb +3 -3
  28. data/lib/llm/providers/anthropic/response_adapter.rb +36 -0
  29. data/lib/llm/providers/anthropic/stream_parser.rb +6 -6
  30. data/lib/llm/providers/anthropic.rb +8 -11
  31. data/lib/llm/providers/deepseek/{format/completion_format.rb → request_adapter/completion.rb} +15 -15
  32. data/lib/llm/providers/deepseek/{format.rb → request_adapter.rb} +7 -7
  33. data/lib/llm/providers/deepseek.rb +2 -2
  34. data/lib/llm/providers/gemini/audio.rb +2 -2
  35. data/lib/llm/providers/gemini/error_handler.rb +3 -3
  36. data/lib/llm/providers/gemini/files.rb +4 -7
  37. data/lib/llm/providers/gemini/images.rb +9 -14
  38. data/lib/llm/providers/gemini/models.rb +1 -2
  39. data/lib/llm/providers/gemini/{format/completion_format.rb → request_adapter/completion.rb} +14 -14
  40. data/lib/llm/providers/gemini/{format.rb → request_adapter.rb} +8 -8
  41. data/lib/llm/providers/gemini/response_adapter/completion.rb +67 -0
  42. data/lib/llm/providers/gemini/{response → response_adapter}/embedding.rb +1 -1
  43. data/lib/llm/providers/gemini/{response → response_adapter}/file.rb +1 -1
  44. data/lib/llm/providers/gemini/{response → response_adapter}/files.rb +1 -1
  45. data/lib/llm/providers/gemini/{response → response_adapter}/image.rb +3 -3
  46. data/lib/llm/providers/gemini/{response → response_adapter}/models.rb +1 -1
  47. data/lib/llm/providers/gemini/{response → response_adapter}/web_search.rb +3 -3
  48. data/lib/llm/providers/gemini/response_adapter.rb +42 -0
  49. data/lib/llm/providers/gemini/stream_parser.rb +37 -32
  50. data/lib/llm/providers/gemini.rb +10 -14
  51. data/lib/llm/providers/ollama/error_handler.rb +1 -1
  52. data/lib/llm/providers/ollama/{format/completion_format.rb → request_adapter/completion.rb} +19 -19
  53. data/lib/llm/providers/ollama/{format.rb → request_adapter.rb} +7 -7
  54. data/lib/llm/providers/ollama/response_adapter/completion.rb +61 -0
  55. data/lib/llm/providers/ollama/{response → response_adapter}/embedding.rb +1 -1
  56. data/lib/llm/providers/ollama/response_adapter.rb +32 -0
  57. data/lib/llm/providers/ollama/stream_parser.rb +2 -2
  58. data/lib/llm/providers/ollama.rb +8 -10
  59. data/lib/llm/providers/openai/audio.rb +1 -1
  60. data/lib/llm/providers/openai/error_handler.rb +12 -2
  61. data/lib/llm/providers/openai/files.rb +3 -6
  62. data/lib/llm/providers/openai/images.rb +4 -5
  63. data/lib/llm/providers/openai/models.rb +1 -3
  64. data/lib/llm/providers/openai/moderations.rb +3 -5
  65. data/lib/llm/providers/openai/{format/completion_format.rb → request_adapter/completion.rb} +22 -22
  66. data/lib/llm/providers/openai/{format/moderation_format.rb → request_adapter/moderation.rb} +5 -5
  67. data/lib/llm/providers/openai/{format/respond_format.rb → request_adapter/respond.rb} +16 -16
  68. data/lib/llm/providers/openai/{format.rb → request_adapter.rb} +12 -12
  69. data/lib/llm/providers/openai/{response → response_adapter}/audio.rb +1 -1
  70. data/lib/llm/providers/openai/response_adapter/completion.rb +62 -0
  71. data/lib/llm/providers/openai/{response → response_adapter}/embedding.rb +1 -1
  72. data/lib/llm/providers/openai/{response → response_adapter}/enumerable.rb +1 -1
  73. data/lib/llm/providers/openai/{response → response_adapter}/file.rb +1 -1
  74. data/lib/llm/providers/openai/{response → response_adapter}/image.rb +1 -1
  75. data/lib/llm/providers/openai/{response → response_adapter}/moderations.rb +1 -1
  76. data/lib/llm/providers/openai/{response → response_adapter}/responds.rb +6 -10
  77. data/lib/llm/providers/openai/{response → response_adapter}/web_search.rb +3 -3
  78. data/lib/llm/providers/openai/response_adapter.rb +47 -0
  79. data/lib/llm/providers/openai/responses/stream_parser.rb +22 -22
  80. data/lib/llm/providers/openai/responses.rb +6 -8
  81. data/lib/llm/providers/openai/stream_parser.rb +6 -5
  82. data/lib/llm/providers/openai/vector_stores.rb +8 -9
  83. data/lib/llm/providers/openai.rb +12 -14
  84. data/lib/llm/response.rb +2 -5
  85. data/lib/llm/usage.rb +10 -0
  86. data/lib/llm/version.rb +1 -1
  87. data/lib/llm.rb +33 -1
  88. metadata +44 -35
  89. data/lib/llm/providers/anthropic/response/completion.rb +0 -39
  90. data/lib/llm/providers/gemini/response/completion.rb +0 -35
  91. data/lib/llm/providers/ollama/response/completion.rb +0 -28
  92. data/lib/llm/providers/openai/response/completion.rb +0 -40
data/lib/llm/providers/anthropic.rb

@@ -14,14 +14,13 @@ module LLM
   # bot.chat ["Tell me about this photo", File.open("/images/dog.jpg", "rb")]
   # bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
   class Anthropic < Provider
-    require_relative "anthropic/response/completion"
-    require_relative "anthropic/response/web_search"
-    require_relative "anthropic/format"
     require_relative "anthropic/error_handler"
+    require_relative "anthropic/request_adapter"
+    require_relative "anthropic/response_adapter"
     require_relative "anthropic/stream_parser"
-    require_relative "anthropic/files"
     require_relative "anthropic/models"
+    require_relative "anthropic/files"
+    include RequestAdapter

     HOST = "api.anthropic.com"

@@ -44,16 +43,15 @@ module LLM
     def complete(prompt, params = {})
       params = {role: :user, model: default_model, max_tokens: 1024}.merge!(params)
       tools = resolve_tools(params.delete(:tools))
-      params = [params, format_tools(tools)].inject({}, &:merge!).compact
+      params = [params, adapt_tools(tools)].inject({}, &:merge!).compact
       role, stream = params.delete(:role), params.delete(:stream)
       params[:stream] = true if stream.respond_to?(:<<) || stream == true
       req = Net::HTTP::Post.new("/v1/messages", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-      body = JSON.dump({messages: [format(messages)].flatten}.merge!(params))
+      body = LLM.json.dump({messages: [adapt(messages)].flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
       res = execute(request: req, stream:)
-      LLM::Response.new(res)
-        .extend(LLM::Anthropic::Response::Completion)
+      ResponseAdapter.adapt(res, type: :completion)
         .extend(Module.new { define_method(:__tools__) { tools } })
     end

@@ -112,8 +110,7 @@ module LLM
     # @param query [String] The search query.
     # @return [LLM::Response] The response from the LLM provider.
     def web_search(query:)
-      complete(query, tools: [server_tools[:web_search]])
-        .extend(LLM::Anthropic::Response::WebSearch)
+      ResponseAdapter.adapt(complete(query, tools: [server_tools[:web_search]]), type: :web_search)
     end

     private
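
The hunks above are the release's central refactor in miniature: hand-rolled `extend` calls on each response give way to a single adapter entry point, and `JSON.dump` gives way to the swappable `LLM.json` adapter (the new `data/lib/llm/json_adapter.rb` in the file list). A minimal before/after sketch, assuming only the `ResponseAdapter.adapt` signature shown in the hunk; `res` stands in for the Net::HTTPResponse returned by `execute`:

  # 2.1.0: mix the completion module in by hand at every call site
  response = LLM::Response.new(res).extend(LLM::Anthropic::Response::Completion)

  # 3.0.0: one dispatch point selects the module from a symbolic type
  response = ResponseAdapter.adapt(res, type: :completion)
  response.choices.each { puts _1.content }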
data/lib/llm/providers/deepseek/{format/completion_format.rb → request_adapter/completion.rb}

@@ -1,9 +1,9 @@
 # frozen_string_literal: true

-module LLM::DeepSeek::Format
+module LLM::DeepSeek::RequestAdapter
   ##
   # @private
-  class CompletionFormat
+  class Completion
     ##
     # @param [LLM::Message, Hash] message
     #   The message to format
@@ -12,30 +12,30 @@ module LLM::DeepSeek::Format
     end

     ##
-    # Formats the message for the DeepSeek chat completions API
+    # Adapts the message for the DeepSeek chat completions API
     # @return [Hash]
-    def format
+    def adapt
       catch(:abort) do
         if Hash === message
-          {role: message[:role], content: format_content(message[:content])}
+          {role: message[:role], content: adapt_content(message[:content])}
         elsif message.tool_call?
           {role: message.role, content: nil, tool_calls: message.extra[:original_tool_calls]}
         else
-          format_message
+          adapt_message
         end
       end
     end

     private

-    def format_content(content)
+    def adapt_content(content)
       case content
       when String
         content.to_s
       when LLM::Message
-        format_content(content.content)
+        adapt_content(content.content)
       when LLM::Function::Return
-        throw(:abort, {role: "tool", tool_call_id: content.id, content: JSON.dump(content.value)})
+        throw(:abort, {role: "tool", tool_call_id: content.id, content: LLM.json.dump(content.value)})
       when LLM::Object
         prompt_error!(content)
       else
@@ -43,22 +43,22 @@ module LLM::DeepSeek::Format
       end
     end

-    def format_message
+    def adapt_message
       case content
       when Array
-        format_array
+        adapt_array
       else
-        {role: message.role, content: format_content(content)}
+        {role: message.role, content: adapt_content(content)}
       end
     end

-    def format_array
+    def adapt_array
       if content.empty?
         nil
       elsif returns.any?
-        returns.map { {role: "tool", tool_call_id: _1.id, content: JSON.dump(_1.value)} }
+        returns.map { {role: "tool", tool_call_id: _1.id, content: LLM.json.dump(_1.value)} }
       else
-        {role: message.role, content: content.flat_map { format_content(_1) }}
+        {role: message.role, content: content.flat_map { adapt_content(_1) }}
       end
     end
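
For a plain user message the renamed adapter emits the same hash the old `CompletionFormat#format` did; only the names changed. A quick illustration, with return values traced through `adapt_message` and `adapt_content` above:

  message = LLM::Message.new(:user, "Hello")
  LLM::DeepSeek::RequestAdapter::Completion.new(message).adapt
  # => {role: :user, content: "Hello"}

A tool-call return still short-circuits via `throw(:abort, ...)`, now serializing its value with `LLM.json.dump` rather than `JSON.dump`.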
 
data/lib/llm/providers/deepseek/{format.rb → request_adapter.rb}

@@ -3,15 +3,15 @@
 class LLM::DeepSeek
   ##
   # @private
-  module Format
-    require_relative "format/completion_format"
+  module RequestAdapter
+    require_relative "request_adapter/completion"
     ##
     # @param [Array<LLM::Message>] messages
-    #   The messages to format
+    #   The messages to adapt
     # @return [Array<Hash>]
-    def format(messages, ...)
+    def adapt(messages, mode: nil)
       messages.filter_map do |message|
-        CompletionFormat.new(message).format
+        Completion.new(message).adapt
       end
     end

@@ -20,8 +20,8 @@ class LLM::DeepSeek
     ##
     # @param [Hash] params
     # @return [Hash]
-    def format_tools(tools)
-      (tools.nil? || tools.empty?) ? {} : {tools: tools.map { _1.format(self) }}
+    def adapt_tools(tools)
+      (tools.nil? || tools.empty?) ? {} : {tools: tools.map { _1.adapt(self) }}
     end
   end
 end
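
`adapt_tools` keeps the old `format_tools` contract: a nil or empty tool list contributes nothing to the request body. Roughly, where `tool` is any object responding to `adapt`:

  adapt_tools(nil)    # => {}
  adapt_tools([])     # => {}
  adapt_tools([tool]) # => {tools: [tool.adapt(self)]}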
data/lib/llm/providers/deepseek.rb

@@ -18,8 +18,8 @@ module LLM
   # bot.chat ["Tell me about this photo", File.open("/images/cat.jpg", "rb")]
   # bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
   class DeepSeek < OpenAI
-    require_relative "deepseek/format"
-    include DeepSeek::Format
+    require_relative "deepseek/request_adapter"
+    include DeepSeek::RequestAdapter

     ##
     # @param (see LLM::Provider#initialize)
data/lib/llm/providers/gemini/audio.rb

@@ -43,7 +43,7 @@ class LLM::Gemini
       res = @provider.complete [
         "Your task is to transcribe the contents of an audio file",
         "Your response should include the transcription, and nothing else",
-        LLM::Object.from_hash(value: LLM.File(file), kind: :local_file)
+        LLM::Object.from(value: LLM.File(file), kind: :local_file)
       ], params.merge(role: :user, model:)
       res.tap { _1.define_singleton_method(:text) { choices[0].content } }
     end
@@ -65,7 +65,7 @@ class LLM::Gemini
       res = @provider.complete [
         "Your task is to translate the contents of an audio file into English",
         "Your response should include the translation, and nothing else",
-        LLM::Object.from_hash(value: LLM.File(file), kind: :local_file)
+        LLM::Object.from(value: LLM.File(file), kind: :local_file)
       ], params.merge(role: :user, model:)
       res.tap { _1.define_singleton_method(:text) { choices[0].content } }
     end
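
The only change here is the constructor rename from `LLM::Object.from_hash` to `LLM::Object.from`, which recurs throughout the release; hand-built objects migrate one-for-one. A before/after sketch (filename illustrative):

  # 2.1.0
  LLM::Object.from_hash(value: LLM.File("speech.mp3"), kind: :local_file)
  # 3.0.0
  LLM::Object.from(value: LLM.File("speech.mp3"), kind: :local_file)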
data/lib/llm/providers/gemini/error_handler.rb

@@ -29,19 +29,19 @@ class LLM::Gemini
         if reason == "API_KEY_INVALID"
           raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
         else
-          raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
+          raise LLM::Error.new { _1.response = res }, "Unexpected response"
         end
       when Net::HTTPTooManyRequests
         raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
       else
-        raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
+        raise LLM::Error.new { _1.response = res }, "Unexpected response"
       end
     end

     private

     def body
-      @body ||= JSON.parse(res.body)
+      @body ||= LLM.json.load(res.body)
     end
   end
 end
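
Callers that rescued `LLM::ResponseError` for unexpected Gemini responses now see the base `LLM::Error` instead (see the reworked `data/lib/llm/error.rb` in the file list). A defensive-handling sketch under that assumption; `retry_after_backoff` is a hypothetical helper:

  begin
    llm.complete("Hello")
  rescue LLM::RateLimitError => e
    retry_after_backoff(e)  # hypothetical; 429s keep their dedicated class
  rescue LLM::Error => e
    warn e.response.inspect # the raised error still carries the raw response
  end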
data/lib/llm/providers/gemini/files.rb

@@ -23,9 +23,6 @@ class LLM::Gemini
   # bot.chat ["Tell me about this file", file]
   # bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
   class Files
-    require_relative "response/file"
-    require_relative "response/files"
-
     ##
     # Returns a new Files object
     # @param provider [LLM::Provider]
@@ -50,7 +47,7 @@ class LLM::Gemini
       query = URI.encode_www_form(params.merge!(key: key))
       req = Net::HTTP::Get.new("/v1beta/files?#{query}", headers)
       res = execute(request: req)
-      LLM::Response.new(res).extend(LLM::Gemini::Response::Files)
+      ResponseAdapter.adapt(res, type: :files)
     end

     ##
@@ -72,7 +69,7 @@ class LLM::Gemini
       file.with_io do |io|
         set_body_stream(req, io)
         res = execute(request: req)
-        LLM::Response.new(res).extend(LLM::Gemini::Response::File)
+        ResponseAdapter.adapt(res, type: :file)
       end
     end

@@ -92,7 +89,7 @@ class LLM::Gemini
       query = URI.encode_www_form(params.merge!(key: key))
       req = Net::HTTP::Get.new("/v1beta/#{file_id}?#{query}", headers)
       res = execute(request: req)
-      LLM::Response.new(res).extend(LLM::Gemini::Response::File)
+      ResponseAdapter.adapt(res, type: :file)
     end

     ##
@@ -130,7 +127,7 @@ class LLM::Gemini
       req["X-Goog-Upload-Command"] = "start"
       req["X-Goog-Upload-Header-Content-Length"] = file.bytesize
       req["X-Goog-Upload-Header-Content-Type"] = file.mime_type
-      req.body = JSON.dump(file: {display_name: File.basename(file.path)})
+      req.body = LLM.json.dump(file: {display_name: File.basename(file.path)})
       res = execute(request: req)
       res["x-goog-upload-url"]
     end
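
The last hunk is the "start" leg of Gemini's resumable upload: a metadata-only POST whose response header hands back the URL that receives the bytes. In outline, with header names as they appear in the diff (the second leg is the `with_io` upload shown earlier in this file):

  # Leg 1: announce the upload and learn where to send the bytes
  req["X-Goog-Upload-Command"] = "start"
  req.body = LLM.json.dump(file: {display_name: File.basename(file.path)})
  upload_url = execute(request: req)["x-goog-upload-url"]
  # Leg 2: stream the file contents to upload_url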
data/lib/llm/providers/gemini/images.rb

@@ -15,8 +15,7 @@ class LLM::Gemini
   # res = llm.images.create prompt: "A dog on a rocket to the moon"
   # IO.copy_stream res.images[0], "rocket.png"
   class Images
-    require_relative "response/image"
-    include Format
+    include RequestAdapter

     ##
     # Returns a new Images object
@@ -37,20 +36,16 @@ class LLM::Gemini
     # @param [Hash] params Other parameters (see Gemini docs)
     # @raise (see LLM::Provider#request)
     # @raise [LLM::NoImageError] when no images are returned
-    # @note
-    #   The prompt should make it clear you want to generate an image, or you
-    #   might unexpectedly receive a purely textual response. This is due to how
-    #   Gemini implements image generation under the hood.
     # @return [LLM::Response]
     def create(prompt:, model: "gemini-2.5-flash-image", **params)
       req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
-      body = JSON.dump({
+      body = LLM.json.dump({
         contents: [{parts: [{text: create_prompt}, {text: prompt}]}],
         generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
       }.merge!(params))
       req.body = body
       res = execute(request: req)
-      validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+      validate ResponseAdapter.adapt(res, type: :image)
     end

     ##
@@ -69,14 +64,14 @@ class LLM::Gemini
     # @return [LLM::Response]
     def edit(image:, prompt:, model: "gemini-2.5-flash-image", **params)
       req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
-      image = LLM::Object.from_hash(value: LLM.File(image), kind: :local_file)
-      body = JSON.dump({
-        contents: [{parts: [{text: edit_prompt}, {text: prompt}, format.format_content(image)]}],
+      image = LLM::Object.from(value: LLM.File(image), kind: :local_file)
+      body = LLM.json.dump({
+        contents: [{parts: [{text: edit_prompt}, {text: prompt}, adapter.adapt_content(image)]}],
         generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
       }.merge!(params)).b
       set_body_stream(req, StringIO.new(body))
       res = execute(request: req)
-      validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+      validate ResponseAdapter.adapt(res, type: :image)
     end

     ##
@@ -88,8 +83,8 @@

     private

-    def format
-      @format ||= CompletionFormat.new(nil)
+    def adapter
+      @adapter ||= Completion.new(nil)
     end

     def key
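
From the caller's side nothing changes; only the internals moved onto the adapter pair. For instance (assuming the `LLM.gemini` constructor from the gem's README; prompts and filenames are illustrative):

  llm = LLM.gemini(key: ENV["GEMINI_KEY"])
  res = llm.images.create(prompt: "A dog on a rocket to the moon")
  IO.copy_stream(res.images[0], "rocket.png")

  edited = llm.images.edit(image: "rocket.png", prompt: "Add a moon in the background")
  IO.copy_stream(edited.images[0], "rocket-moon.png")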
data/lib/llm/providers/gemini/models.rb

@@ -17,7 +17,6 @@ class LLM::Gemini
   # print "id: ", model.id, "\n"
   # end
   class Models
-    require_relative "response/models"
    include LLM::Utils

     ##
@@ -44,7 +43,7 @@ class LLM::Gemini
       query = URI.encode_www_form(params.merge!(key: key))
       req = Net::HTTP::Get.new("/v1beta/models?#{query}", headers)
       res = execute(request: req)
-      LLM::Response.new(res).extend(LLM::Gemini::Response::Models)
+      ResponseAdapter.adapt(res, type: :models)
     end

     private
data/lib/llm/providers/gemini/{format/completion_format.rb → request_adapter/completion.rb}

@@ -1,9 +1,9 @@
 # frozen_string_literal: true

-module LLM::Gemini::Format
+module LLM::Gemini::RequestAdapter
   ##
   # @private
-  class CompletionFormat
+  class Completion
     ##
     # @param [LLM::Message, Hash] message
     #   The message to format
@@ -12,40 +12,40 @@ module LLM::Gemini::Format
     end

     ##
-    # Formats the message for the Gemini chat completions API
+    # Adapts the message for the Gemini chat completions API
     # @return [Hash]
-    def format
+    def adapt
       catch(:abort) do
         if Hash === message
-          {role: message[:role], parts: format_content(message[:content])}
+          {role: message[:role], parts: adapt_content(message[:content])}
         elsif message.tool_call?
           {role: message.role, parts: message.extra[:original_tool_calls].map { {"functionCall" => _1} }}
         else
-          {role: message.role, parts: format_content(message.content)}
+          {role: message.role, parts: adapt_content(message.content)}
         end
       end
     end

-    def format_content(content)
+    def adapt_content(content)
       case content
       when Array
-        content.empty? ? throw(:abort, nil) : content.flat_map { format_content(_1) }
+        content.empty? ? throw(:abort, nil) : content.flat_map { adapt_content(_1) }
       when String
         [{text: content}]
       when LLM::Response
-        format_remote_file(content)
+        adapt_remote_file(content)
       when LLM::Message
-        format_content(content.content)
+        adapt_content(content.content)
       when LLM::Function::Return
         [{functionResponse: {name: content.name, response: content.value}}]
       when LLM::Object
-        format_object(content)
+        adapt_object(content)
       else
         prompt_error!(content)
       end
     end

-    def format_object(object)
+    def adapt_object(object)
       case object.kind
       when :image_url
         [{file_data: {mime_type: "image/*", file_uri: object.value.to_s}}]
@@ -53,13 +53,13 @@ module LLM::Gemini::Format
         file = object.value
         [{inline_data: {mime_type: file.mime_type, data: file.to_b64}}]
       when :remote_file
-        format_remote_file(object.value)
+        adapt_remote_file(object.value)
       else
         prompt_error!(object)
       end
     end

-    def format_remote_file(file)
+    def adapt_remote_file(file)
       return prompt_error!(file) unless file.file?
       [{file_data: {mime_type: file.mime_type, file_uri: file.uri}}]
     end
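
Concretely, the adapter maps each message into Gemini's role/parts shape. Two common inputs, traced through `adapt` and `adapt_content` above:

  adapter = LLM::Gemini::RequestAdapter::Completion.new(LLM::Message.new(:user, "Hello"))
  adapter.adapt
  # => {role: :user, parts: [{text: "Hello"}]}

  # A :local_file object becomes inline base64 data:
  # [{inline_data: {mime_type: file.mime_type, data: file.to_b64}}]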
data/lib/llm/providers/gemini/{format.rb → request_adapter.rb}

@@ -3,16 +3,16 @@
 class LLM::Gemini
   ##
   # @private
-  module Format
-    require_relative "format/completion_format"
+  module RequestAdapter
+    require_relative "request_adapter/completion"

     ##
     # @param [Array<LLM::Message>] messages
-    #   The messages to format
+    #   The messages to adapt
     # @return [Array<Hash>]
-    def format(messages)
+    def adapt(messages, mode: nil)
       messages.filter_map do |message|
-        CompletionFormat.new(message).format
+        Completion.new(message).adapt
       end
     end

@@ -21,7 +21,7 @@ class LLM::Gemini
     ##
     # @param [Hash] params
     # @return [Hash]
-    def format_schema(params)
+    def adapt_schema(params)
       return {} unless params and params[:schema]
       schema = params.delete(:schema)
       schema = schema.respond_to?(:object) ? schema.object : schema
@@ -31,10 +31,10 @@ class LLM::Gemini
     ##
     # @param [Hash] params
     # @return [Hash]
-    def format_tools(tools)
+    def adapt_tools(tools)
       return {} unless tools&.any?
       platform, functions = [tools.grep(LLM::ServerTool), tools.grep(LLM::Function)]
-      {tools: [*platform, {functionDeclarations: functions.map { _1.format(self) }}]}
+      {tools: [*platform, {functionDeclarations: functions.map { _1.adapt(self) }}]}
     end
   end
 end
data/lib/llm/providers/gemini/response_adapter/completion.rb (new file)

@@ -0,0 +1,67 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::ResponseAdapter
+  module Completion
+    ##
+    # (see LLM::Contract::Completion#messages)
+    def messages
+      adapt_choices
+    end
+    alias_method :choices, :messages
+
+    ##
+    # (see LLM::Contract::Completion#input_tokens)
+    def input_tokens
+      body.usageMetadata.promptTokenCount || 0
+    end
+
+    ##
+    # (see LLM::Contract::Completion#output_tokens)
+    def output_tokens
+      body.usageMetadata.candidatesTokenCount || 0
+    end
+
+    ##
+    # (see LLM::Contract::Completion#total_tokens)
+    def total_tokens
+      body.usageMetadata.totalTokenCount || 0
+    end
+
+    ##
+    # (see LLM::Contract::Completion#usage)
+    def usage
+      super
+    end
+
+    ##
+    # (see LLM::Contract::Completion#model)
+    def model
+      body.modelVersion
+    end
+
+    private
+
+    def adapt_choices
+      candidates.map.with_index do |choice, index|
+        content = choice.content || LLM::Object.new
+        role = content.role || "model"
+        parts = content.parts || [{"text" => choice.finishReason}]
+        text = parts.filter_map { _1["text"] }.join
+        tools = parts.filter_map { _1["functionCall"] }
+        extra = {index:, response: self, tool_calls: adapt_tool_calls(tools), original_tool_calls: tools}
+        LLM::Message.new(role, text, extra)
+      end
+    end
+
+    def adapt_tool_calls(tools)
+      (tools || []).map do |tool|
+        function = {name: tool.name, arguments: tool.args}
+        function
+      end
+    end
+
+    def candidates = body.candidates || []
+
+    include LLM::Contract::Completion
+  end
+end
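
Because the module closes by including `LLM::Contract::Completion`, a Gemini completion response now exposes the same accessors as every other provider's. Reading one, with values traced to the methods above:

  res.model          # body.modelVersion
  res.choices        # Array of LLM::Message, one per candidate
  res.input_tokens   # body.usageMetadata.promptTokenCount
  res.output_tokens  # body.usageMetadata.candidatesTokenCount
  res.total_tokens   # body.usageMetadata.totalTokenCount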
data/lib/llm/providers/gemini/{response → response_adapter}/embedding.rb

@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module LLM::Gemini::Response
+module LLM::Gemini::ResponseAdapter
   module Embedding
     def model = "text-embedding-004"
     def embeddings = body.dig("embedding", "values")

data/lib/llm/providers/gemini/{response → response_adapter}/file.rb

@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module LLM::Gemini::Response
+module LLM::Gemini::ResponseAdapter
   module File
     def name = respond_to?(:file) ? file.name : body.name
     def display_name = respond_to?(:file) ? file.displayName : body.displayName

data/lib/llm/providers/gemini/{response → response_adapter}/files.rb

@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module LLM::Gemini::Response
+module LLM::Gemini::ResponseAdapter
   module Files
     include ::Enumerable
     def each(&)

data/lib/llm/providers/gemini/{response → response_adapter}/image.rb

@@ -1,14 +1,14 @@
 # frozen_string_literal: true

-module LLM::Gemini::Response
+module LLM::Gemini::ResponseAdapter
   module Image
     ##
     # @return [Array<StringIO>]
     def images
       candidates.flat_map do |candidate|
-        parts = candidate&.dig(:content, :parts) || []
+        parts = candidate&.dig("content", "parts") || []
         parts.filter_map do
-          data = _1.dig(:inlineData, :data)
+          data = _1.dig("inlineData", "data")
           next unless data
           StringIO.new(data.unpack1("m0"))
         end

data/lib/llm/providers/gemini/{response → response_adapter}/models.rb

@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module LLM::Gemini::Response
+module LLM::Gemini::ResponseAdapter
   module Models
     include ::Enumerable
     def each(&)

data/lib/llm/providers/gemini/{response → response_adapter}/web_search.rb

@@ -1,8 +1,8 @@
 # frozen_string_literal: true

-module LLM::Gemini::Response
+module LLM::Gemini::ResponseAdapter
   ##
-  # The {LLM::Gemini::Response::WebSearch LLM::Gemini::Response::WebSearch}
+  # The {LLM::Gemini::ResponseAdapter::WebSearch LLM::Gemini::ResponseAdapter::WebSearch}
   # module provides methods for accessing web search results from a web search
   # tool call made via the {LLM::Provider#web_search LLM::Provider#web_search}
   # method.
@@ -11,7 +11,7 @@ module LLM::Gemini::Response
     # Returns one or more search results
     # @return [Array<LLM::Object>]
     def search_results
-      LLM::Object.from_hash(
+      LLM::Object.from(
        candidates[0]
          .groundingMetadata
          .groundingChunks
data/lib/llm/providers/gemini/response_adapter.rb (new file)

@@ -0,0 +1,42 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # @private
+  module ResponseAdapter
+    require_relative "response_adapter/completion"
+    require_relative "response_adapter/embedding"
+    require_relative "response_adapter/file"
+    require_relative "response_adapter/files"
+    require_relative "response_adapter/image"
+    require_relative "response_adapter/models"
+    require_relative "response_adapter/web_search"
+
+    module_function
+
+    ##
+    # @param [LLM::Response, Net::HTTPResponse] res
+    # @param [Symbol] type
+    # @return [LLM::Response]
+    def adapt(res, type:)
+      response = (LLM::Response === res) ? res : LLM::Response.new(res)
+      response.extend(select(type))
+    end
+
+    ##
+    # @api private
+    def select(type)
+      case type
+      when :completion then LLM::Gemini::ResponseAdapter::Completion
+      when :embedding then LLM::Gemini::ResponseAdapter::Embedding
+      when :file then LLM::Gemini::ResponseAdapter::File
+      when :files then LLM::Gemini::ResponseAdapter::Files
+      when :image then LLM::Gemini::ResponseAdapter::Image
+      when :models then LLM::Gemini::ResponseAdapter::Models
+      when :web_search then LLM::Gemini::ResponseAdapter::WebSearch
+      else
+        raise ArgumentError, "Unknown response adapter type: #{type.inspect}"
+      end
+    end
+  end
+end
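
The dispatch is deliberately closed: every adaptable response type is enumerated, and anything unrecognized fails fast. A usage sketch, where `res` is a Net::HTTPResponse or an existing LLM::Response (`adapt` avoids double-wrapping the latter):

  LLM::Gemini::ResponseAdapter.adapt(res, type: :embedding).embeddings
  LLM::Gemini::ResponseAdapter.adapt(res, type: :models).each { |model| p model }
  LLM::Gemini::ResponseAdapter.select(:bogus)
  # => ArgumentError: Unknown response adapter type: :bogus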