llm.rb 0.4.2 → 0.5.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. checksums.yaml +4 -4
  2. data/README.md +132 -84
  3. data/lib/json/schema/array.rb +5 -0
  4. data/lib/json/schema/boolean.rb +4 -0
  5. data/lib/json/schema/integer.rb +23 -1
  6. data/lib/json/schema/leaf.rb +11 -0
  7. data/lib/json/schema/null.rb +4 -0
  8. data/lib/json/schema/number.rb +23 -1
  9. data/lib/json/schema/object.rb +6 -2
  10. data/lib/json/schema/string.rb +26 -1
  11. data/lib/json/schema/version.rb +2 -0
  12. data/lib/json/schema.rb +10 -10
  13. data/lib/llm/buffer.rb +28 -10
  14. data/lib/llm/chat.rb +26 -1
  15. data/lib/llm/core_ext/ostruct.rb +14 -8
  16. data/lib/llm/file.rb +6 -1
  17. data/lib/llm/function.rb +81 -0
  18. data/lib/llm/message.rb +46 -1
  19. data/lib/llm/providers/anthropic/format/completion_format.rb +73 -0
  20. data/lib/llm/providers/anthropic/format.rb +7 -33
  21. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +51 -0
  22. data/lib/llm/providers/anthropic/response_parser.rb +1 -9
  23. data/lib/llm/providers/anthropic.rb +4 -3
  24. data/lib/llm/providers/gemini/audio.rb +4 -4
  25. data/lib/llm/providers/gemini/files.rb +5 -4
  26. data/lib/llm/providers/gemini/format/completion_format.rb +54 -0
  27. data/lib/llm/providers/gemini/format.rb +28 -27
  28. data/lib/llm/providers/gemini/images.rb +9 -4
  29. data/lib/llm/providers/gemini/response_parser/completion_parser.rb +46 -0
  30. data/lib/llm/providers/gemini/response_parser.rb +13 -20
  31. data/lib/llm/providers/gemini.rb +3 -12
  32. data/lib/llm/providers/ollama/format/completion_format.rb +72 -0
  33. data/lib/llm/providers/ollama/format.rb +10 -30
  34. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +42 -0
  35. data/lib/llm/providers/ollama/response_parser.rb +8 -11
  36. data/lib/llm/providers/ollama.rb +3 -11
  37. data/lib/llm/providers/openai/audio.rb +6 -6
  38. data/lib/llm/providers/openai/files.rb +3 -3
  39. data/lib/llm/providers/openai/format/completion_format.rb +81 -0
  40. data/lib/llm/providers/openai/format/respond_format.rb +69 -0
  41. data/lib/llm/providers/openai/format.rb +25 -58
  42. data/lib/llm/providers/openai/images.rb +4 -2
  43. data/lib/llm/providers/openai/response_parser/completion_parser.rb +55 -0
  44. data/lib/llm/providers/openai/response_parser/respond_parser.rb +56 -0
  45. data/lib/llm/providers/openai/response_parser.rb +8 -44
  46. data/lib/llm/providers/openai/responses.rb +10 -11
  47. data/lib/llm/providers/openai.rb +5 -16
  48. data/lib/llm/response/{output.rb → respond.rb} +2 -2
  49. data/lib/llm/response.rb +1 -1
  50. data/lib/llm/version.rb +1 -1
  51. data/lib/llm.rb +28 -0
  52. data/llm.gemspec +1 -0
  53. metadata +28 -3
data/lib/llm/buffer.rb CHANGED
@@ -28,6 +28,22 @@ module LLM
       @completed.each { yield(_1) }
     end
 
+    ##
+    # Returns an array of unread messages
+    # @see LLM::Message#read?
+    # @see LLM::Message#read!
+    # @return [Array<LLM::Message>]
+    def unread
+      reject(&:read?)
+    end
+
+    ##
+    # Find a message (in descending order)
+    # @return [LLM::Message, nil]
+    def find(...)
+      reverse_each.find(...)
+    end
+
     ##
     # @param [[LLM::Message, Hash]] item
     #  A message and its parameters
@@ -48,7 +64,7 @@ module LLM
     private
 
     def empty!
-      message, params, method = @pending[-1]
+      message, params, method = @pending.pop
       if method == :complete
         complete!(message, params)
       elsif method == :respond
@@ -59,24 +75,26 @@ module LLM
     end
 
     def complete!(message, params)
-      messages = @pending[0..-2].map { _1[0] }
+      pendings = @pending.map { _1[0] }
+      messages = [*@completed, *pendings]
       completion = @provider.complete(
         message.content,
         message.role,
         **params.merge(messages:)
       )
-      @completed.concat([*messages, message, completion.choices[0]])
+      @completed.concat([*pendings, message, completion.choices[0]])
       @pending.clear
     end
 
     def respond!(message, params)
-      input = @pending[0..-2].map { _1[0] }
-      @response = @provider.responses.create(
-        message.content,
-        message.role,
-        **params.merge(input:).merge(@response ? {previous_response_id: @response.id} : {})
-      )
-      @completed.concat([*input, message, @response.outputs[0]])
+      pendings = @pending.map { _1[0] }
+      input = [*pendings]
+      params = [
+        params.merge(input:),
+        @response ? {previous_response_id: @response.id} : {}
+      ].inject({}, &:merge!)
+      @response = @provider.responses.create(message.content, message.role, **params)
+      @completed.concat([*pendings, message, @response.outputs[0]])
       @pending.clear
     end
   end
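
The new Buffer#unread and Buffer#find helpers pair with the read-tracking added to LLM::Message (see data/lib/llm/message.rb below). A minimal usage sketch, assuming a lazy chat whose messages object is an LLM::Buffer (the provider, key, and prompt are hypothetical):

  llm = LLM.openai(ENV["KEY"])
  bot = LLM::Chat.new(llm).lazy
  bot.chat "Hello!"
  # #find searches in descending order, so the newest match wins
  msg = bot.messages.find(&:assistant?)
  msg.read!
  bot.messages.unread # => messages not yet marked with LLM::Message#read!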
data/lib/llm/chat.rb CHANGED
@@ -37,7 +37,7 @@ module LLM
       @provider = provider
       @params = params.merge!(model:, schema:)
       @lazy = false
-      @messages = []
+      @messages = [].extend(Array)
     end
 
     ##
@@ -108,14 +108,39 @@ module LLM
       @lazy
     end
 
+    ##
+    # @return [String]
     def inspect
       "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
       "@provider=#{@provider.class}, @params=#{@params.inspect}, " \
       "@messages=#{@messages.inspect}, @lazy=#{@lazy.inspect}>"
     end
 
+    ##
+    # Returns an array of functions that have yet to be called
+    # @return [Array<LLM::Function>]
+    def functions
+      messages
+        .select(&:assistant?)
+        .flat_map(&:functions)
+        .reject(&:called?)
+    end
+
     private
 
+    ##
+    # @private
+    module Array
+      def find(...)
+        reverse_each.find(...)
+      end
+
+      def unread
+        reject(&:read?)
+      end
+    end
+    private_constant :Array
+
     def respond!(prompt, role, params)
       @provider.responses.create(
         prompt,
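
LLM::Chat#functions surfaces tool calls the assistant has requested but that have not run yet. A rough sketch of the intended loop, assuming a tool has been registered as an LLM::Function (see data/lib/llm/function.rb below; the prompt is hypothetical):

  bot.chat "Run the uname command"
  bot.functions # => [#<LLM::Function ...>], uncalled functions from assistant messages
  # call each pending function and feed its Return value back into the chat
  bot.functions.each { |fn| bot.chat(fn.call) }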
data/lib/llm/core_ext/ostruct.rb CHANGED
@@ -8,17 +8,19 @@ class OpenStruct
     #  obj = OpenStruct.from_hash(person: {name: 'John'})
     #  obj.person.name  # => 'John'
     #  obj.person.class # => OpenStruct
-    # @param [Hash] hash_obj
+    # @param [Hash, Array] obj
     #  A Hash object
     # @return [OpenStruct]
-    #  An OpenStruct object initialized by visiting `hash_obj` with
-    #  recursion
-    def from_hash(hash_obj)
-      visited_object = {}
-      hash_obj.each do |key, value|
-        visited_object[key] = walk(value)
+    #  An OpenStruct object initialized by visiting `obj` with recursion
+    def from_hash(obj)
+      case obj
+      when self then from_hash(obj.to_h)
+      when Array then obj.map { |v| from_hash(v) }
+      else
+        visited = {}
+        obj.each { visited[_1] = walk(_2) }
+        new(visited)
       end
-      new(visited_object)
     end
 
     private
@@ -34,4 +36,8 @@ class OpenStruct
     end
   end
   extend FromHash
+
+  def to_json(...)
+    to_h.to_json(...)
+  end
 end
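
A quick sketch of the extended core extension: from_hash now recurses into arrays (and accepts OpenStruct input), and to_json serializes through to_h:

  require "json"
  objs = OpenStruct.from_hash([{name: "John"}, {name: "Jane"}])
  objs.map(&:name) # => ["John", "Jane"]
  objs[0].to_json  # => "{\"name\":\"John\"}"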
data/lib/llm/file.rb CHANGED
@@ -72,5 +72,10 @@ end
 #  The path to a file
 # @return [LLM::File]
 def LLM.File(path)
-  LLM::File.new(path)
+  case path
+  when LLM::File, LLM::Response::File
+    path
+  else
+    LLM::File.new(path)
+  end
 end
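
LLM.File() is now idempotent: values that are already LLM::File or LLM::Response::File pass through untouched, which lets call sites accept plain path strings. For example (the path is hypothetical):

  file = LLM.File("/audio/haiku.mp3") # => #<LLM::File ...>
  LLM.File(file).equal?(file)         # => true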
data/lib/llm/function.rb ADDED
@@ -0,0 +1,81 @@
+# frozen_string_literal: true
+
+class LLM::Function
+  class Return < Struct.new(:id, :value)
+  end
+
+  ##
+  # Returns function arguments
+  # @return [Array, nil]
+  attr_accessor :arguments
+
+  ##
+  # Returns the function ID
+  # @return [String, nil]
+  attr_accessor :id
+
+  ##
+  # @param [String] name The function name
+  # @yieldparam [LLM::Function] self The function object
+  def initialize(name, &b)
+    @name = name
+    @schema = JSON::Schema.new
+    yield(self)
+  end
+
+  ##
+  # Set the function description
+  # @param [String] str The function description
+  # @return [void]
+  def description(str)
+    @description = str
+  end
+
+  ##
+  # @yieldparam [JSON::Schema] schema The schema object
+  # @return [void]
+  def params
+    @params = yield(@schema)
+  end
+
+  ##
+  # Set the function implementation
+  # @param [Proc] b The function implementation
+  # @return [void]
+  def define(&b)
+    @runner = b
+  end
+
+  ##
+  # Call the function
+  # @return [LLM::Function::Return]
+  #  The result of the function call
+  def call
+    Return.new id, @runner.call(arguments)
+  ensure
+    @called = true
+  end
+
+  ##
+  # Returns true when a function has been called
+  # @return [Boolean]
+  def called?
+    @called
+  end
+
+  ##
+  # @return [Hash]
+  def format(provider)
+    case provider.class.to_s
+    when "LLM::Gemini"
+      {name: @name, description: @description, parameters: @params}.compact
+    when "LLM::Anthropic"
+      {name: @name, description: @description, input_schema: @params}.compact
+    else
+      {
+        type: "function", name: @name,
+        function: {name: @name, description: @description, parameters: @params}
+      }.compact
+    end
+  end
+end
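
A sketch of how a function might be declared and invoked with this API. The schema DSL calls (schema.object, schema.string.required) are assumptions based on the json/schema classes touched in this release, and arguments is normally populated by the provider rather than by hand:

  fn = LLM::Function.new(:system) do |fn|
    fn.description "Run a shell command"
    fn.params do |schema|
      schema.object(command: schema.string.required)
    end
    fn.define do |params|
      {success: Kernel.system(params.command)}
    end
  end
  fn.arguments = OpenStruct.new(command: "uname") # normally set from a tool call
  fn.call    # => #<struct LLM::Function::Return id=nil, value={success: true}>
  fn.called? # => true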
data/lib/llm/message.rb CHANGED
@@ -64,12 +64,57 @@ module LLM
       role == "assistant" || role == "model"
     end
 
+    ##
+    # @return [Array<LLM::Function>]
+    def functions
+      @functions ||= tool_calls.map do |fn|
+        function = LLM.functions[fn.name].dup
+        function.tap { _1.id = fn.id }
+        function.tap { _1.arguments = fn.arguments }
+      end
+    end
+
+    ##
+    # @return [Boolean]
+    #  Returns true when the message requests a function call
+    def tool_call?
+      tool_calls.any?
+    end
+
+    ##
+    # Returns true when the message is a system message
+    # @return [Boolean]
+    def system?
+      role == "system"
+    end
+
+    ##
+    # Marks the message as read
+    # @return [void]
+    def read!
+      @read = true
+    end
+
+    ##
+    # Returns true when the message has been read
+    # @return [Boolean]
+    def read?
+      @read
+    end
+
     ##
     # Returns a string representation of the message
     # @return [String]
     def inspect
       "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
-      "role=#{role.inspect} content=#{content.inspect}>"
+      "tool_call=#{tool_calls.any?} role=#{role.inspect} " \
+      "content=#{content.inspect}>"
+    end
+
+    private
+
+    def tool_calls
+      @tool_calls ||= OpenStruct.from_hash(@extra[:tool_calls] || [])
     end
   end
 end
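
A sketch of the new predicates in a chat flow, assuming an assistant reply that requested a tool call:

  msg = bot.messages.find(&:assistant?)
  msg.tool_call? # => true when the reply contains tool calls
  msg.functions  # => [LLM::Function] copies with id and arguments populated
  msg.read!      # marks the message as read
  msg.read?      # => true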
data/lib/llm/providers/anthropic/format/completion_format.rb ADDED
@@ -0,0 +1,73 @@
+# frozen_string_literal: true
+
+module LLM::Anthropic::Format
+  ##
+  # @private
+  class CompletionFormat
+    ##
+    # @param [LLM::Message, Hash] message
+    #  The message to format
+    def initialize(message)
+      @message = message
+    end
+
+    ##
+    # Formats the message for the Anthropic chat completions API
+    # @return [Hash]
+    def format
+      catch(:abort) do
+        if Hash === message
+          {role: message[:role], content: format_content(message[:content])}
+        else
+          format_message
+        end
+      end
+    end
+
+    private
+
+    def format_message
+      if message.tool_call?
+        {role: message.role, content: message.extra[:original_tool_calls]}
+      else
+        {role: message.role, content: format_content(content)}
+      end
+    end
+
+    ##
+    # @param [String, URI] content
+    #  The content to format
+    # @return [String, Hash]
+    #  The formatted content
+    def format_content(content)
+      case content
+      when Hash
+        content.empty? ? throw(:abort, nil) : [content]
+      when Array
+        content.empty? ? throw(:abort, nil) : content.flat_map { format_content(_1) }
+      when URI
+        [{type: :image, source: {type: "url", url: content.to_s}}]
+      when LLM::File
+        if content.image?
+          [{type: :image, source: {type: "base64", media_type: content.mime_type, data: content.to_b64}}]
+        else
+          raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                         "is not an image, and therefore not supported by the " \
+                                         "Anthropic API"
+        end
+      when String
+        [{type: :text, text: content}]
+      when LLM::Message
+        format_content(content.content)
+      when LLM::Function::Return
+        {type: "tool_result", tool_use_id: content.id, content: content.value}
+      else
+        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                       "is not supported by the Anthropic API"
+      end
+    end
+
+    def message = @message
+    def content = message.content
+  end
+end
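
For reference, a plain text message run through this private formatter would produce roughly the following shape (illustrative only):

  message = LLM::Message.new(:user, "Hello")
  LLM::Anthropic::Format::CompletionFormat.new(message).format
  # => {role: :user, content: [{type: :text, text: "Hello"}]}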
data/lib/llm/providers/anthropic/format.rb CHANGED
@@ -4,49 +4,23 @@ class LLM::Anthropic
   ##
   # @private
   module Format
+    require_relative "format/completion_format"
+
     ##
     # @param [Array<LLM::Message>] messages
     #  The messages to format
     # @return [Array<Hash>]
     def format(messages)
-      messages.map do
-        if Hash === _1
-          {role: _1[:role], content: format_content(_1[:content])}
-        else
-          {role: _1.role, content: format_content(_1.content)}
-        end
+      messages.filter_map do
+        CompletionFormat.new(_1).format
       end
     end
 
     private
 
-    ##
-    # @param [String, URI] content
-    #  The content to format
-    # @return [String, Hash]
-    #  The formatted content
-    def format_content(content)
-      case content
-      when Array
-        content.flat_map { format_content(_1) }
-      when URI
-        [{type: :image, source: {type: "url", url: content.to_s}}]
-      when LLM::File
-        if content.image?
-          [{type: :image, source: {type: "base64", media_type: content.mime_type, data: content.to_b64}}]
-        else
-          raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-                                         "is not an image, and therefore not supported by the " \
-                                         "Anthropic API"
-        end
-      when String
-        [{type: :text, text: content}]
-      when LLM::Message
-        format_content(content.content)
-      else
-        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-                                       "is not supported by the Anthropic API"
-      end
+    def format_tools(tools)
+      return {} unless tools
+      {tools: tools.map { _1.format(self) }}
     end
   end
 end
data/lib/llm/providers/anthropic/response_parser/completion_parser.rb ADDED
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+module LLM::Anthropic::ResponseParser
+  ##
+  # @private
+  class CompletionParser
+    def initialize(body)
+      @body = OpenStruct.from_hash(body)
+    end
+
+    def format(response)
+      {
+        model:,
+        prompt_tokens:,
+        completion_tokens:,
+        total_tokens:,
+        choices: format_choices(response)
+      }
+    end
+
+    private
+
+    def format_choices(response)
+      texts.map.with_index do |choice, index|
+        extra = {index:, response:, tool_calls: format_tool_calls(tools), original_tool_calls: tools}
+        LLM::Message.new(role, choice.text, extra)
+      end
+    end
+
+    def format_tool_calls(tools)
+      (tools || []).filter_map do |tool|
+        tool = {
+          id: tool.id,
+          name: tool.name,
+          arguments: tool.input
+        }
+        OpenStruct.new(tool)
+      end
+    end
+
+    def body = @body
+    def role = body.role
+    def model = body.model
+    def prompt_tokens = body.usage.input_tokens
+    def completion_tokens = body.usage.output_tokens
+    def total_tokens = body.usage.total_tokens
+    def parts = body.content
+    def texts = parts.select { _1["type"] == "text" }
+    def tools = parts.select { _1["type"] == "tool_use" }
+  end
+end
data/lib/llm/providers/anthropic/response_parser.rb CHANGED
@@ -17,15 +17,7 @@ class LLM::Anthropic
     #  The response body from the LLM provider
     # @return [Hash]
     def parse_completion(body)
-      {
-        model: body["model"],
-        choices: body["content"].map do
-          # TODO: don't hardcode role
-          LLM::Message.new("assistant", _1["text"], {response: self})
-        end,
-        prompt_tokens: body.dig("usage", "input_tokens"),
-        completion_tokens: body.dig("usage", "output_tokens")
-      }
+      CompletionParser.new(body).format(self)
     end
   end
 end
data/lib/llm/providers/anthropic.rb CHANGED
@@ -7,6 +7,7 @@ module LLM
   class Anthropic < Provider
     require_relative "anthropic/error_handler"
     require_relative "anthropic/response_parser"
+    require_relative "anthropic/response_parser/completion_parser"
    require_relative "anthropic/format"
    require_relative "anthropic/models"
    include Format
@@ -49,11 +50,11 @@ module LLM
     # @raise [LLM::Error::PromptError]
     #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model: default_model, max_tokens: 1024, **params)
-      params = {max_tokens:, model:}.merge!(params)
+    def complete(prompt, role = :user, model: default_model, max_tokens: 1024, tools: nil, **params)
+      params = [{max_tokens:, model:}, format_tools(tools), params].inject({}, &:merge!).compact
       req = Net::HTTP::Post.new("/v1/messages", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-      body = JSON.dump({messages: format(messages)}.merge!(params))
+      body = JSON.dump({messages: [format(messages)].flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
       res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
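
With the new tools: parameter, each tool is serialized via LLM::Function#format and merged into the request body. A hedged sketch (system_tool stands in for a hypothetical LLM::Function):

  llm = LLM.anthropic(ENV["KEY"])
  res = llm.complete("Run the uname command", :user, tools: [system_tool])
  res.choices[0].functions.map(&:arguments) # inspect the requested tool calls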
data/lib/llm/providers/gemini/audio.rb CHANGED
@@ -34,7 +34,7 @@ class LLM::Gemini
     #  res = llm.audio.create_transcription(file: LLM::File("/rocket.mp3"))
     #  res.text # => "A dog on a rocket to the moon"
     # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
-    # @param [LLM::File, LLM::Response::File] file The input audio
+    # @param [String, LLM::File, LLM::Response::File] file The input audio
     # @param [String] model The model to use
     # @param [Hash] params Other parameters (see Gemini docs)
     # @raise (see LLM::Provider#request)
@@ -43,7 +43,7 @@ class LLM::Gemini
       res = @provider.complete [
         "Your task is to transcribe the contents of an audio file",
         "Your response should include the transcription, and nothing else",
-        file
+        LLM.File(file)
       ], :user, model:, **params
       LLM::Response::AudioTranscription
         .new(res)
@@ -58,7 +58,7 @@ class LLM::Gemini
     #  res = llm.audio.create_translation(file: LLM::File("/bismillah.mp3"))
     #  res.text # => "In the name of Allah, the Beneficent, the Merciful."
     # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
-    # @param [LLM::File, LLM::Response::File] file The input audio
+    # @param [String, LLM::File, LLM::Response::File] file The input audio
     # @param [String] model The model to use
     # @param [Hash] params Other parameters (see Gemini docs)
     # @raise (see LLM::Provider#request)
@@ -67,7 +67,7 @@ class LLM::Gemini
       res = @provider.complete [
         "Your task is to translate the contents of an audio file into English",
         "Your response should include the translation, and nothing else",
-        file
+        LLM.File(file)
       ], :user, model:, **params
       LLM::Response::AudioTranslation
         .new(res)
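
Because inputs are now coerced with LLM.File, a plain path string works end to end (the path is hypothetical):

  llm = LLM.gemini(ENV["KEY"])
  res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
  res.text # => "A dog on a rocket to the moon"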
data/lib/llm/providers/gemini/files.rb CHANGED
@@ -19,7 +19,7 @@ class LLM::Gemini
   #
   #  llm = LLM.gemini(ENV["KEY"])
   #  bot = LLM::Chat.new(llm).lazy
-  #  file = llm.files.create file: LLM::File("/audio/haiku.mp3")
+  #  file = llm.files.create file: "/audio/haiku.mp3"
   #  bot.chat(file)
   #  bot.chat("Describe the audio file I sent to you")
   #  bot.chat("The audio file is the first message I sent to you.")
@@ -30,7 +30,7 @@ class LLM::Gemini
   #
   #  llm = LLM.gemini(ENV["KEY"])
   #  bot = LLM::Chat.new(llm).lazy
-  #  file = llm.files.create file: LLM::File("/audio/haiku.mp3")
+  #  file = llm.files.create file: "/audio/haiku.mp3"
   #  bot.chat(["Describe the audio file I sent to you", file])
   #  bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
   class Files
@@ -71,13 +71,14 @@ class LLM::Gemini
     # Create a file
     # @example
     #  llm = LLM.gemini(ENV["KEY"])
-    #  res = llm.files.create file: LLM::File("/audio/haiku.mp3"),
+    #  res = llm.files.create file: "/audio/haiku.mp3"
     # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
-    # @param [File] file The file
+    # @param [String, LLM::File] file The file
     # @param [Hash] params Other parameters (see Gemini docs)
     # @raise (see LLM::Provider#request)
     # @return [LLM::Response::File]
     def create(file:, **params)
+      file = LLM.File(file)
       req = Net::HTTP::Post.new(request_upload_url(file:), {})
       req["content-length"] = file.bytesize
       req["X-Goog-Upload-Offset"] = 0
data/lib/llm/providers/gemini/format/completion_format.rb ADDED
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Format
+  ##
+  # @private
+  class CompletionFormat
+    ##
+    # @param [LLM::Message, Hash] message
+    #  The message to format
+    def initialize(message)
+      @message = message
+    end
+
+    ##
+    # Formats the message for the Gemini chat completions API
+    # @return [Hash]
+    def format
+      catch(:abort) do
+        if Hash === message
+          {role: message[:role], parts: format_content(message[:content])}
+        elsif message.tool_call?
+          {role: message.role, parts: message.extra[:original_tool_calls].map { {"functionCall" => _1} }}
+        else
+          {role: message.role, parts: format_content(message.content)}
+        end
+      end
+    end
+
+    def format_content(content)
+      case content
+      when Array
+        content.empty? ? throw(:abort, nil) : content.flat_map { format_content(_1) }
+      when LLM::Response::File
+        file = content
+        [{file_data: {mime_type: file.mime_type, file_uri: file.uri}}]
+      when LLM::File
+        file = content
+        [{inline_data: {mime_type: file.mime_type, data: file.to_b64}}]
+      when String
+        [{text: content}]
+      when LLM::Message
+        format_content(content.content)
+      when LLM::Function::Return
+        [{text: content.value}]
+      else
+        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                       "is not supported by the Gemini API"
+      end
+    end
+
+    def message = @message
+    def content = message.content
+  end
+end
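
As with the Anthropic formatter, a mixed prompt maps onto Gemini's parts array. Roughly (illustrative; the path is hypothetical):

  message = LLM::Message.new(:user, ["Describe this image", LLM.File("/images/haiku.png")])
  LLM::Gemini::Format::CompletionFormat.new(message).format
  # => {role: :user,
  #     parts: [{text: "Describe this image"},
  #             {inline_data: {mime_type: "image/png", data: "<base64>"}}]}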