llm.rb 4.1.0 → 4.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. checksums.yaml +4 -4
  2. data/LICENSE +2 -2
  3. data/README.md +241 -166
  4. data/lib/llm/agent.rb +65 -37
  5. data/lib/llm/bot.rb +118 -30
  6. data/lib/llm/buffer.rb +6 -0
  7. data/lib/llm/function/tracing.rb +19 -0
  8. data/lib/llm/function.rb +28 -3
  9. data/lib/llm/json_adapter.rb +12 -12
  10. data/lib/llm/message.rb +19 -4
  11. data/lib/llm/prompt.rb +85 -0
  12. data/lib/llm/provider.rb +62 -10
  13. data/lib/llm/providers/anthropic/error_handler.rb +27 -5
  14. data/lib/llm/providers/anthropic/files.rb +22 -16
  15. data/lib/llm/providers/anthropic/models.rb +4 -3
  16. data/lib/llm/providers/anthropic.rb +6 -5
  17. data/lib/llm/providers/deepseek.rb +3 -3
  18. data/lib/llm/providers/gemini/error_handler.rb +34 -12
  19. data/lib/llm/providers/gemini/files.rb +19 -14
  20. data/lib/llm/providers/gemini/images.rb +4 -3
  21. data/lib/llm/providers/gemini/models.rb +4 -3
  22. data/lib/llm/providers/gemini.rb +9 -7
  23. data/lib/llm/providers/llamacpp.rb +3 -3
  24. data/lib/llm/providers/ollama/error_handler.rb +28 -6
  25. data/lib/llm/providers/ollama/models.rb +4 -3
  26. data/lib/llm/providers/ollama.rb +9 -7
  27. data/lib/llm/providers/openai/audio.rb +10 -7
  28. data/lib/llm/providers/openai/error_handler.rb +41 -14
  29. data/lib/llm/providers/openai/files.rb +19 -14
  30. data/lib/llm/providers/openai/images.rb +10 -7
  31. data/lib/llm/providers/openai/models.rb +4 -3
  32. data/lib/llm/providers/openai/moderations.rb +4 -3
  33. data/lib/llm/providers/openai/responses.rb +10 -7
  34. data/lib/llm/providers/openai/vector_stores.rb +34 -23
  35. data/lib/llm/providers/openai.rb +9 -7
  36. data/lib/llm/providers/xai.rb +3 -3
  37. data/lib/llm/providers/zai.rb +2 -2
  38. data/lib/llm/schema/object.rb +4 -4
  39. data/lib/llm/schema.rb +16 -2
  40. data/lib/llm/server_tool.rb +3 -3
  41. data/lib/llm/session/deserializer.rb +36 -0
  42. data/lib/llm/session.rb +3 -0
  43. data/lib/llm/tracer/logger.rb +192 -0
  44. data/lib/llm/tracer/null.rb +49 -0
  45. data/lib/llm/tracer/telemetry.rb +255 -0
  46. data/lib/llm/tracer.rb +134 -0
  47. data/lib/llm/version.rb +1 -1
  48. data/lib/llm.rb +4 -3
  49. data/llm.gemspec +6 -3
  50. metadata +41 -5
  51. data/lib/llm/builder.rb +0 -79
data/lib/llm/agent.rb CHANGED
@@ -6,15 +6,10 @@ module LLM
6
6
  # reusable, preconfigured assistants with defaults for model,
7
7
  # tools, schema, and instructions.
8
8
  #
9
- # @note
10
- # Unlike {LLM::Bot LLM::Bot}, this class will automatically run
11
- # tool calls for you.
12
- #
13
- # @note
14
- # Instructions are injected only on the first request.
15
- #
16
- # @note
17
- # This idea originally came from RubyLLM and was adapted to llm.rb.
9
+ # **Notes:**
10
+ # * Instructions are injected only on the first request.
11
+ # * An agent will automatically execute tool calls (unlike {LLM::Session LLM::Session}).
12
+ # * The idea originally came from RubyLLM and was adapted to llm.rb.
18
13
  #
19
14
  # @example
20
15
  # class SystemAdmin < LLM::Agent
@@ -26,7 +21,7 @@ module LLM
26
21
  #
27
22
  # llm = LLM.openai(key: ENV["KEY"])
28
23
  # agent = SystemAdmin.new(llm)
29
- # agent.chat("Run 'date'")
24
+ # agent.talk("Run 'date'")
30
25
  class Agent
31
26
  ##
32
27
  # Set or get the default model
@@ -85,7 +80,7 @@ module LLM
85
80
  def initialize(provider, params = {})
86
81
  defaults = {model: self.class.model, tools: self.class.tools, schema: self.class.schema}.compact
87
82
  @provider = provider
88
- @bot = LLM::Bot.new(provider, defaults.merge(params))
83
+ @ses = LLM::Session.new(provider, defaults.merge(params))
89
84
  @instructions_applied = false
90
85
  end
91
86
 
@@ -100,19 +95,20 @@ module LLM
100
95
  # @example
101
96
  # llm = LLM.openai(key: ENV["KEY"])
102
97
  # agent = LLM::Agent.new(llm)
103
- # response = agent.chat("Hello, what is your name?")
98
+ # response = agent.talk("Hello, what is your name?")
104
99
  # puts response.choices[0].content
105
- def chat(prompt, params = {})
100
+ def talk(prompt, params = {})
106
101
  i, max = 0, Integer(params.delete(:max_tool_rounds) || 10)
107
- res = @bot.chat(apply_instructions(prompt), params)
108
- until @bot.functions.empty?
102
+ res = @ses.talk(apply_instructions(prompt), params)
103
+ until @ses.functions.empty?
109
104
  raise LLM::ToolLoopError, "pending tool calls remain" if i >= max
110
- res = @bot.chat @bot.functions.map(&:call), params
105
+ res = @ses.talk @ses.functions.map(&:call), params
111
106
  i += 1
112
107
  end
113
108
  @instructions_applied = true
114
109
  res
115
110
  end
111
+ alias_method :chat, :talk
116
112
 
117
113
  ##
118
114
  # Maintain a conversation via the responses API.
@@ -130,10 +126,10 @@ module LLM
130
126
  # puts res.output_text
131
127
  def respond(prompt, params = {})
132
128
  i, max = 0, Integer(params.delete(:max_tool_rounds) || 10)
133
- res = @bot.respond(apply_instructions(prompt), params)
134
- until @bot.functions.empty?
129
+ res = @ses.respond(apply_instructions(prompt), params)
130
+ until @ses.functions.empty?
135
131
  raise LLM::ToolLoopError, "pending tool calls remain" if i >= max
136
- res = @bot.respond @bot.functions.map(&:call), params
132
+ res = @ses.respond @ses.functions.map(&:call), params
137
133
  i += 1
138
134
  end
139
135
  @instructions_applied = true
@@ -143,26 +139,29 @@ module LLM
143
139
  ##
144
140
  # @return [LLM::Buffer<LLM::Message>]
145
141
  def messages
146
- @bot.messages
142
+ @ses.messages
147
143
  end
148
144
 
149
145
  ##
150
146
  # @return [Array<LLM::Function>]
151
147
  def functions
152
- @bot.functions
148
+ @ses.functions
153
149
  end
154
150
 
155
151
  ##
156
152
  # @return [LLM::Object]
157
153
  def usage
158
- @bot.usage
154
+ @ses.usage
159
155
  end
160
156
 
161
157
  ##
162
- # @return [LLM::Builder]
163
- def build_prompt(&)
164
- @bot.build_prompt(&)
158
+ # @param (see LLM::Session#prompt)
159
+ # @return (see LLM::Session#prompt)
160
+ # @see LLM::Session#prompt
161
+ def prompt(&b)
162
+ @ses.prompt(&b)
165
163
  end
164
+ alias_method :build_prompt, :prompt
166
165
 
167
166
  ##
168
167
  # @param [String] url
@@ -170,7 +169,7 @@ module LLM
170
169
  # @return [LLM::Object]
171
170
  # Returns a tagged object
172
171
  def image_url(url)
173
- @bot.image_url(url)
172
+ @ses.image_url(url)
174
173
  end
175
174
 
176
175
  ##
@@ -179,7 +178,7 @@ module LLM
179
178
  # @return [LLM::Object]
180
179
  # Returns a tagged object
181
180
  def local_file(path)
182
- @bot.local_file(path)
181
+ @ses.local_file(path)
183
182
  end
184
183
 
185
184
  ##
@@ -188,25 +187,54 @@ module LLM
188
187
  # @return [LLM::Object]
189
188
  # Returns a tagged object
190
189
  def remote_file(res)
191
- @bot.remote_file(res)
190
+ @ses.remote_file(res)
191
+ end
192
+
193
+ ##
194
+ # @return [LLM::Tracer]
195
+ # Returns an LLM tracer
196
+ def tracer
197
+ @ses.tracer
198
+ end
199
+
200
+ ##
201
+ # Returns the model an Agent is actively using
202
+ # @return [String]
203
+ def model
204
+ @ses.model
192
205
  end
193
206
 
207
+ ##
208
+ # @param (see LLM::Session#serialize)
209
+ # @return (see LLM::Session#serialize)
210
+ def serialize(**kw)
211
+ @ses.serialize(**kw)
212
+ end
213
+ alias_method :save, :serialize
214
+
215
+ ##
216
+ # @param (see LLM::Session#deserialize)
217
+ # @return (see LLM::Session#deserialize)
218
+ def deserialize(**kw)
219
+ @ses.deserialize(**kw)
220
+ end
221
+ alias_method :restore, :deserialize
222
+
194
223
  private
195
224
 
196
225
  def apply_instructions(prompt)
197
226
  instr = self.class.instructions
198
227
  return prompt unless instr
199
- if LLM::Builder === prompt
228
+ if LLM::Prompt === prompt
200
229
  messages = prompt.to_a
201
- builder = LLM::Builder.new(@provider) do |builder|
202
- builder.system instr unless @instructions_applied
203
- messages.each { |msg| builder.chat(msg.content, role: msg.role) }
204
- end
205
- builder.tap(&:call)
230
+ prompt = LLM::Prompt.new(@provider)
231
+ prompt.system instr unless @instructions_applied
232
+ messages.each { |msg| prompt.talk(msg.content, role: msg.role) }
233
+ prompt
206
234
  else
207
- build_prompt do
208
- _1.system instr unless @instructions_applied
209
- _1.user prompt
235
+ prompt do
236
+ system instr unless @instructions_applied
237
+ user prompt
210
238
  end
211
239
  end
212
240
  end
data/lib/llm/bot.rb CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  module LLM
4
4
  ##
5
- # {LLM::Bot LLM::Bot} provides an object that can maintain a
5
+ # {LLM::Session LLM::Session} provides an object that can maintain a
6
6
  # conversation. A conversation can use the chat completions API
7
7
  # that all LLM providers support or the responses API that currently
8
8
  # only OpenAI supports.
@@ -11,20 +11,21 @@ module LLM
11
11
  # #!/usr/bin/env ruby
12
12
  # require "llm"
13
13
  #
14
- # llm = LLM.openai(key: ENV["KEY"])
15
- # bot = LLM::Bot.new(llm)
16
- # url = "https://upload.wikimedia.org/wikipedia/commons/c/c7/Lisc_lipy.jpg"
14
+ # llm = LLM.openai(key: ENV["KEY"])
15
+ # ses = LLM::Session.new(llm)
17
16
  #
18
- # prompt = bot.build_prompt do
19
- # it.system "Your task is to answer all user queries"
20
- # it.user ["Tell me about this URL", bot.image_url(url)]
21
- # it.user ["Tell me about this PDF", bot.local_file("handbook.pdf")]
17
+ # prompt = LLM::Prompt.new(llm) do
18
+ # system "Be concise and show your reasoning briefly."
19
+ # user "If a train goes 60 mph for 1.5 hours, how far does it travel?"
20
+ # user "Now double the speed for the same time."
22
21
  # end
23
- # bot.chat(prompt)
24
22
  #
25
- # # The full conversation history is in bot.messages
26
- # bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
27
- class Bot
23
+ # ses.talk(prompt)
24
+ # ses.messages.each { |m| puts "[#{m.role}] #{m.content}" }
25
+ class Session
26
+ require_relative "session/deserializer"
27
+ include Deserializer
28
+
28
29
  ##
29
30
  # Returns an Enumerable for the messages in a conversation
30
31
  # @return [LLM::Buffer<LLM::Message>]
@@ -54,19 +55,22 @@ module LLM
54
55
  # @return [LLM::Response] Returns the LLM's response for this turn.
55
56
  # @example
56
57
  # llm = LLM.openai(key: ENV["KEY"])
57
- # bot = LLM::Bot.new(llm)
58
- # response = bot.chat("Hello, what is your name?")
59
- # puts response.choices[0].content
60
- def chat(prompt, params = {})
58
+ # ses = LLM::Session.new(llm)
59
+ # res = ses.talk("Hello, what is your name?")
60
+ # puts res.messages[0].content
61
+ def talk(prompt, params = {})
61
62
  prompt, params, messages = fetch(prompt, params)
62
63
  params = params.merge(messages: [*@messages.to_a, *messages])
63
64
  params = @params.merge(params)
64
65
  res = @provider.complete(prompt, params)
65
- @messages.concat [LLM::Message.new(params[:role] || :user, prompt)]
66
+ role = params[:role] || @provider.user_role
67
+ role = @provider.tool_role if params[:role].nil? && [*prompt].grep(LLM::Function::Return).any?
68
+ @messages.concat [LLM::Message.new(role, prompt)]
66
69
  @messages.concat messages
67
70
  @messages.concat [res.choices[-1]]
68
71
  res
69
72
  end
73
+ alias_method :chat, :talk
70
74
 
71
75
  ##
72
76
  # Maintain a conversation via the responses API.
@@ -78,8 +82,8 @@ module LLM
78
82
  # @return [LLM::Response] Returns the LLM's response for this turn.
79
83
  # @example
80
84
  # llm = LLM.openai(key: ENV["KEY"])
81
- # bot = LLM::Bot.new(llm)
82
- # res = bot.respond("What is the capital of France?")
85
+ # ses = LLM::Session.new(llm)
86
+ # res = ses.respond("What is the capital of France?")
83
87
  # puts res.output_text
84
88
  def respond(prompt, params = {})
85
89
  prompt, params, messages = fetch(prompt, params)
@@ -87,7 +91,8 @@ module LLM
87
91
  params = params.merge(previous_response_id: res_id, input: messages).compact
88
92
  params = @params.merge(params)
89
93
  res = @provider.responses.create(prompt, params)
90
- @messages.concat [LLM::Message.new(params[:role] || :user, prompt)]
94
+ role = params[:role] || @provider.user_role
95
+ @messages.concat [LLM::Message.new(role, prompt)]
91
96
  @messages.concat messages
92
97
  @messages.concat [res.choices[-1]]
93
98
  res
@@ -107,8 +112,13 @@ module LLM
107
112
  def functions
108
113
  @messages
109
114
  .select(&:assistant?)
110
- .flat_map(&:functions)
111
- .select(&:pending?)
115
+ .flat_map do |msg|
116
+ fns = msg.functions.select(&:pending?)
117
+ fns.each do |fn|
118
+ fn.tracer = tracer
119
+ fn.model = msg.model
120
+ end
121
+ end
112
122
  end
113
123
 
114
124
  ##
@@ -123,16 +133,24 @@ module LLM
123
133
  end
124
134
 
125
135
  ##
126
- # Build a prompt
136
+ # Build a role-aware prompt for a single request.
137
+ #
138
+ # Prefer this method over {#build_prompt}. The older
139
+ # method name is kept for backward compatibility.
127
140
  # @example
128
- # prompt = bot.build_prompt do
129
- # it.system "Your task is to assist the user"
130
- # it.user "Hello, can you assist me?"
141
+ # prompt = ses.prompt do
142
+ # system "Your task is to assist the user"
143
+ # user "Hello, can you assist me?"
131
144
  # end
132
- # bot.chat(prompt)
133
- def build_prompt(&)
134
- LLM::Builder.new(@provider, &).tap(&:call)
145
+ # ses.talk(prompt)
146
+ # @param [Proc] b
147
+ # A block that composes messages. If it takes one argument,
148
+ # it receives the prompt object. Otherwise it runs in prompt context.
149
+ # @return [LLM::Prompt]
150
+ def prompt(&b)
151
+ LLM::Prompt.new(@provider, &b)
135
152
  end
153
+ alias_method :build_prompt, :prompt
136
154
 
137
155
  ##
138
156
  # Recognize an object as a URL to an image
@@ -164,14 +182,84 @@ module LLM
164
182
  LLM::Object.from(value: res, kind: :remote_file)
165
183
  end
166
184
 
185
+ ##
186
+ # @return [LLM::Tracer]
187
+ # Returns an LLM tracer
188
+ def tracer
189
+ @provider.tracer
190
+ end
191
+
192
+ ##
193
+ # Returns the model a Session is actively using
194
+ # @return [String]
195
+ def model
196
+ messages.find(&:assistant?)&.model || @params[:model]
197
+ end
198
+
199
+ ##
200
+ # @return [Hash]
201
+ def to_h
202
+ {model:, messages:}
203
+ end
204
+
205
+ ##
206
+ # @return [String]
207
+ def to_json(...)
208
+ {schema_version: 1}.merge!(to_h).to_json(...)
209
+ end
210
+
211
+ ##
212
+ # Save a session
213
+ # @example
214
+ # llm = LLM.openai(key: ENV["KEY"])
215
+ # ses = LLM::Session.new(llm)
216
+ # ses.talk "Hello"
217
+ # ses.save(path: "session.json")
218
+ # @raise [SystemCallError]
219
+ # Might raise a number of SystemCallError subclasses
220
+ # @return [void]
221
+ def serialize(path:)
222
+ ::File.binwrite path, LLM.json.dump(self)
223
+ end
224
+ alias_method :save, :serialize
225
+
226
+ ##
227
+ # Restore a session
228
+ # @param [String, nil] path
229
+ # The path to a JSON file
230
+ # @param [String, nil] string
231
+ # A raw JSON string
232
+ # @raise [SystemCallError]
233
+ # Might raise a number of SystemCallError subclasses
234
+ # @return [LLM::Session]
235
+ def deserialize(path: nil, string: nil)
236
+ payload = if path.nil? and string.nil?
237
+ raise ArgumentError, "a path or string is required"
238
+ elsif path
239
+ ::File.binread(path)
240
+ else
241
+ string
242
+ end
243
+ ses = LLM.json.load(payload)
244
+ @messages.concat [*ses["messages"]].map { deserialize_message(_1) }
245
+ self
246
+ end
247
+ alias_method :restore, :deserialize
248
+
167
249
  private
168
250
 
169
251
  def fetch(prompt, params)
170
- return [prompt, params, []] unless LLM::Builder === prompt
252
+ return [prompt, params, []] unless LLM::Prompt === prompt
171
253
  messages = prompt.to_a
172
254
  prompt = messages.shift
173
255
  params.merge!(role: prompt.role)
174
256
  [prompt.content, params, messages]
175
257
  end
176
258
  end
259
+
260
+ # Backward-compatible alias
261
+ Bot = Session
262
+
263
+ # Scheduled for removal in v5.0
264
+ deprecate_constant :Bot
177
265
  end
data/lib/llm/buffer.rb CHANGED
@@ -70,6 +70,12 @@ module LLM
70
70
  @messages[index]
71
71
  end
72
72
 
73
+ ##
74
+ # @return [String]
75
+ def to_json(...)
76
+ LLM.json.dump(@messages, ...)
77
+ end
78
+
73
79
  ##
74
80
  # @return [String]
75
81
  def inspect
@@ -0,0 +1,19 @@
1
+ # frozen_string_literal: true
2
+
3
+ class LLM::Function
4
+ ##
5
+ # The {LLM::Function::Tracing LLM::Function::Tracing} module patches
6
+ # an LLM function (or tool) in order to add tracing support.
7
+ module Tracing
8
+ def call(...)
9
+ return super unless @tracer
10
+ span = @tracer.on_tool_start(id:, name:, arguments:, model:)
11
+ result = super
12
+ @tracer.on_tool_finish(result:, span:)
13
+ result
14
+ rescue => ex
15
+ @tracer.on_tool_error(ex:, span:)
16
+ raise(ex)
17
+ end
18
+ end
19
+ end
data/lib/llm/function.rb CHANGED
@@ -29,7 +29,22 @@
29
29
  # end
30
30
  # end
31
31
  class LLM::Function
32
+ require_relative "function/tracing"
33
+ prepend LLM::Function::Tracing
34
+
32
35
  class Return < Struct.new(:id, :name, :value)
36
+ ##
37
+ # Returns a Hash representation of {LLM::Function::Return}
38
+ # @return [Hash]
39
+ def to_h
40
+ {id:, name:, value:}
41
+ end
42
+
43
+ ##
44
+ # @return [String]
45
+ def to_json(...)
46
+ LLM.json.dump(to_h, ...)
47
+ end
33
48
  end
34
49
 
35
50
  ##
@@ -42,6 +57,16 @@ class LLM::Function
42
57
  # @return [Array, nil]
43
58
  attr_accessor :arguments
44
59
 
60
+ ##
61
+ # Returns a tracer, or nil
62
+ # @return [LLM::Tracer, nil]
63
+ attr_accessor :tracer
64
+
65
+ ##
66
+ # Returns a model name, or nil
67
+ # @return [String, nil]
68
+ attr_accessor :model
69
+
45
70
  ##
46
71
  # @param [String] name The function name
47
72
  # @yieldparam [LLM::Function] self The function object
@@ -116,9 +141,9 @@ class LLM::Function
116
141
  # Returns a value that communicates that the function call was cancelled
117
142
  # @example
118
143
  # llm = LLM.openai(key: ENV["KEY"])
119
- # bot = LLM::Bot.new(llm, tools: [fn1, fn2])
120
- # bot.chat "I want to run the functions"
121
- # bot.chat bot.functions.map(&:cancel)
144
+ # ses = LLM::Session.new(llm, tools: [fn1, fn2])
145
+ # ses.talk "I want to run the functions"
146
+ # ses.talk ses.functions.map(&:cancel)
122
147
  # @return [LLM::Function::Return]
123
148
  def cancel(reason: "function call cancelled")
124
149
  Return.new(id, name, {cancelled: true, reason:})
@@ -35,16 +35,16 @@ module LLM
35
35
  class JSONAdapter::JSON < JSONAdapter
36
36
  ##
37
37
  # @return (see JSONAdapter#dump)
38
- def self.dump(obj)
38
+ def self.dump(obj, ...)
39
39
  require "json" unless defined?(::JSON)
40
- ::JSON.dump(obj)
40
+ ::JSON.dump(obj, ...)
41
41
  end
42
42
 
43
43
  ##
44
44
  # @return (see JSONAdapter#load)
45
- def self.load(string)
45
+ def self.load(string, ...)
46
46
  require "json" unless defined?(::JSON)
47
- ::JSON.parse(string)
47
+ ::JSON.parse(string, ...)
48
48
  end
49
49
 
50
50
  ##
@@ -61,16 +61,16 @@ module LLM
61
61
  class JSONAdapter::Oj < JSONAdapter
62
62
  ##
63
63
  # @return (see JSONAdapter#dump)
64
- def self.dump(obj)
64
+ def self.dump(obj, options = {})
65
65
  require "oj" unless defined?(::Oj)
66
- ::Oj.dump(obj)
66
+ ::Oj.dump(obj, options.merge(mode: :compat))
67
67
  end
68
68
 
69
69
  ##
70
70
  # @return (see JSONAdapter#load)
71
- def self.load(string)
71
+ def self.load(string, options = {})
72
72
  require "oj" unless defined?(::Oj)
73
- ::Oj.load(string, mode: :compat, symbol_keys: false, symbolize_names: false)
73
+ ::Oj.load(string, options.merge(mode: :compat, symbol_keys: false, symbolize_names: false))
74
74
  end
75
75
 
76
76
  ##
@@ -87,16 +87,16 @@ module LLM
87
87
  class JSONAdapter::Yajl < JSONAdapter
88
88
  ##
89
89
  # @return (see JSONAdapter#dump)
90
- def self.dump(obj)
90
+ def self.dump(obj, ...)
91
91
  require "yajl" unless defined?(::Yajl)
92
- ::Yajl::Encoder.encode(obj)
92
+ ::Yajl::Encoder.encode(obj, ...)
93
93
  end
94
94
 
95
95
  ##
96
96
  # @return (see JSONAdapter#load)
97
- def self.load(string)
97
+ def self.load(string, ...)
98
98
  require "yajl" unless defined?(::Yajl)
99
- ::Yajl::Parser.parse(string)
99
+ ::Yajl::Parser.parse(string, ...)
100
100
  end
101
101
 
102
102
  ##
data/lib/llm/message.rb CHANGED
@@ -30,10 +30,18 @@ module LLM
30
30
  end
31
31
 
32
32
  ##
33
- # Returns a hash representation of the message
33
+ # Returns a Hash representation of the message.
34
34
  # @return [Hash]
35
35
  def to_h
36
- {role:, content:}
36
+ {role:, content:,
37
+ tools: @extra[:tool_calls],
38
+ original_tool_calls: extra[:original_tool_calls]}.compact
39
+ end
40
+
41
+ ##
42
+ # @return [String]
43
+ def to_json(...)
44
+ LLM.json.dump(to_h, ...)
37
45
  end
38
46
 
39
47
  ##
@@ -62,7 +70,7 @@ module LLM
62
70
  # @return [Array<LLM::Function>]
63
71
  def functions
64
72
  @functions ||= tool_calls.map do |fn|
65
- function = tools.find { _1.name.to_s == fn["name"] }.dup
73
+ function = available_tools.find { _1.name.to_s == fn["name"] }.dup
66
74
  function.tap { _1.id = fn.id }
67
75
  function.tap { _1.arguments = fn.arguments }
68
76
  end
@@ -136,6 +144,13 @@ module LLM
136
144
  end
137
145
  alias_method :token_usage, :usage
138
146
 
147
+ ##
148
+ # @return [String, nil]
149
+ # Returns the model associated with a message
150
+ def model
151
+ response&.model
152
+ end
153
+
139
154
  ##
140
155
  # Returns a string representation of the message
141
156
  # @return [String]
@@ -151,7 +166,7 @@ module LLM
151
166
  @tool_calls ||= LLM::Object.from(@extra[:tool_calls] || [])
152
167
  end
153
168
 
154
- def tools
169
+ def available_tools
155
170
  response&.__tools__ || []
156
171
  end
157
172
  end