llm.rb 4.1.0 → 4.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. checksums.yaml +4 -4
  2. data/LICENSE +2 -2
  3. data/README.md +186 -172
  4. data/lib/llm/agent.rb +49 -37
  5. data/lib/llm/bot.rb +57 -28
  6. data/lib/llm/function/tracing.rb +19 -0
  7. data/lib/llm/function.rb +16 -3
  8. data/lib/llm/json_adapter.rb +1 -1
  9. data/lib/llm/message.rb +7 -0
  10. data/lib/llm/prompt.rb +85 -0
  11. data/lib/llm/provider.rb +56 -10
  12. data/lib/llm/providers/anthropic/error_handler.rb +27 -5
  13. data/lib/llm/providers/anthropic/files.rb +22 -16
  14. data/lib/llm/providers/anthropic/models.rb +4 -3
  15. data/lib/llm/providers/anthropic.rb +6 -5
  16. data/lib/llm/providers/deepseek.rb +3 -3
  17. data/lib/llm/providers/gemini/error_handler.rb +34 -12
  18. data/lib/llm/providers/gemini/files.rb +18 -13
  19. data/lib/llm/providers/gemini/images.rb +4 -3
  20. data/lib/llm/providers/gemini/models.rb +4 -3
  21. data/lib/llm/providers/gemini.rb +9 -7
  22. data/lib/llm/providers/llamacpp.rb +3 -3
  23. data/lib/llm/providers/ollama/error_handler.rb +28 -6
  24. data/lib/llm/providers/ollama/models.rb +4 -3
  25. data/lib/llm/providers/ollama.rb +9 -7
  26. data/lib/llm/providers/openai/audio.rb +10 -7
  27. data/lib/llm/providers/openai/error_handler.rb +41 -14
  28. data/lib/llm/providers/openai/files.rb +19 -14
  29. data/lib/llm/providers/openai/images.rb +10 -7
  30. data/lib/llm/providers/openai/models.rb +4 -3
  31. data/lib/llm/providers/openai/moderations.rb +4 -3
  32. data/lib/llm/providers/openai/responses.rb +10 -7
  33. data/lib/llm/providers/openai/vector_stores.rb +34 -23
  34. data/lib/llm/providers/openai.rb +9 -7
  35. data/lib/llm/providers/xai.rb +3 -3
  36. data/lib/llm/providers/zai.rb +2 -2
  37. data/lib/llm/schema/object.rb +2 -2
  38. data/lib/llm/schema.rb +16 -2
  39. data/lib/llm/server_tool.rb +3 -3
  40. data/lib/llm/session.rb +3 -0
  41. data/lib/llm/tracer/logger.rb +192 -0
  42. data/lib/llm/tracer/null.rb +49 -0
  43. data/lib/llm/tracer/telemetry.rb +255 -0
  44. data/lib/llm/tracer.rb +134 -0
  45. data/lib/llm/version.rb +1 -1
  46. data/lib/llm.rb +4 -3
  47. data/llm.gemspec +4 -1
  48. metadata +38 -3
  49. data/lib/llm/builder.rb +0 -79
data/lib/llm/agent.rb CHANGED
@@ -6,15 +6,10 @@ module LLM
6
6
  # reusable, preconfigured assistants with defaults for model,
7
7
  # tools, schema, and instructions.
8
8
  #
9
- # @note
10
- # Unlike {LLM::Bot LLM::Bot}, this class will automatically run
11
- # tool calls for you.
12
- #
13
- # @note
14
- # Instructions are injected only on the first request.
15
- #
16
- # @note
17
- # This idea originally came from RubyLLM and was adapted to llm.rb.
9
+ # **Notes:**
10
+ # * Instructions are injected only on the first request.
11
+ # * An agent will automatically execute tool calls (unlike {LLM::Session LLM::Session}).
12
+ # * The idea originally came from RubyLLM and was adapted to llm.rb.
18
13
  #
19
14
  # @example
20
15
  # class SystemAdmin < LLM::Agent
@@ -26,7 +21,7 @@ module LLM
26
21
  #
27
22
  # llm = LLM.openai(key: ENV["KEY"])
28
23
  # agent = SystemAdmin.new(llm)
29
- # agent.chat("Run 'date'")
24
+ # agent.talk("Run 'date'")
30
25
  class Agent
31
26
  ##
32
27
  # Set or get the default model
@@ -85,7 +80,7 @@ module LLM
85
80
  def initialize(provider, params = {})
86
81
  defaults = {model: self.class.model, tools: self.class.tools, schema: self.class.schema}.compact
87
82
  @provider = provider
88
- @bot = LLM::Bot.new(provider, defaults.merge(params))
83
+ @ses = LLM::Session.new(provider, defaults.merge(params))
89
84
  @instructions_applied = false
90
85
  end
91
86
 
@@ -100,19 +95,20 @@ module LLM
100
95
  # @example
101
96
  # llm = LLM.openai(key: ENV["KEY"])
102
97
  # agent = LLM::Agent.new(llm)
103
- # response = agent.chat("Hello, what is your name?")
98
+ # response = agent.talk("Hello, what is your name?")
104
99
  # puts response.choices[0].content
105
- def chat(prompt, params = {})
100
+ def talk(prompt, params = {})
106
101
  i, max = 0, Integer(params.delete(:max_tool_rounds) || 10)
107
- res = @bot.chat(apply_instructions(prompt), params)
108
- until @bot.functions.empty?
102
+ res = @ses.talk(apply_instructions(prompt), params)
103
+ until @ses.functions.empty?
109
104
  raise LLM::ToolLoopError, "pending tool calls remain" if i >= max
110
- res = @bot.chat @bot.functions.map(&:call), params
105
+ res = @ses.talk @ses.functions.map(&:call), params
111
106
  i += 1
112
107
  end
113
108
  @instructions_applied = true
114
109
  res
115
110
  end
111
+ alias_method :chat, :talk
116
112
 
117
113
  ##
118
114
  # Maintain a conversation via the responses API.
@@ -130,10 +126,10 @@ module LLM
130
126
  # puts res.output_text
131
127
  def respond(prompt, params = {})
132
128
  i, max = 0, Integer(params.delete(:max_tool_rounds) || 10)
133
- res = @bot.respond(apply_instructions(prompt), params)
134
- until @bot.functions.empty?
129
+ res = @ses.respond(apply_instructions(prompt), params)
130
+ until @ses.functions.empty?
135
131
  raise LLM::ToolLoopError, "pending tool calls remain" if i >= max
136
- res = @bot.respond @bot.functions.map(&:call), params
132
+ res = @ses.respond @ses.functions.map(&:call), params
137
133
  i += 1
138
134
  end
139
135
  @instructions_applied = true
@@ -143,26 +139,29 @@ module LLM
143
139
  ##
144
140
  # @return [LLM::Buffer<LLM::Message>]
145
141
  def messages
146
- @bot.messages
142
+ @ses.messages
147
143
  end
148
144
 
149
145
  ##
150
146
  # @return [Array<LLM::Function>]
151
147
  def functions
152
- @bot.functions
148
+ @ses.functions
153
149
  end
154
150
 
155
151
  ##
156
152
  # @return [LLM::Object]
157
153
  def usage
158
- @bot.usage
154
+ @ses.usage
159
155
  end
160
156
 
161
157
  ##
162
- # @return [LLM::Builder]
163
- def build_prompt(&)
164
- @bot.build_prompt(&)
158
+ # @param (see LLM::Session#prompt)
159
+ # @return (see LLM::Session#prompt)
160
+ # @see LLM::Session#prompt
161
+ def prompt(&b)
162
+ @ses.prompt(&b)
165
163
  end
164
+ alias_method :build_prompt, :prompt
166
165
 
167
166
  ##
168
167
  # @param [String] url
@@ -170,7 +169,7 @@ module LLM
170
169
  # @return [LLM::Object]
171
170
  # Returns a tagged object
172
171
  def image_url(url)
173
- @bot.image_url(url)
172
+ @ses.image_url(url)
174
173
  end
175
174
 
176
175
  ##
@@ -179,7 +178,7 @@ module LLM
179
178
  # @return [LLM::Object]
180
179
  # Returns a tagged object
181
180
  def local_file(path)
182
- @bot.local_file(path)
181
+ @ses.local_file(path)
183
182
  end
184
183
 
185
184
  ##
@@ -188,7 +187,21 @@ module LLM
188
187
  # @return [LLM::Object]
189
188
  # Returns a tagged object
190
189
  def remote_file(res)
191
- @bot.remote_file(res)
190
+ @ses.remote_file(res)
191
+ end
192
+
193
+ ##
194
+ # @return [LLM::Tracer]
195
+ # Returns an LLM tracer
196
+ def tracer
197
+ @ses.tracer
198
+ end
199
+
200
+ ##
201
+ # Returns the model an Agent is actively using
202
+ # @return [String]
203
+ def model
204
+ @ses.model
192
205
  end
193
206
 
194
207
  private
@@ -196,17 +209,16 @@ module LLM
196
209
  def apply_instructions(prompt)
197
210
  instr = self.class.instructions
198
211
  return prompt unless instr
199
- if LLM::Builder === prompt
212
+ if LLM::Prompt === prompt
200
213
  messages = prompt.to_a
201
- builder = LLM::Builder.new(@provider) do |builder|
202
- builder.system instr unless @instructions_applied
203
- messages.each { |msg| builder.chat(msg.content, role: msg.role) }
204
- end
205
- builder.tap(&:call)
214
+ prompt = LLM::Prompt.new(@provider)
215
+ prompt.system instr unless @instructions_applied
216
+ messages.each { |msg| prompt.talk(msg.content, role: msg.role) }
217
+ prompt
206
218
  else
207
- build_prompt do
208
- _1.system instr unless @instructions_applied
209
- _1.user prompt
219
+ prompt do
220
+ system instr unless @instructions_applied
221
+ user prompt
210
222
  end
211
223
  end
212
224
  end
data/lib/llm/bot.rb CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  module LLM
4
4
  ##
5
- # {LLM::Bot LLM::Bot} provides an object that can maintain a
5
+ # {LLM::Session LLM::Session} provides an object that can maintain a
6
6
  # conversation. A conversation can use the chat completions API
7
7
  # that all LLM providers support or the responses API that currently
8
8
  # only OpenAI supports.
@@ -11,20 +11,18 @@ module LLM
11
11
  # #!/usr/bin/env ruby
12
12
  # require "llm"
13
13
  #
14
- # llm = LLM.openai(key: ENV["KEY"])
15
- # bot = LLM::Bot.new(llm)
16
- # url = "https://upload.wikimedia.org/wikipedia/commons/c/c7/Lisc_lipy.jpg"
14
+ # llm = LLM.openai(key: ENV["KEY"])
15
+ # ses = LLM::Session.new(llm)
17
16
  #
18
- # prompt = bot.build_prompt do
19
- # it.system "Your task is to answer all user queries"
20
- # it.user ["Tell me about this URL", bot.image_url(url)]
21
- # it.user ["Tell me about this PDF", bot.local_file("handbook.pdf")]
17
+ # prompt = LLM::Prompt.new(llm) do
18
+ # system "Be concise and show your reasoning briefly."
19
+ # user "If a train goes 60 mph for 1.5 hours, how far does it travel?"
20
+ # user "Now double the speed for the same time."
22
21
  # end
23
- # bot.chat(prompt)
24
22
  #
25
- # # The full conversation history is in bot.messages
26
- # bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
27
- class Bot
23
+ # ses.talk(prompt)
24
+ # ses.messages.each { |m| puts "[#{m.role}] #{m.content}" }
25
+ class Session
28
26
  ##
29
27
  # Returns an Enumerable for the messages in a conversation
30
28
  # @return [LLM::Buffer<LLM::Message>]
@@ -54,10 +52,10 @@ module LLM
54
52
  # @return [LLM::Response] Returns the LLM's response for this turn.
55
53
  # @example
56
54
  # llm = LLM.openai(key: ENV["KEY"])
57
- # bot = LLM::Bot.new(llm)
58
- # response = bot.chat("Hello, what is your name?")
59
- # puts response.choices[0].content
60
- def chat(prompt, params = {})
55
+ # ses = LLM::Session.new(llm)
56
+ # res = ses.talk("Hello, what is your name?")
57
+ # puts res.messages[0].content
58
+ def talk(prompt, params = {})
61
59
  prompt, params, messages = fetch(prompt, params)
62
60
  params = params.merge(messages: [*@messages.to_a, *messages])
63
61
  params = @params.merge(params)
@@ -67,6 +65,7 @@ module LLM
67
65
  @messages.concat [res.choices[-1]]
68
66
  res
69
67
  end
68
+ alias_method :chat, :talk
70
69
 
71
70
  ##
72
71
  # Maintain a conversation via the responses API.
@@ -78,8 +77,8 @@ module LLM
78
77
  # @return [LLM::Response] Returns the LLM's response for this turn.
79
78
  # @example
80
79
  # llm = LLM.openai(key: ENV["KEY"])
81
- # bot = LLM::Bot.new(llm)
82
- # res = bot.respond("What is the capital of France?")
80
+ # ses = LLM::Session.new(llm)
81
+ # res = ses.respond("What is the capital of France?")
83
82
  # puts res.output_text
84
83
  def respond(prompt, params = {})
85
84
  prompt, params, messages = fetch(prompt, params)
@@ -107,8 +106,13 @@ module LLM
107
106
  def functions
108
107
  @messages
109
108
  .select(&:assistant?)
110
- .flat_map(&:functions)
111
- .select(&:pending?)
109
+ .flat_map do |msg|
110
+ fns = msg.functions.select(&:pending?)
111
+ fns.each do |fn|
112
+ fn.tracer = tracer
113
+ fn.model = msg.model
114
+ end
115
+ end
112
116
  end
113
117
 
114
118
  ##
@@ -123,16 +127,24 @@ module LLM
123
127
  end
124
128
 
125
129
  ##
126
- # Build a prompt
130
+ # Build a role-aware prompt for a single request.
131
+ #
132
+ # Prefer this method over {#build_prompt}. The older
133
+ # method name is kept for backward compatibility.
127
134
  # @example
128
- # prompt = bot.build_prompt do
129
- # it.system "Your task is to assist the user"
130
- # it.user "Hello, can you assist me?"
135
+ # prompt = ses.prompt do
136
+ # system "Your task is to assist the user"
137
+ # user "Hello, can you assist me?"
131
138
  # end
132
- # bot.chat(prompt)
133
- def build_prompt(&)
134
- LLM::Builder.new(@provider, &).tap(&:call)
139
+ # ses.talk(prompt)
140
+ # @param [Proc] b
141
+ # A block that composes messages. If it takes one argument,
142
+ # it receives the prompt object. Otherwise it runs in prompt context.
143
+ # @return [LLM::Prompt]
144
+ def prompt(&b)
145
+ LLM::Prompt.new(@provider, &b)
135
146
  end
147
+ alias_method :build_prompt, :prompt
136
148
 
137
149
  ##
138
150
  # Recognize an object as a URL to an image
@@ -164,14 +176,31 @@ module LLM
164
176
  LLM::Object.from(value: res, kind: :remote_file)
165
177
  end
166
178
 
179
+ ##
180
+ # @return [LLM::Tracer]
181
+ # Returns an LLM tracer
182
+ def tracer
183
+ @provider.tracer
184
+ end
185
+
186
+ ##
187
+ # Returns the model a Session is actively using
188
+ # @return [String]
189
+ def model
190
+ messages.find(&:assistant?)&.model || @params[:model]
191
+ end
192
+
167
193
  private
168
194
 
169
195
  def fetch(prompt, params)
170
- return [prompt, params, []] unless LLM::Builder === prompt
196
+ return [prompt, params, []] unless LLM::Prompt === prompt
171
197
  messages = prompt.to_a
172
198
  prompt = messages.shift
173
199
  params.merge!(role: prompt.role)
174
200
  [prompt.content, params, messages]
175
201
  end
176
202
  end
203
+
204
+ # Backward-compatible alias
205
+ Bot = Session
177
206
  end
@@ -0,0 +1,19 @@
1
+ # frozen_string_literal: true
2
+
3
+ class LLM::Function
4
+ ##
5
+ # The {LLM::Function::Tracing LLM::Function::Tracing} module patches
6
+ # an LLM function (or tool) in order to add tracing support.
7
+ module Tracing
8
+ def call(...)
9
+ return super unless @tracer
10
+ span = @tracer.on_tool_start(id:, name:, arguments:, model:)
11
+ result = super
12
+ @tracer.on_tool_finish(result:, span:)
13
+ result
14
+ rescue => ex
15
+ @tracer.on_tool_error(ex:, span:)
16
+ raise(ex)
17
+ end
18
+ end
19
+ end
data/lib/llm/function.rb CHANGED
@@ -29,6 +29,9 @@
29
29
  # end
30
30
  # end
31
31
  class LLM::Function
32
+ require_relative "function/tracing"
33
+ prepend LLM::Function::Tracing
34
+
32
35
  class Return < Struct.new(:id, :name, :value)
33
36
  end
34
37
 
@@ -42,6 +45,16 @@ class LLM::Function
42
45
  # @return [Array, nil]
43
46
  attr_accessor :arguments
44
47
 
48
+ ##
49
+ # Returns a tracer, or nil
50
+ # @return [LLM::Tracer, nil]
51
+ attr_accessor :tracer
52
+
53
+ ##
54
+ # Returns a model name, or nil
55
+ # @return [String, nil]
56
+ attr_accessor :model
57
+
45
58
  ##
46
59
  # @param [String] name The function name
47
60
  # @yieldparam [LLM::Function] self The function object
@@ -116,9 +129,9 @@ class LLM::Function
116
129
  # Returns a value that communicates that the function call was cancelled
117
130
  # @example
118
131
  # llm = LLM.openai(key: ENV["KEY"])
119
- # bot = LLM::Bot.new(llm, tools: [fn1, fn2])
120
- # bot.chat "I want to run the functions"
121
- # bot.chat bot.functions.map(&:cancel)
132
+ # ses = LLM::Session.new(llm, tools: [fn1, fn2])
133
+ # ses.talk "I want to run the functions"
134
+ # ses.talk ses.functions.map(&:cancel)
122
135
  # @return [LLM::Function::Return]
123
136
  def cancel(reason: "function call cancelled")
124
137
  Return.new(id, name, {cancelled: true, reason:})
@@ -63,7 +63,7 @@ module LLM
63
63
  # @return (see JSONAdapter#dump)
64
64
  def self.dump(obj)
65
65
  require "oj" unless defined?(::Oj)
66
- ::Oj.dump(obj)
66
+ ::Oj.dump(obj, mode: :compat)
67
67
  end
68
68
 
69
69
  ##
data/lib/llm/message.rb CHANGED
@@ -136,6 +136,13 @@ module LLM
136
136
  end
137
137
  alias_method :token_usage, :usage
138
138
 
139
+ ##
140
+ # @return [String, nil]
141
+ # Returns the model associated with a message
142
+ def model
143
+ response&.model
144
+ end
145
+
139
146
  ##
140
147
  # Returns a string representation of the message
141
148
  # @return [String]
data/lib/llm/prompt.rb ADDED
@@ -0,0 +1,85 @@
1
+ # frozen_string_literal: true
2
+
3
+ ##
4
+ # {LLM::Prompt LLM::Prompt} is a small object for composing
5
+ # a single request from multiple role-aware messages.
6
+ # A prompt is not just a string. It is an ordered chain of
7
+ # messages with explicit roles (for example `system` and `user`).
8
+ # Use {LLM::Session#prompt} when building a prompt inside a session.
9
+ # Use `LLM::Prompt.new(provider)` directly when you want to construct
10
+ # or pass prompt objects around explicitly.
11
+ #
12
+ # @example
13
+ # llm = LLM.openai(key: ENV["KEY"])
14
+ # ses = LLM::Session.new(llm)
15
+ #
16
+ # prompt = ses.prompt do
17
+ # system "Your task is to assist the user"
18
+ # user "Hello. Can you assist me?"
19
+ # end
20
+ #
21
+ # res = ses.talk(prompt)
22
+ class LLM::Prompt
23
+ ##
24
+ # @param [LLM::Provider] provider
25
+ # A provider used to resolve provider-specific role names.
26
+ # @param [Proc] b
27
+ # A block that composes messages. If the block takes one argument,
28
+ # it receives the prompt object. Otherwise the block runs in the
29
+ # prompt context via `instance_eval`.
30
+ def initialize(provider, &b)
31
+ @provider = provider
32
+ @buffer = []
33
+ unless b.nil?
34
+ (b.arity == 1) ? b.call(self) : instance_eval(&b)
35
+ end
36
+ end
37
+
38
+ ##
39
+ # @param [String] content
40
+ # The message
41
+ # @param [Symbol] role
42
+ # The role (eg user, system)
43
+ # @return [void]
44
+ def talk(content, role: @provider.user_role)
45
+ role = case role.to_sym
46
+ when :system then @provider.system_role
47
+ when :user then @provider.user_role
48
+ when :developer then @provider.developer_role
49
+ else role
50
+ end
51
+ @buffer << LLM::Message.new(role, content)
52
+ end
53
+ alias_method :chat, :talk
54
+
55
+ ##
56
+ # @param [String] content
57
+ # The message content
58
+ # @return [void]
59
+ def user(content)
60
+ chat(content, role: @provider.user_role)
61
+ end
62
+
63
+ ##
64
+ # @param [String] content
65
+ # The message content
66
+ # @return [void]
67
+ def system(content)
68
+ chat(content, role: @provider.system_role)
69
+ end
70
+
71
+ ##
72
+ # @param [String] content
73
+ # The message content
74
+ # @return [void]
75
+ def developer(content)
76
+ chat(content, role: @provider.developer_role)
77
+ end
78
+
79
+ ##
80
+ # @return [Array<LLM::Message>]
81
+ # Returns the prompt messages in order.
82
+ def to_a
83
+ @buffer.dup
84
+ end
85
+ end
data/lib/llm/provider.rb CHANGED
@@ -37,6 +37,7 @@ class LLM::Provider
37
37
  @timeout = timeout
38
38
  @ssl = ssl
39
39
  @client = persistent ? persistent_client : transient_client
40
+ @tracer = LLM::Tracer::Null.new(self)
40
41
  @base_uri = URI("#{ssl ? "https" : "http"}://#{host}:#{port}/")
41
42
  end
42
43
 
@@ -45,7 +46,7 @@ class LLM::Provider
45
46
  # @return [String]
46
47
  # @note The secret key is redacted in inspect for security reasons
47
48
  def inspect
48
- "#<#{self.class.name}:0x#{object_id.to_s(16)} @key=[REDACTED] @client=#{@client.inspect}>"
49
+ "#<#{self.class.name}:0x#{object_id.to_s(16)} @key=[REDACTED] @client=#{@client.inspect} @tracer=#{@tracer.inspect}>"
49
50
  end
50
51
 
51
52
  ##
@@ -69,7 +70,7 @@ class LLM::Provider
69
70
  # llm = LLM.openai(key: ENV["KEY"])
70
71
  # messages = [{role: "system", content: "Your task is to answer all of my questions"}]
71
72
  # res = llm.complete("5 + 2 ?", messages:)
72
- # print "[#{res.choices[0].role}]", res.choices[0].content, "\n"
73
+ # print "[#{res.messages[0].role}]", res.messages[0].content, "\n"
73
74
  # @param [String] prompt
74
75
  # The input prompt to be completed
75
76
  # @param [Hash] params
@@ -91,10 +92,10 @@ class LLM::Provider
91
92
  # Starts a new chat powered by the chat completions API
92
93
  # @param prompt (see LLM::Provider#complete)
93
94
  # @param params (see LLM::Provider#complete)
94
- # @return [LLM::Bot]
95
+ # @return [LLM::Session]
95
96
  def chat(prompt, params = {})
96
97
  role = params.delete(:role)
97
- LLM::Bot.new(self, params).chat(prompt, role:)
98
+ LLM::Session.new(self, params).talk(prompt, role:)
98
99
  end
99
100
 
100
101
  ##
@@ -102,10 +103,10 @@ class LLM::Provider
102
103
  # @param prompt (see LLM::Provider#complete)
103
104
  # @param params (see LLM::Provider#complete)
104
105
  # @raise (see LLM::Provider#complete)
105
- # @return [LLM::Bot]
106
+ # @return [LLM::Session]
106
107
  def respond(prompt, params = {})
107
108
  role = params.delete(:role)
108
- LLM::Bot.new(self, params).respond(prompt, role:)
109
+ LLM::Session.new(self, params).respond(prompt, role:)
109
110
  end
110
111
 
111
112
  ##
@@ -252,6 +253,30 @@ class LLM::Provider
252
253
  :developer
253
254
  end
254
255
 
256
+ ##
257
+ # @return [LLM::Tracer]
258
+ # Returns an LLM tracer
259
+ def tracer
260
+ @tracer
261
+ end
262
+
263
+ ##
264
+ # Set the tracer
265
+ # @example
266
+ # llm = LLM.openai(key: ENV["KEY"])
267
+ # llm.tracer = LLM::Tracer::Logger.new(llm, path: "/path/to/log.txt")
268
+ # # ...
269
+ # @param [LLM::Tracer] tracer
270
+ # A tracer
271
+ # @return [void]
272
+ def tracer=(tracer)
273
+ @tracer = if tracer.nil?
274
+ LLM::Tracer::Null.new(self)
275
+ else
276
+ tracer
277
+ end
278
+ end
279
+
255
280
  private
256
281
 
257
282
  attr_reader :client, :base_uri, :host, :port, :timeout, :ssl
@@ -303,7 +328,8 @@ class LLM::Provider
303
328
  # @raise [SystemCallError]
304
329
  # When there is a network error at the operating system level
305
330
  # @return [Net::HTTPResponse]
306
- def execute(request:, stream: nil, stream_parser: self.stream_parser, &b)
331
+ def execute(request:, operation:, stream: nil, stream_parser: self.stream_parser, model: nil, &b)
332
+ span = @tracer.on_request_start(operation:, model:)
307
333
  args = (Net::HTTP === client) ? [request] : [URI.join(base_uri, request.path), request]
308
334
  res = if stream
309
335
  client.request(*args) do |res|
@@ -323,18 +349,20 @@ class LLM::Provider
323
349
  b ? client.request(*args) { (Net::HTTPSuccess === _1) ? b.call(_1) : _1 } :
324
350
  client.request(*args)
325
351
  end
326
- handle_response(res)
352
+ [handle_response(res, span), span]
327
353
  end
328
354
 
329
355
  ##
330
356
  # Handles the response from a request
331
357
  # @param [Net::HTTPResponse] res
332
358
  # The response to handle
359
+ # @param [Object, nil] span
360
+ # The span
333
361
  # @return [Net::HTTPResponse]
334
- def handle_response(res)
362
+ def handle_response(res, span)
335
363
  case res
336
364
  when Net::HTTPOK then res.body = parse_response(res)
337
- else error_handler.new(res).raise_error!
365
+ else error_handler.new(@tracer, span, res).raise_error!
338
366
  end
339
367
  res
340
368
  end
@@ -375,4 +403,22 @@ class LLM::Provider
375
403
  end
376
404
  end
377
405
  end
406
+
407
+ ##
408
+ # @return [Hash<Symbol, LLM::Tracer>]
409
+ def tracers
410
+ self.class.tracers
411
+ end
412
+
413
+ ##
414
+ # Finalizes tracing after a response has been adapted/wrapped.
415
+ # @param [String] operation
416
+ # @param [String, nil] model
417
+ # @param [LLM::Response] res
418
+ # @param [Object, nil] span
419
+ # @return [LLM::Response]
420
+ def finish_trace(operation:, res:, model: nil, span: nil)
421
+ @tracer.on_request_finish(operation:, model:, res:, span:)
422
+ res
423
+ end
378
424
  end
@@ -10,10 +10,21 @@ class LLM::Anthropic
10
10
  attr_reader :res
11
11
 
12
12
  ##
13
+ # @return [Object, nil]
14
+ # The span
15
+ attr_reader :span
16
+
17
+ ##
18
+ # @param [LLM::Tracer] tracer
19
+ # The tracer
20
+ # @param [Object, nil] span
21
+ # The span
13
22
  # @param [Net::HTTPResponse] res
14
23
  # The response from the server
15
24
  # @return [LLM::Anthropic::ErrorHandler]
16
- def initialize(res)
25
+ def initialize(tracer, span, res)
26
+ @tracer = tracer
27
+ @span = span
17
28
  @res = res
18
29
  end
19
30
 
@@ -21,15 +32,26 @@ class LLM::Anthropic
21
32
  # @raise [LLM::Error]
22
33
  # Raises a subclass of {LLM::Error LLM::Error}
23
34
  def raise_error!
35
+ ex = error
36
+ @tracer.on_request_error(ex:, span:)
37
+ ensure
38
+ raise(ex)
39
+ end
40
+
41
+ private
42
+
43
+ ##
44
+ # @return [LLM::Error]
45
+ def error
24
46
  case res
25
47
  when Net::HTTPServerError
26
- raise LLM::ServerError.new { _1.response = res }, "Server error"
48
+ LLM::ServerError.new("Server error").tap { _1.response = res }
27
49
  when Net::HTTPUnauthorized
28
- raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
50
+ LLM::UnauthorizedError.new("Authentication error").tap { _1.response = res }
29
51
  when Net::HTTPTooManyRequests
30
- raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
52
+ LLM::RateLimitError.new("Too many requests").tap { _1.response = res }
31
53
  else
32
- raise LLM::Error.new { _1.response = res }, "Unexpected response"
54
+ LLM::Error.new("Unexpected response").tap { _1.response = res }
33
55
  end
34
56
  end
35
57
  end