llm.rb 4.1.0 → 4.2.0

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (49)
  1. checksums.yaml +4 -4
  2. data/LICENSE +2 -2
  3. data/README.md +186 -172
  4. data/lib/llm/agent.rb +49 -37
  5. data/lib/llm/bot.rb +57 -28
  6. data/lib/llm/function/tracing.rb +19 -0
  7. data/lib/llm/function.rb +16 -3
  8. data/lib/llm/json_adapter.rb +1 -1
  9. data/lib/llm/message.rb +7 -0
  10. data/lib/llm/prompt.rb +85 -0
  11. data/lib/llm/provider.rb +56 -10
  12. data/lib/llm/providers/anthropic/error_handler.rb +27 -5
  13. data/lib/llm/providers/anthropic/files.rb +22 -16
  14. data/lib/llm/providers/anthropic/models.rb +4 -3
  15. data/lib/llm/providers/anthropic.rb +6 -5
  16. data/lib/llm/providers/deepseek.rb +3 -3
  17. data/lib/llm/providers/gemini/error_handler.rb +34 -12
  18. data/lib/llm/providers/gemini/files.rb +18 -13
  19. data/lib/llm/providers/gemini/images.rb +4 -3
  20. data/lib/llm/providers/gemini/models.rb +4 -3
  21. data/lib/llm/providers/gemini.rb +9 -7
  22. data/lib/llm/providers/llamacpp.rb +3 -3
  23. data/lib/llm/providers/ollama/error_handler.rb +28 -6
  24. data/lib/llm/providers/ollama/models.rb +4 -3
  25. data/lib/llm/providers/ollama.rb +9 -7
  26. data/lib/llm/providers/openai/audio.rb +10 -7
  27. data/lib/llm/providers/openai/error_handler.rb +41 -14
  28. data/lib/llm/providers/openai/files.rb +19 -14
  29. data/lib/llm/providers/openai/images.rb +10 -7
  30. data/lib/llm/providers/openai/models.rb +4 -3
  31. data/lib/llm/providers/openai/moderations.rb +4 -3
  32. data/lib/llm/providers/openai/responses.rb +10 -7
  33. data/lib/llm/providers/openai/vector_stores.rb +34 -23
  34. data/lib/llm/providers/openai.rb +9 -7
  35. data/lib/llm/providers/xai.rb +3 -3
  36. data/lib/llm/providers/zai.rb +2 -2
  37. data/lib/llm/schema/object.rb +2 -2
  38. data/lib/llm/schema.rb +16 -2
  39. data/lib/llm/server_tool.rb +3 -3
  40. data/lib/llm/session.rb +3 -0
  41. data/lib/llm/tracer/logger.rb +192 -0
  42. data/lib/llm/tracer/null.rb +49 -0
  43. data/lib/llm/tracer/telemetry.rb +255 -0
  44. data/lib/llm/tracer.rb +134 -0
  45. data/lib/llm/version.rb +1 -1
  46. data/lib/llm.rb +4 -3
  47. data/llm.gemspec +4 -1
  48. metadata +38 -3
  49. data/lib/llm/builder.rb +0 -79
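Most of the provider changes below follow a single pattern: `execute` now opens a trace span and returns `[response, span]`, and a new `finish_trace` helper closes the span after the response has been adapted (judging by its position as the last expression in each method, it also returns the adapted response so the public return values are unchanged). The following is a minimal, self-contained sketch of that pattern only; the `Tracer` and `Provider` classes, the span hash, and the stubbed HTTP response are illustrative stand-ins, not llm.rb's actual internals.

# Minimal sketch of the execute/finish_trace pattern used throughout this diff.
# All names and shapes here are stand-ins; only the calling convention mirrors the hunks.
require "securerandom"

class Tracer
  def on_request_start(operation:, model: nil)
    # Open a span and hand it back to the caller
    {id: SecureRandom.hex(4), operation:, model:, started_at: Time.now}
  end

  def on_request_finish(span:, res:)
    elapsed = Time.now - span[:started_at]
    puts "[trace] #{span[:operation]} #{span[:id]} finished in #{elapsed.round(3)}s"
  end
end

class Provider
  def initialize
    @tracer = Tracer.new
  end

  # Returns the raw response together with the span opened for it.
  def execute(operation:, model: nil)
    span = @tracer.on_request_start(operation:, model:)
    response = {"object" => "list", "data" => []} # stand-in for the real Net::HTTP call
    [response, span]
  end

  # Closes the span and returns the adapted response to the caller.
  def finish_trace(operation:, res:, span:, model: nil)
    @tracer.on_request_finish(span:, res:)
    res
  end

  # Mirrors the shape of the rewritten provider methods below.
  def all
    res, span = execute(operation: "request")
    res = res["data"]
    finish_trace(operation: "request", res:, span:)
  end
end

Provider.new.all #=> [] (and prints one "[trace] ..." line)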
data/lib/llm/providers/openai/files.rb CHANGED
@@ -13,10 +13,10 @@ class LLM::OpenAI
  # require "llm"
  #
  # llm = LLM.openai(key: ENV["KEY"])
- # bot = LLM::Bot.new(llm)
+ # ses = LLM::Session.new(llm)
  # file = llm.files.create file: "/books/goodread.pdf"
- # bot.chat ["Tell me about this PDF", file]
- # bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+ # ses.talk ["Tell me about this PDF", file]
+ # ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
  class Files
  ##
  # Returns a new Files object
@@ -41,8 +41,9 @@ class LLM::OpenAI
  def all(**params)
  query = URI.encode_www_form(params)
  req = Net::HTTP::Get.new("/v1/files?#{query}", headers)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :enumerable)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :enumerable)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -61,8 +62,9 @@ class LLM::OpenAI
  req = Net::HTTP::Post.new("/v1/files", headers)
  req["content-type"] = multi.content_type
  set_body_stream(req, multi.body)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :file)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :file)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -80,8 +82,9 @@ class LLM::OpenAI
  file_id = file.respond_to?(:id) ? file.id : file
  query = URI.encode_www_form(params)
  req = Net::HTTP::Get.new("/v1/files/#{file_id}?#{query}", headers)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :file)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :file)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -101,8 +104,9 @@ class LLM::OpenAI
  file_id = file.respond_to?(:id) ? file.id : file
  req = Net::HTTP::Get.new("/v1/files/#{file_id}/content?#{query}", headers)
  io = StringIO.new("".b)
- res = execute(request: req) { |res| res.read_body { |chunk| io << chunk } }
- LLM::Response.new(res).tap { _1.define_singleton_method(:file) { io } }
+ res, span = execute(request: req, operation: "request") { |res| res.read_body { |chunk| io << chunk } }
+ res = LLM::Response.new(res).tap { _1.define_singleton_method(:file) { io } }
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -118,13 +122,14 @@ class LLM::OpenAI
  def delete(file:)
  file_id = file.respond_to?(:id) ? file.id : file
  req = Net::HTTP::Delete.new("/v1/files/#{file_id}", headers)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end

  private

- [:headers, :execute, :set_body_stream].each do |m|
+ [:headers, :execute, :set_body_stream, :finish_trace].each do |m|
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
  end
  end
data/lib/llm/providers/openai/images.rb CHANGED
@@ -50,8 +50,9 @@ class LLM::OpenAI
  def create(prompt:, model: "dall-e-3", **params)
  req = Net::HTTP::Post.new("/v1/images/generations", headers)
  req.body = LLM.json.dump({prompt:, n: 1, model:}.merge!(params))
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :image)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :image)
+ finish_trace(operation: "request", model:, res:, span:)
  end

  ##
@@ -72,8 +73,9 @@ class LLM::OpenAI
  req = Net::HTTP::Post.new("/v1/images/variations", headers)
  req["content-type"] = multi.content_type
  set_body_stream(req, multi.body)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :image)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :image)
+ finish_trace(operation: "request", model:, res:, span:)
  end

  ##
@@ -95,13 +97,14 @@ class LLM::OpenAI
  req = Net::HTTP::Post.new("/v1/images/edits", headers)
  req["content-type"] = multi.content_type
  set_body_stream(req, multi.body)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :image)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :image)
+ finish_trace(operation: "request", model:, res:, span:)
  end

  private

- [:headers, :execute, :set_body_stream].each do |m|
+ [:headers, :execute, :set_body_stream, :finish_trace].each do |m|
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
  end
  end
data/lib/llm/providers/openai/models.rb CHANGED
@@ -40,13 +40,14 @@ class LLM::OpenAI
  def all(**params)
  query = URI.encode_www_form(params)
  req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :enumerable)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :enumerable)
+ finish_trace(operation: "request", res:, span:)
  end

  private

- [:headers, :execute, :set_body_stream].each do |m|
+ [:headers, :execute, :set_body_stream, :finish_trace].each do |m|
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
  end
  end
data/lib/llm/providers/openai/moderations.rb CHANGED
@@ -50,13 +50,14 @@ class LLM::OpenAI
  req = Net::HTTP::Post.new("/v1/moderations", headers)
  input = RequestAdapter::Moderation.new(input).adapt
  req.body = LLM.json.dump({input:, model:}.merge!(params))
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :moderations)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :moderations)
+ finish_trace(operation: "request", model:, res:, span:)
  end

  private

- [:headers, :execute].each do |m|
+ [:headers, :execute, :finish_trace].each do |m|
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
  end
  end
data/lib/llm/providers/openai/responses.rb CHANGED
@@ -44,9 +44,10 @@ class LLM::OpenAI
  messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
  body = LLM.json.dump({input: [adapt(messages, mode: :response)].flatten}.merge!(params))
  set_body_stream(req, StringIO.new(body))
- res = execute(request: req, stream:, stream_parser:)
- ResponseAdapter.adapt(res, type: :responds)
+ res, span = execute(request: req, stream:, stream_parser:, operation: "chat", model: params[:model])
+ res = ResponseAdapter.adapt(res, type: :responds)
  .extend(Module.new { define_method(:__tools__) { tools } })
+ finish_trace(operation: "chat", model: params[:model], res:, span:)
  end

  ##
@@ -59,8 +60,9 @@ class LLM::OpenAI
  response_id = response.respond_to?(:id) ? response.id : response
  query = URI.encode_www_form(params)
  req = Net::HTTP::Get.new("/v1/responses/#{response_id}?#{query}", headers)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :responds)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :responds)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -72,13 +74,14 @@ class LLM::OpenAI
  def delete(response)
  response_id = response.respond_to?(:id) ? response.id : response
  req = Net::HTTP::Delete.new("/v1/responses/#{response_id}", headers)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end

  private

- [:headers, :execute, :set_body_stream, :resolve_tools].each do |m|
+ [:headers, :execute, :set_body_stream, :resolve_tools, :finish_trace].each do |m|
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
  end
data/lib/llm/providers/openai/vector_stores.rb CHANGED
@@ -32,8 +32,9 @@ class LLM::OpenAI
  def all(**params)
  query = URI.encode_www_form(params)
  req = Net::HTTP::Get.new("/v1/vector_stores?#{query}", headers)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :enumerable)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :enumerable)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -47,8 +48,9 @@ class LLM::OpenAI
  def create(name:, file_ids: nil, **params)
  req = Net::HTTP::Post.new("/v1/vector_stores", headers)
  req.body = LLM.json.dump(params.merge({name:, file_ids:}).compact)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -69,8 +71,9 @@ class LLM::OpenAI
  def get(vector:)
  vector_id = vector.respond_to?(:id) ? vector.id : vector
  req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}", headers)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -85,8 +88,9 @@ class LLM::OpenAI
  vector_id = vector.respond_to?(:id) ? vector.id : vector
  req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}", headers)
  req.body = LLM.json.dump(params.merge({name:}).compact)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -98,8 +102,9 @@ class LLM::OpenAI
  def delete(vector:)
  vector_id = vector.respond_to?(:id) ? vector.id : vector
  req = Net::HTTP::Delete.new("/v1/vector_stores/#{vector_id}", headers)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -114,8 +119,9 @@ class LLM::OpenAI
  vector_id = vector.respond_to?(:id) ? vector.id : vector
  req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/search", headers)
  req.body = LLM.json.dump(params.merge({query:}).compact)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :enumerable)
+ res, span = execute(request: req, operation: "retrieval")
+ res = ResponseAdapter.adapt(res, type: :enumerable)
+ finish_trace(operation: "retrieval", res:, span:)
  end

  ##
@@ -129,8 +135,9 @@ class LLM::OpenAI
  vector_id = vector.respond_to?(:id) ? vector.id : vector
  query = URI.encode_www_form(params)
  req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}/files?#{query}", headers)
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :enumerable)
+ res, span = execute(request: req, operation: "request")
+ res = ResponseAdapter.adapt(res, type: :enumerable)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -147,8 +154,9 @@ class LLM::OpenAI
  file_id = file.respond_to?(:id) ? file.id : file
  req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/files", headers)
  req.body = LLM.json.dump(params.merge({file_id:, attributes:}).compact)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end
  alias_method :create_file, :add_file

@@ -176,8 +184,9 @@ class LLM::OpenAI
  file_id = file.respond_to?(:id) ? file.id : file
  req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/files/#{file_id}", headers)
  req.body = LLM.json.dump(params.merge({attributes:}).compact)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -192,8 +201,9 @@ class LLM::OpenAI
  file_id = file.respond_to?(:id) ? file.id : file
  query = URI.encode_www_form(params)
  req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}/files/#{file_id}?#{query}", headers)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -207,8 +217,9 @@ class LLM::OpenAI
  vector_id = vector.respond_to?(:id) ? vector.id : vector
  file_id = file.respond_to?(:id) ? file.id : file
  req = Net::HTTP::Delete.new("/v1/vector_stores/#{vector_id}/files/#{file_id}", headers)
- res = execute(request: req)
- LLM::Response.new(res)
+ res, span = execute(request: req, operation: "request")
+ res = LLM::Response.new(res)
+ finish_trace(operation: "request", res:, span:)
  end

  ##
@@ -237,7 +248,7 @@ class LLM::OpenAI

  private

- [:headers, :execute, :set_body_stream].each do |m|
+ [:headers, :execute, :set_body_stream, :finish_trace].each do |m|
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
  end
  end
data/lib/llm/providers/openai.rb CHANGED
@@ -10,9 +10,9 @@ module LLM
  # require "llm"
  #
  # llm = LLM.openai(key: ENV["KEY"])
- # bot = LLM::Bot.new(llm)
- # bot.chat ["Tell me about this photo", File.open("/images/capybara.jpg", "rb")]
- # bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+ # ses = LLM::Session.new(llm)
+ # ses.talk ["Tell me about this photo", ses.local_file("/images/photo.png")]
+ # ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
  class OpenAI < Provider
  require_relative "openai/error_handler"
  require_relative "openai/request_adapter"
@@ -47,8 +47,9 @@ module LLM
  def embed(input, model: "text-embedding-3-small", **params)
  req = Net::HTTP::Post.new("/v1/embeddings", headers)
  req.body = LLM.json.dump({input:, model:}.merge!(params))
- res = execute(request: req)
- ResponseAdapter.adapt(res, type: :embedding)
+ res, span = execute(request: req, operation: "embeddings", model:)
+ res = ResponseAdapter.adapt(res, type: :embedding)
+ finish_trace(operation: "embeddings", model:, res:, span:)
  end

  ##
@@ -64,9 +65,10 @@ module LLM
  def complete(prompt, params = {})
  params, stream, tools, role = normalize_complete_params(params)
  req = build_complete_request(prompt, params, role)
- res = execute(request: req, stream: stream)
- ResponseAdapter.adapt(res, type: :completion)
+ res, span = execute(request: req, stream: stream, operation: "chat", model: params[:model])
+ res = ResponseAdapter.adapt(res, type: :completion)
  .extend(Module.new { define_method(:__tools__) { tools } })
+ finish_trace(operation: "chat", model: params[:model], res:, span:)
  end

  ##
data/lib/llm/providers/xai.rb CHANGED
@@ -11,9 +11,9 @@ module LLM
  # require "llm"
  #
  # llm = LLM.xai(key: ENV["KEY"])
- # bot = LLM::Bot.new(llm)
- # bot.chat ["Tell me about this photo", File.open("/images/crow.jpg", "rb")]
- # bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+ # ses = LLM::Session.new(llm)
+ # ses.talk ["Tell me about this photo", ses.local_file("/images/photo.png")]
+ # ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
  class XAI < OpenAI
  require_relative "xai/images"
data/lib/llm/providers/zai.rb CHANGED
@@ -11,8 +11,8 @@ module LLM
  # require "llm"
  #
  # llm = LLM.zai(key: ENV["KEY"])
- # bot = LLM::Bot.new(llm, stream: $stdout)
- # bot.chat("Greetings Robot", role: :user).flush
+ # ses = LLM::Session.new(llm, stream: $stdout)
+ # ses.talk "Hello"
  class ZAI < OpenAI
  ##
  # @param [String] host A regional host or the default ("api.z.ai")
data/lib/llm/schema/object.rb CHANGED
@@ -36,7 +36,7 @@ class LLM::Schema
  ##
  # @return [Hash]
  def to_h
- super.merge!({type: "object", properties:, required:})
+ super.merge!({type: "object", properties:, required: required_items})
  end

  ##
@@ -64,7 +64,7 @@ class LLM::Schema

  private

- def required
+ def required_items
  @properties.filter_map { _2.required? ? _1 : nil }
  end
  end
data/lib/llm/schema.rb CHANGED
@@ -9,14 +9,26 @@
  # @see https://json-schema.org/ JSON Schema Specification
  # @see https://tour.json-schema.org/ JSON Schema Tour
  #
- # @example
+ # @example JavaScript-style
  # schema = LLM::Schema.new
  # schema.object({
  # name: schema.string.enum("John", "Jane").required,
  # age: schema.integer.required,
- # hobbies: schema.array(schema.string, schema.null).required,
+ # hobbies: schema.array(schema.string).required,
  # address: schema.object({street: schema.string}).required,
  # })
+ #
+ # @example Ruby-style
+ # class Address < LLM::Schema
+ # property :street, String, "Street address", required: true
+ # end
+ #
+ # class Person < LLM::Schema
+ # property :name, String, "Person's name", required: true
+ # property :age, Integer, "Person's age", required: true
+ # property :hobbies, Array[String], "Person's hobbies", required: true
+ # property :address, Address, "Person's address", required: true
+ # end
  class LLM::Schema
  require_relative "schema/version"
  require_relative "schema/leaf"
@@ -50,6 +62,8 @@ class LLM::Schema
  lock do
  if LLM::Schema::Leaf === type
  prop = type
+ elsif Class === type && type.respond_to?(:object)
+ prop = type.object
  else
  target = type.name.split("::").last.downcase
  prop = schema.public_send(target)
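The Ruby-style example added above declares schemas as classes with a class-level `property` DSL, and the second hunk lets the builder accept such a class by calling its class-level `object` method. A speculative sketch of how the pieces might fit together follows; `Person.object` and `#to_h` are inferred from these hunks and the schema/object.rb hunk, not from documented API.

# Speculative usage of the new class-based schema DSL.
# `.object` and `#to_h` are inferred from the hunks above, not documented API.
require "llm"

class Address < LLM::Schema
  property :street, String, "Street address", required: true
end

class Person < LLM::Schema
  property :name, String, "Person's name", required: true
  property :address, Address, "Person's address", required: true
end

schema = Person.object  # nested Address resolved via `prop = type.object`
p schema.to_h           # approximate shape: {type: "object", properties: {...}, required: [...]}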
data/lib/llm/server_tool.rb CHANGED
@@ -9,9 +9,9 @@
  # @example
  # #!/usr/bin/env ruby
  # llm = LLM.gemini ENV["KEY"]
- # bot = LLM::Bot.new(llm, tools: [LLM::ServerTool.new(:google_search)])
- # bot.chat("Summarize today's news", role: :user)
- # print bot.messages.find(&:assistant?).content, "\n"
+ # ses = LLM::Session.new(llm, tools: [LLM::ServerTool.new(:google_search)])
+ # ses.talk("Summarize today's news", role: :user)
+ # print ses.messages.find(&:assistant?).content, "\n"
  class LLM::ServerTool < Struct.new(:name, :options, :provider)
  ##
  # @return [String]
data/lib/llm/session.rb ADDED
@@ -0,0 +1,3 @@
+ # frozen_string_literal: true
+
+ require_relative "bot"
data/lib/llm/tracer/logger.rb ADDED
@@ -0,0 +1,192 @@
+ # frozen_string_literal: true
+
+ module LLM
+ ##
+ # The {LLM::Tracer::Logger LLM::Tracer::Logger} class provides a
+ # tracer that provides logging facilities through Ruby's
+ # standard library.
+ #
+ # @example
+ # llm = LLM.openai(key: ENV["KEY"])
+ # # Log to a file
+ # llm.tracer = LLM::Tracer::Logger.new(llm, path: "/tmp/log.txt")
+ # # Log to $stdout (default)
+ # llm.tracer = LLM::Tracer::Logger.new(llm, io: $stdout)
+ class Tracer::Logger < Tracer
+ ##
+ # @param (see LLM::Tracer#initialize)
+ def initialize(provider, options = {})
+ super
+ setup!(**options)
+ end
+
+ ##
+ # @param (see LLM::Tracer#on_request_start)
+ # @return [void]
+ def on_request_start(operation:, model: nil)
+ case operation
+ when "chat" then start_chat(operation:, model:)
+ when "retrieval" then start_retrieval(operation:)
+ else nil
+ end
+ end
+
+ ##
+ # @param (see LLM::Tracer#on_request_finish)
+ # @return [void]
+ def on_request_finish(operation:, res:, model: nil, **)
+ case operation
+ when "chat" then finish_chat(operation:, res:, model:)
+ when "retrieval" then finish_retrieval(operation:, res:)
+ else nil
+ end
+ end
+
+ ##
+ # @param (see LLM::Tracer#on_request_error)
+ # @return [void]
+ def on_request_error(ex:, **)
+ @logger.error(
+ tracer: "llm.rb (logger)",
+ event: "request.error",
+ provider: provider_name,
+ error_class: ex.class.to_s,
+ error_message: ex.message
+ )
+ end
+
+ ##
+ # @param (see LLM::Tracer#on_tool_start)
+ # @return [void]
+ def on_tool_start(id:, name:, arguments:, model:, **)
+ @logger.info(
+ tracer: "llm.rb (logger)",
+ event: "tool.start",
+ provider: provider_name,
+ operation: "execute_tool",
+ tool_id: id,
+ tool_name: name,
+ tool_arguments: arguments,
+ model:
+ )
+ end
+
+ ##
+ # @param (see LLM::Tracer#on_tool_finish)
+ # @return [void]
+ def on_tool_finish(result:, **)
+ @logger.info(
+ tracer: "llm.rb (logger)",
+ event: "tool.finish",
+ provider: provider_name,
+ operation: "execute_tool",
+ tool_id: result.id,
+ tool_name: result.name,
+ tool_result: result.value
+ )
+ end
+
+ ##
+ # @param (see LLM::Tracer#on_tool_error)
+ # @return [void]
+ def on_tool_error(ex:, **)
+ @logger.error(
+ tracer: "llm.rb (logger)",
+ event: "tool.error",
+ provider: provider_name,
+ operation: "execute_tool",
+ error_class: ex.class.to_s,
+ error_message: ex.message
+ )
+ end
+
+ private
+
+ ##
+ # @api private
+ def setup!(path: nil, io: $stdout)
+ require "logger" unless defined?(::Logger)
+ @logger = ::Logger.new(path || io)
+ end
+
+ ##
+ # @param [String] operation
+ # @param [LLM::Response] res
+ # @api private
+ def finish_attributes(operation, res)
+ case @provider.class.to_s
+ when "LLM::OpenAI" then openai_attributes(operation, res)
+ else {}
+ end
+ end
+
+ ##
+ # @param [String] operation
+ # @param [LLM::Response] res
+ # @api private
+ def openai_attributes(operation, res)
+ case operation
+ when "chat"
+ {
+ openai_service_tier: res.service_tier,
+ openai_system_fingerprint: res.system_fingerprint
+ }.compact
+ when "retrieval"
+ {
+ openai_vector_store_search_result_count: res.size,
+ openai_vector_store_search_has_more: res.has_more
+ }.compact
+ else {}
+ end
+ end
+
+ ##
+ # start_*
+
+ def start_chat(operation:, model:)
+ @logger.info(
+ tracer: "llm.rb (logger)",
+ event: "request.start",
+ provider: provider_name,
+ operation:,
+ model:
+ )
+ end
+
+ def start_retrieval(operation:)
+ @logger.info(
+ tracer: "llm.rb (logger)",
+ event: "request.start",
+ provider: provider_name,
+ operation:
+ )
+ end
+
+ ##
+ # finish_*
+
+ def finish_chat(operation:, model:, res:)
+ @logger.info(
+ tracer: "llm.rb (logger)",
+ event: "request.finish",
+ provider: provider_name,
+ operation:,
+ model:,
+ response_id: res.id,
+ input_tokens: res.usage.input_tokens,
+ output_tokens: res.usage.output_tokens,
+ **finish_attributes(operation, res)
+ )
+ end
+
+ def finish_retrieval(operation:, res:)
+ @logger.info(
+ tracer: "llm.rb (logger)",
+ event: "request.finish",
+ provider: provider_name,
+ operation:,
+ **finish_attributes(operation, res)
+ )
+ end
+ end
+ end
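Taken together, the Session API and the Logger tracer introduced in this release can be wired up in a few lines. The snippet below only uses calls that appear in the hunks and doc comments above (LLM.openai, llm.tracer=, LLM::Tracer::Logger.new, LLM::Session#talk); it is a usage sketch assembled from those examples rather than text taken from the release itself.

require "llm"

llm = LLM.openai(key: ENV["KEY"])
# Emit request/tool trace events to $stdout via Ruby's standard Logger
llm.tracer = LLM::Tracer::Logger.new(llm, io: $stdout)

ses = LLM::Session.new(llm)
ses.talk "Hello"
ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }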