llm.rb 4.7.0 → 4.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. checksums.yaml +4 -4
  2. data/README.md +335 -587
  3. data/data/anthropic.json +770 -0
  4. data/data/deepseek.json +75 -0
  5. data/data/google.json +1050 -0
  6. data/data/openai.json +1421 -0
  7. data/data/xai.json +792 -0
  8. data/data/zai.json +330 -0
  9. data/lib/llm/agent.rb +42 -41
  10. data/lib/llm/bot.rb +1 -263
  11. data/lib/llm/buffer.rb +7 -0
  12. data/lib/llm/{session → context}/deserializer.rb +4 -3
  13. data/lib/llm/context.rb +292 -0
  14. data/lib/llm/cost.rb +26 -0
  15. data/lib/llm/error.rb +8 -0
  16. data/lib/llm/eventstream/parser.rb +0 -5
  17. data/lib/llm/function/array.rb +61 -0
  18. data/lib/llm/function/fiber_group.rb +91 -0
  19. data/lib/llm/function/task_group.rb +89 -0
  20. data/lib/llm/function/thread_group.rb +94 -0
  21. data/lib/llm/function.rb +75 -10
  22. data/lib/llm/mcp/command.rb +108 -0
  23. data/lib/llm/mcp/error.rb +31 -0
  24. data/lib/llm/mcp/pipe.rb +82 -0
  25. data/lib/llm/mcp/rpc.rb +118 -0
  26. data/lib/llm/mcp/transport/stdio.rb +85 -0
  27. data/lib/llm/mcp.rb +102 -0
  28. data/lib/llm/message.rb +13 -11
  29. data/lib/llm/model.rb +115 -0
  30. data/lib/llm/prompt.rb +17 -7
  31. data/lib/llm/provider.rb +60 -32
  32. data/lib/llm/providers/anthropic/error_handler.rb +1 -1
  33. data/lib/llm/providers/anthropic/files.rb +3 -3
  34. data/lib/llm/providers/anthropic/models.rb +1 -1
  35. data/lib/llm/providers/anthropic/request_adapter.rb +20 -3
  36. data/lib/llm/providers/anthropic/response_adapter/models.rb +13 -0
  37. data/lib/llm/providers/anthropic/response_adapter.rb +2 -0
  38. data/lib/llm/providers/anthropic.rb +21 -5
  39. data/lib/llm/providers/deepseek.rb +10 -3
  40. data/lib/llm/providers/{gemini → google}/audio.rb +6 -6
  41. data/lib/llm/providers/{gemini → google}/error_handler.rb +20 -5
  42. data/lib/llm/providers/{gemini → google}/files.rb +11 -11
  43. data/lib/llm/providers/{gemini → google}/images.rb +7 -7
  44. data/lib/llm/providers/{gemini → google}/models.rb +5 -5
  45. data/lib/llm/providers/{gemini → google}/request_adapter/completion.rb +7 -3
  46. data/lib/llm/providers/{gemini → google}/request_adapter.rb +1 -1
  47. data/lib/llm/providers/{gemini → google}/response_adapter/completion.rb +7 -7
  48. data/lib/llm/providers/{gemini → google}/response_adapter/embedding.rb +1 -1
  49. data/lib/llm/providers/{gemini → google}/response_adapter/file.rb +1 -1
  50. data/lib/llm/providers/{gemini → google}/response_adapter/files.rb +1 -1
  51. data/lib/llm/providers/{gemini → google}/response_adapter/image.rb +1 -1
  52. data/lib/llm/providers/google/response_adapter/models.rb +13 -0
  53. data/lib/llm/providers/{gemini → google}/response_adapter/web_search.rb +2 -2
  54. data/lib/llm/providers/{gemini → google}/response_adapter.rb +8 -8
  55. data/lib/llm/providers/{gemini → google}/stream_parser.rb +3 -3
  56. data/lib/llm/providers/{gemini.rb → google.rb} +41 -26
  57. data/lib/llm/providers/llamacpp.rb +10 -3
  58. data/lib/llm/providers/ollama/error_handler.rb +1 -1
  59. data/lib/llm/providers/ollama/models.rb +1 -1
  60. data/lib/llm/providers/ollama/response_adapter/models.rb +13 -0
  61. data/lib/llm/providers/ollama/response_adapter.rb +2 -0
  62. data/lib/llm/providers/ollama.rb +19 -4
  63. data/lib/llm/providers/openai/error_handler.rb +18 -3
  64. data/lib/llm/providers/openai/files.rb +3 -3
  65. data/lib/llm/providers/openai/images.rb +17 -11
  66. data/lib/llm/providers/openai/models.rb +1 -1
  67. data/lib/llm/providers/openai/response_adapter/completion.rb +9 -1
  68. data/lib/llm/providers/openai/response_adapter/models.rb +13 -0
  69. data/lib/llm/providers/openai/response_adapter/responds.rb +9 -1
  70. data/lib/llm/providers/openai/response_adapter.rb +2 -0
  71. data/lib/llm/providers/openai/responses.rb +16 -1
  72. data/lib/llm/providers/openai/stream_parser.rb +2 -0
  73. data/lib/llm/providers/openai.rb +28 -6
  74. data/lib/llm/providers/xai/images.rb +7 -6
  75. data/lib/llm/providers/xai.rb +10 -3
  76. data/lib/llm/providers/zai.rb +9 -2
  77. data/lib/llm/registry.rb +81 -0
  78. data/lib/llm/schema/enum.rb +16 -0
  79. data/lib/llm/schema/parser.rb +109 -0
  80. data/lib/llm/schema.rb +5 -0
  81. data/lib/llm/server_tool.rb +5 -5
  82. data/lib/llm/session.rb +10 -1
  83. data/lib/llm/tool/param.rb +1 -1
  84. data/lib/llm/tool.rb +86 -5
  85. data/lib/llm/tracer/langsmith.rb +144 -0
  86. data/lib/llm/tracer/logger.rb +9 -1
  87. data/lib/llm/tracer/null.rb +8 -0
  88. data/lib/llm/tracer/telemetry.rb +98 -78
  89. data/lib/llm/tracer.rb +108 -4
  90. data/lib/llm/usage.rb +5 -0
  91. data/lib/llm/version.rb +1 -1
  92. data/lib/llm.rb +40 -6
  93. data/llm.gemspec +45 -8
  94. metadata +87 -28
  95. data/lib/llm/providers/gemini/response_adapter/models.rb +0 -15
data/lib/llm/provider.rb CHANGED
@@ -37,7 +37,6 @@ class LLM::Provider
37
37
  @timeout = timeout
38
38
  @ssl = ssl
39
39
  @client = persistent ? persistent_client : nil
40
- @tracer = LLM::Tracer::Null.new(self)
41
40
  @base_uri = URI("#{ssl ? "https" : "http"}://#{host}:#{port}/")
42
41
  @headers = {"User-Agent" => "llm.rb v#{LLM::VERSION}"}
43
42
  @monitor = Monitor.new
@@ -48,7 +47,16 @@ class LLM::Provider
48
47
  # @return [String]
49
48
  # @note The secret key is redacted in inspect for security reasons
50
49
  def inspect
51
- "#<#{self.class.name}:0x#{object_id.to_s(16)} @key=[REDACTED] @client=#{@client.inspect} @tracer=#{@tracer.inspect}>"
50
+ "#<#{self.class.name}:0x#{object_id.to_s(16)} @key=[REDACTED] @client=#{@client.inspect} @tracer=#{tracer.inspect}>"
51
+ end
52
+
53
+ ##
54
+ # @raise [NotImplementedError]
55
+ # When the method is not implemented by a subclass
56
+ # @return [Symbol]
57
+ # Returns the provider's name
58
+ def name
59
+ raise NotImplementedError
52
60
  end
53
61
 
54
62
  ##
@@ -94,10 +102,10 @@ class LLM::Provider
94
102
  # Starts a new chat powered by the chat completions API
95
103
  # @param prompt (see LLM::Provider#complete)
96
104
  # @param params (see LLM::Provider#complete)
97
- # @return [LLM::Session]
105
+ # @return [LLM::Context]
98
106
  def chat(prompt, params = {})
99
107
  role = params.delete(:role)
100
- LLM::Session.new(self, params).talk(prompt, role:)
108
+ LLM::Context.new(self, params).talk(prompt, role:)
101
109
  end
102
110
 
103
111
  ##
@@ -105,10 +113,10 @@ class LLM::Provider
105
113
  # @param prompt (see LLM::Provider#complete)
106
114
  # @param params (see LLM::Provider#complete)
107
115
  # @raise (see LLM::Provider#complete)
108
- # @return [LLM::Session]
116
+ # @return [LLM::Context]
109
117
  def respond(prompt, params = {})
110
118
  role = params.delete(:role)
111
- LLM::Session.new(self, params).respond(prompt, role:)
119
+ LLM::Context.new(self, params).respond(prompt, role:)
112
120
  end
113
121
 
114
122
  ##
@@ -123,7 +131,7 @@ class LLM::Provider
123
131
  end
124
132
 
125
133
  ##
126
- # @return [LLM::OpenAI::Images, LLM::Gemini::Images]
134
+ # @return [LLM::OpenAI::Images, LLM::Google::Images]
127
135
  # Returns an interface to the images API
128
136
  def images
129
137
  raise NotImplementedError
@@ -265,27 +273,34 @@ class LLM::Provider
265
273
 
266
274
  ##
267
275
  # @return [LLM::Tracer]
268
- # Returns an LLM tracer
276
+ # Returns a fiber-local tracer
269
277
  def tracer
270
- @tracer
278
+ weakmap[self] || LLM::Tracer::Null.new(self)
271
279
  end
272
280
 
273
281
  ##
274
- # Set the tracer
282
+ # Set a fiber-local tracer
275
283
  # @example
276
284
  # llm = LLM.openai(key: ENV["KEY"])
277
- # llm.tracer = LLM::Tracer::Logger.new(llm, path: "/path/to/log.txt")
285
+ # Thread.new do
286
+ # llm.tracer = LLM::Tracer::Logger.new(llm, path: "/path/to/log/1.txt")
287
+ # end
288
+ # Thread.new do
289
+ # llm.tracer = LLM::Tracer::Logger.new(llm, path: "/path/to/log/2.txt")
290
+ # end
278
291
  # # ...
279
292
  # @param [LLM::Tracer] tracer
280
293
  # A tracer
281
294
  # @return [void]
282
295
  def tracer=(tracer)
283
- lock do
284
- @tracer = if tracer.nil?
285
- LLM::Tracer::Null.new(self)
296
+ if tracer.nil?
297
+ if weakmap.respond_to?(:delete)
298
+ weakmap.delete(self)
286
299
  else
287
- tracer
300
+ weakmap[self] = nil
288
301
  end
302
+ else
303
+ weakmap[self] = tracer
289
304
  end
290
305
  end
291
306
 
@@ -354,22 +369,29 @@ class LLM::Provider
354
369
  # @raise [SystemCallError]
355
370
  # When there is a network error at the operating system level
356
371
  # @return [Net::HTTPResponse]
357
- def execute(request:, operation:, stream: nil, stream_parser: self.stream_parser, model: nil, &b)
358
- tracer = @tracer
359
- span = tracer.on_request_start(operation:, model:)
372
+ def execute(request:, operation:, stream: nil, stream_parser: self.stream_parser, model: nil, inputs: nil, &b)
373
+ tracer = self.tracer
374
+ span = tracer.on_request_start(operation:, model:, inputs:)
360
375
  http = client || transient_client
361
376
  args = (Net::HTTP === http) ? [request] : [URI.join(base_uri, request.path), request]
362
377
  res = if stream
363
378
  http.request(*args) do |res|
364
- handler = event_handler.new stream_parser.new(stream)
365
- parser = LLM::EventStream::Parser.new
366
- parser.register(handler)
367
- res.read_body(parser)
368
- # If the handler body is empty, it means the
369
- # response was most likely not streamed or
370
- # parsing has failed. In that case, we fallback
371
- # on the original response body.
372
- res.body = LLM::Object.from(handler.body.empty? ? parser.body : handler.body)
379
+ if Net::HTTPSuccess === res
380
+ handler = event_handler.new stream_parser.new(stream)
381
+ parser = LLM::EventStream::Parser.new
382
+ parser.register(handler)
383
+ res.read_body(parser)
384
+ # If the handler body is empty, the response was
385
+ # most likely not streamed or parsing failed.
386
+ # Preserve the raw body in that case so standard
387
+ # JSON/error handling can parse it later.
388
+ body = handler.body.empty? ? parser.body : handler.body
389
+ res.body = Hash === body || Array === body ? LLM::Object.from(body) : body
390
+ else
391
+ body = +""
392
+ res.read_body { body << _1 }
393
+ res.body = body
394
+ end
373
395
  ensure
374
396
  parser&.free
375
397
  end
@@ -437,14 +459,20 @@ class LLM::Provider
437
459
  end
438
460
 
439
461
  ##
440
- # @return [Hash<Symbol, LLM::Tracer>]
441
- def tracers
442
- self.class.tracers
462
+ # @api private
463
+ def lock(&)
464
+ @monitor.synchronize(&)
443
465
  end
444
466
 
445
467
  ##
446
468
  # @api private
447
- def lock(&)
448
- @monitor.synchronize(&)
469
+ def thread
470
+ Thread.current
471
+ end
472
+
473
+ ##
474
+ # @api private
475
+ def weakmap
476
+ thread[:"llm.provider.weakmap"] ||= ObjectSpace::WeakMap.new
449
477
  end
450
478
  end
@@ -35,7 +35,7 @@ class LLM::Anthropic
35
35
  ex = error
36
36
  @tracer.on_request_error(ex:, span:)
37
37
  ensure
38
- raise(ex)
38
+ raise(ex) if ex
39
39
  end
40
40
 
41
41
  private
@@ -10,10 +10,10 @@ class LLM::Anthropic
10
10
  # require "llm"
11
11
  #
12
12
  # llm = LLM.anthropic(key: ENV["KEY"])
13
- # ses = LLM::Session.new(llm)
13
+ # ctx = LLM::Context.new(llm)
14
14
  # file = llm.files.create file: "/books/goodread.pdf"
15
- # ses.talk ["Tell me about this PDF", file]
16
- # ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
15
+ # ctx.talk ["Tell me about this PDF", file]
16
+ # ctx.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
17
17
  class Files
18
18
  ##
19
19
  # Returns a new Files object
@@ -41,7 +41,7 @@ class LLM::Anthropic
41
41
  query = URI.encode_www_form(params)
42
42
  req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
43
43
  res, span, tracer = execute(request: req, operation: "request")
44
- res = ResponseAdapter.adapt(res, type: :enumerable)
44
+ res = ResponseAdapter.adapt(res, type: :models)
45
45
  tracer.on_request_finish(operation: "request", res:, span:)
46
46
  res
47
47
  end
@@ -9,11 +9,20 @@ class LLM::Anthropic
9
9
  ##
10
10
  # @param [Array<LLM::Message>] messages
11
11
  # The messages to adapt
12
- # @return [Array<Hash>]
12
+ # @return [Hash]
13
13
  def adapt(messages, mode: nil)
14
- messages.filter_map do
15
- Completion.new(_1).adapt
14
+ payload = {messages: [], system: []}
15
+ messages.each do |message|
16
+ adapted = Completion.new(message).adapt
17
+ next if adapted.nil?
18
+ if system?(message)
19
+ payload[:system].concat Array(adapted[:content])
20
+ else
21
+ payload[:messages] << adapted
22
+ end
16
23
  end
24
+ payload.delete(:system) if payload[:system].empty?
25
+ payload
17
26
  end
18
27
 
19
28
  private
@@ -25,5 +34,13 @@ class LLM::Anthropic
25
34
  return {} unless tools&.any?
26
35
  {tools: tools.map { _1.respond_to?(:adapt) ? _1.adapt(self) : _1 }}
27
36
  end
37
+
38
+ def system?(message)
39
+ if message.respond_to?(:system?)
40
+ message.system?
41
+ else
42
+ Hash === message and message[:role].to_s == "system"
43
+ end
44
+ end
28
45
  end
29
46
  end
@@ -0,0 +1,13 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM::Anthropic::ResponseAdapter
4
+ module Models
5
+ include LLM::Model::Collection
6
+
7
+ private
8
+
9
+ def raw_models
10
+ data || []
11
+ end
12
+ end
13
+ end
@@ -7,6 +7,7 @@ class LLM::Anthropic
7
7
  require_relative "response_adapter/completion"
8
8
  require_relative "response_adapter/enumerable"
9
9
  require_relative "response_adapter/file"
10
+ require_relative "response_adapter/models"
10
11
  require_relative "response_adapter/web_search"
11
12
 
12
13
  module_function
@@ -27,6 +28,7 @@ class LLM::Anthropic
27
28
  when :completion then LLM::Anthropic::ResponseAdapter::Completion
28
29
  when :enumerable then LLM::Anthropic::ResponseAdapter::Enumerable
29
30
  when :file then LLM::Anthropic::ResponseAdapter::File
31
+ when :models then LLM::Anthropic::ResponseAdapter::Models
30
32
  when :web_search then LLM::Anthropic::ResponseAdapter::WebSearch
31
33
  else
32
34
  raise ArgumentError, "Unknown response adapter type: #{type.inspect}"
@@ -10,9 +10,9 @@ module LLM
10
10
  # require "llm"
11
11
  #
12
12
  # llm = LLM.anthropic(key: ENV["KEY"])
13
- # ses = LLM::Session.new(llm)
14
- # ses.talk ["Tell me about this photo", ses.local_file("/images/photo.png")]
15
- # ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
13
+ # ctx = LLM::Context.new(llm)
14
+ # ctx.talk ["Tell me about this photo", ctx.local_file("/images/photo.png")]
15
+ # ctx.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
16
16
  class Anthropic < Provider
17
17
  require_relative "anthropic/error_handler"
18
18
  require_relative "anthropic/request_adapter"
@@ -30,6 +30,13 @@ module LLM
30
30
  super(host: HOST, **)
31
31
  end
32
32
 
33
+ ##
34
+ # @return [Symbol]
35
+ # Returns the provider's name
36
+ def name
37
+ :anthropic
38
+ end
39
+
33
40
  ##
34
41
  # Provides an interface to the chat completions API
35
42
  # @see https://docs.anthropic.com/en/api/messages Anthropic docs
@@ -139,11 +146,20 @@ module LLM
139
146
  end
140
147
 
141
148
  def build_complete_request(prompt, params, role)
142
- messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
143
- body = LLM.json.dump({messages: [adapt(messages)].flatten}.merge!(params))
149
+ messages = build_complete_messages(prompt, params, role)
150
+ payload = adapt(messages)
151
+ body = LLM.json.dump(payload.merge!(params))
144
152
  req = Net::HTTP::Post.new("/v1/messages", headers)
145
153
  set_body_stream(req, StringIO.new(body))
146
154
  req
147
155
  end
156
+
157
+ def build_complete_messages(prompt, params, role)
158
+ if LLM::Prompt === prompt
159
+ [*(params.delete(:messages) || []), *prompt.to_a]
160
+ else
161
+ [*(params.delete(:messages) || []), Message.new(role, prompt)]
162
+ end
163
+ end
148
164
  end
149
165
  end
@@ -14,9 +14,9 @@ module LLM
14
14
  # require "llm"
15
15
  #
16
16
  # llm = LLM.deepseek(key: ENV["KEY"])
17
- # ses = LLM::Session.new(llm)
18
- # ses.talk ["Tell me about this photo", ses.local_file("/images/photo.png")]
19
- # ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
17
+ # ctx = LLM::Context.new(llm)
18
+ # ctx.talk ["Tell me about this photo", ctx.local_file("/images/photo.png")]
19
+ # ctx.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
20
20
  class DeepSeek < OpenAI
21
21
  require_relative "deepseek/request_adapter"
22
22
  include DeepSeek::RequestAdapter
@@ -28,6 +28,13 @@ module LLM
28
28
  super
29
29
  end
30
30
 
31
+ ##
32
+ # @return [Symbol]
33
+ # Returns the provider's name
34
+ def name
35
+ :deepseek
36
+ end
37
+
31
38
  ##
32
39
  # @raise [NotImplementedError]
33
40
  def files
@@ -1,21 +1,21 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- class LLM::Gemini
3
+ class LLM::Google
4
4
  ##
5
- # The {LLM::Gemini::Audio LLM::Gemini::Audio} class provides an audio
5
+ # The {LLM::Google::Audio LLM::Google::Audio} class provides an audio
6
6
  # object for interacting with [Gemini's audio API](https://ai.google.dev/gemini-api/docs/audio).
7
7
  # @example
8
8
  # #!/usr/bin/env ruby
9
9
  # require "llm"
10
10
  #
11
- # llm = LLM.gemini(key: ENV["KEY"])
11
+ # llm = LLM.google(key: ENV["KEY"])
12
12
  # res = llm.audio.create_transcription(input: "/audio/rocket.mp3")
13
13
  # res.text # => "A dog on a rocket to the moon"
14
14
  class Audio
15
15
  ##
16
16
  # Returns a new Audio object
17
17
  # @param provider [LLM::Provider]
18
- # @return [LLM::Gemini::Responses]
18
+ # @return [LLM::Google::Audio]
19
19
  def initialize(provider)
20
20
  @provider = provider
21
21
  end
@@ -30,7 +30,7 @@ class LLM::Gemini
30
30
  ##
31
31
  # Create an audio transcription
32
32
  # @example
33
- # llm = LLM.gemini(key: ENV["KEY"])
33
+ # llm = LLM.google(key: ENV["KEY"])
34
34
  # res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
35
35
  # res.text # => "A dog on a rocket to the moon"
36
36
  # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
@@ -52,7 +52,7 @@ class LLM::Gemini
52
52
  # Create an audio translation (in English)
53
53
  # @example
54
54
  # # Arabic => English
55
- # llm = LLM.gemini(key: ENV["KEY"])
55
+ # llm = LLM.google(key: ENV["KEY"])
56
56
  # res = llm.audio.create_translation(file: "/audio/bismillah.mp3")
57
57
  # res.text # => "In the name of Allah, the Beneficent, the Merciful."
58
58
  # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
@@ -1,6 +1,6 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- class LLM::Gemini
3
+ class LLM::Google
4
4
  ##
5
5
  # @private
6
6
  class ErrorHandler
@@ -21,7 +21,7 @@ class LLM::Gemini
21
21
  # The span
22
22
  # @param [Net::HTTPResponse] res
23
23
  # The response from the server
24
- # @return [LLM::Gemini::ErrorHandler]
24
+ # @return [LLM::Google::ErrorHandler]
25
25
  def initialize(tracer, span, res)
26
26
  @tracer = tracer
27
27
  @span = span
@@ -35,15 +35,15 @@ class LLM::Gemini
35
35
  ex = error
36
36
  @tracer.on_request_error(ex:, span:)
37
37
  ensure
38
- raise(ex)
38
+ raise(ex) if ex
39
39
  end
40
40
 
41
41
  private
42
42
 
43
43
  ##
44
- # @return [LLM::Object]
44
+ # @return [String, LLM::Object]
45
45
  def body
46
- @body ||= LLM.json.load(res.body)
46
+ @body ||= parse_body!
47
47
  end
48
48
 
49
49
  ##
@@ -65,5 +65,20 @@ class LLM::Gemini
65
65
  LLM::Error.new("Unexpected response").tap { _1.response = res }
66
66
  end
67
67
  end
68
+
69
+ ##
70
+ # Tries to parse the response body as a LLM::Object
71
+ # @return [String, LLM::Object]
72
+ def parse_body!
73
+ if String === res.body
74
+ LLM::Object.from LLM.json.load(res.body)
75
+ elsif Hash === res.body
76
+ LLM::Object.from(res.body)
77
+ else
78
+ res.body
79
+ end
80
+ rescue
81
+ res.body
82
+ end
68
83
  end
69
84
  end
@@ -1,8 +1,8 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- class LLM::Gemini
3
+ class LLM::Google
4
4
  ##
5
- # The {LLM::Gemini::Files LLM::Gemini::Files} class provides a files
5
+ # The {LLM::Google::Files LLM::Google::Files} class provides a files
6
6
  # object for interacting with [Gemini's Files API](https://ai.google.dev/gemini-api/docs/files).
7
7
  # The files API allows a client to reference media files in prompts
8
8
  # where they can be referenced by their URL.
@@ -17,16 +17,16 @@ class LLM::Gemini
17
17
  # #!/usr/bin/env ruby
18
18
  # require "llm"
19
19
  #
20
- # llm = LLM.gemini(key: ENV["KEY"])
21
- # ses = LLM::Session.new(llm)
20
+ # llm = LLM.google(key: ENV["KEY"])
21
+ # ctx = LLM::Context.new(llm)
22
22
  # file = llm.files.create(file: "/audio/haiku.mp3")
23
- # ses.talk ["Tell me about this file", file]
24
- # ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
23
+ # ctx.talk ["Tell me about this file", file]
24
+ # ctx.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
25
25
  class Files
26
26
  ##
27
27
  # Returns a new Files object
28
28
  # @param provider [LLM::Provider]
29
- # @return [LLM::Gemini::Files]
29
+ # @return [LLM::Google::Files]
30
30
  def initialize(provider)
31
31
  @provider = provider
32
32
  end
@@ -34,7 +34,7 @@ class LLM::Gemini
34
34
  ##
35
35
  # List all files
36
36
  # @example
37
- # llm = LLM.gemini(key: ENV["KEY"])
37
+ # llm = LLM.google(key: ENV["KEY"])
38
38
  # res = llm.files.all
39
39
  # res.each do |file|
40
40
  # print "name: ", file.name, "\n"
@@ -55,7 +55,7 @@ class LLM::Gemini
55
55
  ##
56
56
  # Create a file
57
57
  # @example
58
- # llm = LLM.gemini(key: ENV["KEY"])
58
+ # llm = LLM.google(key: ENV["KEY"])
59
59
  # res = llm.files.create(file: "/audio/haiku.mp3")
60
60
  # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
61
61
  # @param [String, LLM::File] file The file
@@ -80,7 +80,7 @@ class LLM::Gemini
80
80
  ##
81
81
  # Get a file
82
82
  # @example
83
- # llm = LLM.gemini(key: ENV["KEY"])
83
+ # llm = LLM.google(key: ENV["KEY"])
84
84
  # res = llm.files.get(file: "files/1234567890")
85
85
  # print "name: ", res.name, "\n"
86
86
  # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
@@ -101,7 +101,7 @@ class LLM::Gemini
101
101
  ##
102
102
  # Delete a file
103
103
  # @example
104
- # llm = LLM.gemini(key: ENV["KEY"])
104
+ # llm = LLM.google(key: ENV["KEY"])
105
105
  # res = llm.files.delete(file: "files/1234567890")
106
106
  # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
107
107
  # @param [#name, String] file The file to delete
@@ -1,15 +1,15 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- class LLM::Gemini
3
+ class LLM::Google
4
4
  ##
5
- # The {LLM::Gemini::Images LLM::Gemini::Images} class provides an images
5
+ # The {LLM::Google::Images LLM::Google::Images} class provides an images
6
6
  # object for interacting with Google's Imagen text-to-image models via the
7
7
  # Imagen API: https://ai.google.dev/gemini-api/docs/imagen
8
8
  #
9
9
  # @example
10
10
  # #!/usr/bin/env ruby
11
11
  # require "llm"
12
- # llm = LLM.gemini(key: ENV["KEY"])
12
+ # llm = LLM.google(key: ENV["KEY"])
13
13
  # res = llm.images.create prompt: "A dog on a rocket to the moon"
14
14
  # IO.copy_stream res.images[0], "rocket.png"
15
15
  class Images
@@ -18,7 +18,7 @@ class LLM::Gemini
18
18
  ##
19
19
  # Returns a new Images object
20
20
  # @param provider [LLM::Provider]
21
- # @return [LLM::Gemini::Responses]
21
+ # @return [LLM::Google::Images]
22
22
  def initialize(provider)
23
23
  @provider = provider
24
24
  end
@@ -26,7 +26,7 @@ class LLM::Gemini
26
26
  ##
27
27
  # Create an image
28
28
  # @example
29
- # llm = LLM.gemini(key: ENV["KEY"])
29
+ # llm = LLM.google(key: ENV["KEY"])
30
30
  # res = llm.images.create prompt: "A dog on a rocket to the moon"
31
31
  # IO.copy_stream res.images[0], "rocket.png"
32
32
  # @see https://ai.google.dev/gemini-api/docs/imagen Imagen docs
@@ -60,7 +60,7 @@ class LLM::Gemini
60
60
  ##
61
61
  # Edit an image
62
62
  # @example
63
- # llm = LLM.gemini(key: ENV["KEY"])
63
+ # llm = LLM.google(key: ENV["KEY"])
64
64
  # res = llm.images.edit image: "cat.png", prompt: "Add a hat to the cat"
65
65
  # IO.copy_stream res.images[0], "hatoncat.png"
66
66
  # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
@@ -68,7 +68,7 @@ class LLM::Gemini
68
68
  # @param [String] prompt The prompt
69
69
  # @param [Hash] params Other parameters (see Gemini docs)
70
70
  # @raise (see LLM::Provider#request)
71
- # @note (see LLM::Gemini::Images#create)
71
+ # @note (see LLM::Google::Images#create)
72
72
  # @return [LLM::Response]
73
73
  def edit(image:, prompt:, model: "gemini-2.5-flash-image", **params)
74
74
  raise NotImplementedError, "image editing is not yet supported by Gemini"
@@ -1,8 +1,8 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- class LLM::Gemini
3
+ class LLM::Google
4
4
  ##
5
- # The {LLM::Gemini::Models LLM::Gemini::Models} class provides a model
5
+ # The {LLM::Google::Models LLM::Google::Models} class provides a model
6
6
  # object for interacting with [Gemini's models API](https://ai.google.dev/api/models?hl=en#method:-models.list).
7
7
  # The models API allows a client to query Gemini for a list of models
8
8
  # that are available for use with the Gemini API.
@@ -11,7 +11,7 @@ class LLM::Gemini
11
11
  # #!/usr/bin/env ruby
12
12
  # require "llm"
13
13
  #
14
- # llm = LLM.gemini(key: ENV["KEY"])
14
+ # llm = LLM.google(key: ENV["KEY"])
15
15
  # res = llm.models.all
16
16
  # res.each do |model|
17
17
  # print "id: ", model.id, "\n"
@@ -22,7 +22,7 @@ class LLM::Gemini
22
22
  ##
23
23
  # Returns a new Models object
24
24
  # @param provider [LLM::Provider]
25
- # @return [LLM::Gemini::Models]
25
+ # @return [LLM::Google::Models]
26
26
  def initialize(provider)
27
27
  @provider = provider
28
28
  end
@@ -30,7 +30,7 @@ class LLM::Gemini
30
30
  ##
31
31
  # List all models
32
32
  # @example
33
- # llm = LLM.gemini(key: ENV["KEY"])
33
+ # llm = LLM.google(key: ENV["KEY"])
34
34
  # res = llm.models.all
35
35
  # res.each do |model|
36
36
  # print "id: ", model.id, "\n"
@@ -1,6 +1,6 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- module LLM::Gemini::RequestAdapter
3
+ module LLM::Google::RequestAdapter
4
4
  ##
5
5
  # @private
6
6
  class Completion
@@ -19,7 +19,7 @@ module LLM::Gemini::RequestAdapter
19
19
  if Hash === message
20
20
  {role: message[:role], parts: adapt_content(message[:content])}
21
21
  elsif message.tool_call?
22
- {role: message.role, parts: message.extra[:original_tool_calls].map { {"functionCall" => _1} }}
22
+ {role: message.role, parts: message.extra.original_tool_calls}
23
23
  else
24
24
  {role: message.role, parts: adapt_content(message.content)}
25
25
  end
@@ -37,7 +37,7 @@ module LLM::Gemini::RequestAdapter
37
37
  when LLM::Message
38
38
  adapt_content(content.content)
39
39
  when LLM::Function::Return
40
- [{functionResponse: {name: content.name, response: content.value}}]
40
+ [{functionResponse: {name: content.name, response: adapt_function_response(content.value)}}]
41
41
  when LLM::Object
42
42
  adapt_object(content)
43
43
  else
@@ -64,6 +64,10 @@ module LLM::Gemini::RequestAdapter
64
64
  [{file_data: {mime_type: file.mime_type, file_uri: file.uri}}]
65
65
  end
66
66
 
67
+ def adapt_function_response(value)
68
+ Hash === value ? value : {result: value}
69
+ end
70
+
67
71
  def prompt_error!(object)
68
72
  if LLM::Object === object
69
73
  raise LLM::PromptError, "The given LLM::Object with kind '#{content.kind}' is not " \
@@ -1,6 +1,6 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- class LLM::Gemini
3
+ class LLM::Google
4
4
  ##
5
5
  # @private
6
6
  module RequestAdapter