llm.rb 0.10.1 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84) hide show
  1. checksums.yaml +4 -4
  2. data/README.md +120 -119
  3. data/lib/llm/bot/builder.rb +2 -2
  4. data/lib/llm/bot.rb +13 -22
  5. data/lib/llm/buffer.rb +7 -0
  6. data/lib/llm/file.rb +22 -12
  7. data/lib/llm/function.rb +8 -7
  8. data/lib/llm/message.rb +8 -0
  9. data/lib/llm/multipart.rb +0 -1
  10. data/lib/llm/object/kernel.rb +8 -0
  11. data/lib/llm/object.rb +9 -3
  12. data/lib/llm/provider.rb +10 -12
  13. data/lib/llm/providers/anthropic/format/completion_format.rb +10 -5
  14. data/lib/llm/providers/anthropic/models.rb +4 -9
  15. data/lib/llm/providers/anthropic/response/completion.rb +39 -0
  16. data/lib/llm/providers/anthropic.rb +13 -25
  17. data/lib/llm/providers/deepseek/format/completion_format.rb +3 -3
  18. data/lib/llm/providers/deepseek.rb +16 -1
  19. data/lib/llm/providers/gemini/audio.rb +9 -13
  20. data/lib/llm/providers/gemini/files.rb +19 -34
  21. data/lib/llm/providers/gemini/format/completion_format.rb +20 -5
  22. data/lib/llm/providers/gemini/images.rb +12 -11
  23. data/lib/llm/providers/gemini/models.rb +4 -10
  24. data/lib/llm/providers/gemini/{response_parser/completion_parser.rb → response/completion.rb} +10 -24
  25. data/lib/llm/providers/gemini/response/embedding.rb +8 -0
  26. data/lib/llm/providers/gemini/response/file.rb +11 -0
  27. data/lib/llm/providers/gemini/response/image.rb +26 -0
  28. data/lib/llm/providers/gemini.rb +18 -29
  29. data/lib/llm/providers/llamacpp.rb +18 -1
  30. data/lib/llm/providers/ollama/format/completion_format.rb +8 -5
  31. data/lib/llm/providers/ollama/models.rb +2 -8
  32. data/lib/llm/providers/ollama/response/completion.rb +28 -0
  33. data/lib/llm/providers/ollama/response/embedding.rb +9 -0
  34. data/lib/llm/providers/ollama.rb +13 -19
  35. data/lib/llm/providers/openai/audio.rb +10 -10
  36. data/lib/llm/providers/openai/files.rb +22 -34
  37. data/lib/llm/providers/openai/format/completion_format.rb +11 -4
  38. data/lib/llm/providers/openai/format/moderation_format.rb +2 -2
  39. data/lib/llm/providers/openai/format/respond_format.rb +7 -4
  40. data/lib/llm/providers/openai/images.rb +18 -17
  41. data/lib/llm/providers/openai/models.rb +4 -9
  42. data/lib/llm/providers/openai/moderations.rb +9 -11
  43. data/lib/llm/providers/openai/response/audio.rb +7 -0
  44. data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb} +14 -30
  45. data/lib/llm/providers/openai/response/embedding.rb +9 -0
  46. data/lib/llm/providers/openai/response/file.rb +7 -0
  47. data/lib/llm/providers/openai/response/image.rb +16 -0
  48. data/lib/llm/providers/openai/response/moderations.rb +34 -0
  49. data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb} +7 -29
  50. data/lib/llm/providers/openai/responses.rb +16 -34
  51. data/lib/llm/providers/openai/stream_parser.rb +1 -0
  52. data/lib/llm/providers/openai/vector_stores.rb +188 -0
  53. data/lib/llm/providers/openai.rb +24 -9
  54. data/lib/llm/providers/xai/images.rb +58 -0
  55. data/lib/llm/providers/xai.rb +72 -0
  56. data/lib/llm/response.rb +42 -13
  57. data/lib/llm/version.rb +1 -1
  58. data/lib/llm.rb +12 -13
  59. data/llm.gemspec +5 -5
  60. metadata +29 -38
  61. data/lib/llm/model.rb +0 -32
  62. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +0 -51
  63. data/lib/llm/providers/anthropic/response_parser.rb +0 -24
  64. data/lib/llm/providers/gemini/response_parser.rb +0 -46
  65. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +0 -42
  66. data/lib/llm/providers/ollama/response_parser.rb +0 -30
  67. data/lib/llm/providers/openai/response_parser.rb +0 -65
  68. data/lib/llm/providers/voyageai/error_handler.rb +0 -32
  69. data/lib/llm/providers/voyageai/response_parser.rb +0 -13
  70. data/lib/llm/providers/voyageai.rb +0 -44
  71. data/lib/llm/response/audio.rb +0 -13
  72. data/lib/llm/response/audio_transcription.rb +0 -14
  73. data/lib/llm/response/audio_translation.rb +0 -14
  74. data/lib/llm/response/completion.rb +0 -51
  75. data/lib/llm/response/download_file.rb +0 -15
  76. data/lib/llm/response/embedding.rb +0 -23
  77. data/lib/llm/response/file.rb +0 -42
  78. data/lib/llm/response/filelist.rb +0 -18
  79. data/lib/llm/response/image.rb +0 -29
  80. data/lib/llm/response/modellist.rb +0 -18
  81. data/lib/llm/response/moderationlist/moderation.rb +0 -47
  82. data/lib/llm/response/moderationlist.rb +0 -51
  83. data/lib/llm/response/respond.rb +0 -56
  84. /data/lib/llm/{event_handler.rb → eventhandler.rb} +0 -0
@@ -0,0 +1,188 @@
1
+ # frozen_string_literal: true
2
+
3
+ class LLM::OpenAI
4
+ ##
5
+ # The {LLM::OpenAI::VectorStores LLM::OpenAI::VectorStores} class provides
6
+ # an interface for [OpenAI's vector stores API](https://platform.openai.com/docs/api-reference/vector_stores/create)
7
+ class VectorStores
8
+ ##
9
+ # @param [LLM::Provider] provider
10
+ # An OpenAI provider
11
+ def initialize(provider)
12
+ @provider = provider
13
+ end
14
+
15
+ ##
16
+ # List all vector stores
17
+ # @param [Hash] params Other parameters (see OpenAI docs)
18
+ # @return [LLM::Response]
19
+ def all(**params)
20
+ query = URI.encode_www_form(params)
21
+ req = Net::HTTP::Get.new("/v1/vector_stores?#{query}", headers)
22
+ res = execute(request: req)
23
+ LLM::Response.new(res)
24
+ end
25
+
26
+ ##
27
+ # Create a vector store
28
+ # @param [String] name The name of the vector store
29
+ # @param [Array<String>] file_ids The IDs of the files to include in the vector store
30
+ # @param [Hash] params Other parameters (see OpenAI docs)
31
+ # @raise (see LLM::Provider#request)
32
+ # @return [LLM::Response]
33
+ # @see https://platform.openai.com/docs/api-reference/vector_stores/create OpenAI docs
34
+ def create(name:, file_ids: [], **params)
35
+ req = Net::HTTP::Post.new("/v1/vector_stores", headers)
36
+ req.body = JSON.dump(params.merge({name:, file_ids:}).compact)
37
+ res = execute(request: req)
38
+ LLM::Response.new(res)
39
+ end
40
+
41
+ ##
42
+ # Get a vector store
43
+ # @param [String, #id] vector The ID of the vector store
44
+ # @raise (see LLM::Provider#request)
45
+ # @return [LLM::Response]
46
+ # @see https://platform.openai.com/docs/api-reference/vector_stores/retrieve OpenAI docs
47
+ def get(vector:)
48
+ vector_id = vector.respond_to?(:id) ? vector.id : vector
49
+ req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}", headers)
50
+ res = execute(request: req)
51
+ LLM::Response.new(res)
52
+ end
53
+
54
+ ##
55
+ # Modify an existing vector store
56
+ # @param [String, #id] vector The ID of the vector store
57
+ # @param [String] name The new name of the vector store
58
+ # @param [Hash] params Other parameters (see OpenAI docs)
59
+ # @raise (see LLM::Provider#request)
60
+ # @return [LLM::Response]
61
+ # @see https://platform.openai.com/docs/api-reference/vector_stores/modify OpenAI docs
62
+ def modify(vector:, name: nil, **params)
63
+ vector_id = vector.respond_to?(:id) ? vector.id : vector
64
+ req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}", headers)
65
+ req.body = JSON.dump(params.merge({name:}).compact)
66
+ res = execute(request: req)
67
+ LLM::Response.new(res)
68
+ end
69
+
70
+ ##
71
+ # Delete a vector store
72
+ # @param [String, #id] vector The ID of the vector store
73
+ # @raise (see LLM::Provider#request)
74
+ # @return [LLM::Response]
75
+ # @see https://platform.openai.com/docs/api-reference/vector_stores/delete OpenAI docs
76
+ def delete(vector:)
77
+ vector_id = vector.respond_to?(:id) ? vector.id : vector
78
+ req = Net::HTTP::Delete.new("/v1/vector_stores/#{vector_id}", headers)
79
+ res = execute(request: req)
80
+ LLM::Response.new(res)
81
+ end
82
+
83
+ ##
84
+ # Search a vector store
85
+ # @param [String, #id] vector The ID of the vector store
86
+ # @param query [String] The query to search for
87
+ # @param params [Hash] Other parameters (see OpenAI docs)
88
+ # @raise (see LLM::Provider#request)
89
+ # @return [LLM::Response]
90
+ # @see https://platform.openai.com/docs/api-reference/vector_stores/search OpenAI docs
91
+ def search(vector:, query:, **params)
92
+ vector_id = vector.respond_to?(:id) ? vector.id : vector
93
+ req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/search", headers)
94
+ req.body = JSON.dump(params.merge({query:}).compact)
95
+ res = execute(request: req)
96
+ LLM::Response.new(res)
97
+ end
98
+
99
+ ##
100
+ # List all files in a vector store
101
+ # @param [String, #id] vector The ID of the vector store
102
+ # @param [Hash] params Other parameters (see OpenAI docs)
103
+ # @raise (see LLM::Provider#request)
104
+ # @return [LLM::Response]
105
+ # @see https://platform.openai.com/docs/api-reference/vector_stores_files/listFiles OpenAI docs
106
+ def all_files(vector:, **params)
107
+ vector_id = vector.respond_to?(:id) ? vector.id : vector
108
+ query = URI.encode_www_form(params)
109
+ req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}/files?#{query}", headers)
110
+ res = execute(request: req)
111
+ LLM::Response.new(res)
112
+ end
113
+
114
+ ##
115
+ # Add a file to a vector store
116
+ # @param [String, #id] vector The ID of the vector store
117
+ # @param [String, #id] file The ID of the file to add
118
+ # @param [Hash] attributes Attributes to associate with the file (optional)
119
+ # @param [Hash] params Other parameters (see OpenAI docs)
120
+ # @raise (see LLM::Provider#request)
121
+ # @return [LLM::Response]
122
+ # @see https://platform.openai.com/docs/api-reference/vector_stores_files/createFile OpenAI docs
123
+ def add_file(vector:, file:, attributes: nil, **params)
124
+ vector_id = vector.respond_to?(:id) ? vector.id : vector
125
+ file_id = file.respond_to?(:id) ? file.id : file
126
+ req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/files", headers)
127
+ req.body = JSON.dump(params.merge({file_id:, attributes:}).compact)
128
+ res = execute(request: req)
129
+ LLM::Response.new(res)
130
+ end
131
+ alias_method :create_file, :add_file
132
+
133
+ ##
134
+ # Update a file in a vector store
135
+ # @param [String, #id] vector The ID of the vector store
136
+ # @param [String, #id] file The ID of the file to update
137
+ # @param [Hash] attributes Attributes to associate with the file
138
+ # @param [Hash] params Other parameters (see OpenAI docs)
139
+ # @raise (see LLM::Provider#request)
140
+ # @return [LLM::Response]
141
+ # @see https://platform.openai.com/docs/api-reference/vector_stores_files/updateAttributes OpenAI docs
142
+ def update_file(vector:, file:, attributes:, **params)
143
+ vector_id = vector.respond_to?(:id) ? vector.id : vector
144
+ file_id = file.respond_to?(:id) ? file.id : file
145
+ req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/files/#{file_id}", headers)
146
+ req.body = JSON.dump(params.merge({attributes:}).compact)
147
+ res = execute(request: req)
148
+ LLM::Response.new(res)
149
+ end
150
+
151
+ ##
152
+ # Get a file from a vector store
153
+ # @param [String, #id] vector The ID of the vector store
154
+ # @param [String, #id] file The ID of the file to retrieve
155
+ # @raise (see LLM::Provider#request)
156
+ # @return [LLM::Response]
157
+ # @see https://platform.openai.com/docs/api-reference/vector_stores_files/getFile OpenAI docs
158
+ def get_file(vector:, file:, **params)
159
+ vector_id = vector.respond_to?(:id) ? vector.id : vector
160
+ file_id = file.respond_to?(:id) ? file.id : file
161
+ query = URI.encode_www_form(params)
162
+ req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}/files/#{file_id}?#{query}", headers)
163
+ res = execute(request: req)
164
+ LLM::Response.new(res)
165
+ end
166
+
167
+ ##
168
+ # Delete a file from a vector store
169
+ # @param [String, #id] vector The ID of the vector store
170
+ # @param [String, #id] file The ID of the file to delete
171
+ # @raise (see LLM::Provider#request)
172
+ # @return [LLM::Response]
173
+ # @see https://platform.openai.com/docs/api-reference/vector_stores_files/deleteFile OpenAI docs
174
+ def delete_file(vector:, file:)
175
+ vector_id = vector.respond_to?(:id) ? vector.id : vector
176
+ file_id = file.respond_to?(:id) ? file.id : file
177
+ req = Net::HTTP::Delete.new("/v1/vector_stores/#{vector_id}/files/#{file_id}", headers)
178
+ res = execute(request: req)
179
+ LLM::Response.new(res)
180
+ end
181
+
182
+ private
183
+
184
+ [:headers, :execute, :set_body_stream].each do |m|
185
+ define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
186
+ end
187
+ end
188
+ end
@@ -3,18 +3,29 @@
3
3
  module LLM
4
4
  ##
5
5
  # The OpenAI class implements a provider for
6
- # [OpenAI](https://platform.openai.com/)
6
+ # [OpenAI](https://platform.openai.com/).
7
+ #
8
+ # @example
9
+ # #!/usr/bin/env ruby
10
+ # require "llm"
11
+ #
12
+ # llm = LLM.openai(key: ENV["KEY"])
13
+ # bot = LLM::Bot.new(llm)
14
+ # bot.chat ["Tell me about this photo", File.open("/images/capybara.jpg", "rb")]
15
+ # bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
7
16
  class OpenAI < Provider
17
+ require_relative "openai/response/embedding"
18
+ require_relative "openai/response/completion"
8
19
  require_relative "openai/error_handler"
9
20
  require_relative "openai/format"
10
21
  require_relative "openai/stream_parser"
11
- require_relative "openai/response_parser"
12
22
  require_relative "openai/models"
13
23
  require_relative "openai/responses"
14
24
  require_relative "openai/images"
15
25
  require_relative "openai/audio"
16
26
  require_relative "openai/files"
17
27
  require_relative "openai/moderations"
28
+ require_relative "openai/vector_stores"
18
29
 
19
30
  include Format
20
31
 
@@ -38,7 +49,7 @@ module LLM
38
49
  req = Net::HTTP::Post.new("/v1/embeddings", headers)
39
50
  req.body = JSON.dump({input:, model:}.merge!(params))
40
51
  res = execute(request: req)
41
- Response::Embedding.new(res).extend(response_parser)
52
+ LLM::Response.new(res).extend(LLM::OpenAI::Response::Embedding)
42
53
  end
43
54
 
44
55
  ##
@@ -48,7 +59,7 @@ module LLM
48
59
  # @param params (see LLM::Provider#complete)
49
60
  # @example (see LLM::Provider#complete)
50
61
  # @raise (see LLM::Provider#request)
51
- # @raise [LLM::Error::PromptError]
62
+ # @raise [LLM::PromptError]
52
63
  # When given an object a provider does not understand
53
64
  # @return (see LLM::Provider#complete)
54
65
  def complete(prompt, params = {})
@@ -61,7 +72,7 @@ module LLM
61
72
  body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
62
73
  set_body_stream(req, StringIO.new(body))
63
74
  res = execute(request: req, stream:)
64
- Response::Completion.new(res).extend(response_parser)
75
+ LLM::Response.new(res).extend(LLM::OpenAI::Response::Completion)
65
76
  end
66
77
 
67
78
  ##
@@ -113,6 +124,14 @@ module LLM
113
124
  LLM::OpenAI::Moderations.new(self)
114
125
  end
115
126
 
127
+ ##
128
+ # Provides an interface to OpenAI's vector store API
129
+ # @see https://platform.openai.com/docs/api-reference/vector_stores/create OpenAI docs
130
+ # @return [LLM::OpenAI::VectorStores]
131
+ def vector_stores
132
+ LLM::OpenAI::VectorStores.new(self)
133
+ end
134
+
116
135
  ##
117
136
  # @return (see LLM::Provider#assistant_role)
118
137
  def assistant_role
@@ -136,10 +155,6 @@ module LLM
136
155
  )
137
156
  end
138
157
 
139
- def response_parser
140
- LLM::OpenAI::ResponseParser
141
- end
142
-
143
158
  def stream_parser
144
159
  LLM::OpenAI::StreamParser
145
160
  end
@@ -0,0 +1,58 @@
1
+ # frozen_string_literal: true
2
+
3
+ class LLM::XAI
4
+ ##
5
+ # The {LLM::XAI::Images LLM::XAI::Images} class provides an interface
6
+ # for [xAI's images API](https://docs.x.ai/docs/guides/image-generations).
7
+ # xAI supports multiple response formats: temporary URLs, or binary strings
8
+ # encoded in base64. The default is to return temporary URLs.
9
+ #
10
+ # @example Temporary URLs
11
+ # #!/usr/bin/env ruby
12
+ # require "llm"
13
+ # require "open-uri"
14
+ # require "fileutils"
15
+ #
16
+ # llm = LLM.xai(key: ENV["KEY"])
17
+ # res = llm.images.create prompt: "A dog on a rocket to the moon"
18
+ # FileUtils.mv OpenURI.open_uri(res.urls[0]).path,
19
+ # "rocket.png"
20
+ #
21
+ # @example Binary strings
22
+ # #!/usr/bin/env ruby
23
+ # require "llm"
24
+ #
25
+ # llm = LLM.xai(key: ENV["KEY"])
26
+ # res = llm.images.create prompt: "A dog on a rocket to the moon",
27
+ # response_format: "b64_json"
28
+ # IO.copy_stream res.images[0], "rocket.png"
29
+ class Images < LLM::OpenAI::Images
30
+ ##
31
+ # Create an image
32
+ # @example
33
+ # llm = LLM.xai(key: ENV["KEY"])
34
+ # res = llm.images.create prompt: "A dog on a rocket to the moon"
35
+ # res.urls.each { print _1, "\n"}
36
+ # @see https://docs.x.ai/docs/guides/image-generations xAI docs
37
+ # @param [String] prompt The prompt
38
+ # @param [String] model The model to use
39
+ # @param [Hash] params Other parameters (see xAI docs)
40
+ # @raise (see LLM::Provider#request)
41
+ # @return [LLM::Response]
42
+ def create(model: "grok-2-image-1212", **)
43
+ super
44
+ end
45
+
46
+ ##
47
+ # @raise [NotImplementedError]
48
+ def edit(model: "grok-2-image-1212", **)
49
+ raise NotImplementedError
50
+ end
51
+
52
+ ##
53
+ # @raise [NotImplementedError]
54
+ def create_variation(model: "grok-2-image-1212", **)
55
+ raise NotImplementedError
56
+ end
57
+ end
58
+ end
@@ -0,0 +1,72 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "openai" unless defined?(LLM::OpenAI)
4
+
5
+ module LLM
6
+ ##
7
+ # The XAI class implements a provider for [xAI](https://docs.x.ai).
8
+ #
9
+ # @example
10
+ # #!/usr/bin/env ruby
11
+ # require "llm"
12
+ #
13
+ # llm = LLM.xai(key: ENV["KEY"])
14
+ # bot = LLM::Bot.new(llm)
15
+ # bot.chat ["Tell me about this photo", File.open("/images/crow.jpg", "rb")]
16
+ # bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
17
+ class XAI < OpenAI
18
+ require_relative "xai/images"
19
+
20
+ ##
21
+ # @param [String] host A regional host or the default ("api.x.ai")
22
+ # @param key (see LLM::Provider#initialize)
23
+ # @see https://docs.x.ai/docs/key-information/regions Regional endpoints
24
+ def initialize(host: "api.x.ai", **)
25
+ super
26
+ end
27
+
28
+ ##
29
+ # @raise [NotImplementedError]
30
+ def files
31
+ raise NotImplementedError
32
+ end
33
+
34
+ ##
35
+ # @return [LLM::XAI::Images]
36
+ def images
37
+ LLM::XAI::Images.new(self)
38
+ end
39
+
40
+ ##
41
+ # @raise [NotImplementedError]
42
+ def audio
43
+ raise NotImplementedError
44
+ end
45
+
46
+ ##
47
+ # @raise [NotImplementedError]
48
+ def moderations
49
+ raise NotImplementedError
50
+ end
51
+
52
+ ##
53
+ # @raise [NotImplementedError]
54
+ def responses
55
+ raise NotImplementedError
56
+ end
57
+
58
+ ##
59
+ # @raise [NotImplementedError]
60
+ def vector_stores
61
+ raise NotImplementedError
62
+ end
63
+
64
+ ##
65
+ # Returns the default model for chat completions
66
+ # @see https://docs.x.ai/docs/models grok-4-0709
67
+ # @return [String]
68
+ def default_model
69
+ "grok-4-0709"
70
+ end
71
+ end
72
+ end
data/lib/llm/response.rb CHANGED
@@ -1,20 +1,18 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LLM
4
+ ##
5
+ # {LLM::Response LLM::Response} encapsulates a response
6
+ # from an LLM provider. It is returned by all methods
7
+ # that make requests to a provider, and sometimes extended
8
+ # with provider-specific functionality.
4
9
  class Response
5
10
  require "json"
6
- require_relative "response/completion"
7
- require_relative "response/embedding"
8
- require_relative "response/respond"
9
- require_relative "response/image"
10
- require_relative "response/audio"
11
- require_relative "response/audio_transcription"
12
- require_relative "response/audio_translation"
13
- require_relative "response/file"
14
- require_relative "response/filelist"
15
- require_relative "response/download_file"
16
- require_relative "response/modellist"
17
- require_relative "response/moderationlist"
11
+
12
+ ##
13
+ # Returns the HTTP response
14
+ # @return [Net::HTTPResponse]
15
+ attr_reader :res
18
16
 
19
17
  ##
20
18
  # @param [Net::HTTPResponse] res
@@ -30,9 +28,40 @@ module LLM
30
28
  # @return [Hash, String]
31
29
  def body
32
30
  @body ||= case @res["content-type"]
33
- when %r|\Aapplication/json\s*| then JSON.parse(@res.body)
31
+ when %r|\Aapplication/json\s*| then LLM::Object.from_hash(JSON.parse(@res.body))
34
32
  else @res.body
35
33
  end
36
34
  end
35
+
36
+ ##
37
+ # Returns an inspection of the response object
38
+ # @return [String]
39
+ def inspect
40
+ "#<#{self.class.name}:0x#{object_id.to_s(16)} @body=#{body.inspect} @res=#{@res.inspect}>"
41
+ end
42
+
43
+ ##
44
+ # Returns true if the response is successful
45
+ # @return [Boolean]
46
+ def ok?
47
+ Net::HTTPSuccess === @res
48
+ end
49
+
50
+ ##
51
+ # Returns true if the response is from the Files API
52
+ # @return [Boolean]
53
+ def file?
54
+ false
55
+ end
56
+
57
+ private
58
+
59
+ def method_missing(m, *args, **kwargs, &b)
60
+ body.respond_to?(m) ? body[m.to_s] : super
61
+ end
62
+
63
+ def respond_to_missing?(m, include_private = false)
64
+ body.respond_to?(m) || super
65
+ end
37
66
  end
38
67
  end
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LLM
4
- VERSION = "0.10.1"
4
+ VERSION = "0.12.0"
5
5
  end
data/lib/llm.rb CHANGED
@@ -12,13 +12,12 @@ module LLM
12
12
  require_relative "llm/mime"
13
13
  require_relative "llm/multipart"
14
14
  require_relative "llm/file"
15
- require_relative "llm/model"
16
15
  require_relative "llm/provider"
17
16
  require_relative "llm/bot"
18
17
  require_relative "llm/buffer"
19
18
  require_relative "llm/function"
20
19
  require_relative "llm/eventstream"
21
- require_relative "llm/event_handler"
20
+ require_relative "llm/eventhandler"
22
21
 
23
22
  module_function
24
23
 
@@ -27,18 +26,9 @@ module LLM
27
26
  # @return (see LLM::Anthropic#initialize)
28
27
  def anthropic(**)
29
28
  require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)
30
- require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
31
29
  LLM::Anthropic.new(**)
32
30
  end
33
31
 
34
- ##
35
- # @param (see LLM::Provider#initialize)
36
- # @return (see LLM::VoyageAI#initialize)
37
- def voyageai(**)
38
- require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
39
- LLM::VoyageAI.new(**)
40
- end
41
-
42
32
  ##
43
33
  # @param (see LLM::Provider#initialize)
44
34
  # @return (see LLM::Gemini#initialize)
@@ -79,6 +69,15 @@ module LLM
79
69
  LLM::OpenAI.new(**)
80
70
  end
81
71
 
72
+ ##
73
+ # @param key (see LLM::XAI#initialize)
74
+ # @param host (see LLM::XAI#initialize)
75
+ # @return (see LLM::XAI#initialize)
76
+ def xai(**)
77
+ require_relative "llm/providers/xai" unless defined?(LLM::XAI)
78
+ LLM::XAI.new(**)
79
+ end
80
+
82
81
  ##
83
82
  # Define a function
84
83
  # @example
@@ -87,8 +86,8 @@ module LLM
87
86
  # fn.params do |schema|
88
87
  # schema.object(command: schema.string.required)
89
88
  # end
90
- # fn.define do |params|
91
- # system(params.command)
89
+ # fn.define do |command:|
90
+ # system(command)
92
91
  # end
93
92
  # end
94
93
  # @param [Symbol] name The name of the function
data/llm.gemspec CHANGED
@@ -10,14 +10,14 @@ Gem::Specification.new do |spec|
10
10
 
11
11
  spec.summary = <<~SUMMARY
12
12
  llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
13
- includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The
14
- toolkit includes full support for chat, streaming, tool calling, audio,
15
- images, files, and JSON Schema generation.
13
+ includes OpenAI, Gemini, Anthropic, xAI (grok), DeepSeek, Ollama, and
14
+ LlamaCpp. The toolkit includes full support for chat, streaming, tool calling,
15
+ audio, images, files, and JSON Schema generation.
16
16
  SUMMARY
17
17
 
18
18
  spec.description = spec.summary
19
19
  spec.homepage = "https://github.com/llmrb/llm"
20
- spec.license = "0BSDL"
20
+ spec.license = "0BSD"
21
21
  spec.required_ruby_version = ">= 3.2.0"
22
22
 
23
23
  spec.metadata["homepage_uri"] = spec.homepage
@@ -37,7 +37,7 @@ Gem::Specification.new do |spec|
37
37
  spec.add_development_dependency "test-cmd.rb", "~> 0.12.0"
38
38
  spec.add_development_dependency "rake", "~> 13.0"
39
39
  spec.add_development_dependency "rspec", "~> 3.0"
40
- spec.add_development_dependency "standard", "~> 1.40"
40
+ spec.add_development_dependency "standard", "~> 1.50"
41
41
  spec.add_development_dependency "vcr", "~> 6.0"
42
42
  spec.add_development_dependency "dotenv", "~> 2.8"
43
43
  end