llm.rb 0.7.1 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. checksums.yaml +4 -4
  2. data/README.md +40 -24
  3. data/lib/json/schema/array.rb +1 -1
  4. data/lib/llm/buffer.rb +2 -2
  5. data/lib/llm/chat/conversable.rb +2 -2
  6. data/lib/llm/error.rb +12 -4
  7. data/lib/llm/message.rb +1 -1
  8. data/lib/llm/model.rb +1 -1
  9. data/lib/llm/{core_ext/ostruct.rb → object/builder.rb} +8 -12
  10. data/lib/llm/object/kernel.rb +45 -0
  11. data/lib/llm/object.rb +71 -0
  12. data/lib/llm/provider.rb +21 -0
  13. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +2 -2
  14. data/lib/llm/providers/anthropic.rb +2 -2
  15. data/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
  16. data/lib/llm/providers/deepseek/format.rb +28 -0
  17. data/lib/llm/providers/deepseek.rb +60 -0
  18. data/lib/llm/providers/gemini/files.rb +1 -1
  19. data/lib/llm/providers/gemini/response_parser/completion_parser.rb +2 -2
  20. data/lib/llm/providers/gemini.rb +2 -2
  21. data/lib/llm/providers/llamacpp.rb +16 -2
  22. data/lib/llm/providers/ollama/format/completion_format.rb +1 -1
  23. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +2 -2
  24. data/lib/llm/providers/ollama.rb +5 -5
  25. data/lib/llm/providers/openai/files.rb +3 -3
  26. data/lib/llm/providers/openai/format/moderation_format.rb +35 -0
  27. data/lib/llm/providers/openai/format.rb +3 -3
  28. data/lib/llm/providers/openai/moderations.rb +71 -0
  29. data/lib/llm/providers/openai/response_parser/completion_parser.rb +2 -2
  30. data/lib/llm/providers/openai/response_parser/respond_parser.rb +2 -2
  31. data/lib/llm/providers/openai/response_parser.rb +12 -0
  32. data/lib/llm/providers/openai/responses.rb +4 -4
  33. data/lib/llm/providers/openai.rb +13 -2
  34. data/lib/llm/response/filelist.rb +1 -1
  35. data/lib/llm/response/image.rb +1 -1
  36. data/lib/llm/response/modellist.rb +1 -1
  37. data/lib/llm/response/moderationlist/moderation.rb +47 -0
  38. data/lib/llm/response/moderationlist.rb +51 -0
  39. data/lib/llm/response.rb +1 -0
  40. data/lib/llm/version.rb +1 -1
  41. data/lib/llm.rb +9 -2
  42. data/llm.gemspec +1 -1
  43. metadata +18 -9
data/lib/llm/providers/llamacpp.rb CHANGED
@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require_relative "openai" unless defined?(LLM::OpenAI)
+
 module LLM
   ##
   # The LlamaCpp class implements a provider for
@@ -32,12 +34,24 @@ module LLM
       raise NotImplementedError
     end
 
+    ##
+    # @raise [NotImplementedError]
+    def moderations
+      raise NotImplementedError
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def responses
+      raise NotImplementedError
+    end
+
     ##
     # Returns the default model for chat completions
-    # @see https://ollama.com/library llama3.2
+    # @see https://ollama.com/library/qwen3 qwen3
     # @return [String]
     def default_model
-      "llama3.2"
+      "qwen3"
     end
   end
 end
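
A quick sketch of the LlamaCpp surface after this change. It assumes a local llama.cpp server and uses the LLM.llamacpp entry point shown later in data/lib/llm.rb:

  #!/usr/bin/env ruby
  require "llm"

  llm = LLM.llamacpp(key: nil)
  llm.default_model # => "qwen3"
  llm.moderations   # raises NotImplementedError
  llm.responses     # raises NotImplementedError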
data/lib/llm/providers/ollama/format/completion_format.rb CHANGED
@@ -63,7 +63,7 @@ module LLM::Ollama::Format
       elsif returns.any?
         returns.map { {role: "tool", tool_call_id: _1.id, content: JSON.dump(_1.value)} }
       else
-        [{role: message.role, content: content.flat_map { format_content(_1) }}]
+        content.flat_map { {role: message.role}.merge(format_content(_1)) }
       end
     end
 
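
The rewritten branch emits one message per content part instead of a single message whose content is an array. Reduced to its mechanics, with placeholder hashes standing in for whatever format_content returns:

  parts = [{content: "Hello"}, {images: ["..."]}]
  parts.flat_map { {role: "user"}.merge(_1) }
  # => [{role: "user", content: "Hello"}, {role: "user", images: ["..."]}]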
data/lib/llm/providers/ollama/response_parser/completion_parser.rb CHANGED
@@ -5,7 +5,7 @@ module LLM::Ollama::ResponseParser
   # @private
   class CompletionParser
     def initialize(body)
-      @body = OpenStruct.from_hash(body)
+      @body = LLM::Object.from_hash(body)
     end
 
     def format(response)
@@ -29,7 +29,7 @@ module LLM::Ollama::ResponseParser
       return [] unless tools
       tools.filter_map do |tool|
         next unless tool["function"]
-        OpenStruct.new(tool["function"])
+        LLM::Object.new(tool["function"])
       end
     end
 
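
LLM::Object (lib/llm/object.rb, new in this release) replaces OpenStruct throughout. A sketch of the contract the parsers rely on, assuming from_hash wraps nested hashes recursively the way OpenStruct.from_hash did:

  body = LLM::Object.from_hash({"message" => {"role" => "assistant", "content" => "Hi"}})
  body.message.content # => "Hi"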
data/lib/llm/providers/ollama.rb CHANGED
@@ -40,7 +40,7 @@ module LLM
     # @param params (see LLM::Provider#embed)
     # @raise (see LLM::Provider#request)
     # @return (see LLM::Provider#embed)
-    def embed(input, model: "llama3.2", **params)
+    def embed(input, model: default_model, **params)
       params = {model:}.merge!(params)
       req = Net::HTTP::Post.new("/v1/embeddings", headers)
       req.body = JSON.dump({input:}.merge!(params))
@@ -86,19 +86,19 @@ module LLM
 
     ##
     # Returns the default model for chat completions
-    # @see https://ollama.com/library llama3.2
+    # @see https://ollama.com/library/qwen3 qwen3
     # @return [String]
     def default_model
-      "llama3.2"
+      "qwen3:latest"
     end
 
     private
 
     def headers
-      {
+      (@headers || {}).merge(
         "Content-Type" => "application/json",
         "Authorization" => "Bearer #{@key}"
-      }
+      )
     end
 
     def response_parser
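
With embed delegating to default_model, the two calls below are equivalent; the LLM.ollama entry point is assumed to mirror the other providers:

  llm = LLM.ollama(key: nil)
  llm.embed("Hello")                        # embeds with "qwen3:latest"
  llm.embed("Hello", model: "qwen3:latest") # explicit override, same result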
data/lib/llm/providers/openai/files.rb CHANGED
@@ -53,7 +53,7 @@ class LLM::OpenAI
       req = Net::HTTP::Get.new("/v1/files?#{query}", headers)
       res = request(http, req)
       LLM::Response::FileList.new(res).tap { |filelist|
-        files = filelist.body["data"].map { OpenStruct.from_hash(_1) }
+        files = filelist.body["data"].map { LLM::Object.from_hash(_1) }
         filelist.files = files
       }
     end
@@ -127,12 +127,12 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/files/delete OpenAI docs
     # @param [#id, #to_s] file The file ID
     # @raise (see LLM::Provider#request)
-    # @return [OpenStruct] Response body
+    # @return [LLM::Object] Response body
     def delete(file:)
       file_id = file.respond_to?(:id) ? file.id : file
       req = Net::HTTP::Delete.new("/v1/files/#{file_id}", headers)
       res = request(http, req)
-      OpenStruct.from_hash JSON.parse(res.body)
+      LLM::Object.from_hash JSON.parse(res.body)
     end
 
     private
data/lib/llm/providers/openai/format/moderation_format.rb ADDED
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Format
+  ##
+  # @private
+  class ModerationFormat
+    ##
+    # @param [String, URI, Array<String, URI>] inputs
+    #  The inputs to format
+    # @return [LLM::OpenAI::Format::ModerationFormat]
+    def initialize(inputs)
+      @inputs = inputs
+    end
+
+    ##
+    # Formats the inputs for the OpenAI moderations API
+    # @return [Array<Hash>]
+    def format
+      [*inputs].flat_map do |input|
+        if String === input
+          {type: :text, text: input}
+        elsif URI === input
+          {type: :image_url, url: input.to_s}
+        else
+          raise LLM::Error::FormatError, "The given object (an instance of #{input.class}) " \
+                                         "is not supported by OpenAI moderations API"
+        end
+      end
+    end
+
+    private
+
+    attr_reader :inputs
+  end
+end
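
What the formatter produces, derived from the code above (ModerationFormat is marked @private, so this is illustrative rather than public API):

  require "uri"
  inputs = ["I hate you", URI.parse("https://example.com/image.png")]
  LLM::OpenAI::Format::ModerationFormat.new(inputs).format
  # => [{type: :text, text: "I hate you"},
  #     {type: :image_url, url: "https://example.com/image.png"}]

A bare String or URI also works thanks to the [*inputs] splat; any other object raises LLM::Error::FormatError.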
data/lib/llm/providers/openai/format.rb CHANGED
@@ -6,6 +6,7 @@ class LLM::OpenAI
   module Format
     require_relative "format/completion_format"
     require_relative "format/respond_format"
+    require_relative "format/moderation_format"
 
     ##
     # @param [Array<LLM::Message>] messages
@@ -43,9 +44,8 @@ class LLM::OpenAI
     # @param [Hash] params
     # @return [Hash]
     def format_tools(params)
-      return {} unless params and params[:tools]&.any?
-      tools = params[:tools]
-      {tools: tools.map { _1.format(self) }}
+      tools = params.delete(:tools)
+      (tools.nil? || tools.empty?) ? {} : {tools: tools.map { _1.format(self) }}
     end
   end
 end
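
Note the behavioral difference: format_tools now deletes :tools, so the caller's params hash is mutated and the raw tools array never leaks into the request body. Roughly, with fn standing in for a hypothetical LLM::Function:

  params = {tools: [fn], temperature: 0}
  format_tools(params) # => {tools: [fn.format(self)]}
  params               # => {temperature: 0}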
data/lib/llm/providers/openai/moderations.rb ADDED
@@ -0,0 +1,71 @@
+# frozen_string_literal: true
+
+class LLM::OpenAI
+  ##
+  # The {LLM::OpenAI::Moderations LLM::OpenAI::Moderations} class provides a moderations
+  # object for interacting with [OpenAI's moderations API](https://platform.openai.com/docs/api-reference/moderations).
+  # The moderations API can categorize content into different categories, such as
+  # hate speech, self-harm, and sexual content. It can also provide a confidence score
+  # for each category.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.openai(key: ENV["KEY"])
+  #   mod = llm.moderations.create input: "I hate you"
+  #   print "categories: #{mod.categories}", "\n"
+  #   print "scores: #{mod.scores}", "\n"
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.openai(key: ENV["KEY"])
+  #   mod = llm.moderations.create input: URI.parse("https://example.com/image.png")
+  #   print "categories: #{mod.categories}", "\n"
+  #   print "scores: #{mod.scores}", "\n"
+  #
+  # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+  # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+  class Moderations
+    ##
+    # Returns a new Moderations object
+    # @param [LLM::Provider] provider
+    # @return [LLM::OpenAI::Moderations]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # Create a moderation
+    # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+    # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+    # @note
+    #  Although OpenAI mentions an array as a valid input, and that it can return one
+    #  or more moderations, in practice the API only returns one moderation object. We
+    #  recommend using a single input string or URI, and keeping in mind that llm.rb
+    #  returns a Moderation object but has code in place to return multiple objects in
+    #  the future (in case OpenAI documentation ever matches the actual API).
+    # @param [String, URI, Array<String, URI>] input
+    # @param [String, LLM::Model] model The model to use
+    # @return [LLM::Response::ModerationList::Moderation]
+    def create(input:, model: "omni-moderation-latest", **params)
+      req = Net::HTTP::Post.new("/v1/moderations", headers)
+      input = Format::ModerationFormat.new(input).format
+      req.body = JSON.dump({input:, model:}.merge!(params))
+      res = request(http, req)
+      LLM::Response::ModerationList.new(res).extend(response_parser).first
+    end
+
+    private
+
+    def http
+      @provider.instance_variable_get(:@http)
+    end
+
+    [:response_parser, :headers, :request].each do |m|
+      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    end
+  end
+end
data/lib/llm/providers/openai/response_parser/completion_parser.rb CHANGED
@@ -5,7 +5,7 @@ module LLM::OpenAI::ResponseParser
   # @private
   class CompletionParser
     def initialize(body)
-      @body = OpenStruct.from_hash(body)
+      @body = LLM::Object.from_hash(body)
    end
 
     def format(response)
@@ -41,7 +41,7 @@ module LLM::OpenAI::ResponseParser
         name: tool.function.name,
         arguments: JSON.parse(tool.function.arguments)
       }
-      OpenStruct.new(tool)
+      LLM::Object.new(tool)
     end
   end
 
data/lib/llm/providers/openai/response_parser/respond_parser.rb CHANGED
@@ -5,7 +5,7 @@ module LLM::OpenAI::ResponseParser
   # @private
   class RespondParser
     def initialize(body)
-      @body = OpenStruct.from_hash(body)
+      @body = LLM::Object.from_hash(body)
     end
 
     def format(response)
@@ -37,7 +37,7 @@ module LLM::OpenAI::ResponseParser
     end
 
     def format_tool(tool)
-      OpenStruct.new(
+      LLM::Object.new(
         id: tool.call_id,
         name: tool.name,
         arguments: JSON.parse(tool.arguments)
data/lib/llm/providers/openai/response_parser.rb CHANGED
@@ -20,6 +20,18 @@ class LLM::OpenAI
       RespondParser.new(body).format(self)
     end
 
+    ##
+    # @param [Hash] body
+    #  The response body from the LLM provider
+    # @return [Hash]
+    def parse_moderation_list(body)
+      {
+        id: body["id"],
+        model: body["model"],
+        moderations: body["results"].map { LLM::Response::ModerationList::Moderation.new(_1) }
+      }
+    end
+
     ##
     # @param [Hash] body
     #  The response body from the LLM provider
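
The shape parse_moderation_list returns, sketched with a hypothetical response body:

  body = {
    "id" => "modr-123",
    "model" => "omni-moderation-latest",
    "results" => [{"flagged" => true, "categories" => {"hate" => true},
                   "category_scores" => {"hate" => 0.91}}]
  }
  parse_moderation_list(body)
  # => {id: "modr-123", model: "omni-moderation-latest",
  #     moderations: [#<LLM::Response::ModerationList::Moderation ...>]}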
data/lib/llm/providers/openai/responses.rb CHANGED
@@ -13,8 +13,8 @@ class LLM::OpenAI
   #   require "llm"
   #
   #   llm = LLM.openai(ENV["KEY"])
-  #   res1 = llm.responses.create "Your task is to help me with math", :developer
-  #   res2 = llm.responses.create "5 + 5 = ?", :user, previous_response_id: res1.id
+  #   res1 = llm.responses.create "Your task is to help me with math", role: :developer
+  #   res2 = llm.responses.create "5 + 5 = ?", role: :user, previous_response_id: res1.id
   #   [res1,res2].each { llm.responses.delete(_1) }
   # @example
   #   #!/usr/bin/env ruby
@@ -81,12 +81,12 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/responses/delete OpenAI docs
     # @param [#id, #to_s] response Response ID
     # @raise (see LLM::Provider#request)
-    # @return [OpenStruct] Response body
+    # @return [LLM::Object] Response body
     def delete(response)
       response_id = response.respond_to?(:id) ? response.id : response
       req = Net::HTTP::Delete.new("/v1/responses/#{response_id}", headers)
       res = request(http, req)
-      OpenStruct.from_hash JSON.parse(res.body)
+      LLM::Object.from_hash JSON.parse(res.body)
     end
 
     private
data/lib/llm/providers/openai.rb CHANGED
@@ -15,6 +15,8 @@ module LLM
     require_relative "openai/audio"
     require_relative "openai/files"
     require_relative "openai/models"
+    require_relative "openai/moderations"
+
     include Format
 
     HOST = "api.openai.com"
@@ -102,6 +104,15 @@ module LLM
       LLM::OpenAI::Models.new(self)
     end
 
+    ##
+    # Provides an interface to OpenAI's moderation API
+    # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+    # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+    # @return [LLM::OpenAI::Moderations]
+    def moderations
+      LLM::OpenAI::Moderations.new(self)
+    end
+
     ##
     # @return (see LLM::Provider#assistant_role)
     def assistant_role
@@ -119,10 +130,10 @@ module LLM
     private
 
     def headers
-      {
+      (@headers || {}).merge(
         "Content-Type" => "application/json",
         "Authorization" => "Bearer #{@key}"
-      }
+      )
     end
 
     def response_parser
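
Both the OpenAI and Ollama providers now merge @headers into the defaults, which lets extra headers supplied to the provider pass through. Note the merge direction: the defaults are the argument, so they win on key conflicts. A sketch with a made-up header:

  extra = {"OpenAI-Organization" => "org-123"}
  (extra || {}).merge(
    "Content-Type" => "application/json",
    "Authorization" => "Bearer sk-..."
  )
  # => {"OpenAI-Organization" => "org-123",
  #     "Content-Type" => "application/json",
  #     "Authorization" => "Bearer sk-..."}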
data/lib/llm/response/filelist.rb CHANGED
@@ -5,7 +5,7 @@ module LLM
   # The {LLM::Response::FileList LLM::Response::FileList} class represents a
   # list of file objects that are returned by a provider. It is an Enumerable
   # object, and can be used to iterate over the file objects in a way that is
-  # similar to an array. Each element is an instance of OpenStruct.
+  # similar to an array. Each element is an instance of LLM::Object.
   class Response::FileList < Response
     include Enumerable
 
data/lib/llm/response/image.rb CHANGED
@@ -8,7 +8,7 @@ module LLM
   class Response::Image < Response
     ##
     # Returns one or more image objects, or nil
-    # @return [Array<OpenStruct>, nil]
+    # @return [Array<LLM::Object>, nil]
     def images
       parsed[:images].any? ? parsed[:images] : nil
     end
data/lib/llm/response/modellist.rb CHANGED
@@ -5,7 +5,7 @@ module LLM
   # The {LLM::Response::ModelList LLM::Response::ModelList} class represents a
   # list of model objects that are returned by a provider. It is an Enumerable
   # object, and can be used to iterate over the model objects in a way that is
-  # similar to an array. Each element is an instance of OpenStruct.
+  # similar to an array. Each element is an instance of LLM::Object.
   class Response::ModelList < Response
     include Enumerable
 
data/lib/llm/response/moderationlist/moderation.rb ADDED
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+class LLM::Response::ModerationList
+  ##
+  # The {LLM::Response::ModerationList::Moderation Moderation}
+  # class represents a moderation object that is returned by
+  # the moderations interface.
+  # @see LLM::Response::ModerationList
+  # @see LLM::OpenAI::Moderations
+  class Moderation
+    ##
+    # @param [Hash] moderation
+    # @return [LLM::Response::ModerationList::Moderation]
+    def initialize(moderation)
+      @moderation = moderation
+    end
+
+    ##
+    # Returns true if the moderation is flagged
+    # @return [Boolean]
+    def flagged?
+      @moderation["flagged"]
+    end
+
+    ##
+    # Returns the moderation categories
+    # @return [Array<String>]
+    def categories
+      @moderation["categories"].filter_map { _2 ? _1 : nil }
+    end
+
+    ##
+    # Returns the moderation scores
+    # @return [Hash]
+    def scores
+      @moderation["category_scores"].select { categories.include?(_1) }
+    end
+
+    ##
+    # @return [String]
+    def inspect
+      "#<#{self.class}:0x#{object_id.to_s(16)} " \
+      "categories=#{categories} " \
+      "scores=#{scores}>"
+    end
+  end
+end
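
How categories and scores filter the raw hash, with a made-up payload:

  moderation = LLM::Response::ModerationList::Moderation.new(
    "flagged" => true,
    "categories" => {"hate" => true, "self-harm" => false},
    "category_scores" => {"hate" => 0.91, "self-harm" => 0.01}
  )
  moderation.flagged?   # => true
  moderation.categories # => ["hate"]
  moderation.scores     # => {"hate" => 0.91}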
data/lib/llm/response/moderationlist.rb ADDED
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # The {LLM::Response::ModerationList LLM::Response::ModerationList} class
+  # represents a response from the moderations API. It is an Enumerable that
+  # yields an instance of {LLM::Response::ModerationList::Moderation LLM::Response::ModerationList::Moderation},
+  # and each moderation object contains the categories and scores for a given
+  # input.
+  # @see LLM::OpenAI::Moderations LLM::OpenAI::Moderations
+  class Response::ModerationList < Response
+    require_relative "moderationlist/moderation"
+    include Enumerable
+
+    ##
+    # Returns the moderation ID
+    # @return [String]
+    def id
+      parsed[:id]
+    end
+
+    ##
+    # Returns the moderation model
+    # @return [String]
+    def model
+      parsed[:model]
+    end
+
+    ##
+    # Yields each moderation object
+    # @yieldparam [LLM::Response::ModerationList::Moderation] moderation
+    # @yieldreturn [void]
+    # @return [void]
+    def each(&)
+      moderations.each(&)
+    end
+
+    private
+
+    def parsed
+      @parsed ||= parse_moderation_list(body)
+    end
+
+    ##
+    # Returns an array of moderation objects
+    # @return [Array<LLM::Response::ModerationList::Moderation>]
+    def moderations
+      parsed[:moderations]
+    end
+  end
+end
data/lib/llm/response.rb CHANGED
@@ -14,6 +14,7 @@ module LLM
   require_relative "response/filelist"
   require_relative "response/download_file"
   require_relative "response/modellist"
+  require_relative "response/moderationlist"
 
   ##
   # @param [Net::HTTPResponse] res
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module LLM
-  VERSION = "0.7.1"
+  VERSION = "0.8.0"
 end
data/lib/llm.rb CHANGED
@@ -3,7 +3,7 @@
 module LLM
   require "stringio"
   require_relative "json/schema"
-  require_relative "llm/core_ext/ostruct"
+  require_relative "llm/object"
   require_relative "llm/version"
   require_relative "llm/utils"
   require_relative "llm/error"
@@ -57,11 +57,18 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::LlamaCpp#initialize)
   def llamacpp(key: nil, **)
-    require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
     require_relative "llm/providers/llamacpp" unless defined?(LLM::LlamaCpp)
     LLM::LlamaCpp.new(key:, **)
   end
 
+  ##
+  # @param key (see LLM::Provider#initialize)
+  # @return (see LLM::DeepSeek#initialize)
+  def deepseek(**)
+    require_relative "llm/providers/deepseek" unless defined?(LLM::DeepSeek)
+    LLM::DeepSeek.new(**)
+  end
+
   ##
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::OpenAI#initialize)
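
A minimal sketch of the new DeepSeek entry point. The #complete call is an assumption based on the shared provider interface; deepseek.rb itself is not shown in full in this diff:

  #!/usr/bin/env ruby
  require "llm"

  llm = LLM.deepseek(key: ENV["KEY"])
  res = llm.complete("Hello!", role: :user)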
data/llm.gemspec CHANGED
@@ -10,7 +10,7 @@ Gem::Specification.new do |spec|
 
   spec.summary = "llm.rb is a zero-dependency Ruby toolkit for " \
                  "Large Language Models that includes OpenAI, Gemini, " \
-                 "Anthropic, Ollama, and LlamaCpp. It’s fast, simple " \
+                 "Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple " \
                  "and composable – with full support for chat, tool calling, audio, " \
                  "images, files, and JSON Schema generation."
   spec.description = spec.summary
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.7.1
+  version: 0.8.0
 platform: ruby
 authors:
 - Antar Azri
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-05-11 00:00:00.000000000 Z
+date: 2025-05-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: webmock
@@ -152,9 +152,9 @@ dependencies:
   - !ruby/object:Gem::Version
     version: '2.8'
 description: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
-  includes OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple and
-  composable – with full support for chat, tool calling, audio, images, files, and
-  JSON Schema generation.
+  includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple
+  and composable – with full support for chat, tool calling, audio, images, files,
+  and JSON Schema generation.
 email:
 - azantar@proton.me
 - 0x1eef@proton.me
@@ -181,7 +181,6 @@ files:
 - lib/llm/chat/conversable.rb
 - lib/llm/chat/prompt/completion.rb
 - lib/llm/chat/prompt/respond.rb
-- lib/llm/core_ext/ostruct.rb
 - lib/llm/error.rb
 - lib/llm/file.rb
 - lib/llm/function.rb
@@ -189,6 +188,9 @@ files:
 - lib/llm/mime.rb
 - lib/llm/model.rb
 - lib/llm/multipart.rb
+- lib/llm/object.rb
+- lib/llm/object/builder.rb
+- lib/llm/object/kernel.rb
 - lib/llm/provider.rb
 - lib/llm/providers/anthropic.rb
 - lib/llm/providers/anthropic/error_handler.rb
@@ -197,6 +199,9 @@ files:
 - lib/llm/providers/anthropic/models.rb
 - lib/llm/providers/anthropic/response_parser.rb
 - lib/llm/providers/anthropic/response_parser/completion_parser.rb
+- lib/llm/providers/deepseek.rb
+- lib/llm/providers/deepseek/format.rb
+- lib/llm/providers/deepseek/format/completion_format.rb
 - lib/llm/providers/gemini.rb
 - lib/llm/providers/gemini/audio.rb
 - lib/llm/providers/gemini/error_handler.rb
@@ -221,9 +226,11 @@ files:
 - lib/llm/providers/openai/files.rb
 - lib/llm/providers/openai/format.rb
 - lib/llm/providers/openai/format/completion_format.rb
+- lib/llm/providers/openai/format/moderation_format.rb
 - lib/llm/providers/openai/format/respond_format.rb
 - lib/llm/providers/openai/images.rb
 - lib/llm/providers/openai/models.rb
+- lib/llm/providers/openai/moderations.rb
 - lib/llm/providers/openai/response_parser.rb
 - lib/llm/providers/openai/response_parser/completion_parser.rb
 - lib/llm/providers/openai/response_parser/respond_parser.rb
@@ -242,6 +249,8 @@ files:
 - lib/llm/response/filelist.rb
 - lib/llm/response/image.rb
 - lib/llm/response/modellist.rb
+- lib/llm/response/moderationlist.rb
+- lib/llm/response/moderationlist/moderation.rb
 - lib/llm/response/respond.rb
 - lib/llm/utils.rb
 - lib/llm/version.rb
@@ -271,7 +280,7 @@ rubygems_version: 3.5.23
 signing_key:
 specification_version: 4
 summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
-  OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple and composable
-  – with full support for chat, tool calling, audio, images, files, and JSON Schema
-  generation.
+  OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple and
+  composable – with full support for chat, tool calling, audio, images, files, and
+  JSON Schema generation.
 test_files: []