llm.rb 0.7.2 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. checksums.yaml +4 -4
  2. data/README.md +40 -24
  3. data/lib/json/schema/array.rb +1 -1
  4. data/lib/llm/buffer.rb +2 -2
  5. data/lib/llm/chat/conversable.rb +2 -2
  6. data/lib/llm/error.rb +12 -4
  7. data/lib/llm/message.rb +1 -1
  8. data/lib/llm/model.rb +1 -1
  9. data/lib/llm/{core_ext/ostruct.rb → object/builder.rb} +8 -12
  10. data/lib/llm/object/kernel.rb +45 -0
  11. data/lib/llm/object.rb +71 -0
  12. data/lib/llm/provider.rb +7 -0
  13. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +2 -2
  14. data/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
  15. data/lib/llm/providers/deepseek/format.rb +28 -0
  16. data/lib/llm/providers/deepseek.rb +60 -0
  17. data/lib/llm/providers/gemini/files.rb +1 -1
  18. data/lib/llm/providers/gemini/response_parser/completion_parser.rb +2 -2
  19. data/lib/llm/providers/llamacpp.rb +16 -2
  20. data/lib/llm/providers/ollama/format/completion_format.rb +1 -1
  21. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +2 -2
  22. data/lib/llm/providers/ollama.rb +3 -3
  23. data/lib/llm/providers/openai/files.rb +3 -3
  24. data/lib/llm/providers/openai/format/moderation_format.rb +35 -0
  25. data/lib/llm/providers/openai/format.rb +3 -3
  26. data/lib/llm/providers/openai/moderations.rb +71 -0
  27. data/lib/llm/providers/openai/response_parser/completion_parser.rb +2 -2
  28. data/lib/llm/providers/openai/response_parser/respond_parser.rb +2 -2
  29. data/lib/llm/providers/openai/response_parser.rb +12 -0
  30. data/lib/llm/providers/openai/responses.rb +4 -4
  31. data/lib/llm/providers/openai.rb +11 -0
  32. data/lib/llm/response/filelist.rb +1 -1
  33. data/lib/llm/response/image.rb +1 -1
  34. data/lib/llm/response/modellist.rb +1 -1
  35. data/lib/llm/response/moderationlist/moderation.rb +47 -0
  36. data/lib/llm/response/moderationlist.rb +51 -0
  37. data/lib/llm/response.rb +1 -0
  38. data/lib/llm/version.rb +1 -1
  39. data/lib/llm.rb +9 -2
  40. data/llm.gemspec +1 -1
  41. metadata +18 -9
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 2c8c175d5b640f4d04e114569781bb2cad10b17f76489f4eaecd2ea7963c2baf
- data.tar.gz: b00e4c4b7bf9211157e66f1ac015540ecec86b1f4d6d8b3bcaa46068ac97fbaf
+ metadata.gz: 9b4e83ac151c51faaa4a1e275058091a9ce6f61c3dc10e879a6215b0f1498aad
+ data.tar.gz: f78b7bbeaece69384d6b38014e9d1d99816195d8536a310a25d2a23479dda122
  SHA512:
- metadata.gz: e1d27b0fa9a59ec4baf55e7863675c71827c66af091f4b4d2db268e08d83709bbb87c23175c0dda8e2583ced40a2c76bafcba95f118ec64e6f44245878e668dd
- data.tar.gz: 0201f576e4180dfef2ace9b699711eb013ac4556baf2bc9a584060000ec94fb7f764d1bb1e0d9d8187f76deeea6761e2323f661974e5e93fb50a29e0a62207d8
+ metadata.gz: e117602fae5643713a159d633201cd88e94a339763710bbb788b3b1439e39bbbff9f2c221975fc58e1b57aabdf8d0d935d69dbc6acbece84e98701e129cf3c3d
+ data.tar.gz: 79f2ef053bf500ba9e5ab76c62abdb69ab93ba43b2f11fce867d008e64fbe09e154c1a30fbfdeb08ae30a5442b5d7e5876aa42c788dce8d0778c786f0a69adee
data/README.md CHANGED
@@ -1,9 +1,9 @@
  ## About

  llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
- includes OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple
- and composable – with full support for chat, tool calling, audio,
- images, files, and JSON Schema generation.
+ includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp.
+ It's fast, simple and composable – with full support for chat,
+ tool calling, audio, images, files, and JSON Schema generation.

  ## Features

@@ -11,7 +11,6 @@ images, files, and JSON Schema generation.
  - ✅ A single unified interface for multiple providers
  - 📦 Zero dependencies outside Ruby's standard library
  - 🚀 Optimized for performance and low memory usage
- - 🔌 Retrieve models dynamically for introspection and selection

  #### Chat, Agents
  - 🧠 Stateless and stateful chat via completions and responses API
@@ -24,8 +23,9 @@ images, files, and JSON Schema generation.
  - 📎 File uploads and prompt-aware file interaction
  - 💡 Multimodal prompts (text, images, PDFs, URLs, files)

- #### Embeddings
+ #### Miscellaneous
  - 🧮 Text embeddings and vector support
+ - 🔌 Retrieve models dynamically for introspection and selection

  ## Demos

@@ -35,13 +35,13 @@ images, files, and JSON Schema generation.
  </details>

  <details>
- <summary><b>2. Files: import at boot time</b></summary>
- <img src="share/llm-shell/examples/files-boottime.gif">
+ <summary><b>2. Files: import at runtime</b></summary>
+ <img src="share/llm-shell/examples/files-runtime.gif">
  </details>

  <details>
- <summary><b>3. Files: import at runtime</b></summary>
- <img src="share/llm-shell/examples/files-runtime.gif">
+ <summary><b>3. Files: import at boot time</b></summary>
+ <img src="share/llm-shell/examples/files-boottime.gif">
  </details>

  ## Examples
@@ -59,12 +59,18 @@ using an API key (if required) and an optional set of configuration options via
  #!/usr/bin/env ruby
  require "llm"

+ ##
+ # cloud providers
  llm = LLM.openai(key: "yourapikey")
  llm = LLM.gemini(key: "yourapikey")
  llm = LLM.anthropic(key: "yourapikey")
+ llm = LLM.deepseek(key: "yourapikey")
+ llm = LLM.voyageai(key: "yourapikey")
+
+ ##
+ # local providers
  llm = LLM.ollama(key: nil)
  llm = LLM.llamacpp(key: nil)
- llm = LLM.voyageai(key: "yourapikey")
  ```

  ### Conversations
@@ -73,7 +79,7 @@ llm = LLM.voyageai(key: "yourapikey")

  > This example uses the stateless chat completions API that all
  > providers support. A similar example for OpenAI's stateful
- > responses API is available in the [docs/](docs/OPENAI_RESPONSES.md)
+ > responses API is available in the [docs/](docs/OPENAI.md)
  > directory.

  The following example enables lazy mode for a
@@ -121,11 +127,12 @@ msgs.each { print "[#{_1.role}] ", _1.content, "\n" }

  #### Structured

- All LLM providers except Anthropic allow a client to describe the structure
- of a response that a LLM emits according to a schema that is described by JSON.
- The schema lets a client describe what JSON object (or value) an LLM should emit,
- and the LLM will abide by the schema. See also: [JSON Schema website](https://json-schema.org/overview/what-is-jsonschema).
- We will use the
+ All LLM providers except Anthropic and DeepSeek allow a client to describe
+ the structure of a response that a LLM emits according to a schema that is
+ described by JSON. The schema lets a client describe what JSON object (or value)
+ an LLM should emit, and the LLM will abide by the schema.
+ See also: [JSON Schema website](https://json-schema.org/overview/what-is-jsonschema).
+ We will use the
  [llmrb/json-schema](https://github.com/llmrb/json-schema)
  library for the sake of the examples &ndash; the interface is designed so you
  could drop in any other library in its place:
@@ -134,22 +141,31 @@ could drop in any other library in its place:
  #!/usr/bin/env ruby
  require "llm"

+ ##
+ # Objects
  llm = LLM.openai(key: ENV["KEY"])
- schema = llm.schema.object({fruit: llm.schema.string.enum("Apple", "Orange", "Pineapple")})
+ schema = llm.schema.object(answer: llm.schema.integer.required)
+ bot = LLM::Chat.new(llm, schema:).lazy
+ bot.chat "Does the earth orbit the sun?", role: :user
+ bot.messages.find(&:assistant?).content! # => {probability: 1}
+
+ ##
+ # Enums
+ schema = llm.schema.object(fruit: llm.schema.string.enum("Apple", "Orange", "Pineapple"))
  bot = LLM::Chat.new(llm, schema:).lazy
  bot.chat "Your favorite fruit is Pineapple", role: :system
  bot.chat "What fruit is your favorite?", role: :user
  bot.messages.find(&:assistant?).content! # => {fruit: "Pineapple"}

- schema = llm.schema.object({answer: llm.schema.integer.required})
+ ##
+ # Arrays
+ schema = llm.schema.object(answers: llm.schema.array(llm.schema.integer.required))
  bot = LLM::Chat.new(llm, schema:).lazy
+ bot.chat "Answer all of my questions", role: :system
  bot.chat "Tell me the answer to ((5 + 5) / 2)", role: :user
- bot.messages.find(&:assistant?).content! # => {answer: 5}
-
- schema = llm.schema.object({probability: llm.schema.number.required})
- bot = LLM::Chat.new(llm, schema:).lazy
- bot.chat "Does the earth orbit the sun?", role: :user
- bot.messages.find(&:assistant?).content! # => {probability: 1}
+ bot.chat "Tell me the answer to ((5 + 5) / 2) * 2", role: :user
+ bot.chat "Tell me the answer to ((5 + 5) / 2) * 2 + 1", role: :user
+ bot.messages.find(&:assistant?).content! # => {answers: [5, 10, 11]}
  ```

  ### Tools
data/lib/json/schema/array.rb CHANGED
@@ -7,7 +7,7 @@ class JSON::Schema
  # {JSON::Schema::Leaf JSON::Schema::Leaf} and provides methods that
  # can act as constraints.
  class Array < Leaf
- def initialize(*items)
+ def initialize(items)
  @items = items
  end

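With the splat dropped, an array schema is now built from a single item schema, which is how the updated README calls it. A minimal sketch of the new calling convention (key and model placeholders assumed):

```ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
# 0.8.0: one item schema describes every element of the array
answers = llm.schema.array(llm.schema.integer.required)
schema  = llm.schema.object(answers: answers)
```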
data/lib/llm/buffer.rb CHANGED
@@ -82,7 +82,7 @@ module LLM
  message.content,
  params.merge(role:, messages:)
  )
- @completed.concat([*pendings, message, completion.choices[0]])
+ @completed.concat([*pendings, message, *completion.choices[0]])
  @pending.clear
  end

@@ -95,7 +95,7 @@ module LLM
  @response ? {previous_response_id: @response.id} : {}
  ].inject({}, &:merge!)
  @response = @provider.responses.create(message.content, params.merge(role:))
- @completed.concat([*pendings, message, @response.outputs[0]])
+ @completed.concat([*pendings, message, *@response.outputs[0]])
  @pending.clear
  end
  end
data/lib/llm/chat/conversable.rb CHANGED
@@ -24,7 +24,7 @@ class LLM::Chat
  def sync_response(prompt, params = {})
  role = params[:role]
  @response = create_response!(prompt, params)
- @messages.concat [Message.new(role, prompt), @response.outputs[0]]
+ @messages.concat [Message.new(role, prompt), *@response.outputs[0]]
  end

  ##
@@ -45,7 +45,7 @@ class LLM::Chat
  def sync_completion(prompt, params = {})
  role = params[:role]
  completion = create_completion!(prompt, params)
- @messages.concat [Message.new(role, prompt), completion.choices[0]]
+ @messages.concat [Message.new(role, prompt), *completion.choices[0]]
  end

  include LLM
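The two hunks here and the two in buffer.rb make the same fix: the assistant reply is splatted into the message list, so a nil choice or output is dropped instead of being appended. In plain Ruby terms:

```ruby
reply = nil
["prompt", *reply]  # => ["prompt"] (*nil splats to nothing)
reply = "assistant reply"
["prompt", *reply]  # => ["prompt", "assistant reply"]
```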
data/lib/llm/error.rb CHANGED
@@ -16,11 +16,11 @@ module LLM
  # @return [Net::HTTPResponse]
  # Returns the response associated with an error
  attr_accessor :response
- end

- ##
- # When a prompt is given an object that's not understood
- PromptError = Class.new(Error)
+ def message
+ [super, response.body].join("\n")
+ end
+ end

  ##
  # HTTPUnauthorized
@@ -29,5 +29,13 @@
  ##
  # HTTPTooManyRequests
  RateLimit = Class.new(ResponseError)
+
+ ##
+ # When an given an input that is not understood
+ FormatError = Class.new(Error)
+
+ ##
+ # When given a prompt that is not understood
+ PromptError = Class.new(FormatError)
  end
  end
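PromptError now subclasses the new FormatError, so existing rescue clauses keep working while the broader class also covers the moderation formatter's FormatError. A hedged sketch of how the hierarchy can be rescued (the deliberately unsupported prompt object and the exact point where the error surfaces are assumptions):

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
bot = LLM::Chat.new(llm).lazy
begin
  bot.chat Object.new, role: :user  # not a supported prompt object
  bot.messages.to_a                 # lazy: formatting happens on drain
rescue LLM::Error::PromptError => e
  # PromptError < FormatError < Error, so rescuing
  # LLM::Error::FormatError would catch this too
  warn "unsupported prompt: #{e.message}"
end
```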
data/lib/llm/message.rb CHANGED
@@ -121,7 +121,7 @@ module LLM
  private

  def tool_calls
- @tool_calls ||= OpenStruct.from_hash(@extra[:tool_calls] || [])
+ @tool_calls ||= LLM::Object.from_hash(@extra[:tool_calls] || [])
  end
  end
  end
data/lib/llm/model.rb CHANGED
@@ -4,7 +4,7 @@
  # The {LLM::Model LLM::Model} class represents an LLM model that
  # is available to use. Its properties are delegated to the underlying
  # response body, and vary by provider.
- class LLM::Model < OpenStruct
+ class LLM::Model < LLM::Object
  ##
  # Returns a subclass of {LLM::Provider LLM::Provider}
  # @return [LLM::Provider]
data/lib/llm/{core_ext/ostruct.rb → object/builder.rb} RENAMED
@@ -1,17 +1,18 @@
  # frozen_string_literal: true

- require "ostruct"
- class OpenStruct
- module FromHash
+ class LLM::Object
+ ##
+ # @private
+ module Builder
  ##
  # @example
- # obj = OpenStruct.from_hash(person: {name: 'John'})
+ # obj = LLM::Object.from_hash(person: {name: 'John'})
  # obj.person.name # => 'John'
- # obj.person.class # => OpenStruct
+ # obj.person.class # => LLM::Object
  # @param [Hash, Array] obj
  # A Hash object
- # @return [OpenStruct]
- # An OpenStruct object initialized by visiting `obj` with recursion
+ # @return [LLM::Object]
+ # An LLM::Object object initialized by visiting `obj` with recursion
  def from_hash(obj)
  case obj
  when self then from_hash(obj.to_h)
@@ -35,9 +36,4 @@ class OpenStruct
  end
  end
  end
- extend FromHash
-
- def to_json(...)
- to_h.to_json(...)
- end
  end
data/lib/llm/object/kernel.rb ADDED
@@ -0,0 +1,45 @@
+ # frozen_string_literal: true
+
+ class LLM::Object
+ ##
+ # @private
+ module Kernel
+ def tap(...)
+ ::Kernel.instance_method(:tap).bind(self).call(...)
+ end
+
+ def instance_of?(...)
+ ::Kernel.instance_method(:instance_of?).bind(self).call(...)
+ end
+
+ def method(...)
+ ::Kernel.instance_method(:method).bind(self).call(...)
+ end
+
+ def kind_of?(...)
+ ::Kernel.instance_method(:kind_of?).bind(self).call(...)
+ end
+ alias_method :is_a?, :kind_of?
+
+ def respond_to?(m, include_private = false)
+ @h.key?(m.to_sym) || self.class.instance_methods.include?(m) || super
+ end
+
+ def respond_to_missing?(m, include_private = false)
+ @h.key?(m.to_sym) || super
+ end
+
+ def object_id
+ ::Kernel.instance_method(:object_id).bind(self).call
+ end
+
+ def class
+ ::Kernel.instance_method(:class).bind(self).call
+ end
+
+ def inspect
+ "#<#{self.class}:0x#{object_id.to_s(16)} @h=#{to_h.inspect}>"
+ end
+ alias_method :to_s, :inspect
+ end
+ end
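These delegations exist because LLM::Object (next file) subclasses BasicObject, which ships with almost no instance methods; each method above is borrowed from ::Kernel by rebinding its UnboundMethod to the receiver. The idiom in isolation, outside llm.rb:

```ruby
# A BasicObject subclass has no #class, #tap, #is_a? etc. of its own,
# but an UnboundMethod taken from ::Kernel can be bound to any object:
class Bare < BasicObject
  def real_class
    ::Kernel.instance_method(:class).bind(self).call
  end
end

Bare.new.real_class # => Bare
```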
data/lib/llm/object.rb ADDED
@@ -0,0 +1,71 @@
+ # frozen_string_literal: true
+
+ ##
+ # The {LLM::Object LLM::Object} class encapsulates a Hash object, and it
+ # allows a consumer to get and set Hash keys via regular methods. It is
+ # similar in spirit to OpenStruct, and it was introduced after OpenStruct
+ # became a bundled gem (and not a default gem) in Ruby 3.5.
+ class LLM::Object < BasicObject
+ require_relative "object/builder"
+ require_relative "object/kernel"
+
+ extend Builder
+ include Kernel
+ include ::Enumerable
+ defined?(::PP) ? include(::PP::ObjectMixin) : nil
+
+ ##
+ # @param [Hash] h
+ # @return [LLM::Object]
+ def initialize(h)
+ @h = h.transform_keys(&:to_sym) || h
+ end
+
+ ##
+ # Yields a key|value pair to a block.
+ # @yieldparam [Symbol] k
+ # @yieldparam [Object] v
+ # @return [void]
+ def each(&)
+ @h.each(&)
+ end
+
+ ##
+ # @param [Symbol, #to_sym] k
+ # @return [Object]
+ def [](k)
+ @h[k.to_sym]
+ end
+
+ ##
+ # @param [Symbol, #to_sym] k
+ # @param [Object] v
+ # @return [void]
+ def []=(k, v)
+ @h[k.to_sym] = v
+ end
+
+ ##
+ # @return [String]
+ def to_json(...)
+ to_h.to_json(...)
+ end
+
+ ##
+ # @return [Hash]
+ def to_h
+ @h
+ end
+
+ private
+
+ def method_missing(m, *args, &b)
+ if m.to_s.end_with?("=")
+ @h[m[0..-2].to_sym] = args.first
+ elsif @h.key?(m)
+ @h[m]
+ else
+ nil
+ end
+ end
+ end
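Taken together with Builder and Kernel, the class behaves like a recursive, symbol-keyed OpenStruct. A short sketch of the resulting behavior, based on the methods above (require "json" is assumed for #to_json):

```ruby
require "json"

obj = LLM::Object.from_hash(person: {name: "John", age: 30})
obj.person.name    # => "John"  (reader via method_missing)
obj.person[:age]   # => 30      (#[] converts the key to a Symbol)
obj.person.email   # => nil     (absent keys return nil)
obj.person.email = "john@example.com"  # writer via method_missing
obj.to_json        # delegates to Hash#to_json
```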
data/lib/llm/provider.rb CHANGED
@@ -168,6 +168,13 @@ class LLM::Provider
  raise NotImplementedError
  end

+ ##
+ # @return [LLM::OpenAI::Moderations]
+ # Returns an interface to the moderations API
+ def moderations
+ raise NotImplementedError
+ end
+
  ##
  # @return [String]
  # Returns the role of the assistant in the conversation.
data/lib/llm/providers/anthropic/response_parser/completion_parser.rb CHANGED
@@ -5,7 +5,7 @@ module LLM::Anthropic::ResponseParser
  # @private
  class CompletionParser
  def initialize(body)
- @body = OpenStruct.from_hash(body)
+ @body = LLM::Object.from_hash(body)
  end

  def format(response)
@@ -34,7 +34,7 @@ module LLM::Anthropic::ResponseParser
  name: tool.name,
  arguments: tool.input
  }
- OpenStruct.new(tool)
+ LLM::Object.new(tool)
  end
  end

data/lib/llm/providers/deepseek/format/completion_format.rb ADDED
@@ -0,0 +1,68 @@
+ # frozen_string_literal: true
+
+ module LLM::DeepSeek::Format
+ ##
+ # @private
+ class CompletionFormat
+ ##
+ # @param [LLM::Message, Hash] message
+ # The message to format
+ def initialize(message)
+ @message = message
+ end
+
+ ##
+ # Formats the message for the OpenAI chat completions API
+ # @return [Hash]
+ def format
+ catch(:abort) do
+ if Hash === message
+ {role: message[:role], content: format_content(message[:content])}
+ elsif message.tool_call?
+ {role: message.role, content: nil, tool_calls: message.extra[:original_tool_calls]}
+ else
+ format_message
+ end
+ end
+ end
+
+ private
+
+ def format_content(content)
+ case content
+ when String
+ content.to_s
+ when LLM::Message
+ format_content(content.content)
+ when LLM::Function::Return
+ throw(:abort, {role: "tool", tool_call_id: content.id, content: JSON.dump(content.value)})
+ else
+ raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+ "is not supported by the DeepSeek chat completions API"
+ end
+ end
+
+ def format_message
+ case content
+ when Array
+ format_array
+ else
+ {role: message.role, content: format_content(content)}
+ end
+ end
+
+ def format_array
+ if content.empty?
+ nil
+ elsif returns.any?
+ returns.map { {role: "tool", tool_call_id: _1.id, content: JSON.dump(_1.value)} }
+ else
+ {role: message.role, content: content.flat_map { format_content(_1) }}
+ end
+ end
+
+ def message = @message
+ def content = message.content
+ def returns = content.grep(LLM::Function::Return)
+ end
+ end
data/lib/llm/providers/deepseek/format.rb ADDED
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ class LLM::DeepSeek
+ ##
+ # @private
+ module Format
+ require_relative "format/completion_format"
+ ##
+ # @param [Array<LLM::Message>] messages
+ # The messages to format
+ # @return [Array<Hash>]
+ def format(messages, ...)
+ messages.filter_map do |message|
+ CompletionFormat.new(message).format
+ end
+ end
+
+ private
+
+ ##
+ # @param [Hash] params
+ # @return [Hash]
+ def format_tools(params)
+ tools = params.delete(:tools)
+ (tools.nil? || tools.empty?) ? {} : {tools: tools.map { _1.format(self) }}
+ end
+ end
+ end
data/lib/llm/providers/deepseek.rb ADDED
@@ -0,0 +1,60 @@
+ # frozen_string_literal: true
+
+ require_relative "openai" unless defined?(LLM::OpenAI)
+
+ module LLM
+ ##
+ # The DeepSeek class implements a provider for
+ # [DeepSeek](https://deepseek.com)
+ # through its OpenAI-compatible API provided via
+ # their [web platform](https://platform.deepseek.com).
+ class DeepSeek < OpenAI
+ require_relative "deepseek/format"
+ include DeepSeek::Format
+
+ ##
+ # @param (see LLM::Provider#initialize)
+ # @return [LLM::DeepSeek]
+ def initialize(host: "api.deepseek.com", port: 443, ssl: true, **)
+ super
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def files
+ raise NotImplementedError
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def images
+ raise NotImplementedError
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def audio
+ raise NotImplementedError
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def moderations
+ raise NotImplementedError
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def responses
+ raise NotImplementedError
+ end
+
+ ##
+ # Returns the default model for chat completions
+ # @see https://api-docs.deepseek.com/quick_start/pricing deepseek-chat
+ # @return [String]
+ def default_model
+ "deepseek-chat"
+ end
+ end
+ end
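Since DeepSeek subclasses the OpenAI provider, everything except the stubbed endpoints works as it does for OpenAI. A minimal chat sketch (the environment variable is a placeholder; the model defaults to "deepseek-chat"):

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.deepseek(key: ENV["DEEPSEEK_KEY"])
bot = LLM::Chat.new(llm).lazy
bot.chat "What is 2 + 2?", role: :user
bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
# files, images, audio, moderations and responses raise NotImplementedError
```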
data/lib/llm/providers/gemini/files.rb CHANGED
@@ -61,7 +61,7 @@ class LLM::Gemini
  LLM::Response::FileList.new(res).tap { |filelist|
  files = filelist.body["files"]&.map do |file|
  file = file.transform_keys { snakecase(_1) }
- OpenStruct.from_hash(file)
+ LLM::Object.from_hash(file)
  end || []
  filelist.files = files
  }
data/lib/llm/providers/gemini/response_parser/completion_parser.rb CHANGED
@@ -3,7 +3,7 @@
  module LLM::Gemini::ResponseParser
  class CompletionParser
  def initialize(body)
- @body = OpenStruct.from_hash(body)
+ @body = LLM::Object.from_hash(body)
  end

  def format(response)
@@ -32,7 +32,7 @@ module LLM::Gemini::ResponseParser
  def format_tool_calls(tools)
  (tools || []).map do |tool|
  function = {name: tool.name, arguments: tool.args}
- OpenStruct.new(function)
+ LLM::Object.new(function)
  end
  end

data/lib/llm/providers/llamacpp.rb CHANGED
@@ -1,5 +1,7 @@
  # frozen_string_literal: true

+ require_relative "openai" unless defined?(LLM::OpenAI)
+
  module LLM
  ##
  # The LlamaCpp class implements a provider for
@@ -32,12 +34,24 @@ module LLM
  raise NotImplementedError
  end

+ ##
+ # @raise [NotImplementedError]
+ def moderations
+ raise NotImplementedError
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def responses
+ raise NotImplementedError
+ end
+
  ##
  # Returns the default model for chat completions
- # @see https://ollama.com/library llama3.2
+ # @see https://ollama.com/library/qwen3 qwen3
  # @return [String]
  def default_model
- "llama3.2"
+ "qwen3"
  end
  end
  end
data/lib/llm/providers/ollama/format/completion_format.rb CHANGED
@@ -63,7 +63,7 @@ module LLM::Ollama::Format
  elsif returns.any?
  returns.map { {role: "tool", tool_call_id: _1.id, content: JSON.dump(_1.value)} }
  else
- [{role: message.role, content: content.flat_map { format_content(_1) }}]
+ content.flat_map { {role: message.role }.merge(format_content(_1)) }
  end
  end

data/lib/llm/providers/ollama/response_parser/completion_parser.rb CHANGED
@@ -5,7 +5,7 @@ module LLM::Ollama::ResponseParser
  # @private
  class CompletionParser
  def initialize(body)
- @body = OpenStruct.from_hash(body)
+ @body = LLM::Object.from_hash(body)
  end

  def format(response)
@@ -29,7 +29,7 @@ module LLM::Ollama::ResponseParser
  return [] unless tools
  tools.filter_map do |tool|
  next unless tool["function"]
- OpenStruct.new(tool["function"])
+ LLM::Object.new(tool["function"])
  end
  end

data/lib/llm/providers/ollama.rb CHANGED
@@ -40,7 +40,7 @@ module LLM
  # @param params (see LLM::Provider#embed)
  # @raise (see LLM::Provider#request)
  # @return (see LLM::Provider#embed)
- def embed(input, model: "llama3.2", **params)
+ def embed(input, model: default_model, **params)
  params = {model:}.merge!(params)
  req = Net::HTTP::Post.new("/v1/embeddings", headers)
  req.body = JSON.dump({input:}.merge!(params))
@@ -86,10 +86,10 @@ module LLM

  ##
  # Returns the default model for chat completions
- # @see https://ollama.com/library llama3.2
+ # @see https://ollama.com/library/qwen3 qwen3
  # @return [String]
  def default_model
- "llama3.2"
+ "qwen3:latest"
  end

  private
data/lib/llm/providers/openai/files.rb CHANGED
@@ -53,7 +53,7 @@ class LLM::OpenAI
  req = Net::HTTP::Get.new("/v1/files?#{query}", headers)
  res = request(http, req)
  LLM::Response::FileList.new(res).tap { |filelist|
- files = filelist.body["data"].map { OpenStruct.from_hash(_1) }
+ files = filelist.body["data"].map { LLM::Object.from_hash(_1) }
  filelist.files = files
  }
  end
@@ -127,12 +127,12 @@ class LLM::OpenAI
  # @see https://platform.openai.com/docs/api-reference/files/delete OpenAI docs
  # @param [#id, #to_s] file The file ID
  # @raise (see LLM::Provider#request)
- # @return [OpenStruct] Response body
+ # @return [LLM::Object] Response body
  def delete(file:)
  file_id = file.respond_to?(:id) ? file.id : file
  req = Net::HTTP::Delete.new("/v1/files/#{file_id}", headers)
  res = request(http, req)
- OpenStruct.from_hash JSON.parse(res.body)
+ LLM::Object.from_hash JSON.parse(res.body)
  end

  private
data/lib/llm/providers/openai/format/moderation_format.rb ADDED
@@ -0,0 +1,35 @@
+ # frozen_string_literal: true
+
+ module LLM::OpenAI::Format
+ ##
+ # @private
+ class ModerationFormat
+ ##
+ # @param [String, URI, Array<String, URI>] inputs
+ # The inputs to format
+ # @return [LLM::OpenAI::Format::ModerationFormat]
+ def initialize(inputs)
+ @inputs = inputs
+ end
+
+ ##
+ # Formats the inputs for the OpenAI moderations API
+ # @return [Array<Hash>]
+ def format
+ [*inputs].flat_map do |input|
+ if String === input
+ {type: :text, text: input}
+ elsif URI === input
+ {type: :image_url, url: input.to_s}
+ else
+ raise LLM::Error::FormatError, "The given object (an instance of #{input.class}) " \
+ "is not supported by OpenAI moderations API"
+ end
+ end
+ end
+
+ private
+
+ attr_reader :inputs
+ end
+ end
data/lib/llm/providers/openai/format.rb CHANGED
@@ -6,6 +6,7 @@ class LLM::OpenAI
  module Format
  require_relative "format/completion_format"
  require_relative "format/respond_format"
+ require_relative "format/moderation_format"

  ##
  # @param [Array<LLM::Message>] messages
@@ -43,9 +44,8 @@ class LLM::OpenAI
  # @param [Hash] params
  # @return [Hash]
  def format_tools(params)
- return {} unless params and params[:tools]&.any?
- tools = params[:tools]
- {tools: tools.map { _1.format(self) }}
+ tools = params.delete(:tools)
+ (tools.nil? || tools.empty?) ? {} : {tools: tools.map { _1.format(self) }}
  end
  end
  end
data/lib/llm/providers/openai/moderations.rb ADDED
@@ -0,0 +1,71 @@
+ # frozen_string_literal: true
+
+ class LLM::OpenAI
+ ##
+ # The {LLM::OpenAI::Moderations LLM::OpenAI::Moderations} class provides a moderations
+ # object for interacting with [OpenAI's moderations API](https://platform.openai.com/docs/api-reference/moderations).
+ # The moderations API can categorize content into different categories, such as
+ # hate speech, self-harm, and sexual content. It can also provide a confidence score
+ # for each category.
+ #
+ # @example
+ # #!/usr/bin/env ruby
+ # require "llm"
+ #
+ # llm = LLM.openai(key: ENV["KEY"])
+ # mod = llm.moderations.create input: "I hate you"
+ # print "categories: #{mod.categories}", "\n"
+ # print "scores: #{mod.scores}", "\n"
+ #
+ # @example
+ # #!/usr/bin/env ruby
+ # require "llm"
+ #
+ # llm = LLM.openai(key: ENV["KEY"])
+ # mod = llm.moderations.create input: URI.parse("https://example.com/image.png")
+ # print "categories: #{mod.categories}", "\n"
+ # print "scores: #{mod.scores}", "\n"
+ #
+ # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+ # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+ class Moderations
+ ##
+ # Returns a new Moderations object
+ # @param [LLM::Provider] provider
+ # @return [LLM::OpenAI::Moderations]
+ def initialize(provider)
+ @provider = provider
+ end
+
+ ##
+ # Create a moderation
+ # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+ # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+ # @note
+ # Although OpenAI mentions an array as a valid input, and that it can return one
+ # or more moderations, in practice the API only returns one moderation object. We
+ # recommend using a single input string or URI, and to keep in mind that llm.rb
+ # returns a Moderation object but has code in place to return multiple objects in
+ # the future (in case OpenAI documentation ever matches the actual API).
+ # @param [String, URI, Array<String, URI>] input
+ # @param [String, LLM::Model] model The model to use
+ # @return [LLM::Response::ModerationList::Moderation]
+ def create(input:, model: "omni-moderation-latest", **params)
+ req = Net::HTTP::Post.new("/v1/moderations", headers)
+ input = Format::ModerationFormat.new(input).format
+ req.body = JSON.dump({input:, model:}.merge!(params))
+ res = request(http, req)
+ LLM::Response::ModerationList.new(res).extend(response_parser).first
+ end
+
+ private
+
+ def http
+ @provider.instance_variable_get(:@http)
+ end
+
+ [:response_parser, :headers, :request].each do |m|
+ define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+ end
+ end
+ end
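Combined with the parser and response classes below, a created moderation can be inspected like so (a sketch; the flagged categories and scores shown are illustrative and depend on the input and model):

```ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
mod = llm.moderations.create(input: "I hate you")
mod.flagged?    # => true
mod.categories  # => e.g. ["harassment"] (category names whose value was true)
mod.scores      # => e.g. {"harassment" => 0.98} (scores for those categories)
```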
data/lib/llm/providers/openai/response_parser/completion_parser.rb CHANGED
@@ -5,7 +5,7 @@ module LLM::OpenAI::ResponseParser
  # @private
  class CompletionParser
  def initialize(body)
- @body = OpenStruct.from_hash(body)
+ @body = LLM::Object.from_hash(body)
  end

  def format(response)
@@ -41,7 +41,7 @@ module LLM::OpenAI::ResponseParser
  name: tool.function.name,
  arguments: JSON.parse(tool.function.arguments)
  }
- OpenStruct.new(tool)
+ LLM::Object.new(tool)
  end
  end

data/lib/llm/providers/openai/response_parser/respond_parser.rb CHANGED
@@ -5,7 +5,7 @@ module LLM::OpenAI::ResponseParser
  # @private
  class RespondParser
  def initialize(body)
- @body = OpenStruct.from_hash(body)
+ @body = LLM::Object.from_hash(body)
  end

  def format(response)
@@ -37,7 +37,7 @@ module LLM::OpenAI::ResponseParser
  end

  def format_tool(tool)
- OpenStruct.new(
+ LLM::Object.new(
  id: tool.call_id,
  name: tool.name,
  arguments: JSON.parse(tool.arguments)
data/lib/llm/providers/openai/response_parser.rb CHANGED
@@ -20,6 +20,18 @@ class LLM::OpenAI
  RespondParser.new(body).format(self)
  end

+ ##
+ # @param [Hash] body
+ # The response body from the LLM provider
+ # @return [Hash]
+ def parse_moderation_list(body)
+ {
+ id: body["id"],
+ model: body["model"],
+ moderations: body["results"].map { LLM::Response::ModerationList::Moderation.new(_1) }
+ }
+ end
+
  ##
  # @param [Hash] body
  # The response body from the LLM provider
data/lib/llm/providers/openai/responses.rb CHANGED
@@ -13,8 +13,8 @@ class LLM::OpenAI
  # require "llm"
  #
  # llm = LLM.openai(ENV["KEY"])
- # res1 = llm.responses.create "Your task is to help me with math", :developer
- # res2 = llm.responses.create "5 + 5 = ?", :user, previous_response_id: res1.id
+ # res1 = llm.responses.create "Your task is to help me with math", role: :developer
+ # res2 = llm.responses.create "5 + 5 = ?", role: :user, previous_response_id: res1.id
  # [res1,res2].each { llm.responses.delete(_1) }
  # @example
  # #!/usr/bin/env ruby
@@ -81,12 +81,12 @@ class LLM::OpenAI
  # @see https://platform.openai.com/docs/api-reference/responses/delete OpenAI docs
  # @param [#id, #to_s] response Response ID
  # @raise (see LLM::Provider#request)
- # @return [OpenStruct] Response body
+ # @return [LLM::Object] Response body
  def delete(response)
  response_id = response.respond_to?(:id) ? response.id : response
  req = Net::HTTP::Delete.new("/v1/responses/#{response_id}", headers)
  res = request(http, req)
- OpenStruct.from_hash JSON.parse(res.body)
+ LLM::Object.from_hash JSON.parse(res.body)
  end

  private
data/lib/llm/providers/openai.rb CHANGED
@@ -15,6 +15,8 @@ module LLM
  require_relative "openai/audio"
  require_relative "openai/files"
  require_relative "openai/models"
+ require_relative "openai/moderations"
+
  include Format

  HOST = "api.openai.com"
@@ -102,6 +104,15 @@ module LLM
  LLM::OpenAI::Models.new(self)
  end

+ ##
+ # Provides an interface to OpenAI's moderation API
+ # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+ # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+ # @return [LLM::OpenAI::Moderations]
+ def moderations
+ LLM::OpenAI::Moderations.new(self)
+ end
+
  ##
  # @return (see LLM::Provider#assistant_role)
  def assistant_role
data/lib/llm/response/filelist.rb CHANGED
@@ -5,7 +5,7 @@ module LLM
  # The {LLM::Response::FileList LLM::Response::FileList} class represents a
  # list of file objects that are returned by a provider. It is an Enumerable
  # object, and can be used to iterate over the file objects in a way that is
- # similar to an array. Each element is an instance of OpenStruct.
+ # similar to an array. Each element is an instance of LLM::Object.
  class Response::FileList < Response
  include Enumerable

data/lib/llm/response/image.rb CHANGED
@@ -8,7 +8,7 @@ module LLM
  class Response::Image < Response
  ##
  # Returns one or more image objects, or nil
- # @return [Array<OpenStruct>, nil]
+ # @return [Array<LLM::Object>, nil]
  def images
  parsed[:images].any? ? parsed[:images] : nil
  end
data/lib/llm/response/modellist.rb CHANGED
@@ -5,7 +5,7 @@ module LLM
  # The {LLM::Response::ModelList LLM::Response::ModelList} class represents a
  # list of model objects that are returned by a provider. It is an Enumerable
  # object, and can be used to iterate over the model objects in a way that is
- # similar to an array. Each element is an instance of OpenStruct.
+ # similar to an array. Each element is an instance of LLM::Object.
  class Response::ModelList < Response
  include Enumerable

data/lib/llm/response/moderationlist/moderation.rb ADDED
@@ -0,0 +1,47 @@
+ # frozen_string_literal: true
+
+ class LLM::Response::ModerationList
+ ##
+ # The {LLM::Response::ModerationList::Moderation Moderation}
+ # class represents a moderation object that is returned by
+ # the moderations interface.
+ # @see LLM::Response::ModerationList
+ # @see LLM::OpenAI::Moderations
+ class Moderation
+ ##
+ # @param [Hash] moderation
+ # @return [LLM::Response::ModerationList::Moderation]
+ def initialize(moderation)
+ @moderation = moderation
+ end
+
+ ##
+ # Returns true if the moderation is flagged
+ # @return [Boolean]
+ def flagged?
+ @moderation["flagged"]
+ end
+
+ ##
+ # Returns the moderation categories
+ # @return [Array<String>]
+ def categories
+ @moderation["categories"].filter_map { _2 ? _1 : nil }
+ end
+
+ ##
+ # Returns the moderation scores
+ # @return [Hash]
+ def scores
+ @moderation["category_scores"].select { categories.include?(_1) }
+ end
+
+ ##
+ # @return [String]
+ def inspect
+ "#<#{self.class}:0x#{object_id.to_s(16)} " \
+ "categories=#{categories} " \
+ "scores=#{scores}>"
+ end
+ end
+ end
data/lib/llm/response/moderationlist.rb ADDED
@@ -0,0 +1,51 @@
+ # frozen_string_literal: true
+
+ module LLM
+ ##
+ # The {LLM::Response::ModerationList LLM::Response::ModerationList} class
+ # represents a response from the moderations API. It is an Enumerable that
+ # yields an instance of {LLM::Response::ModerationList::Moderation LLM::Response::ModerationList::Moderation},
+ # and each moderation object contains the categories and scores for a given
+ # input.
+ # @see LLM::OpenAI::Moderations LLM::OpenAI::Moderations
+ class Response::ModerationList < Response
+ require_relative "moderationlist/moderation"
+ include Enumerable
+
+ ##
+ # Returns the moderation ID
+ # @return [String]
+ def id
+ parsed[:id]
+ end
+
+ ##
+ # Returns the moderation model
+ # @return [String]
+ def model
+ parsed[:model]
+ end
+
+ ##
+ # Yields each moderation object
+ # @yieldparam [OpenStruct] moderation
+ # @yieldreturn [void]
+ # @return [void]
+ def each(&)
+ moderations.each(&)
+ end
+
+ private
+
+ def parsed
+ @parsed ||= parse_moderation_list(body)
+ end
+
+ ##
+ # Returns an array of moderation objects
+ # @return [Array<OpenStruct>]
+ def moderations
+ parsed[:moderations]
+ end
+ end
+ end
data/lib/llm/response.rb CHANGED
@@ -14,6 +14,7 @@ module LLM
  require_relative "response/filelist"
  require_relative "response/download_file"
  require_relative "response/modellist"
+ require_relative "response/moderationlist"

  ##
  # @param [Net::HTTPResponse] res
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module LLM
- VERSION = "0.7.2"
+ VERSION = "0.8.0"
  end
data/lib/llm.rb CHANGED
@@ -3,7 +3,7 @@
  module LLM
  require "stringio"
  require_relative "json/schema"
- require_relative "llm/core_ext/ostruct"
+ require_relative "llm/object"
  require_relative "llm/version"
  require_relative "llm/utils"
  require_relative "llm/error"
@@ -57,11 +57,18 @@ module LLM
  # @param key (see LLM::Provider#initialize)
  # @return (see LLM::LlamaCpp#initialize)
  def llamacpp(key: nil, **)
- require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
  require_relative "llm/providers/llamacpp" unless defined?(LLM::LlamaCpp)
  LLM::LlamaCpp.new(key:, **)
  end

+ ##
+ # @param key (see LLM::Provider#initialize)
+ # @return (see LLM::DeepSeek#initialize)
+ def deepseek(**)
+ require_relative "llm/providers/deepseek" unless defined?(LLM::DeepSeek)
+ LLM::DeepSeek.new(**)
+ end
+
  ##
  # @param key (see LLM::Provider#initialize)
  # @return (see LLM::OpenAI#initialize)
data/llm.gemspec CHANGED
@@ -10,7 +10,7 @@ Gem::Specification.new do |spec|

  spec.summary = "llm.rb is a zero-dependency Ruby toolkit for " \
  "Large Language Models that includes OpenAI, Gemini, " \
- "Anthropic, Ollama, and LlamaCpp. It’s fast, simple " \
+ "Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple " \
  "and composable – with full support for chat, tool calling, audio, " \
  "images, files, and JSON Schema generation."
  spec.description = spec.summary
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: llm.rb
  version: !ruby/object:Gem::Version
- version: 0.7.2
+ version: 0.8.0
  platform: ruby
  authors:
  - Antar Azri
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-05-11 00:00:00.000000000 Z
+ date: 2025-05-17 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: webmock
@@ -152,9 +152,9 @@ dependencies:
  - !ruby/object:Gem::Version
  version: '2.8'
  description: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
- includes OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple and
- composable – with full support for chat, tool calling, audio, images, files, and
- JSON Schema generation.
+ includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple
+ and composable – with full support for chat, tool calling, audio, images, files,
+ and JSON Schema generation.
  email:
  - azantar@proton.me
  - 0x1eef@proton.me
@@ -181,7 +181,6 @@ files:
  - lib/llm/chat/conversable.rb
  - lib/llm/chat/prompt/completion.rb
  - lib/llm/chat/prompt/respond.rb
- - lib/llm/core_ext/ostruct.rb
  - lib/llm/error.rb
  - lib/llm/file.rb
  - lib/llm/function.rb
@@ -189,6 +188,9 @@ files:
  - lib/llm/mime.rb
  - lib/llm/model.rb
  - lib/llm/multipart.rb
+ - lib/llm/object.rb
+ - lib/llm/object/builder.rb
+ - lib/llm/object/kernel.rb
  - lib/llm/provider.rb
  - lib/llm/providers/anthropic.rb
  - lib/llm/providers/anthropic/error_handler.rb
@@ -197,6 +199,9 @@ files:
  - lib/llm/providers/anthropic/models.rb
  - lib/llm/providers/anthropic/response_parser.rb
  - lib/llm/providers/anthropic/response_parser/completion_parser.rb
+ - lib/llm/providers/deepseek.rb
+ - lib/llm/providers/deepseek/format.rb
+ - lib/llm/providers/deepseek/format/completion_format.rb
  - lib/llm/providers/gemini.rb
  - lib/llm/providers/gemini/audio.rb
  - lib/llm/providers/gemini/error_handler.rb
@@ -221,9 +226,11 @@ files:
  - lib/llm/providers/openai/files.rb
  - lib/llm/providers/openai/format.rb
  - lib/llm/providers/openai/format/completion_format.rb
+ - lib/llm/providers/openai/format/moderation_format.rb
  - lib/llm/providers/openai/format/respond_format.rb
  - lib/llm/providers/openai/images.rb
  - lib/llm/providers/openai/models.rb
+ - lib/llm/providers/openai/moderations.rb
  - lib/llm/providers/openai/response_parser.rb
  - lib/llm/providers/openai/response_parser/completion_parser.rb
  - lib/llm/providers/openai/response_parser/respond_parser.rb
@@ -242,6 +249,8 @@ files:
  - lib/llm/response/filelist.rb
  - lib/llm/response/image.rb
  - lib/llm/response/modellist.rb
+ - lib/llm/response/moderationlist.rb
+ - lib/llm/response/moderationlist/moderation.rb
  - lib/llm/response/respond.rb
  - lib/llm/utils.rb
  - lib/llm/version.rb
@@ -271,7 +280,7 @@ rubygems_version: 3.5.23
  signing_key:
  specification_version: 4
  summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
- OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple and composable
- – with full support for chat, tool calling, audio, images, files, and JSON Schema
- generation.
+ OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple and
+ composable – with full support for chat, tool calling, audio, images, files, and
+ JSON Schema generation.