llm.rb 0.7.1 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. checksums.yaml +4 -4
  2. data/README.md +40 -24
  3. data/lib/json/schema/array.rb +1 -1
  4. data/lib/llm/buffer.rb +2 -2
  5. data/lib/llm/chat/conversable.rb +2 -2
  6. data/lib/llm/error.rb +12 -4
  7. data/lib/llm/message.rb +1 -1
  8. data/lib/llm/model.rb +1 -1
  9. data/lib/llm/{core_ext/ostruct.rb → object/builder.rb} +8 -12
  10. data/lib/llm/object/kernel.rb +45 -0
  11. data/lib/llm/object.rb +71 -0
  12. data/lib/llm/provider.rb +21 -0
  13. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +2 -2
  14. data/lib/llm/providers/anthropic.rb +2 -2
  15. data/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
  16. data/lib/llm/providers/deepseek/format.rb +28 -0
  17. data/lib/llm/providers/deepseek.rb +60 -0
  18. data/lib/llm/providers/gemini/files.rb +1 -1
  19. data/lib/llm/providers/gemini/response_parser/completion_parser.rb +2 -2
  20. data/lib/llm/providers/gemini.rb +2 -2
  21. data/lib/llm/providers/llamacpp.rb +16 -2
  22. data/lib/llm/providers/ollama/format/completion_format.rb +1 -1
  23. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +2 -2
  24. data/lib/llm/providers/ollama.rb +5 -5
  25. data/lib/llm/providers/openai/files.rb +3 -3
  26. data/lib/llm/providers/openai/format/moderation_format.rb +35 -0
  27. data/lib/llm/providers/openai/format.rb +3 -3
  28. data/lib/llm/providers/openai/moderations.rb +71 -0
  29. data/lib/llm/providers/openai/response_parser/completion_parser.rb +2 -2
  30. data/lib/llm/providers/openai/response_parser/respond_parser.rb +2 -2
  31. data/lib/llm/providers/openai/response_parser.rb +12 -0
  32. data/lib/llm/providers/openai/responses.rb +4 -4
  33. data/lib/llm/providers/openai.rb +13 -2
  34. data/lib/llm/response/filelist.rb +1 -1
  35. data/lib/llm/response/image.rb +1 -1
  36. data/lib/llm/response/modellist.rb +1 -1
  37. data/lib/llm/response/moderationlist/moderation.rb +47 -0
  38. data/lib/llm/response/moderationlist.rb +51 -0
  39. data/lib/llm/response.rb +1 -0
  40. data/lib/llm/version.rb +1 -1
  41. data/lib/llm.rb +9 -2
  42. data/llm.gemspec +1 -1
  43. metadata +18 -9
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 7d5d93a645b666da3d6947c2076189063aec26e7bc3381cfb84d6a6aea4ce8fa
- data.tar.gz: db764cd8e9180a3c21ca5bf2b35d8a5fa3f525cb63101381aca09f9e92cb5d37
+ metadata.gz: 9b4e83ac151c51faaa4a1e275058091a9ce6f61c3dc10e879a6215b0f1498aad
+ data.tar.gz: f78b7bbeaece69384d6b38014e9d1d99816195d8536a310a25d2a23479dda122
  SHA512:
- metadata.gz: c17999419e02e8c2d9d689299d149dd76077b1f52154557253dd9fe3876ff1109eaaa29d7d7aceda8147f38aa572261b455eaa253fbede212b874af42c8e03e0
- data.tar.gz: 1061af3f752a1e8cbc37c8929a23f6bfa97328f66f3adf030e435f468785ffa6a3aae32fe5120936f872b60d8a48a31fcbf35cee0de66b8ee12ddedf17228beb
+ metadata.gz: e117602fae5643713a159d633201cd88e94a339763710bbb788b3b1439e39bbbff9f2c221975fc58e1b57aabdf8d0d935d69dbc6acbece84e98701e129cf3c3d
+ data.tar.gz: 79f2ef053bf500ba9e5ab76c62abdb69ab93ba43b2f11fce867d008e64fbe09e154c1a30fbfdeb08ae30a5442b5d7e5876aa42c788dce8d0778c786f0a69adee
data/README.md CHANGED
@@ -1,9 +1,9 @@
  ## About
 
  llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
- includes OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple
- and composable – with full support for chat, tool calling, audio,
- images, files, and JSON Schema generation.
+ includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp.
+ It's fast, simple and composable – with full support for chat,
+ tool calling, audio, images, files, and JSON Schema generation.
 
  ## Features
 
@@ -11,7 +11,6 @@ images, files, and JSON Schema generation.
  - ✅ A single unified interface for multiple providers
  - 📦 Zero dependencies outside Ruby's standard library
  - 🚀 Optimized for performance and low memory usage
- - 🔌 Retrieve models dynamically for introspection and selection
 
  #### Chat, Agents
  - 🧠 Stateless and stateful chat via completions and responses API
@@ -24,8 +23,9 @@ images, files, and JSON Schema generation.
  - 📎 File uploads and prompt-aware file interaction
  - 💡 Multimodal prompts (text, images, PDFs, URLs, files)
 
- #### Embeddings
+ #### Miscellaneous
  - 🧮 Text embeddings and vector support
+ - 🔌 Retrieve models dynamically for introspection and selection
 
  ## Demos
 
@@ -35,13 +35,13 @@ images, files, and JSON Schema generation.
  </details>
 
  <details>
- <summary><b>2. Files: import at boot time</b></summary>
- <img src="share/llm-shell/examples/files-boottime.gif">
+ <summary><b>2. Files: import at runtime</b></summary>
+ <img src="share/llm-shell/examples/files-runtime.gif">
  </details>
 
  <details>
- <summary><b>3. Files: import at runtime</b></summary>
- <img src="share/llm-shell/examples/files-runtime.gif">
+ <summary><b>3. Files: import at boot time</b></summary>
+ <img src="share/llm-shell/examples/files-boottime.gif">
  </details>
 
  ## Examples
@@ -59,12 +59,18 @@ using an API key (if required) and an optional set of configuration options via
  #!/usr/bin/env ruby
  require "llm"
 
+ ##
+ # cloud providers
  llm = LLM.openai(key: "yourapikey")
  llm = LLM.gemini(key: "yourapikey")
  llm = LLM.anthropic(key: "yourapikey")
+ llm = LLM.deepseek(key: "yourapikey")
+ llm = LLM.voyageai(key: "yourapikey")
+
+ ##
+ # local providers
  llm = LLM.ollama(key: nil)
  llm = LLM.llamacpp(key: nil)
- llm = LLM.voyageai(key: "yourapikey")
  ```
 
  ### Conversations
@@ -73,7 +79,7 @@ llm = LLM.voyageai(key: "yourapikey")
 
  > This example uses the stateless chat completions API that all
  > providers support. A similar example for OpenAI's stateful
- > responses API is available in the [docs/](docs/OPENAI_RESPONSES.md)
+ > responses API is available in the [docs/](docs/OPENAI.md)
  > directory.
 
  The following example enables lazy mode for a
@@ -121,11 +127,12 @@ msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
 
  #### Structured
 
- All LLM providers except Anthropic allow a client to describe the structure
- of a response that a LLM emits according to a schema that is described by JSON.
- The schema lets a client describe what JSON object (or value) an LLM should emit,
- and the LLM will abide by the schema. See also: [JSON Schema website](https://json-schema.org/overview/what-is-jsonschema).
- We will use the
+ All LLM providers except Anthropic and DeepSeek allow a client to describe
+ the structure of a response that a LLM emits according to a schema that is
+ described by JSON. The schema lets a client describe what JSON object (or value)
+ an LLM should emit, and the LLM will abide by the schema.
+ See also: [JSON Schema website](https://json-schema.org/overview/what-is-jsonschema).
+ We will use the
  [llmrb/json-schema](https://github.com/llmrb/json-schema)
  library for the sake of the examples &ndash; the interface is designed so you
  could drop in any other library in its place:
@@ -134,22 +141,31 @@ could drop in any other library in its place:
  #!/usr/bin/env ruby
  require "llm"
 
+ ##
+ # Objects
  llm = LLM.openai(key: ENV["KEY"])
- schema = llm.schema.object({fruit: llm.schema.string.enum("Apple", "Orange", "Pineapple")})
+ schema = llm.schema.object(answer: llm.schema.integer.required)
+ bot = LLM::Chat.new(llm, schema:).lazy
+ bot.chat "Does the earth orbit the sun?", role: :user
+ bot.messages.find(&:assistant?).content! # => {answer: 1}
+
+ ##
+ # Enums
+ schema = llm.schema.object(fruit: llm.schema.string.enum("Apple", "Orange", "Pineapple"))
  bot = LLM::Chat.new(llm, schema:).lazy
  bot.chat "Your favorite fruit is Pineapple", role: :system
  bot.chat "What fruit is your favorite?", role: :user
  bot.messages.find(&:assistant?).content! # => {fruit: "Pineapple"}
 
- schema = llm.schema.object({answer: llm.schema.integer.required})
+ ##
+ # Arrays
+ schema = llm.schema.object(answers: llm.schema.array(llm.schema.integer.required))
  bot = LLM::Chat.new(llm, schema:).lazy
+ bot.chat "Answer all of my questions", role: :system
  bot.chat "Tell me the answer to ((5 + 5) / 2)", role: :user
- bot.messages.find(&:assistant?).content! # => {answer: 5}
-
- schema = llm.schema.object({probability: llm.schema.number.required})
- bot = LLM::Chat.new(llm, schema:).lazy
- bot.chat "Does the earth orbit the sun?", role: :user
- bot.messages.find(&:assistant?).content! # => {probability: 1}
+ bot.chat "Tell me the answer to ((5 + 5) / 2) * 2", role: :user
+ bot.chat "Tell me the answer to ((5 + 5) / 2) * 2 + 1", role: :user
+ bot.messages.find(&:assistant?).content! # => {answers: [5, 10, 11]}
  ```
 
  ### Tools
data/lib/json/schema/array.rb CHANGED
@@ -7,7 +7,7 @@ class JSON::Schema
  # {JSON::Schema::Leaf JSON::Schema::Leaf} and provides methods that
  # can act as constraints.
  class Array < Leaf
- def initialize(*items)
+ def initialize(items)
  @items = items
  end
 
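A note on the `JSON::Schema::Array` change above: the constructor now takes a single items schema rather than a variadic list, which matches the README's new "Arrays" example. A minimal sketch of the new call shape (assuming an OpenAI key in `ENV["KEY"]`):

```ruby
# Sketch: the array schema now receives one items schema (not *items),
# as in the README's "Arrays" example.
require "llm"

llm    = LLM.openai(key: ENV["KEY"])
schema = llm.schema.object(answers: llm.schema.array(llm.schema.integer.required))
```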
data/lib/llm/buffer.rb CHANGED
@@ -82,7 +82,7 @@ module LLM
  message.content,
  params.merge(role:, messages:)
  )
- @completed.concat([*pendings, message, completion.choices[0]])
+ @completed.concat([*pendings, message, *completion.choices[0]])
  @pending.clear
  end
 
@@ -95,7 +95,7 @@ module LLM
  @response ? {previous_response_id: @response.id} : {}
  ].inject({}, &:merge!)
  @response = @provider.responses.create(message.content, params.merge(role:))
- @completed.concat([*pendings, message, @response.outputs[0]])
+ @completed.concat([*pendings, message, *@response.outputs[0]])
  @pending.clear
  end
  end
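The splat added to `completion.choices[0]` (and `@response.outputs[0]`) guards the completed buffer against a nil first element: splatting nil into an array literal contributes nothing, while embedding it directly would store a nil message. A small sketch of the difference:

```ruby
# Splatting nil contributes no elements; embedding nil stores a nil entry.
completed = []
choice = nil
completed.concat([:message, choice])   # completed == [:message, nil]

completed = []
completed.concat([:message, *choice])  # completed == [:message]
```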
data/lib/llm/chat/conversable.rb CHANGED
@@ -24,7 +24,7 @@ class LLM::Chat
  def sync_response(prompt, params = {})
  role = params[:role]
  @response = create_response!(prompt, params)
- @messages.concat [Message.new(role, prompt), @response.outputs[0]]
+ @messages.concat [Message.new(role, prompt), *@response.outputs[0]]
  end
 
  ##
@@ -45,7 +45,7 @@ class LLM::Chat
  def sync_completion(prompt, params = {})
  role = params[:role]
  completion = create_completion!(prompt, params)
- @messages.concat [Message.new(role, prompt), completion.choices[0]]
+ @messages.concat [Message.new(role, prompt), *completion.choices[0]]
  end
 
  include LLM
data/lib/llm/error.rb CHANGED
@@ -16,11 +16,11 @@ module LLM
  # @return [Net::HTTPResponse]
  # Returns the response associated with an error
  attr_accessor :response
- end
 
- ##
- # When a prompt is given an object that's not understood
- PromptError = Class.new(Error)
+ def message
+ [super, response.body].join("\n")
+ end
+ end
 
  ##
  # HTTPUnauthorized
  Unauthorized = Class.new(ResponseError)
@@ -29,5 +29,13 @@ module LLM
  ##
  # HTTPTooManyRequests
  RateLimit = Class.new(ResponseError)
+
+ ##
+ # When given an input that is not understood
+ FormatError = Class.new(Error)
+
+ ##
+ # When given a prompt that is not understood
+ PromptError = Class.new(FormatError)
  end
  end
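With this change `PromptError` is reparented under the new `FormatError`, and `ResponseError#message` now appends the HTTP response body. A sketch of the resulting rescue behaviour, assuming the classes nest under `LLM::Error` as the `LLM::Error::PromptError` references elsewhere in this diff suggest:

```ruby
# PromptError < FormatError < Error, so a FormatError rescue
# also catches prompt errors raised by the formatters.
begin
  raise LLM::Error::PromptError, "unsupported prompt object"
rescue LLM::Error::FormatError => ex
  puts ex.message # => "unsupported prompt object"
end
```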
data/lib/llm/message.rb CHANGED
@@ -121,7 +121,7 @@ module LLM
  private
 
  def tool_calls
- @tool_calls ||= OpenStruct.from_hash(@extra[:tool_calls] || [])
+ @tool_calls ||= LLM::Object.from_hash(@extra[:tool_calls] || [])
  end
  end
  end
data/lib/llm/model.rb CHANGED
@@ -4,7 +4,7 @@
  # The {LLM::Model LLM::Model} class represents an LLM model that
  # is available to use. Its properties are delegated to the underlying
  # response body, and vary by provider.
- class LLM::Model < OpenStruct
+ class LLM::Model < LLM::Object
  ##
  # Returns a subclass of {LLM::Provider LLM::Provider}
  # @return [LLM::Provider]
data/lib/llm/{core_ext/ostruct.rb → object/builder.rb} RENAMED
@@ -1,17 +1,18 @@
  # frozen_string_literal: true
 
- require "ostruct"
- class OpenStruct
- module FromHash
+ class LLM::Object
+ ##
+ # @private
+ module Builder
  ##
  # @example
- # obj = OpenStruct.from_hash(person: {name: 'John'})
+ # obj = LLM::Object.from_hash(person: {name: 'John'})
  # obj.person.name # => 'John'
- # obj.person.class # => OpenStruct
+ # obj.person.class # => LLM::Object
  # @param [Hash, Array] obj
  # A Hash object
- # @return [OpenStruct]
- # An OpenStruct object initialized by visiting `obj` with recursion
+ # @return [LLM::Object]
+ # An LLM::Object object initialized by visiting `obj` with recursion
  def from_hash(obj)
  case obj
  when self then from_hash(obj.to_h)
@@ -35,9 +36,4 @@ class OpenStruct
  end
  end
  end
- extend FromHash
-
- def to_json(...)
- to_h.to_json(...)
- end
  end
data/lib/llm/object/kernel.rb ADDED
@@ -0,0 +1,45 @@
+ # frozen_string_literal: true
+
+ class LLM::Object
+ ##
+ # @private
+ module Kernel
+ def tap(...)
+ ::Kernel.instance_method(:tap).bind(self).call(...)
+ end
+
+ def instance_of?(...)
+ ::Kernel.instance_method(:instance_of?).bind(self).call(...)
+ end
+
+ def method(...)
+ ::Kernel.instance_method(:method).bind(self).call(...)
+ end
+
+ def kind_of?(...)
+ ::Kernel.instance_method(:kind_of?).bind(self).call(...)
+ end
+ alias_method :is_a?, :kind_of?
+
+ def respond_to?(m, include_private = false)
+ @h.key?(m.to_sym) || self.class.instance_methods.include?(m) || super
+ end
+
+ def respond_to_missing?(m, include_private = false)
+ @h.key?(m.to_sym) || super
+ end
+
+ def object_id
+ ::Kernel.instance_method(:object_id).bind(self).call
+ end
+
+ def class
+ ::Kernel.instance_method(:class).bind(self).call
+ end
+
+ def inspect
+ "#<#{self.class}:0x#{object_id.to_s(16)} @h=#{to_h.inspect}>"
+ end
+ alias_method :to_s, :inspect
+ end
+ end
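Because `LLM::Object` descends from `BasicObject`, it inherits almost nothing, so this module restores a handful of `Kernel` methods by rebinding the real implementations (since Ruby 3.0, a module's unbound methods can be bound to any object, which is what makes the trick work). A short sketch of the restored behaviour:

```ruby
# Methods like tap, is_a? and respond_to? work despite the BasicObject
# parent, because each wrapper rebinds the ::Kernel implementation to self.
obj = LLM::Object.new(name: "John")
obj.is_a?(LLM::Object)  # => true
obj.respond_to?(:name)  # => true
obj.tap { |o| o.inspect }
```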
data/lib/llm/object.rb ADDED
@@ -0,0 +1,71 @@
+ # frozen_string_literal: true
+
+ ##
+ # The {LLM::Object LLM::Object} class encapsulates a Hash object, and it
+ # allows a consumer to get and set Hash keys via regular methods. It is
+ # similar in spirit to OpenStruct, and it was introduced after OpenStruct
+ # became a bundled gem (and not a default gem) in Ruby 3.5.
+ class LLM::Object < BasicObject
+ require_relative "object/builder"
+ require_relative "object/kernel"
+
+ extend Builder
+ include Kernel
+ include ::Enumerable
+ defined?(::PP) ? include(::PP::ObjectMixin) : nil
+
+ ##
+ # @param [Hash] h
+ # @return [LLM::Object]
+ def initialize(h)
+ @h = h.transform_keys(&:to_sym) || h
+ end
+
+ ##
+ # Yields a key|value pair to a block.
+ # @yieldparam [Symbol] k
+ # @yieldparam [Object] v
+ # @return [void]
+ def each(&)
+ @h.each(&)
+ end
+
+ ##
+ # @param [Symbol, #to_sym] k
+ # @return [Object]
+ def [](k)
+ @h[k.to_sym]
+ end
+
+ ##
+ # @param [Symbol, #to_sym] k
+ # @param [Object] v
+ # @return [void]
+ def []=(k, v)
+ @h[k.to_sym] = v
+ end
+
+ ##
+ # @return [String]
+ def to_json(...)
+ to_h.to_json(...)
+ end
+
+ ##
+ # @return [Hash]
+ def to_h
+ @h
+ end
+
+ private
+
+ def method_missing(m, *args, &b)
+ if m.to_s.end_with?("=")
+ @h[m[0..-2].to_sym] = args.first
+ elsif @h.key?(m)
+ @h[m]
+ else
+ nil
+ end
+ end
+ end
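Taken together with the builder and kernel modules, the class behaves like a recursive OpenStruct replacement. A brief usage sketch based on the methods defined above:

```ruby
obj = LLM::Object.from_hash(person: {name: "John"})
obj.person.name     # => "John" (nested hashes are wrapped recursively)
obj[:person].class  # => LLM::Object
obj.age = 30        # writer handled by method_missing
obj.to_h            # => {person: #<LLM::Object ...>, age: 30}
```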
data/lib/llm/provider.rb CHANGED
@@ -168,6 +168,13 @@ class LLM::Provider
  raise NotImplementedError
  end
 
+ ##
+ # @return [LLM::OpenAI::Moderations]
+ # Returns an interface to the moderations API
+ def moderations
+ raise NotImplementedError
+ end
+
  ##
  # @return [String]
  # Returns the role of the assistant in the conversation.
@@ -193,6 +200,20 @@ class LLM::Provider
  end
  end
 
+ ##
+ # Add one or more headers to all requests
+ # @example
+ # llm = LLM.openai(key: ENV["KEY"])
+ # llm.with(headers: {"OpenAI-Organization" => ENV["ORG"]})
+ # llm.with(headers: {"OpenAI-Project" => ENV["PROJECT"]})
+ # @param [Hash<String,String>] headers
+ # One or more headers
+ # @return [LLM::Provider]
+ # Returns self
+ def with(headers:)
+ tap { (@headers ||= {}).merge!(headers) }
+ end
+
  private
 
  ##
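Since `with(headers:)` is implemented with `tap`, it returns the provider itself and can be chained; repeated calls merge into the same header hash, which the provider-specific `headers` methods changed below fold into every request. For example, mirroring the `@example` above:

```ruby
llm = LLM.openai(key: ENV["KEY"])
      .with(headers: {"OpenAI-Organization" => ENV["ORG"]})
      .with(headers: {"OpenAI-Project" => ENV["PROJECT"]})
```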
data/lib/llm/providers/anthropic/response_parser/completion_parser.rb CHANGED
@@ -5,7 +5,7 @@ module LLM::Anthropic::ResponseParser
  # @private
  class CompletionParser
  def initialize(body)
- @body = OpenStruct.from_hash(body)
+ @body = LLM::Object.from_hash(body)
  end
 
  def format(response)
@@ -34,7 +34,7 @@ module LLM::Anthropic::ResponseParser
  name: tool.name,
  arguments: tool.input
  }
- OpenStruct.new(tool)
+ LLM::Object.new(tool)
  end
  end
 
data/lib/llm/providers/anthropic.rb CHANGED
@@ -84,11 +84,11 @@ module LLM
  private
 
  def headers
- {
+ (@headers || {}).merge(
  "Content-Type" => "application/json",
  "x-api-key" => @key,
  "anthropic-version" => "2023-06-01"
- }
+ )
  end
 
  def response_parser
data/lib/llm/providers/deepseek/format/completion_format.rb ADDED
@@ -0,0 +1,68 @@
+ # frozen_string_literal: true
+
+ module LLM::DeepSeek::Format
+ ##
+ # @private
+ class CompletionFormat
+ ##
+ # @param [LLM::Message, Hash] message
+ # The message to format
+ def initialize(message)
+ @message = message
+ end
+
+ ##
+ # Formats the message for the DeepSeek chat completions API
+ # @return [Hash]
+ def format
+ catch(:abort) do
+ if Hash === message
+ {role: message[:role], content: format_content(message[:content])}
+ elsif message.tool_call?
+ {role: message.role, content: nil, tool_calls: message.extra[:original_tool_calls]}
+ else
+ format_message
+ end
+ end
+ end
+
+ private
+
+ def format_content(content)
+ case content
+ when String
+ content.to_s
+ when LLM::Message
+ format_content(content.content)
+ when LLM::Function::Return
+ throw(:abort, {role: "tool", tool_call_id: content.id, content: JSON.dump(content.value)})
+ else
+ raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+ "is not supported by the DeepSeek chat completions API"
+ end
+ end
+
+ def format_message
+ case content
+ when Array
+ format_array
+ else
+ {role: message.role, content: format_content(content)}
+ end
+ end
+
+ def format_array
+ if content.empty?
+ nil
+ elsif returns.any?
+ returns.map { {role: "tool", tool_call_id: _1.id, content: JSON.dump(_1.value)} }
+ else
+ {role: message.role, content: content.flat_map { format_content(_1) }}
+ end
+ end
+
+ def message = @message
+ def content = message.content
+ def returns = content.grep(LLM::Function::Return)
+ end
+ end
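The `catch(:abort)`/`throw` pair above lets `format_content` abandon the normal message envelope mid-format: when it encounters a `LLM::Function::Return`, the thrown hash becomes the return value of `format` itself. A minimal sketch of that control flow:

```ruby
# throw(:abort, value) unwinds to the enclosing catch(:abort),
# which returns value immediately.
result = catch(:abort) do
  throw(:abort, {role: "tool", content: "tool output"})
  {role: "user", content: "never reached"}
end
result # => {role: "tool", content: "tool output"}
```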
data/lib/llm/providers/deepseek/format.rb ADDED
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ class LLM::DeepSeek
+ ##
+ # @private
+ module Format
+ require_relative "format/completion_format"
+ ##
+ # @param [Array<LLM::Message>] messages
+ # The messages to format
+ # @return [Array<Hash>]
+ def format(messages, ...)
+ messages.filter_map do |message|
+ CompletionFormat.new(message).format
+ end
+ end
+
+ private
+
+ ##
+ # @param [Hash] params
+ # @return [Hash]
+ def format_tools(params)
+ tools = params.delete(:tools)
+ (tools.nil? || tools.empty?) ? {} : {tools: tools.map { _1.format(self) }}
+ end
+ end
+ end
data/lib/llm/providers/deepseek.rb ADDED
@@ -0,0 +1,60 @@
+ # frozen_string_literal: true
+
+ require_relative "openai" unless defined?(LLM::OpenAI)
+
+ module LLM
+ ##
+ # The DeepSeek class implements a provider for
+ # [DeepSeek](https://deepseek.com)
+ # through its OpenAI-compatible API provided via
+ # their [web platform](https://platform.deepseek.com).
+ class DeepSeek < OpenAI
+ require_relative "deepseek/format"
+ include DeepSeek::Format
+
+ ##
+ # @param (see LLM::Provider#initialize)
+ # @return [LLM::DeepSeek]
+ def initialize(host: "api.deepseek.com", port: 443, ssl: true, **)
+ super
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def files
+ raise NotImplementedError
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def images
+ raise NotImplementedError
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def audio
+ raise NotImplementedError
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def moderations
+ raise NotImplementedError
+ end
+
+ ##
+ # @raise [NotImplementedError]
+ def responses
+ raise NotImplementedError
+ end
+
+ ##
+ # Returns the default model for chat completions
+ # @see https://api-docs.deepseek.com/quick_start/pricing deepseek-chat
+ # @return [String]
+ def default_model
+ "deepseek-chat"
+ end
+ end
+ end
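A short usage sketch for the new provider, assuming an API key from the DeepSeek platform; everything except the endpoints stubbed with `NotImplementedError` is inherited from the OpenAI provider:

```ruby
require "llm"

llm = LLM.deepseek(key: ENV["KEY"]) # defaults to api.deepseek.com over SSL
bot = LLM::Chat.new(llm).lazy       # uses the "deepseek-chat" default model
bot.chat "Hello, world", role: :user
bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
```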
data/lib/llm/providers/gemini/files.rb CHANGED
@@ -61,7 +61,7 @@ class LLM::Gemini
  LLM::Response::FileList.new(res).tap { |filelist|
  files = filelist.body["files"]&.map do |file|
  file = file.transform_keys { snakecase(_1) }
- OpenStruct.from_hash(file)
+ LLM::Object.from_hash(file)
  end || []
  filelist.files = files
  }
data/lib/llm/providers/gemini/response_parser/completion_parser.rb CHANGED
@@ -3,7 +3,7 @@
  module LLM::Gemini::ResponseParser
  class CompletionParser
  def initialize(body)
- @body = OpenStruct.from_hash(body)
+ @body = LLM::Object.from_hash(body)
  end
 
  def format(response)
@@ -32,7 +32,7 @@ module LLM::Gemini::ResponseParser
  def format_tool_calls(tools)
  (tools || []).map do |tool|
  function = {name: tool.name, arguments: tool.args}
- OpenStruct.new(function)
+ LLM::Object.new(function)
  end
  end
 
data/lib/llm/providers/gemini.rb CHANGED
@@ -131,9 +131,9 @@ module LLM
  private
 
  def headers
- {
+ (@headers || {}).merge(
  "Content-Type" => "application/json"
- }
+ )
  end
 
  def response_parser