llm.rb 0.8.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. checksums.yaml +4 -4
  2. data/README.md +62 -48
  3. data/lib/llm/{chat → bot}/builder.rb +1 -1
  4. data/lib/llm/bot/conversable.rb +31 -0
  5. data/lib/llm/{chat → bot}/prompt/completion.rb +14 -4
  6. data/lib/llm/{chat → bot}/prompt/respond.rb +16 -5
  7. data/lib/llm/{chat.rb → bot.rb} +48 -66
  8. data/lib/llm/error.rb +22 -22
  9. data/lib/llm/event_handler.rb +44 -0
  10. data/lib/llm/eventstream/event.rb +69 -0
  11. data/lib/llm/eventstream/parser.rb +88 -0
  12. data/lib/llm/eventstream.rb +8 -0
  13. data/lib/llm/function.rb +9 -12
  14. data/lib/llm/object/builder.rb +8 -9
  15. data/lib/llm/object/kernel.rb +1 -1
  16. data/lib/llm/object.rb +7 -1
  17. data/lib/llm/provider.rb +61 -26
  18. data/lib/llm/providers/anthropic/error_handler.rb +3 -3
  19. data/lib/llm/providers/anthropic/models.rb +3 -7
  20. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +3 -3
  21. data/lib/llm/providers/anthropic/response_parser.rb +1 -0
  22. data/lib/llm/providers/anthropic/stream_parser.rb +66 -0
  23. data/lib/llm/providers/anthropic.rb +9 -4
  24. data/lib/llm/providers/gemini/error_handler.rb +4 -4
  25. data/lib/llm/providers/gemini/files.rb +12 -15
  26. data/lib/llm/providers/gemini/images.rb +4 -8
  27. data/lib/llm/providers/gemini/models.rb +3 -7
  28. data/lib/llm/providers/gemini/stream_parser.rb +69 -0
  29. data/lib/llm/providers/gemini.rb +19 -11
  30. data/lib/llm/providers/ollama/error_handler.rb +3 -3
  31. data/lib/llm/providers/ollama/format/completion_format.rb +1 -1
  32. data/lib/llm/providers/ollama/models.rb +3 -7
  33. data/lib/llm/providers/ollama/stream_parser.rb +44 -0
  34. data/lib/llm/providers/ollama.rb +13 -6
  35. data/lib/llm/providers/openai/audio.rb +5 -9
  36. data/lib/llm/providers/openai/error_handler.rb +3 -3
  37. data/lib/llm/providers/openai/files.rb +12 -15
  38. data/lib/llm/providers/openai/images.rb +8 -11
  39. data/lib/llm/providers/openai/models.rb +3 -7
  40. data/lib/llm/providers/openai/moderations.rb +3 -7
  41. data/lib/llm/providers/openai/response_parser/completion_parser.rb +3 -3
  42. data/lib/llm/providers/openai/response_parser.rb +3 -0
  43. data/lib/llm/providers/openai/responses.rb +10 -12
  44. data/lib/llm/providers/openai/stream_parser.rb +77 -0
  45. data/lib/llm/providers/openai.rb +11 -7
  46. data/lib/llm/providers/voyageai/error_handler.rb +3 -3
  47. data/lib/llm/providers/voyageai.rb +1 -1
  48. data/lib/llm/version.rb +1 -1
  49. data/lib/llm.rb +4 -2
  50. data/llm.gemspec +1 -1
  51. metadata +30 -25
  52. data/lib/llm/chat/conversable.rb +0 -53
  53. /data/lib/{json → llm/json}/schema/array.rb +0 -0
  54. /data/lib/{json → llm/json}/schema/boolean.rb +0 -0
  55. /data/lib/{json → llm/json}/schema/integer.rb +0 -0
  56. /data/lib/{json → llm/json}/schema/leaf.rb +0 -0
  57. /data/lib/{json → llm/json}/schema/null.rb +0 -0
  58. /data/lib/{json → llm/json}/schema/number.rb +0 -0
  59. /data/lib/{json → llm/json}/schema/object.rb +0 -0
  60. /data/lib/{json → llm/json}/schema/string.rb +0 -0
  61. /data/lib/{json → llm/json}/schema/version.rb +0 -0
  62. /data/lib/{json → llm/json}/schema.rb +0 -0
data/lib/llm/providers/openai/moderations.rb CHANGED
@@ -54,18 +54,14 @@ class LLM::OpenAI
  req = Net::HTTP::Post.new("/v1/moderations", headers)
  input = Format::ModerationFormat.new(input).format
  req.body = JSON.dump({input:, model:}.merge!(params))
- res = request(http, req)
+ res = execute(request: req)
  LLM::Response::ModerationList.new(res).extend(response_parser).first
  end

  private

- def http
- @provider.instance_variable_get(:@http)
- end
-
- [:response_parser, :headers, :request].each do |m|
- define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+ [:response_parser, :headers, :execute].each do |m|
+ define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
  end
  end
  end
data/lib/llm/providers/openai/response_parser/completion_parser.rb CHANGED
@@ -47,9 +47,9 @@ module LLM::OpenAI::ResponseParser

  def body = @body
  def model = body.model
- def prompt_tokens = body.usage.prompt_tokens
- def completion_tokens = body.usage.completion_tokens
- def total_tokens = body.usage.total_tokens
+ def prompt_tokens = body.usage&.prompt_tokens
+ def completion_tokens = body.usage&.completion_tokens
+ def total_tokens = body.usage&.total_tokens
  def choices = body.choices
  end
  end
data/lib/llm/providers/openai/response_parser.rb CHANGED
@@ -4,6 +4,9 @@ class LLM::OpenAI
  ##
  # @private
  module ResponseParser
+ require_relative "response_parser/completion_parser"
+ require_relative "response_parser/respond_parser"
+
  ##
  # @param [Hash] body
  # The response body from the LLM provider
data/lib/llm/providers/openai/responses.rb CHANGED
@@ -8,7 +8,7 @@ class LLM::OpenAI
  # conversation state across multiple requests. This is useful when you want to
  # save bandwidth and/or not maintain the message thread by yourself.
  #
- # @example
+ # @example example #1
  # #!/usr/bin/env ruby
  # require "llm"
  #
@@ -16,14 +16,16 @@ class LLM::OpenAI
  # res1 = llm.responses.create "Your task is to help me with math", role: :developer
  # res2 = llm.responses.create "5 + 5 = ?", role: :user, previous_response_id: res1.id
  # [res1,res2].each { llm.responses.delete(_1) }
- # @example
+ #
+ # @example example #2
  # #!/usr/bin/env ruby
  # require "llm"
  #
  # llm = LLM.openai(ENV["KEY"])
  # file = llm.files.create file: "/images/hat.png"
  # res = llm.responses.create ["Describe the image", file]
- # @example
+ #
+ # @example example #3
  # #!/usr/bin/env ruby
  # require "llm"
  #
@@ -58,7 +60,7 @@ class LLM::OpenAI
  messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
  body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
  set_body_stream(req, StringIO.new(body))
- res = request(http, req)
+ res = execute(request: req)
  LLM::Response::Respond.new(res).extend(response_parser)
  end

@@ -72,7 +74,7 @@ class LLM::OpenAI
  response_id = response.respond_to?(:id) ? response.id : response
  query = URI.encode_www_form(params)
  req = Net::HTTP::Get.new("/v1/responses/#{response_id}?#{query}", headers)
- res = request(http, req)
+ res = execute(request: req)
  LLM::Response::Respond.new(res).extend(response_parser)
  end

@@ -85,20 +87,16 @@ class LLM::OpenAI
  def delete(response)
  response_id = response.respond_to?(:id) ? response.id : response
  req = Net::HTTP::Delete.new("/v1/responses/#{response_id}", headers)
- res = request(http, req)
+ res = execute(request: req)
  LLM::Object.from_hash JSON.parse(res.body)
  end

  private

- def http
- @provider.instance_variable_get(:@http)
- end
-
  [:response_parser, :headers,
- :request, :set_body_stream,
+ :execute, :set_body_stream,
  :format_schema, :format_tools].each do |m|
- define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+ define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
  end
  end
  end
data/lib/llm/providers/openai/stream_parser.rb ADDED
@@ -0,0 +1,77 @@
+ # frozen_string_literal: true
+
+ class LLM::OpenAI
+ ##
+ # @private
+ class StreamParser
+ ##
+ # Returns the fully constructed response body
+ # @return [LLM::Object]
+ attr_reader :body
+
+ ##
+ # @return [LLM::OpenAI::Chunk]
+ def initialize(io)
+ @body = LLM::Object.new
+ @io = io
+ end
+
+ ##
+ # @param [Hash] chunk
+ # @return [LLM::OpenAI::Chunk]
+ def parse!(chunk)
+ tap { merge!(chunk) }
+ end
+
+ private
+
+ def merge!(chunk)
+ chunk.each do |key, value|
+ if key == "choices"
+ @body["choices"] ||= []
+ merge_choices!(value)
+ else
+ @body[key] = value
+ end
+ end
+ end
+
+ def merge_choices!(choices)
+ choices.each do |choice|
+ if @body.choices[choice["index"]]
+ target = @body["choices"][choice["index"]]["message"]
+ delta = choice["delta"]
+ delta.each do |key, value|
+ if target[key]
+ if key == "content"
+ target[key] << value
+ @io << value if @io.respond_to?(:<<)
+ elsif key == "tool_calls"
+ merge_tools!(target, value)
+ else
+ target[key] = value
+ end
+ else
+ target[key] = value
+ end
+ end
+ else
+ target = {"message" => {"role" => "assistant"}}
+ @body["choices"][choice["index"]] = target
+ target["message"].merge!(choice["delta"])
+ end
+ end
+ end
+
+ def merge_tools!(target, tools)
+ tools.each.with_index do |toola, index|
+ toolb = target["tool_calls"][index]
+ if toolb
+ toola["function"].each { toolb["function"][_1] << _2 }
+ else
+ target["tool_calls"][index] = toola
+ end
+ end
+ end
+ end
+ end
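For orientation, a minimal, hypothetical sketch of how the parser above accumulates streamed chunks. The chunk hashes are invented for illustration and merely mimic the shape of OpenAI's chat.completion.chunk deltas; only the IO argument and the parse!/body API come from the code above.

    parser = LLM::OpenAI::StreamParser.new($stdout)
    parser.parse!({"choices" => [{"index" => 0, "delta" => {"role" => "assistant", "content" => "Hel"}}]})
    parser.parse!({"choices" => [{"index" => 0, "delta" => {"content" => "lo"}}]})
    parser.body["choices"][0]["message"]["content"] #=> "Hello"
    # Content deltas after the first chunk are also echoed to the IO passed
    # to the constructor ($stdout here), because it responds to #<<.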
data/lib/llm/providers/openai.rb CHANGED
@@ -5,16 +5,15 @@ module LLM
  # The OpenAI class implements a provider for
  # [OpenAI](https://platform.openai.com/)
  class OpenAI < Provider
- require_relative "openai/format"
  require_relative "openai/error_handler"
+ require_relative "openai/format"
+ require_relative "openai/stream_parser"
  require_relative "openai/response_parser"
- require_relative "openai/response_parser/completion_parser"
- require_relative "openai/response_parser/respond_parser"
+ require_relative "openai/models"
  require_relative "openai/responses"
  require_relative "openai/images"
  require_relative "openai/audio"
  require_relative "openai/files"
- require_relative "openai/models"
  require_relative "openai/moderations"

  include Format
@@ -38,7 +37,7 @@ module LLM
  def embed(input, model: "text-embedding-3-small", **params)
  req = Net::HTTP::Post.new("/v1/embeddings", headers)
  req.body = JSON.dump({input:, model:}.merge!(params))
- res = request(@http, req)
+ res = execute(request: req)
  Response::Embedding.new(res).extend(response_parser)
  end

@@ -55,12 +54,13 @@ module LLM
  def complete(prompt, params = {})
  params = {role: :user, model: default_model}.merge!(params)
  params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
- role = params.delete(:role)
+ role, stream = params.delete(:role), params.delete(:stream)
+ params[:stream] = true if stream.respond_to?(:<<) || stream == true
  req = Net::HTTP::Post.new("/v1/chat/completions", headers)
  messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
  body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
  set_body_stream(req, StringIO.new(body))
- res = request(@http, req)
+ res = execute(request: req, stream:)
  Response::Completion.new(res).extend(response_parser)
  end

@@ -140,6 +140,10 @@ module LLM
  LLM::OpenAI::ResponseParser
  end

+ def stream_parser
+ LLM::OpenAI::StreamParser
+ end
+
  def error_handler
  LLM::OpenAI::ErrorHandler
  end
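Taken together with the StreamParser above, the complete() change suggests usage along these lines. This is a hedged sketch based only on this diff, not an excerpt from the gem's documentation: the :stream option appears to accept true or any object responding to #<<.

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.openai(ENV["KEY"])
    # Content deltas are written to $stdout as they arrive; the return value
    # is still an LLM::Response::Completion built from the merged chunks.
    res = llm.complete "Write a haiku about Ruby", stream: $stdout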
data/lib/llm/providers/voyageai/error_handler.rb CHANGED
@@ -21,11 +21,11 @@ class LLM::VoyageAI
  def raise_error!
  case res
  when Net::HTTPUnauthorized
- raise LLM::Error::Unauthorized.new { _1.response = res }, "Authentication error"
+ raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
  when Net::HTTPTooManyRequests
- raise LLM::Error::RateLimit.new { _1.response = res }, "Too many requests"
+ raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
  else
- raise LLM::Error::ResponseError.new { _1.response = res }, "Unexpected response"
+ raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
  end
  end
  end
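For calling code, the move from nested LLM::Error::* classes to flatter LLM::*Error names (see error.rb +22 -22 in the file list) changes rescue clauses roughly as follows; a hedged sketch, assuming the renamed classes are used the same way across providers.

    begin
      llm.embed("hello, world")
    rescue LLM::RateLimitError
      sleep 1
      retry
    rescue LLM::ResponseError => e
      warn "unexpected response: #{e.message}"
    end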
data/lib/llm/providers/voyageai.rb CHANGED
@@ -20,7 +20,7 @@ module LLM
  def embed(input, model: "voyage-2", **params)
  req = Net::HTTP::Post.new("/v1/embeddings", headers)
  req.body = JSON.dump({input:, model:}.merge!(params))
- res = request(@http, req)
+ res = execute(request: req)
  Response::Embedding.new(res).extend(response_parser)
  end

data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module LLM
- VERSION = "0.8.0"
+ VERSION = "0.9.0"
  end
data/lib/llm.rb CHANGED
@@ -2,7 +2,7 @@

  module LLM
  require "stringio"
- require_relative "json/schema"
+ require_relative "llm/json/schema"
  require_relative "llm/object"
  require_relative "llm/version"
  require_relative "llm/utils"
@@ -14,9 +14,11 @@ module LLM
  require_relative "llm/file"
  require_relative "llm/model"
  require_relative "llm/provider"
- require_relative "llm/chat"
+ require_relative "llm/bot"
  require_relative "llm/buffer"
  require_relative "llm/function"
+ require_relative "llm/eventstream"
+ require_relative "llm/event_handler"

  module_function

data/llm.gemspec CHANGED
@@ -11,7 +11,7 @@ Gem::Specification.new do |spec|
  spec.summary = "llm.rb is a zero-dependency Ruby toolkit for " \
  "Large Language Models that includes OpenAI, Gemini, " \
  "Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple " \
- "and composable – with full support for chat, tool calling, audio, " \
+ "and composable – with full support for chat, streaming, tool calling, audio, " \
  "images, files, and JSON Schema generation."
  spec.description = spec.summary
  spec.homepage = "https://github.com/llmrb/llm"
metadata CHANGED
@@ -1,15 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: llm.rb
  version: !ruby/object:Gem::Version
- version: 0.8.0
+ version: 0.9.0
  platform: ruby
  authors:
  - Antar Azri
  - '0x1eef'
- autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-05-17 00:00:00.000000000 Z
+ date: 1980-01-02 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: webmock
@@ -153,8 +152,8 @@ dependencies:
  version: '2.8'
  description: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
  includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple
- and composable – with full support for chat, tool calling, audio, images, files,
- and JSON Schema generation.
+ and composable – with full support for chat, streaming, tool calling, audio, images,
+ files, and JSON Schema generation.
  email:
  - azantar@proton.me
  - 0x1eef@proton.me
@@ -164,26 +163,30 @@ extra_rdoc_files: []
  files:
  - LICENSE
  - README.md
- - lib/json/schema.rb
- - lib/json/schema/array.rb
- - lib/json/schema/boolean.rb
- - lib/json/schema/integer.rb
- - lib/json/schema/leaf.rb
- - lib/json/schema/null.rb
- - lib/json/schema/number.rb
- - lib/json/schema/object.rb
- - lib/json/schema/string.rb
- - lib/json/schema/version.rb
  - lib/llm.rb
+ - lib/llm/bot.rb
+ - lib/llm/bot/builder.rb
+ - lib/llm/bot/conversable.rb
+ - lib/llm/bot/prompt/completion.rb
+ - lib/llm/bot/prompt/respond.rb
  - lib/llm/buffer.rb
- - lib/llm/chat.rb
- - lib/llm/chat/builder.rb
- - lib/llm/chat/conversable.rb
- - lib/llm/chat/prompt/completion.rb
- - lib/llm/chat/prompt/respond.rb
  - lib/llm/error.rb
+ - lib/llm/event_handler.rb
+ - lib/llm/eventstream.rb
+ - lib/llm/eventstream/event.rb
+ - lib/llm/eventstream/parser.rb
  - lib/llm/file.rb
  - lib/llm/function.rb
+ - lib/llm/json/schema.rb
+ - lib/llm/json/schema/array.rb
+ - lib/llm/json/schema/boolean.rb
+ - lib/llm/json/schema/integer.rb
+ - lib/llm/json/schema/leaf.rb
+ - lib/llm/json/schema/null.rb
+ - lib/llm/json/schema/number.rb
+ - lib/llm/json/schema/object.rb
+ - lib/llm/json/schema/string.rb
+ - lib/llm/json/schema/version.rb
  - lib/llm/message.rb
  - lib/llm/mime.rb
  - lib/llm/model.rb
@@ -199,6 +202,7 @@ files:
  - lib/llm/providers/anthropic/models.rb
  - lib/llm/providers/anthropic/response_parser.rb
  - lib/llm/providers/anthropic/response_parser/completion_parser.rb
+ - lib/llm/providers/anthropic/stream_parser.rb
  - lib/llm/providers/deepseek.rb
  - lib/llm/providers/deepseek/format.rb
  - lib/llm/providers/deepseek/format/completion_format.rb
@@ -212,6 +216,7 @@ files:
  - lib/llm/providers/gemini/models.rb
  - lib/llm/providers/gemini/response_parser.rb
  - lib/llm/providers/gemini/response_parser/completion_parser.rb
+ - lib/llm/providers/gemini/stream_parser.rb
  - lib/llm/providers/llamacpp.rb
  - lib/llm/providers/ollama.rb
  - lib/llm/providers/ollama/error_handler.rb
@@ -220,6 +225,7 @@ files:
  - lib/llm/providers/ollama/models.rb
  - lib/llm/providers/ollama/response_parser.rb
  - lib/llm/providers/ollama/response_parser/completion_parser.rb
+ - lib/llm/providers/ollama/stream_parser.rb
  - lib/llm/providers/openai.rb
  - lib/llm/providers/openai/audio.rb
  - lib/llm/providers/openai/error_handler.rb
@@ -235,6 +241,7 @@ files:
  - lib/llm/providers/openai/response_parser/completion_parser.rb
  - lib/llm/providers/openai/response_parser/respond_parser.rb
  - lib/llm/providers/openai/responses.rb
+ - lib/llm/providers/openai/stream_parser.rb
  - lib/llm/providers/voyageai.rb
  - lib/llm/providers/voyageai/error_handler.rb
  - lib/llm/providers/voyageai/response_parser.rb
@@ -261,7 +268,6 @@ licenses:
  metadata:
  homepage_uri: https://github.com/llmrb/llm
  source_code_uri: https://github.com/llmrb/llm
- post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -276,11 +282,10 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.5.23
- signing_key:
+ rubygems_version: 3.6.8
  specification_version: 4
  summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
  OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple and
- composable – with full support for chat, tool calling, audio, images, files, and
- JSON Schema generation.
+ composable – with full support for chat, streaming, tool calling, audio, images,
+ files, and JSON Schema generation.
  test_files: []
data/lib/llm/chat/conversable.rb DELETED
@@ -1,53 +0,0 @@
- # frozen_string_literal: true
-
- class LLM::Chat
- ##
- # @private
- module Conversable
- private
-
- ##
- # Queues a response to be sent to the provider.
- # @param [String] prompt The prompt
- # @param [Hash] params
- # @return [void]
- def async_response(prompt, params = {})
- role = params.delete(:role)
- @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
- end
-
- ##
- # Sends a response to the provider and returns the response.
- # @param [String] prompt The prompt
- # @param [Hash] params
- # @return [LLM::Response::Respond]
- def sync_response(prompt, params = {})
- role = params[:role]
- @response = create_response!(prompt, params)
- @messages.concat [Message.new(role, prompt), *@response.outputs[0]]
- end
-
- ##
- # Queues a completion to be sent to the provider.
- # @param [String] prompt The prompt
- # @param [Hash] params
- # @return [void]
- def async_completion(prompt, params = {})
- role = params.delete(:role)
- @messages.push [LLM::Message.new(role, prompt), @params.merge(params), :complete]
- end
-
- ##
- # Sends a completion to the provider and returns the completion.
- # @param [String] prompt The prompt
- # @param [Hash] params
- # @return [LLM::Response::Completion]
- def sync_completion(prompt, params = {})
- role = params[:role]
- completion = create_completion!(prompt, params)
- @messages.concat [Message.new(role, prompt), *completion.choices[0]]
- end
-
- include LLM
- end
- end
/data/lib/{json → llm/json}/schema/array.rb RENAMED
File without changes
/data/lib/{json → llm/json}/schema/boolean.rb RENAMED
File without changes
/data/lib/{json → llm/json}/schema/integer.rb RENAMED
File without changes
/data/lib/{json → llm/json}/schema/leaf.rb RENAMED
File without changes
/data/lib/{json → llm/json}/schema/null.rb RENAMED
File without changes
/data/lib/{json → llm/json}/schema/number.rb RENAMED
File without changes
/data/lib/{json → llm/json}/schema/object.rb RENAMED
File without changes
/data/lib/{json → llm/json}/schema/string.rb RENAMED
File without changes
/data/lib/{json → llm/json}/schema/version.rb RENAMED
File without changes
/data/lib/{json → llm/json}/schema.rb RENAMED
File without changes