llm.rb 2.0.1 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98) hide show
  1. checksums.yaml +4 -4
  2. data/README.md +86 -26
  3. data/lib/llm/bot.rb +4 -4
  4. data/lib/llm/buffer.rb +0 -9
  5. data/lib/llm/contract/completion.rb +57 -0
  6. data/lib/llm/contract.rb +48 -0
  7. data/lib/llm/error.rb +22 -14
  8. data/lib/llm/eventhandler.rb +6 -4
  9. data/lib/llm/eventstream/parser.rb +18 -13
  10. data/lib/llm/function.rb +1 -1
  11. data/lib/llm/json_adapter.rb +109 -0
  12. data/lib/llm/message.rb +7 -28
  13. data/lib/llm/multipart/enumerator_io.rb +86 -0
  14. data/lib/llm/multipart.rb +32 -51
  15. data/lib/llm/object/builder.rb +6 -6
  16. data/lib/llm/object/kernel.rb +2 -2
  17. data/lib/llm/object.rb +23 -8
  18. data/lib/llm/provider.rb +11 -3
  19. data/lib/llm/providers/anthropic/error_handler.rb +1 -1
  20. data/lib/llm/providers/anthropic/files.rb +4 -5
  21. data/lib/llm/providers/anthropic/models.rb +1 -2
  22. data/lib/llm/providers/anthropic/{format/completion_format.rb → request_adapter/completion.rb} +19 -19
  23. data/lib/llm/providers/anthropic/{format.rb → request_adapter.rb} +7 -7
  24. data/lib/llm/providers/anthropic/response_adapter/completion.rb +66 -0
  25. data/lib/llm/providers/anthropic/response_adapter/enumerable.rb +31 -0
  26. data/lib/llm/providers/anthropic/{response → response_adapter}/file.rb +1 -1
  27. data/lib/llm/providers/anthropic/{response → response_adapter}/web_search.rb +3 -3
  28. data/lib/llm/providers/anthropic/response_adapter.rb +36 -0
  29. data/lib/llm/providers/anthropic/stream_parser.rb +6 -6
  30. data/lib/llm/providers/anthropic.rb +8 -11
  31. data/lib/llm/providers/deepseek/{format/completion_format.rb → request_adapter/completion.rb} +15 -15
  32. data/lib/llm/providers/deepseek/{format.rb → request_adapter.rb} +7 -7
  33. data/lib/llm/providers/deepseek.rb +2 -2
  34. data/lib/llm/providers/gemini/audio.rb +2 -2
  35. data/lib/llm/providers/gemini/error_handler.rb +3 -3
  36. data/lib/llm/providers/gemini/files.rb +4 -7
  37. data/lib/llm/providers/gemini/images.rb +11 -16
  38. data/lib/llm/providers/gemini/models.rb +1 -2
  39. data/lib/llm/providers/gemini/{format/completion_format.rb → request_adapter/completion.rb} +14 -14
  40. data/lib/llm/providers/gemini/{format.rb → request_adapter.rb} +9 -8
  41. data/lib/llm/providers/gemini/response_adapter/completion.rb +67 -0
  42. data/lib/llm/providers/gemini/{response → response_adapter}/embedding.rb +1 -1
  43. data/lib/llm/providers/gemini/{response → response_adapter}/file.rb +1 -1
  44. data/lib/llm/providers/gemini/{response → response_adapter}/files.rb +1 -1
  45. data/lib/llm/providers/gemini/{response → response_adapter}/image.rb +3 -3
  46. data/lib/llm/providers/gemini/{response → response_adapter}/models.rb +1 -1
  47. data/lib/llm/providers/gemini/{response → response_adapter}/web_search.rb +3 -3
  48. data/lib/llm/providers/gemini/response_adapter.rb +42 -0
  49. data/lib/llm/providers/gemini/stream_parser.rb +37 -32
  50. data/lib/llm/providers/gemini.rb +10 -14
  51. data/lib/llm/providers/ollama/error_handler.rb +1 -1
  52. data/lib/llm/providers/ollama/{format/completion_format.rb → request_adapter/completion.rb} +19 -19
  53. data/lib/llm/providers/ollama/{format.rb → request_adapter.rb} +7 -7
  54. data/lib/llm/providers/ollama/response_adapter/completion.rb +61 -0
  55. data/lib/llm/providers/ollama/{response → response_adapter}/embedding.rb +1 -1
  56. data/lib/llm/providers/ollama/response_adapter.rb +32 -0
  57. data/lib/llm/providers/ollama/stream_parser.rb +2 -2
  58. data/lib/llm/providers/ollama.rb +8 -10
  59. data/lib/llm/providers/openai/audio.rb +1 -1
  60. data/lib/llm/providers/openai/error_handler.rb +12 -2
  61. data/lib/llm/providers/openai/files.rb +3 -6
  62. data/lib/llm/providers/openai/images.rb +4 -5
  63. data/lib/llm/providers/openai/models.rb +1 -3
  64. data/lib/llm/providers/openai/moderations.rb +3 -5
  65. data/lib/llm/providers/openai/{format/completion_format.rb → request_adapter/completion.rb} +22 -22
  66. data/lib/llm/providers/openai/{format/moderation_format.rb → request_adapter/moderation.rb} +5 -5
  67. data/lib/llm/providers/openai/{format/respond_format.rb → request_adapter/respond.rb} +16 -16
  68. data/lib/llm/providers/openai/{format.rb → request_adapter.rb} +13 -12
  69. data/lib/llm/providers/openai/{response → response_adapter}/audio.rb +1 -1
  70. data/lib/llm/providers/openai/response_adapter/completion.rb +62 -0
  71. data/lib/llm/providers/openai/{response → response_adapter}/embedding.rb +1 -1
  72. data/lib/llm/providers/openai/{response → response_adapter}/enumerable.rb +9 -1
  73. data/lib/llm/providers/openai/{response → response_adapter}/file.rb +1 -1
  74. data/lib/llm/providers/openai/{response → response_adapter}/image.rb +1 -1
  75. data/lib/llm/providers/openai/{response → response_adapter}/moderations.rb +1 -1
  76. data/lib/llm/providers/openai/{response → response_adapter}/responds.rb +6 -10
  77. data/lib/llm/providers/openai/{response → response_adapter}/web_search.rb +3 -3
  78. data/lib/llm/providers/openai/response_adapter.rb +47 -0
  79. data/lib/llm/providers/openai/responses/stream_parser.rb +22 -22
  80. data/lib/llm/providers/openai/responses.rb +6 -8
  81. data/lib/llm/providers/openai/stream_parser.rb +6 -5
  82. data/lib/llm/providers/openai/vector_stores.rb +37 -21
  83. data/lib/llm/providers/openai.rb +12 -14
  84. data/lib/llm/response.rb +2 -5
  85. data/lib/llm/schema/array.rb +7 -0
  86. data/lib/llm/schema/leaf.rb +49 -10
  87. data/lib/llm/schema/object.rb +20 -0
  88. data/lib/llm/schema.rb +57 -0
  89. data/lib/llm/usage.rb +10 -0
  90. data/lib/llm/version.rb +1 -1
  91. data/lib/llm.rb +33 -1
  92. data/llm.gemspec +1 -1
  93. metadata +46 -37
  94. data/lib/llm/providers/anthropic/response/completion.rb +0 -39
  95. data/lib/llm/providers/anthropic/response/enumerable.rb +0 -11
  96. data/lib/llm/providers/gemini/response/completion.rb +0 -35
  97. data/lib/llm/providers/ollama/response/completion.rb +0 -28
  98. data/lib/llm/providers/openai/response/completion.rb +0 -40
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 25cfa0f4004670cb739599150aa68cc74123a8d835b4a72a91c80b7e98640a6a
4
- data.tar.gz: 1a8e7547baa911c8d03c84f7e4f849f7bd36f574aa709732e0397b033addd617
3
+ metadata.gz: 2e60be1fa699baabf9a1df129d0263d0c2b6fecc3ce2b818128eed319aa7bb18
4
+ data.tar.gz: a39d32cd9cfcfb7fa4152ba3e02960522b6ae4b5d6e22705b846f5b9dcd2972b
5
5
  SHA512:
6
- metadata.gz: aad8b1f2cd63ecb95875cd30e09a2dc514bd332b676653825f08ecc776fffcebe16d47a0a4252dad91dfbb4f27a62bbd75b033b04383591edc87788a15551817
7
- data.tar.gz: f894b483b8700ed334e0d499baa0d2f5b3b5fb331356267210de8d0111fd6281c1ae4ed139edb7947d578587567e2d4d9f181352b9bf4a6fc149ed91562ec57f
6
+ metadata.gz: 4d434afe1a6acaeef6036178c5914500e047450ee7d92999dedd80f66a6be22c73f4d788f86739842ee283c579b68476c0a114f50e25e7db7bbfa6e6cdf1a5bc
7
+ data.tar.gz: e818753b0b06cf4053652d11b2e3c79d7ef58de0b0acc5fb7fa147e55c693eee583f1c9e1f80b08096de95247c03644fad9e596f4511d57a99d2abd5339087c2
data/README.md CHANGED
@@ -1,3 +1,7 @@
1
+ > **Minimal footprint** <br>
2
+ > Zero dependencies outside Ruby’s standard library. <br>
3
+ > Zero runtime dependencies.
4
+
1
5
  ## About
2
6
 
3
7
  llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
@@ -9,8 +13,7 @@ tool calling, audio, images, files, and structured outputs.
9
13
 
10
14
  #### REPL
11
15
 
12
- A simple chatbot that maintains a conversation and streams
13
- responses in real-time:
16
+ A simple chatbot that maintains a conversation and streams responses in real-time:
14
17
 
15
18
  ```ruby
16
19
  #!/usr/bin/env ruby
@@ -20,14 +23,14 @@ llm = LLM.openai(key: ENV["KEY"])
20
23
  bot = LLM::Bot.new(llm, stream: $stdout)
21
24
  loop do
22
25
  print "> "
23
- bot.chat($stdin.gets)
26
+ bot.chat(gets)
24
27
  print "\n"
25
28
  end
26
29
  ```
27
30
 
28
- #### Build
31
+ #### Prompts
29
32
 
30
- We can send multiple messages at once by building a chain of messages:
33
+ A prompt builder that produces a chain of messages that can be sent in one request:
31
34
 
32
35
  ```ruby
33
36
  #!/usr/bin/env ruby
@@ -37,37 +40,65 @@ llm = LLM.openai(key: ENV["KEY"])
37
40
  bot = LLM::Bot.new(llm)
38
41
  prompt = bot.build_prompt do
39
42
  it.system "Your task is to answer all user queries"
40
- it.user "What language should I learn next ?"
43
+ it.user "Was 2024 a leap year?"
44
+ it.user "How many days in a year?"
41
45
  end
42
-
43
46
  bot.chat(prompt)
44
47
  bot.messages.each { print "[#{it.role}] ", it.content, "\n" }
45
48
  ```
46
49
 
47
- #### Images
50
+ #### Schema
48
51
 
49
- We can generate an image on the fly and estimate how old the person
50
- in the image is:
52
+ A bot that instructs the LLM to respond in JSON, and according to the given schema:
51
53
 
52
54
  ```ruby
53
55
  #!/usr/bin/env ruby
54
56
  require "llm"
55
57
 
56
- llm = LLM.openai(key: ENV["OPENAI_SECRET"])
57
- schema = llm.schema.object(
58
- age: llm.schema.integer.required.description("The age of the person in a photo"),
59
- confidence: llm.schema.number.required.description("Model confidence (0.0 to 1.0)"),
60
- notes: llm.schema.string.required.description("Model notes or caveats")
61
- )
58
+ class Estimation < LLM::Schema
59
+ property :age, Integer, "The age of a person in a photo", required: true
60
+ property :confidence, Number, "Model confidence (0.0 to 1.0)", required: true
61
+ property :notes, String, "Model notes or caveats", optional: true
62
+ end
62
63
 
64
+ llm = LLM.openai(key: ENV["KEY"])
65
+ bot = LLM::Bot.new(llm, schema: Estimation)
63
66
  img = llm.images.create(prompt: "A man in his 30s")
64
- bot = LLM::Bot.new(llm, schema:)
65
67
  res = bot.chat bot.image_url(img.urls[0])
66
- body = res.choices.find(&:assistant?).content!
68
+ estimation = res.choices.find(&:assistant?).content!
67
69
 
68
- print "age: ", body["age"], "\n"
69
- print "confidence: ", body["confidence"], "\n"
70
- print "notes: ", body["notes"], "\n"
70
+ puts "age: #{estimation["age"]}"
71
+ puts "confidence: #{estimation["confidence"]}"
72
+ puts "notes: #{estimation["notes"]}"
73
+ ```
74
+
75
+ #### Tools
76
+
77
+ A bot equipped with a tool that is capable of running system commands:
78
+
79
+ ```ruby
80
+ #!/usr/bin/env ruby
81
+ require "llm"
82
+
83
+ class System < LLM::Tool
84
+ name "system"
85
+ description "Run a shell command"
86
+ param :command, String, "The command to execute", required: true
87
+
88
+ def call(command:)
89
+ {success: system(command)}
90
+ end
91
+ end
92
+
93
+ llm = LLM.openai(key: ENV["KEY"])
94
+ bot = LLM::Bot.new(llm, tools: [System])
95
+ prompt = bot.build_prompt do
96
+ it.system "Your task is to execute system commands"
97
+ it.user "mkdir /home/robert/projects"
98
+ end
99
+ bot.chat(prompt)
100
+ bot.chat bot.functions.map(&:call)
101
+ bot.messages.select(&:assistant?).each { print "[#{it.role}] ", it.content, "\n" }
71
102
  ```
72
103
 
73
104
  ## Features
@@ -75,6 +106,7 @@ print "notes: ", body["notes"], "\n"
75
106
  #### General
76
107
  - ✅ A single unified interface for multiple providers
77
108
  - 📦 Zero dependencies outside Ruby's standard library
109
+ - 🧩 Choose your own JSON parser (JSON stdlib, Oj, Yajl, etc)
78
110
  - 🚀 Simple, composable API
79
111
  - ♻️ Optional: per-provider, process-wide connection pool via net-http-persistent
80
112
 
@@ -88,6 +120,7 @@ print "notes: ", body["notes"], "\n"
88
120
  - 🗣️ Text-to-speech, transcription, and translation
89
121
  - 🖼️ Image generation, editing, and variation support
90
122
  - 📎 File uploads and prompt-aware file interaction
123
+ - 📦 Streams multipart uploads and avoids buffering large files in memory
91
124
  - 💡 Multimodal prompts (text, documents, audio, images, videos, URLs, etc)
92
125
 
93
126
  #### Embeddings
@@ -210,7 +243,6 @@ prompt = bot.build_prompt do
210
243
  it.user ["Tell me about this URL", bot.image_url(url)]
211
244
  it.user ["Tell me about this PDF", bot.local_file("handbook.pdf")]
212
245
  end
213
-
214
246
  bot.chat(prompt)
215
247
  bot.messages.each { print "[#{it.role}] ", it.content, "\n" }
216
248
  ```
@@ -237,18 +269,18 @@ prompt = bot.build_prompt do
237
269
  it.user ["Tell me about this URL", bot.image_url(url)]
238
270
  it.user ["Tell me about the PDF", bot.local_file("handbook.pdf")]
239
271
  end
240
-
241
272
  bot.chat(prompt)
242
273
  ```
243
274
 
244
275
  ### Schema
245
276
 
246
- #### Structured
277
+ #### Object
247
278
 
248
279
  All LLM providers except Anthropic and DeepSeek allow a client to describe
249
280
  the structure of a response that a LLM emits according to a schema that is
250
- described by JSON. The schema lets a client describe what JSON object (or value)
251
- an LLM should emit, and the LLM will abide by the schema:
281
+ described by JSON. The schema lets a client describe what JSON object
282
+ an LLM should emit, and the LLM will abide by the schema to the best of
283
+ its ability:
252
284
 
253
285
  ```ruby
254
286
  #!/usr/bin/env ruby
@@ -278,6 +310,34 @@ bot.chat "Tell me the answer to ((5 + 5) / 2) * 2 + 1", role: :user
278
310
  puts bot.messages.find(&:assistant?).content! # => {answers: [11]}
279
311
  ```
280
312
 
313
+ #### Class
314
+
315
+ Other than the object form we saw in the previous example, a class form
316
+ is also supported. Under the hood, it is implemented with the object form
317
+ and the class form primarily exists to provide structure and organization
318
+ that the object form lacks:
319
+
320
+ ```ruby
321
+ #!/usr/bin/env ruby
322
+ require "llm"
323
+
324
+ class Player < LLM::Schema
325
+ property :name, String, "The player's name", required: true
326
+ property :numbers, Array[Integer], "The player's favorite numbers", required: true
327
+ end
328
+
329
+ llm = LLM.openai(key: ENV["KEY"])
330
+ bot = LLM::Bot.new(llm, schema: Player)
331
+ prompt = bot.build_prompt do
332
+ it.system "The user's name is Robert and their favorite numbers are 7 and 12"
333
+ it.user "Tell me about myself"
334
+ end
335
+
336
+ player = bot.chat(prompt).content!
337
+ puts "name: #{player.name}"
338
+ puts "numbers: #{player.numbers}"
339
+ ```
340
+
281
341
  ### Tools
282
342
 
283
343
  #### Introduction
data/lib/llm/bot.rb CHANGED
@@ -119,7 +119,7 @@ module LLM
119
119
  # if there are no assistant messages
120
120
  # @return [LLM::Object]
121
121
  def usage
122
- @messages.find(&:assistant?)&.usage || LLM::Object.from_hash({})
122
+ @messages.find(&:assistant?)&.usage || LLM::Object.from({})
123
123
  end
124
124
 
125
125
  ##
@@ -141,7 +141,7 @@ module LLM
141
141
  # @return [LLM::Object]
142
142
  # Returns a tagged object
143
143
  def image_url(url)
144
- LLM::Object.from_hash(value: url, kind: :image_url)
144
+ LLM::Object.from(value: url, kind: :image_url)
145
145
  end
146
146
 
147
147
  ##
@@ -151,7 +151,7 @@ module LLM
151
151
  # @return [LLM::Object]
152
152
  # Returns a tagged object
153
153
  def local_file(path)
154
- LLM::Object.from_hash(value: LLM.File(path), kind: :local_file)
154
+ LLM::Object.from(value: LLM.File(path), kind: :local_file)
155
155
  end
156
156
 
157
157
  ##
@@ -161,7 +161,7 @@ module LLM
161
161
  # @return [LLM::Object]
162
162
  # Returns a tagged object
163
163
  def remote_file(res)
164
- LLM::Object.from_hash(value: res, kind: :remote_file)
164
+ LLM::Object.from(value: res, kind: :remote_file)
165
165
  end
166
166
 
167
167
  private
data/lib/llm/buffer.rb CHANGED
@@ -35,15 +35,6 @@ module LLM
35
35
  end
36
36
  end
37
37
 
38
- ##
39
- # Returns an array of unread messages
40
- # @see LLM::Message#read?
41
- # @see LLM::Message#read!
42
- # @return [Array<LLM::Message>]
43
- def unread
44
- reject(&:read?)
45
- end
46
-
47
38
  ##
48
39
  # Find a message (in descending order)
49
40
  # @return [LLM::Message, nil]
@@ -0,0 +1,57 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM::Contract
4
+ ##
5
+ # Defines the interface all completion responses must implement
6
+ # @abstract
7
+ module Completion
8
+ extend LLM::Contract
9
+
10
+ ##
11
+ # @return [Array<LLM::Message>]
12
+ # Returns one or more messages
13
+ def messages
14
+ raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
15
+ end
16
+ alias_method :choices, :messages
17
+
18
+ ##
19
+ # @return [Integer]
20
+ # Returns the number of input tokens
21
+ def input_tokens
22
+ raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
23
+ end
24
+
25
+ ##
26
+ # @return [Integer]
27
+ # Returns the number of output tokens
28
+ def output_tokens
29
+ raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
30
+ end
31
+
32
+ ##
33
+ # @return [Integer]
34
+ # Returns the total number of tokens
35
+ def total_tokens
36
+ raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
37
+ end
38
+
39
+ ##
40
+ # @return [LLM::Usage]
41
+ # Returns usage information
42
+ def usage
43
+ LLM::Usage.new(
44
+ input_tokens:,
45
+ output_tokens:,
46
+ total_tokens:
47
+ )
48
+ end
49
+
50
+ ##
51
+ # @return [String]
52
+ # Returns the model name
53
+ def model
54
+ raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
55
+ end
56
+ end
57
+ end
@@ -0,0 +1,48 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM
4
+ ##
5
+ # The `LLM::Contract` module provides the ability for modules
6
+ # who are extended by it to implement contracts which must be
7
+ # implemented by other modules who include a given contract.
8
+ #
9
+ # @example
10
+ # module LLM::Contract
11
+ # # ..
12
+ # end
13
+ #
14
+ # module LLM::Contract
15
+ # module Completion
16
+ # extend LLM::Contract
17
+ # # inheriting modules must implement these methods
18
+ # # otherwise an error is raised on include
19
+ # def foo = nil
20
+ # def bar = nil
21
+ # end
22
+ # end
23
+ #
24
+ # module LLM::OpenAI::ResponseAdapter
25
+ # module Completion
26
+ # def foo = nil
27
+ # def bar = nil
28
+ # include LLM::Contract::Completion
29
+ # end
30
+ # end
31
+ module Contract
32
+ ContractError = Class.new(LLM::Error)
33
+ require_relative "contract/completion"
34
+
35
+ ##
36
+ # @api private
37
+ def included(mod)
38
+ meths = mod.instance_methods(false)
39
+ if meths.empty?
40
+ raise ContractError, "#{mod} does not implement any methods required by #{self}"
41
+ end
42
+ missing = instance_methods - meths
43
+ if missing.any?
44
+ raise ContractError, "#{mod} does not implement methods (#{missing.join(", ")}) required by #{self}"
45
+ end
46
+ end
47
+ end
48
+ end
data/lib/llm/error.rb CHANGED
@@ -4,40 +4,40 @@ module LLM
4
4
  ##
5
5
  # The superclass of all LLM errors
6
6
  class Error < RuntimeError
7
+ ##
8
+ # @return [Net::HTTPResponse, nil]
9
+ # Returns the response associated with an error, or nil
10
+ attr_accessor :response
11
+
7
12
  def initialize(...)
8
13
  block_given? ? yield(self) : nil
9
14
  super
10
15
  end
11
- end
12
-
13
- ##
14
- # The superclass of all HTTP protocol errors
15
- class ResponseError < Error
16
- ##
17
- # @return [Net::HTTPResponse]
18
- # Returns the response associated with an error
19
- attr_accessor :response
20
16
 
21
17
  def message
22
- [super, response.body].join("\n")
18
+ if response
19
+ [super, response.body].join("\n")
20
+ else
21
+ super
22
+ end
23
23
  end
24
24
  end
25
25
 
26
26
  ##
27
27
  # HTTPUnauthorized
28
- UnauthorizedError = Class.new(ResponseError)
28
+ UnauthorizedError = Class.new(Error)
29
29
 
30
30
  ##
31
31
  # HTTPTooManyRequests
32
- RateLimitError = Class.new(ResponseError)
32
+ RateLimitError = Class.new(Error)
33
33
 
34
34
  ##
35
35
  # HTTPServerError
36
- ServerError = Class.new(ResponseError)
36
+ ServerError = Class.new(Error)
37
37
 
38
38
  ##
39
39
  # When no images are found in a response
40
- NoImageError = Class.new(ResponseError)
40
+ NoImageError = Class.new(Error)
41
41
 
42
42
  ##
43
43
  # When an given an input object that is not understood
@@ -46,4 +46,12 @@ module LLM
46
46
  ##
47
47
  # When given a prompt object that is not understood
48
48
  PromptError = Class.new(FormatError)
49
+
50
+ ##
51
+ # When given an invalid request
52
+ InvalidRequestError = Class.new(Error)
53
+
54
+ ##
55
+ # When the context window is exceeded
56
+ ContextWindowError = Class.new(InvalidRequestError)
49
57
  end
@@ -17,9 +17,10 @@ module LLM
17
17
  # @return [void]
18
18
  def on_data(event)
19
19
  return if event.end?
20
- chunk = JSON.parse(event.value)
20
+ chunk = LLM.json.load(event.value)
21
+ return unless chunk
21
22
  @parser.parse!(chunk)
22
- rescue JSON::ParserError
23
+ rescue *LLM.json.parser_error
23
24
  end
24
25
 
25
26
  ##
@@ -31,9 +32,10 @@ module LLM
31
32
  # @return [void]
32
33
  def on_chunk(event)
33
34
  return if event.end?
34
- chunk = JSON.parse(event.chunk)
35
+ chunk = LLM.json.load(event.chunk)
36
+ return unless chunk
35
37
  @parser.parse!(chunk)
36
- rescue JSON::ParserError
38
+ rescue *LLM.json.parser_error
37
39
  end
38
40
 
39
41
  ##
@@ -7,9 +7,9 @@ module LLM::EventStream
7
7
  ##
8
8
  # @return [LLM::EventStream::Parser]
9
9
  def initialize
10
- @buffer = StringIO.new
10
+ @buffer = +""
11
11
  @events = Hash.new { |h, k| h[k] = [] }
12
- @offset = 0
12
+ @cursor = 0
13
13
  @visitors = []
14
14
  end
15
15
 
@@ -34,8 +34,7 @@ module LLM::EventStream
34
34
  # Append an event to the internal buffer
35
35
  # @return [void]
36
36
  def <<(event)
37
- io = StringIO.new(event)
38
- IO.copy_stream io, @buffer
37
+ @buffer << event
39
38
  each_line { parse!(_1) }
40
39
  end
41
40
 
@@ -43,15 +42,15 @@ module LLM::EventStream
43
42
  # Returns the internal buffer
44
43
  # @return [String]
45
44
  def body
46
- @buffer.string
45
+ @buffer.dup
47
46
  end
48
47
 
49
48
  ##
50
49
  # Free the internal buffer
51
50
  # @return [void]
52
51
  def free
53
- @buffer.truncate(0)
54
- @buffer.rewind
52
+ @buffer.clear
53
+ @cursor = 0
55
54
  end
56
55
 
57
56
  private
@@ -76,13 +75,19 @@ module LLM::EventStream
76
75
  end
77
76
 
78
77
  def each_line
79
- string.each_line.with_index do
80
- next if _2 < @offset
81
- yield(_1)
82
- @offset += 1
78
+ while (newline = @buffer.index("\n", @cursor))
79
+ line = @buffer[@cursor..newline]
80
+ @cursor = newline + 1
81
+ yield(line)
83
82
  end
83
+ if @cursor < @buffer.length
84
+ line = @buffer[@cursor..]
85
+ @cursor = @buffer.length
86
+ yield(line)
87
+ end
88
+ return if @cursor.zero?
89
+ @buffer = @buffer[@cursor..] || +""
90
+ @cursor = 0
84
91
  end
85
-
86
- def string = @buffer.string
87
92
  end
88
93
  end
data/lib/llm/function.rb CHANGED
@@ -149,7 +149,7 @@ class LLM::Function
149
149
 
150
150
  ##
151
151
  # @return [Hash]
152
- def format(provider)
152
+ def adapt(provider)
153
153
  case provider.class.to_s
154
154
  when "LLM::Gemini"
155
155
  {name: @name, description: @description, parameters: @params}.compact
@@ -0,0 +1,109 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM
4
+ ##
5
+ # The JSONAdapter class defines the interface for JSON parsers
6
+ # that can be used by the library when dealing with JSON. The
7
+ # following parsers are supported:
8
+ # * {LLM::JSONAdapter::JSON LLM::JSONAdapter::JSON} (default)
9
+ # * {LLM::JSONAdapter::Oj LLM::JSONAdapter::Oj}
10
+ # * {LLM::JSONAdapter::Yajl LLM::JSONAdapter::Yajl}
11
+ #
12
+ # @example Change parser
13
+ # LLM.json = LLM::JSONAdapter::Oj
14
+ class JSONAdapter
15
+ ##
16
+ # @return [String]
17
+ # Returns a JSON string representation of the given object
18
+ def self.dump(*) = raise NotImplementedError
19
+
20
+ ##
21
+ # @return [Object]
22
+ # Returns a Ruby object parsed from the given JSON string
23
+ def self.load(*) = raise NotImplementedError
24
+
25
+ ##
26
+ # @return [Array<Class>]
27
+ # Returns the exception classes raised when parsing fails
28
+ def self.parser_error = [StandardError]
29
+ end
30
+
31
+ ##
32
+ # The {LLM::JSONAdapter::JSON LLM::JSONAdapter::JSON} class
33
+ # provides a JSON adapter backed by the standard library
34
+ # JSON module.
35
+ class JSONAdapter::JSON < JSONAdapter
36
+ ##
37
+ # @return (see JSONAdapter#dump)
38
+ def self.dump(obj)
39
+ require "json" unless defined?(::JSON)
40
+ ::JSON.dump(obj)
41
+ end
42
+
43
+ ##
44
+ # @return (see JSONAdapter#load)
45
+ def self.load(string)
46
+ require "json" unless defined?(::JSON)
47
+ ::JSON.parse(string)
48
+ end
49
+
50
+ ##
51
+ # @return (see JSONAdapter#parser_error)
52
+ def self.parser_error
53
+ require "json" unless defined?(::JSON)
54
+ [::JSON::ParserError]
55
+ end
56
+ end
57
+
58
+ ##
59
+ # The {LLM::JSONAdapter::Oj LLM::JSONAdapter::Oj} class
60
+ # provides a JSON adapter backed by the Oj gem.
61
+ class JSONAdapter::Oj < JSONAdapter
62
+ ##
63
+ # @return (see JSONAdapter#dump)
64
+ def self.dump(obj)
65
+ require "oj" unless defined?(::Oj)
66
+ ::Oj.dump(obj)
67
+ end
68
+
69
+ ##
70
+ # @return (see JSONAdapter#load)
71
+ def self.load(string)
72
+ require "oj" unless defined?(::Oj)
73
+ ::Oj.load(string, mode: :compat, symbol_keys: false, symbolize_names: false)
74
+ end
75
+
76
+ ##
77
+ # @return (see JSONAdapter#parser_error)
78
+ def self.parser_error
79
+ require "oj" unless defined?(::Oj)
80
+ [::Oj::ParseError, ::EncodingError]
81
+ end
82
+ end
83
+
84
+ ##
85
+ # The {LLM::JSONAdapter::Yajl LLM::JSONAdapter::Yajl} class
86
+ # provides a JSON adapter backed by the Yajl gem.
87
+ class JSONAdapter::Yajl < JSONAdapter
88
+ ##
89
+ # @return (see JSONAdapter#dump)
90
+ def self.dump(obj)
91
+ require "yajl" unless defined?(::Yajl)
92
+ ::Yajl::Encoder.encode(obj)
93
+ end
94
+
95
+ ##
96
+ # @return (see JSONAdapter#load)
97
+ def self.load(string)
98
+ require "yajl" unless defined?(::Yajl)
99
+ ::Yajl::Parser.parse(string)
100
+ end
101
+
102
+ ##
103
+ # @return (see JSONAdapter#parser_error)
104
+ def self.parser_error
105
+ require "yajl" unless defined?(::Yajl)
106
+ [::Yajl::ParseError]
107
+ end
108
+ end
109
+ end