llm.rb 2.0.1 → 2.1.0

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 25cfa0f4004670cb739599150aa68cc74123a8d835b4a72a91c80b7e98640a6a
-   data.tar.gz: 1a8e7547baa911c8d03c84f7e4f849f7bd36f574aa709732e0397b033addd617
+   metadata.gz: 8578f727c5a45b243d86f498cf1a3fcc594981f4056234d2376805744b7e7633
+   data.tar.gz: 03aefaa4ebdf15200e0d6999a8b7015e101dcc0d9afff4c205672a6fa94532c4
  SHA512:
-   metadata.gz: aad8b1f2cd63ecb95875cd30e09a2dc514bd332b676653825f08ecc776fffcebe16d47a0a4252dad91dfbb4f27a62bbd75b033b04383591edc87788a15551817
-   data.tar.gz: f894b483b8700ed334e0d499baa0d2f5b3b5fb331356267210de8d0111fd6281c1ae4ed139edb7947d578587567e2d4d9f181352b9bf4a6fc149ed91562ec57f
+   metadata.gz: 1b969f525f44192999bcb3ea45aec1e53283d6bb4347b6852bc0bfe9095ecf4d9a9d21a42f7b04e6c530329bf96deb0fa0508757b9e0c185b8944d1630da1648
+   data.tar.gz: 239ff739c3f9bfdfbe8f574a1045acac9ace3bbb0ddca3a92958fee9319e4834c2f057a61c9b0c418979111050ade4968a01ec73862f70278b75cbe7752c9815
data/README.md CHANGED
@@ -9,8 +9,7 @@ tool calling, audio, images, files, and structured outputs.
 
  #### REPL
 
- A simple chatbot that maintains a conversation and streams
- responses in real-time:
+ A simple chatbot that maintains a conversation and streams responses in real-time:
 
  ```ruby
  #!/usr/bin/env ruby
@@ -20,14 +19,14 @@ llm = LLM.openai(key: ENV["KEY"])
  bot = LLM::Bot.new(llm, stream: $stdout)
  loop do
    print "> "
-   bot.chat($stdin.gets)
+   bot.chat(gets)
    print "\n"
  end
  ```
 
- #### Build
+ #### Prompts
 
- We can send multiple messages at once by building a chain of messages:
+ A prompt builder that produces a chain of messages that can be sent in one request:
 
  ```ruby
  #!/usr/bin/env ruby
@@ -37,37 +36,65 @@ llm = LLM.openai(key: ENV["KEY"])
  bot = LLM::Bot.new(llm)
  prompt = bot.build_prompt do
    it.system "Your task is to answer all user queries"
-   it.user "What language should I learn next ?"
+   it.user "Was 2024 a leap year?"
+   it.user "How many days in a year?"
  end
-
  bot.chat(prompt)
  bot.messages.each { print "[#{it.role}] ", it.content, "\n" }
  ```
 
- #### Images
+ #### Schema
 
- We can generate an image on the fly and estimate how old the person
- in the image is:
+ A bot that instructs the LLM to respond in JSON that conforms to the given schema:
 
  ```ruby
  #!/usr/bin/env ruby
  require "llm"
 
- llm = LLM.openai(key: ENV["OPENAI_SECRET"])
- schema = llm.schema.object(
-   age: llm.schema.integer.required.description("The age of the person in a photo"),
-   confidence: llm.schema.number.required.description("Model confidence (0.0 to 1.0)"),
-   notes: llm.schema.string.required.description("Model notes or caveats")
- )
+ class Estimation < LLM::Schema
+   property :age, Integer, "The age of a person in a photo", required: true
+   property :confidence, Number, "Model confidence (0.0 to 1.0)", required: true
+   property :notes, String, "Model notes or caveats", optional: true
+ end
 
+ llm = LLM.openai(key: ENV["KEY"])
+ bot = LLM::Bot.new(llm, schema: Estimation)
  img = llm.images.create(prompt: "A man in his 30s")
- bot = LLM::Bot.new(llm, schema:)
  res = bot.chat bot.image_url(img.urls[0])
- body = res.choices.find(&:assistant?).content!
+ estimation = res.choices.find(&:assistant?).content!
+
+ puts "age: #{estimation["age"]}"
+ puts "confidence: #{estimation["confidence"]}"
+ puts "notes: #{estimation["notes"]}"
+ ```
+
+ #### Tools
+
+ A bot equipped with a tool that can run system commands:
+
+ ```ruby
+ #!/usr/bin/env ruby
+ require "llm"
+
+ class System < LLM::Tool
+   name "system"
+   description "Run a shell command"
+   param :command, String, "The command to execute", required: true
+
+   def call(command:)
+     {success: system(command)}
+   end
+ end
 
- print "age: ", body["age"], "\n"
- print "confidence: ", body["confidence"], "\n"
- print "notes: ", body["notes"], "\n"
+ llm = LLM.openai(key: ENV["KEY"])
+ bot = LLM::Bot.new(llm, tools: [System])
+ prompt = bot.build_prompt do
+   it.system "Your task is to execute system commands"
+   it.user "mkdir /home/robert/projects"
+ end
+ bot.chat(prompt)
+ bot.chat bot.functions.map(&:call)
+ bot.messages.select(&:assistant?).each { print "[#{it.role}] ", it.content, "\n" }
  ```
 
  ## Features
@@ -210,7 +237,6 @@ prompt = bot.build_prompt do
    it.user ["Tell me about this URL", bot.image_url(url)]
    it.user ["Tell me about this PDF", bot.local_file("handbook.pdf")]
  end
-
  bot.chat(prompt)
  bot.messages.each { print "[#{it.role}] ", it.content, "\n" }
  ```
@@ -237,18 +263,18 @@ prompt = bot.build_prompt do
    it.user ["Tell me about this URL", bot.image_url(url)]
    it.user ["Tell me about the PDF", bot.local_file("handbook.pdf")]
  end
-
  bot.chat(prompt)
  ```
 
  ### Schema
 
- #### Structured
+ #### Object
 
  All LLM providers except Anthropic and DeepSeek allow a client to describe
  the structure of a response that an LLM emits according to a schema that is
- described by JSON. The schema lets a client describe what JSON object (or value)
- an LLM should emit, and the LLM will abide by the schema:
+ described by JSON. The schema lets a client describe what JSON object
+ an LLM should emit, and the LLM will abide by the schema to the best of
+ its ability:
 
  ```ruby
  #!/usr/bin/env ruby
@@ -278,6 +304,34 @@ bot.chat "Tell me the answer to ((5 + 5) / 2) * 2 + 1", role: :user
  puts bot.messages.find(&:assistant?).content! # => {answers: [11]}
  ```
 
+ #### Class
+
+ In addition to the object form we saw in the previous example, a class form
+ is also supported. Under the hood, it is implemented with the object form;
+ the class form primarily exists to provide structure and organization
+ that the object form lacks:
+
+ ```ruby
+ #!/usr/bin/env ruby
+ require "llm"
+
+ class Player < LLM::Schema
+   property :name, String, "The player's name", required: true
+   property :numbers, Array[Integer], "The player's favorite numbers", required: true
+ end
+
+ llm = LLM.openai(key: ENV["KEY"])
+ bot = LLM::Bot.new(llm, schema: Player)
+ prompt = bot.build_prompt do
+   it.system "The user's name is Robert and their favorite numbers are 7 and 12"
+   it.user "Tell me about myself"
+ end
+
+ player = bot.chat(prompt).content!
+ puts "name: #{player.name}"
+ puts "numbers: #{player.numbers}"
+ ```
+
  ### Tools
 
  #### Introduction
@@ -3,9 +3,29 @@
  module LLM::Anthropic::Response
    module Enumerable
      include ::Enumerable
+
      def each(&)
        return enum_for(:each) unless block_given?
        data.each { yield(_1) }
      end
+
+     ##
+     # Returns an element, or a slice, or nil
+     # @return [Object, Array<Object>, nil]
+     def [](*pos, **kw)
+       data[*pos, **kw]
+     end
+
+     ##
+     # @return [Boolean]
+     def empty?
+       data.empty?
+     end
+
+     ##
+     # @return [Integer]
+     def size
+       data.size
+     end
    end
  end
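A hedged usage sketch of the new delegators above; the `res` object is illustrative, standing in for any response collection that includes this Enumerable:

```ruby
res.each { |item| puts item } # existing behavior: iteration
res[0]                        # new: first element, like data[0]
res[0..1]                     # new: a slice, or nil when out of range
res.empty?                    # new: delegates to data.empty?
res.size                      # new: delegates to data.size
```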
@@ -24,6 +24,7 @@ class LLM::Gemini
  def format_schema(params)
    return {} unless params and params[:schema]
    schema = params.delete(:schema)
+   schema = schema.respond_to?(:object) ? schema.object : schema
    {generationConfig: {response_mime_type: "application/json", response_schema: schema}}
  end
 
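This one-line change to `format_schema` (mirrored in the OpenAI provider below) is what lets a class-based schema be passed wherever a schema object was accepted: an `LLM::Schema` subclass responds to `.object`, while a plain schema object does not. A sketch of the two call styles, assuming the `Estimation` class from the README example above:

```ruby
# Object form: pass a schema object directly
bot = LLM::Bot.new(llm, schema: llm.schema.object(age: llm.schema.integer.required))
# Class form: pass the LLM::Schema subclass; format_schema unwraps it via .object
bot = LLM::Bot.new(llm, schema: Estimation)
```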
@@ -42,7 +42,7 @@ class LLM::Gemini
  # might unexpectedly receive a purely textual response. This is due to how
  # Gemini implements image generation under the hood.
  # @return [LLM::Response]
- def create(prompt:, model: "gemini-2.5-flash-image-preview", **params)
+ def create(prompt:, model: "gemini-2.5-flash-image", **params)
    req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
    body = JSON.dump({
      contents: [{parts: [{text: create_prompt}, {text: prompt}]}],
@@ -67,7 +67,7 @@ class LLM::Gemini
  # @raise [LLM::NoImageError] when no images are returned
  # @note (see LLM::Gemini::Images#create)
  # @return [LLM::Response]
- def edit(image:, prompt:, model: "gemini-2.5-flash-image-preview", **params)
+ def edit(image:, prompt:, model: "gemini-2.5-flash-image", **params)
    req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
    image = LLM::Object.from_hash(value: LLM.File(image), kind: :local_file)
    body = JSON.dump({
@@ -32,6 +32,7 @@ class LLM::OpenAI
  def format_schema(params)
    return {} unless params and params[:schema]
    schema = params.delete(:schema)
+   schema = schema.respond_to?(:object) ? schema.object : schema
    {
      response_format: {
        type: "json_schema",
  type: "json_schema",
@@ -3,11 +3,19 @@
  module LLM::OpenAI::Response
    module Enumerable
      include ::Enumerable
+
      def each(&)
        return enum_for(:each) unless block_given?
        data.each { yield(_1) }
      end
 
+     ##
+     # Returns an element, or a slice, or nil
+     # @return [Object, Array<Object>, nil]
+     def [](*pos, **kw)
+       data[*pos, **kw]
+     end
+
      ##
      # @return [Boolean]
      def empty?
@@ -16,6 +16,9 @@ class LLM::OpenAI
  require_relative "response/enumerable"
  PollError = Class.new(LLM::Error)
 
+ INTERVAL = 0.01
+ private_constant :INTERVAL
+
  ##
  # @param [LLM::Provider] provider
  # An OpenAI provider
@@ -51,10 +54,11 @@ class LLM::OpenAI
 
  ##
  # Create a vector store and poll until its status is "completed"
+ # @param interval [Float] The interval between polling attempts (seconds)
  # @param (see LLM::OpenAI::VectorStores#create)
  # @return (see LLM::OpenAI::VectorStores#poll)
- def create_and_poll(...)
-   poll(vector: create(...))
+ def create_and_poll(interval: INTERVAL, **rest)
+   poll(interval:, vector: create(**rest))
  end
 
  ##
@@ -149,6 +153,16 @@ class LLM::OpenAI
  end
  alias_method :create_file, :add_file
 
+ ##
+ # Add a file to a vector store and poll until its status is "completed"
+ # @param interval [Float] The interval between polling attempts (seconds)
+ # @param (see LLM::OpenAI::VectorStores#add_file)
+ # @return (see LLM::OpenAI::VectorStores#poll)
+ def add_file_and_poll(vector:, file:, interval: INTERVAL, **rest)
+   poll(vector:, interval:, file: add_file(vector:, file:, **rest))
+ end
+ alias_method :create_file_and_poll, :add_file_and_poll
+
  ##
  # Update a file in a vector store
  # @param [String, #id] vector The ID of the vector store
  # @param [String, #id] vector The ID of the vector store
@@ -199,23 +213,26 @@ class LLM::OpenAI
  end
 
  ##
- # Poll a vector store until its status is "completed"
+ # Poll a vector store or file until its status is "completed"
  # @param [String, #id] vector The ID of the vector store
+ # @param [String, #id] file The file to poll (optional)
  # @param [Integer] attempts The current number of attempts (default: 0)
  # @param [Integer] max The maximum number of iterations (default: 50)
+ # @param [Float] interval The interval between polling attempts (seconds)
  # @raise [LLM::PollError] When the maximum number of iterations is reached
  # @return [LLM::Response]
- def poll(vector:, attempts: 0, max: 50)
+ def poll(vector:, file: nil, attempts: 0, max: 50, interval: INTERVAL)
+   target = file || vector
    if attempts == max
-     raise LLM::PollError, "vector store '#{vector.id}' has status '#{vector.status}' after #{max} attempts"
-   elsif vector.status == "expired"
-     raise LLM::PollError, "vector store '#{vector.id}' has expired"
-   elsif vector.status != "completed"
-     vector = get(vector:)
-     sleep(0.1 * (2**attempts))
-     poll(vector:, attempts: attempts + 1, max:)
+     raise LLM::PollError, "'#{target.id}' has status '#{target.status}' after #{max} attempts"
+   elsif target.status == "expired"
+     raise LLM::PollError, "'#{target.id}' has expired"
+   elsif target.status != "completed"
+     file ? (file = get_file(vector:, file:)) : (vector = get(vector:))
+     sleep(interval * (2**attempts)) unless interval.zero?
+     poll(vector:, file:, attempts: attempts + 1, max:, interval:)
    else
-     vector
+     target
    end
  end
 
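The reworked `poll` backs off exponentially from a configurable base (`interval * 2**attempts`), and `interval: 0` skips sleeping entirely, which is convenient in tests. A usage sketch; `stores`, `name:`, and `file_id` are illustrative placeholders, not names confirmed by this diff:

```ruby
vector = stores.create_and_poll(name: "docs", interval: 0.01)          # 10ms base interval
file   = stores.add_file_and_poll(vector:, file: file_id, interval: 0) # poll without sleeping
```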
@@ -7,6 +7,13 @@ class LLM::Schema
  # {LLM::Schema::Leaf LLM::Schema::Leaf} and provides methods that
  # can act as constraints.
  class Array < Leaf
+   ##
+   # Returns an array for the given type
+   # @return [LLM::Schema::Array]
+   def self.[](type)
+     new(type.new)
+   end
+
    def initialize(items)
      @items = items
    end
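`Array.[]` is what makes the bracket syntax in the README's class-form example work: inside an `LLM::Schema` subclass, `Integer` resolves to the nested `LLM::Schema::Integer` leaf class, so `Array[Integer]` builds an array leaf whose items are integers. A sketch, assuming such an integer leaf class exists alongside the other leaves:

```ruby
class Scores < LLM::Schema
  # Equivalent to LLM::Schema::Array.new(LLM::Schema::Integer.new)
  property :values, Array[Integer], "A list of scores", required: true
end
```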
@@ -20,16 +20,24 @@ class LLM::Schema
  # Set the description of a leaf
  # @param [String] str The description
  # @return [LLM::Schema::Leaf]
- def description(str)
-   tap { @description = str }
+ def description(str = nil)
+   if str.nil?
+     @description
+   else
+     tap { @description = str }
+   end
  end
 
  ##
  # Set the default value of a leaf
  # @param [Object] value The default value
  # @return [LLM::Schema::Leaf]
- def default(value)
-   tap { @default = value }
+ def default(value = nil)
+   if value.nil?
+     @default
+   else
+     tap { @default = value }
+   end
  end
 
  ##
@@ -38,7 +46,11 @@ class LLM::Schema
  # @param [Array] values The allowed values
  # @return [LLM::Schema::Leaf]
  def enum(*values)
-   tap { @enum = values }
+   if values.empty?
+     @enum
+   else
+     tap { @enum = values }
+   end
  end
 
  ##
@@ -46,17 +58,40 @@ class LLM::Schema
  # @see https://tour.json-schema.org/content/02-Primitive-Types/08-Defining-Constant-Values Constant Values
  # @param [Object] value The constant value
  # @return [LLM::Schema::Leaf]
- def const(value)
-   tap { @const = value }
+ def const(value = nil)
+   if value.nil?
+     @const
+   else
+     tap { @const = value }
+   end
  end
 
  ##
- # Denote a leaf as required
+ # Mark a leaf as required
  # @return [LLM::Schema::Leaf]
  def required
    tap { @required = true }
  end
 
+ ##
+ # @return [Boolean]
+ def required?
+   @required
+ end
+
+ ##
+ # Mark a leaf as optional
+ # @return [LLM::Schema::Leaf]
+ def optional
+   tap { @required = false }
+ end
+
+ ##
+ # @return [Boolean]
+ def optional?
+   !@required
+ end
+
  ##
  # @return [Hash]
  def to_h
@@ -70,9 +105,13 @@ class LLM::Schema
  end
 
  ##
+ # @param [LLM::Schema::Leaf] other
+ #  An object to compare
  # @return [Boolean]
- def required?
-   @required
+ def ==(other)
+   return false unless self.class === other
+   to_h == other.to_h
  end
+ alias_method :eql?, :==
  end
  end
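Taken together, the Leaf changes turn each attribute method into a combined getter/setter: called with an argument it records the value and returns `self` (so calls chain), called without one it reads the value back, and `==`/`eql?` now compare leaves structurally via `to_h`. A sketch of the new surface, assuming `llm.schema.integer` builds an integer leaf as in the README:

```ruby
a = llm.schema.integer.description("A count").required
b = llm.schema.integer.description("A count").required
a.description # => "A count" (no-argument call reads the value back)
a.required?   # => true
a == b        # => true, compared structurally via to_h
a.optional    # flips @required to false, so a == b no longer holds
```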
@@ -19,6 +19,20 @@ class LLM::Schema
    @properties = properties
  end
 
+ ##
+ # Get a property
+ # @return [LLM::Schema::Leaf]
+ def [](key)
+   properties[key.to_s]
+ end
+
+ ##
+ # Set a property
+ # @return [void]
+ def []=(key, val)
+   properties[key.to_s] = val
+ end
+
  ##
  # @return [Hash]
  def to_h
@@ -42,6 +56,12 @@ class LLM::Schema
    to_h.to_json(options)
  end
 
+ ##
+ # @return [Array<String>]
+ def keys
+   @properties.keys
+ end
+
  private
 
  def required
data/lib/llm/schema.rb CHANGED
@@ -28,6 +28,63 @@ class LLM::Schema
  require_relative "schema/boolean"
  require_relative "schema/null"
 
+ ##
+ # Configures a monitor for a subclass
+ # @return [void]
+ def self.inherited(klass)
+   LLM.lock(:inherited) do
+     klass.instance_eval { @__monitor = Monitor.new }
+   end
+ end
+
+ ##
+ # @param [String] name
+ #  The property name
+ # @param [Class] type
+ #  The property type
+ # @param [String] description
+ #  The property description
+ # @param [Hash] options
+ #  A hash of options
+ def self.property(name, type, description, options = {})
+   lock do
+     if LLM::Schema::Leaf === type
+       prop = type
+     else
+       target = type.name.split("::").last.downcase
+       prop = schema.public_send(target)
+     end
+     options = {description:}.merge(options)
+     options.each { (_2 == true) ? prop.public_send(_1) : prop.public_send(_1, *_2) }
+     object[name] = prop
+   end
+ end
+
+ ##
+ # @api private
+ # @return [LLM::Schema]
+ def self.schema
+   lock do
+     @schema ||= LLM::Schema.new
+   end
+ end
+
+ ##
+ # @api private
+ # @return [LLM::Schema::Object]
+ def self.object
+   lock do
+     @object ||= schema.object({})
+   end
+ end
+
+ ##
+ # @api private
+ def self.lock(&)
+   @__monitor.synchronize(&)
+ end
+ private_class_method :lock
+
  ##
  # Returns an object
  # @param [Hash] properties A hash of properties
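How the class form fits together: `.inherited` gives each `LLM::Schema` subclass its own Monitor so `.property` calls are thread-safe; `.property` maps a leaf type (or an existing leaf instance) to a schema leaf, applies options such as `required: true` by sending them to the leaf, and stores the result on the memoized `.object`. A small sketch, reusing only names that appear in the diff above:

```ruby
class Person < LLM::Schema
  property :name, String, "Full name", required: true
  property :age, Integer, "Age in years", optional: true
end

Person.object          # the underlying LLM::Schema::Object
Person.object[:name]   # the name leaf, via the new Object#[]
Person.object.keys     # => ["name", "age"], via the new Object#keys
```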
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module LLM
-   VERSION = "2.0.1"
+   VERSION = "2.1.0"
  end
data/llm.gemspec CHANGED
@@ -12,7 +12,7 @@ Gem::Specification.new do |spec|
    llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
    includes OpenAI, Gemini, Anthropic, xAI (grok), zAI, DeepSeek, Ollama, and
    LlamaCpp. The toolkit includes full support for chat, streaming, tool calling,
-   audio, images, files, and structured outputs (JSON Schema).
+   audio, images, files, and structured outputs.
    SUMMARY
 
  spec.description = spec.summary
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: llm.rb
  version: !ruby/object:Gem::Version
-   version: 2.0.1
+   version: 2.1.0
  platform: ruby
  authors:
  - Antar Azri
@@ -167,7 +167,7 @@ dependencies:
  description: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
    includes OpenAI, Gemini, Anthropic, xAI (grok), zAI, DeepSeek, Ollama, and LlamaCpp.
    The toolkit includes full support for chat, streaming, tool calling, audio, images,
-   files, and structured outputs (JSON Schema).
+   files, and structured outputs.
  email:
  - azantar@proton.me
  - 0x1eef@proton.me
@@ -304,5 +304,5 @@ specification_version: 4
  summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
    OpenAI, Gemini, Anthropic, xAI (grok), zAI, DeepSeek, Ollama, and LlamaCpp. The
    toolkit includes full support for chat, streaming, tool calling, audio, images,
-   files, and structured outputs (JSON Schema).
+   files, and structured outputs.
  test_files: []