llm.rb 0.16.0 → 0.16.2

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 38502ab4a41dba8177cb7b21db68f3e0dd5492323ac8b132b1775926b46ffffc
-  data.tar.gz: ebe196962c43934ae979e298f80b4bc2e30a147ad0f42595eef59880be9fc01e
+  metadata.gz: 01f2984f551757482fbf590485875bef064060d27eb84d3d4325e307db1029c5
+  data.tar.gz: a492d121a5dc1916412296269cf0055ce14512855fecb94e2b018dcefb9b404d
 SHA512:
-  metadata.gz: 5100d71b851771137a86e799bc2cadab360fc0ced297288b09fa701a8f434c671fe739427b889243e9704c5ae2a05b6b8761c85f0b0bea9268700bf770e80f13
-  data.tar.gz: 3aed0f826c229a37d30b2f0d41678976ba00df72216588a9302320c52e22d9d1b814df0b76bca8f6e713e105d73006339121a8bb900a73ac8896cc1f7c3f0051
+  metadata.gz: 341d074d9d732056599d6f27865f8fcf3fecd0f6062de73ec5dffcd056031e727c43f8058c472d69b598ae3fa4232bb47b960b331b20b8f7cc9323a86e9beeb7
+  data.tar.gz: aa9c19a4b51b8c379f6d8a8f486172e332d6ca1fb4a3f65fa2b32ff7685a3f48fd6d5db0092bcc3a97fca7f36b4f4214df1dfd675ad1215a172e5ae9ce76cc00
data/README.md CHANGED
@@ -34,6 +34,25 @@ GitHub Copilot but for the terminal.
 * [llm-shell](https://github.com/llmrb/llm-shell) – a developer-oriented console for Large Language Model communication
 * [llm-spell](https://github.com/llmrb/llm-spell) – a utility that can correct spelling mistakes with a Large Language Model
 
+#### Show code
+
+A simple chatbot that maintains a conversation and streams
+responses in real-time:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(key: ENV["KEY"])
+bot = LLM::Bot.new(llm, stream: $stdout)
+loop do
+  print "> "
+  input = $stdin.gets&.chomp || break
+  bot.chat(input).flush
+  print "\n"
+end
+```
+
 ## Features
 
 #### General
@@ -129,9 +148,11 @@ and the gem should be installed separately:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(key: ENV["KEY"], persistent: true)
-res = llm.responses.create "Hello world"
-llm.responses.create "Adios", last_response_id: res.response_id
+llm = LLM.openai(key: ENV["KEY"], persistent: true)
+res1 = llm.responses.create "message 1"
+res2 = llm.responses.create "message 2", previous_response_id: res1.response_id
+res3 = llm.responses.create "message 3", previous_response_id: res2.response_id
+print res3.output_text, "\n"
 ```
 
 ### Conversations
@@ -195,7 +216,7 @@ bot.chat(stream: $stdout) do |prompt|
   prompt.user ["Tell me about this URL", URI(url)]
   prompt.user ["Tell me about this PDF", File.open("handbook.pdf", "rb")]
   prompt.user "Are the URL and PDF similar to each other?"
-end.to_a
+end.flush
 ```
 
 ### Schema
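The README's chaining example above also switches the keyword from `last_response_id` to `previous_response_id`. A minimal sketch of the same pattern as a loop, assuming the Responses API behaves as the README example shows (passing `previous_response_id: nil` on the first turn is assumed here to be equivalent to omitting the keyword):

```ruby
#!/usr/bin/env ruby
require "llm"

# Thread a multi-turn exchange by forwarding each response's id
# to the next request.
llm = LLM.openai(key: ENV["KEY"], persistent: true)
id = nil
["message 1", "message 2", "message 3"].each do |text|
  res = llm.responses.create(text, previous_response_id: id)
  id = res.response_id
end
```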
data/lib/llm/bot.rb CHANGED
@@ -123,5 +123,17 @@ module LLM
         .flat_map(&:functions)
         .select(&:pending?)
     end
+
+    ##
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   bot = LLM::Bot.new(llm, stream: $stdout)
+    #   bot.chat("Hello", role: :user).flush
+    # Drains the buffer and returns all messages as an array
+    # @return [Array<LLM::Message>]
+    def drain
+      messages.drain
+    end
+    alias_method :flush, :drain
   end
 end
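The new `LLM::Bot#drain` (aliased as `flush`) simply delegates to the message buffer. A minimal sketch of inspecting the drained conversation, assuming `LLM::Message` exposes `role` and `content` accessors as the rest of the library suggests:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
bot = LLM::Bot.new(llm, stream: $stdout)
bot.chat "Hello there", role: :user
# drain (or flush) completes any pending streamed responses and
# returns the conversation as an Array<LLM::Message>
bot.drain.each { |message| puts "#{message.role}: #{message.content}" }
```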
data/lib/llm/buffer.rb CHANGED
@@ -65,12 +65,20 @@ module LLM
     alias_method :push, :<<
 
     ##
-    # @param [Integer, #to_i] index
+    # @param [Integer, Range, #to_i] index
     #  The message index
     # @return [LLM::Message, nil]
     #  Returns a message, or nil
     def [](index)
-      @completed[index.to_i] || to_a[index.to_i]
+      if index.respond_to?(:to_i)
+        @completed[index.to_i] || to_a[index.to_i]
+      elsif Range === index
+        slice = @completed[index]
+        invalidate = slice.nil? || slice.size < index.size
+        invalidate ? to_a[index] : slice
+      else
+        raise TypeError, "index must be an Integer or Range"
+      end
     end
 
     ##
@@ -92,7 +100,8 @@ module LLM
     #   llm = LLM.openai(key: ENV["KEY"])
     #   bot = LLM::Bot.new(llm, stream: $stdout)
     #   bot.chat "Hello", role: :user
-    #   bot.messages.drain
+    #   bot.messages.flush
+    # @see LLM::Bot#drain
     # @note
     #  This method is especially useful when using the streaming API.
     # Drains the buffer and returns all messages as an array
@@ -100,6 +109,7 @@ module LLM
     def drain
       to_a
     end
+    alias_method :flush, :drain
 
     private
 
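`LLM::Buffer#[]` now takes a Range as well as anything that responds to `to_i`, falling back to a full drain when the completed slice is missing or shorter than the requested range. A short sketch of the three branches, assuming a bot with at least a couple of completed messages:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
bot = LLM::Bot.new(llm, stream: $stdout)
bot.chat("Hello", role: :user).flush

first = bot.messages[0]     # Integer index, as before
pair  = bot.messages[0..1]  # Range index: a slice of messages, or nil
bot.messages[:bad]          # raises TypeError, "index must be an Integer or Range"
```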
data/lib/llm/error.rb CHANGED
@@ -35,6 +35,10 @@ module LLM
   # HTTPServerError
   ServerError = Class.new(ResponseError)
 
+  ##
+  # When no images are found in a response
+  NoImageError = Class.new(ResponseError)
+
   ##
   # When given an input object that is not understood
   FormatError = Class.new(Error)
@@ -36,12 +36,13 @@ class LLM::Gemini
     # @param [String] prompt The prompt
     # @param [Hash] params Other parameters (see Gemini docs)
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::NoImageError] when no images are returned
     # @note
     #  The prompt should make it clear you want to generate an image, or you
     #  might unexpectedly receive a purely textual response. This is due to how
     #  Gemini implements image generation under the hood.
     # @return [LLM::Response]
-    def create(prompt:, model: "gemini-2.0-flash-exp-image-generation", **params)
+    def create(prompt:, model: "gemini-2.5-flash-image-preview", **params)
       req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
       body = JSON.dump({
         contents: [{parts: [{text: create_prompt}, {text: prompt}]}],
@@ -49,7 +50,7 @@ class LLM::Gemini
       }.merge!(params))
       req.body = body
       res = execute(request: req)
-      LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+      validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
     end
 
     ##
@@ -63,9 +64,10 @@ class LLM::Gemini
     # @param [String] prompt The prompt
     # @param [Hash] params Other parameters (see Gemini docs)
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::NoImageError] when no images are returned
     # @note (see LLM::Gemini::Images#create)
     # @return [LLM::Response]
-    def edit(image:, prompt:, model: "gemini-2.0-flash-exp-image-generation", **params)
+    def edit(image:, prompt:, model: "gemini-2.5-flash-image-preview", **params)
       req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
       image = LLM.File(image)
       body = JSON.dump({
@@ -74,7 +76,7 @@ class LLM::Gemini
       }.merge!(params)).b
       set_body_stream(req, StringIO.new(body))
       res = execute(request: req)
-      LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+      validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
     end
 
     ##
@@ -119,6 +121,11 @@ class LLM::Gemini
       PROMPT
     end
 
+    def validate(res)
+      return res unless res.images.empty?
+      raise LLM::NoImageError.new { _1.response = res.res }, "no images found in response"
+    end
+
     [:headers, :execute, :set_body_stream].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
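Both `create` and `edit` now run their responses through `validate`, which raises the new `LLM::NoImageError` when Gemini answers with text only. A minimal sketch of handling that case, assuming the Gemini provider exposes its image API through `llm.images` as the library's other examples do:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(key: ENV["KEY"])
begin
  res = llm.images.create(prompt: "Draw a watercolor fox")
  # validate guarantees res.images is non-empty at this point
rescue LLM::NoImageError => e
  # e.response carries the HTTP response attached by validate
  warn "Gemini returned no images: #{e.message}"
end
```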
@@ -22,5 +22,10 @@ module LLM::Gemini::Response
     #  will always return an empty array.
     # @return [Array<String>]
     def urls = []
+
+    ##
+    # Returns one or more candidates, or an empty array
+    # @return [Array<Hash>]
+    def candidates = body.candidates || []
   end
 end
@@ -8,8 +8,7 @@ class LLM::OpenAI
     # @example
     #   llm = LLM.openai(key: ENV["OPENAI_SECRET"])
     #   files = %w(foo.pdf bar.pdf).map { llm.files.create(file: _1) }
-    #   store = llm.vector_stores.create(name: "PDF Store", file_ids: files.map(&:id))
-    #   store = llm.vector_stores.poll(vector: store)
+    #   store = llm.vector_stores.create_and_poll(name: "PDF Store", file_ids: files.map(&:id))
     #   print "[-] store is ready", "\n"
     #   chunks = llm.vector_stores.search(vector: store, query: "What is Ruby?")
     #   chunks.each { |chunk| puts chunk }
@@ -50,6 +49,14 @@ class LLM::OpenAI
       LLM::Response.new(res)
     end
 
+    ##
+    # Create a vector store and poll until its status is "completed"
+    # @param (see LLM::OpenAI::VectorStores#create)
+    # @return (see LLM::OpenAI::VectorStores#poll)
+    def create_and_poll(...)
+      poll(vector: create(...))
+    end
+
     ##
     # Get a vector store
     # @param [String, #id] vector The ID of the vector store
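`create_and_poll` collapses the former create-then-poll idiom into one call, forwarding all of its arguments to `create`. A sketch mirroring the updated doc example (the PDF file names are placeholders):

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["OPENAI_SECRET"])
files = %w(foo.pdf bar.pdf).map { llm.files.create(file: _1) }
# Blocks until the vector store's status is "completed"
store = llm.vector_stores.create_and_poll(name: "PDF Store", file_ids: files.map(&:id))
chunks = llm.vector_stores.search(vector: store, query: "What is Ruby?")
chunks.each { |chunk| puts chunk }
```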
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module LLM
-  VERSION = "0.16.0"
+  VERSION = "0.16.2"
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.16.0
+  version: 0.16.2
 platform: ruby
 authors:
 - Antar Azri