llm.rb 0.7.0 → 0.7.1

This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: a7175b2fe81c74e007dd41db2e0fe1bd3f3639bed375af25da0f8ed2778ea2b5
-   data.tar.gz: 1c752e61cb288fed412b342b66279e7dfdb0337705e33af3e2a1deb1d408b8d0
+   metadata.gz: 7d5d93a645b666da3d6947c2076189063aec26e7bc3381cfb84d6a6aea4ce8fa
+   data.tar.gz: db764cd8e9180a3c21ca5bf2b35d8a5fa3f525cb63101381aca09f9e92cb5d37
  SHA512:
-   metadata.gz: 9af91ba96e63b2c43c7f6a836db5fed48da19ba1f2bdbb48894cc71fb940eca261930fec6b8fd9a6f641fc9c69402de9cdd6fd7f9cad9a7035b69ddad04de65a
-   data.tar.gz: b3f8af44ebb2522aba58621d19a19424805471f6f9a6f8ec834ebe417e6aa08e9f0233ef19a12695e829d44ea61b164ae57697379aebdf24450b829fdc04ac25
+   metadata.gz: c17999419e02e8c2d9d689299d149dd76077b1f52154557253dd9fe3876ff1109eaaa29d7d7aceda8147f38aa572261b455eaa253fbede212b874af42c8e03e0
+   data.tar.gz: 1061af3f752a1e8cbc37c8929a23f6bfa97328f66f3adf030e435f468785ffa6a3aae32fe5120936f872b60d8a48a31fcbf35cee0de66b8ee12ddedf17228beb
data/README.md CHANGED
@@ -22,7 +22,7 @@ images, files, and JSON Schema generation.
  - 🗣️ Text-to-speech, transcription, and translation
  - 🖼️ Image generation, editing, and variation support
  - 📎 File uploads and prompt-aware file interaction
- - 💡 Multimodal prompts (text, URLs, files)
+ - 💡 Multimodal prompts (text, images, PDFs, URLs, files)

  #### Embeddings
  - 🧮 Text embeddings and vector support
@@ -78,8 +78,8 @@ llm = LLM.voyageai(key: "yourapikey")

  The following example enables lazy mode for a
  [LLM::Chat](https://0x1eef.github.io/x/llm.rb/LLM/Chat.html)
- object by entering into a "lazy" conversation where messages are buffered and
- sent to the provider only when necessary. Both lazy and non-lazy conversations
+ object by entering into a conversation where messages are buffered and
+ sent to the provider only when necessary. Both lazy and non-lazy conversations
  maintain a message thread that can be reused as context throughout a conversation.
  The example captures the spirit of llm.rb by demonstrating how objects cooperate
  together through composition, and it uses the stateless chat completions API that
@@ -97,6 +97,8 @@ msgs = bot.chat do |prompt|
    prompt.user "Tell me the answer to (5 + 15) * 2"
    prompt.user "Tell me the answer to ((5 + 15) * 2) / 10"
  end
+
+ # At this point, we execute a single request
  msgs.each { print "[#{_1.role}] ", _1.content, "\n" }

  ##
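
The comment added above is the crux of lazy mode: prompts queued inside the block travel in one request when the thread is read. A toy model of that buffer-then-flush pattern (an illustration only; `ToyLazyChat` is invented for this sketch and is not the gem's implementation):

```ruby
# Toy model of lazy buffering: prompts queue until the thread is read,
# then a single simulated "request" answers them all.
class ToyLazyChat
  def initialize
    @buffer = []
    @messages = []
  end

  def chat(content, role: :user)
    @buffer << {role:, content:} # queue, don't send
    self
  end

  def messages
    flush unless @buffer.empty? # the single request happens here
    @messages
  end

  private

  def flush
    @messages.concat(@buffer)
    @messages << {role: :assistant, content: "(one reply for #{@buffer.size} prompts)"}
    @buffer.clear
  end
end

bot = ToyLazyChat.new
bot.chat("Tell me the answer to (5 + 15) * 2")
   .chat("Tell me the answer to ((5 + 15) * 2) / 10")
bot.messages.each { puts "[#{_1[:role]}] #{_1[:content]}" }
```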
@@ -134,18 +136,18 @@ require "llm"

  llm = LLM.openai(key: ENV["KEY"])
  schema = llm.schema.object({fruit: llm.schema.string.enum("Apple", "Orange", "Pineapple")})
- bot = LLM::Chat.new(llm, schema:)
+ bot = LLM::Chat.new(llm, schema:).lazy
  bot.chat "Your favorite fruit is Pineapple", role: :system
  bot.chat "What fruit is your favorite?", role: :user
  bot.messages.find(&:assistant?).content! # => {fruit: "Pineapple"}

  schema = llm.schema.object({answer: llm.schema.integer.required})
- bot = LLM::Chat.new(llm, schema:)
+ bot = LLM::Chat.new(llm, schema:).lazy
  bot.chat "Tell me the answer to ((5 + 5) / 2)", role: :user
  bot.messages.find(&:assistant?).content! # => {answer: 5}

  schema = llm.schema.object({probability: llm.schema.number.required})
- bot = LLM::Chat.new(llm, schema:)
+ bot = LLM::Chat.new(llm, schema:).lazy
  bot.chat "Does the earth orbit the sun?", role: :user
  bot.messages.find(&:assistant?).content! # => {probability: 1}
  ```
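
A note on `content!` in the examples above: with a schema attached, the assistant's reply is a JSON document conforming to that schema, and the bang method evidently parses it into a symbol-keyed hash. Presumably something along these lines happens under the hood (an assumption for illustration; the gem's internals aren't shown in this diff):

```ruby
require "json"

# Hypothetical: what a content!-style helper might do with the raw
# assistant message once a JSON schema has constrained the reply.
raw = '{"answer": 5}'
JSON.parse(raw, symbolize_names: true) # => {answer: 5}
```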
@@ -175,14 +177,18 @@ arbitrary commands from a LLM without sanitizing the input first :) Without furt
  #!/usr/bin/env ruby
  require "llm"

- llm = LLM.openai(key: ENV["KEY"])
+ llm  = LLM.openai(key: ENV["KEY"])
  tool = LLM.function(:system) do |fn|
    fn.description "Run a shell command"
    fn.params do |schema|
      schema.object(command: schema.string.required)
    end
    fn.define do |params|
-     system(params.command)
+     ro, wo = IO.pipe
+     re, we = IO.pipe
+     Process.wait Process.spawn(params.command, out: wo, err: we)
+     [wo,we].each(&:close)
+     {stderr: re.read, stdout: ro.read}
    end
  end

@@ -196,8 +202,8 @@ bot.chat "What operating system am I running? (short version please!)", role: :u
  bot.chat bot.functions.map(&:call) # report return value to the LLM

  ##
- # Thu May 1 10:01:02 UTC 2025
- # FreeBSD
+ # {stderr: "", stdout: "Thu May 1 10:01:02 UTC 2025"}
+ # {stderr: "", stdout: "FreeBSD"}
  ```

  ### Audio
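
The README's tool-calling change above is the most substantive: `system(params.command)` ran the command with output going to the terminal and returned only `true`/`false`, so the LLM never saw the output. The pipe-based version captures stdout and stderr and hands both back as the tool's return value. The same technique in isolation (standalone sketch; `uname` stands in for the LLM-chosen command):

```ruby
# Capture a child process's stdout and stderr via pipes, as the updated
# README tool does.
ro, wo = IO.pipe
re, we = IO.pipe
Process.wait Process.spawn("uname", out: wo, err: we)
[wo, we].each(&:close) # close write ends so the reads can hit EOF
p({stderr: re.read, stdout: ro.read}) # => {stderr: "", stdout: "Linux\n"} on Linux
```

In everyday code, `Open3.capture3` from the standard library achieves the same result with less ceremony.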
data/lib/llm/chat/builder.rb CHANGED
@@ -6,6 +6,10 @@ class LLM::Chat
    module Builder
      private

+     ##
+     # @param [String] prompt The prompt
+     # @param [Hash] params
+     # @return [LLM::Response::Respond]
      def create_response!(prompt, params)
        @provider.responses.create(
          prompt,
@@ -13,6 +17,10 @@ class LLM::Chat
        )
      end

+     ##
+     # @param [String] prompt The prompt
+     # @param [Hash] params
+     # @return [LLM::Response::Completion]
      def create_completion!(prompt, params)
        @provider.complete(
          prompt,
data/lib/llm/chat/conversable.rb CHANGED
@@ -6,22 +6,42 @@ class LLM::Chat
    module Conversable
      private

+     ##
+     # Queues a response to be sent to the provider.
+     # @param [String] prompt The prompt
+     # @param [Hash] params
+     # @return [void]
      def async_response(prompt, params = {})
        role = params.delete(:role)
        @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
      end

+     ##
+     # Sends a response to the provider and returns the response.
+     # @param [String] prompt The prompt
+     # @param [Hash] params
+     # @return [LLM::Response::Respond]
      def sync_response(prompt, params = {})
        role = params[:role]
        @response = create_response!(prompt, params)
        @messages.concat [Message.new(role, prompt), @response.outputs[0]]
      end

+     ##
+     # Queues a completion to be sent to the provider.
+     # @param [String] prompt The prompt
+     # @param [Hash] params
+     # @return [void]
      def async_completion(prompt, params = {})
        role = params.delete(:role)
        @messages.push [LLM::Message.new(role, prompt), @params.merge(params), :complete]
      end

+     ##
+     # Sends a completion to the provider and returns the completion.
+     # @param [String] prompt The prompt
+     # @param [Hash] params
+     # @return [LLM::Response::Completion]
      def sync_completion(prompt, params = {})
        role = params[:role]
        completion = create_completion!(prompt, params)
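
The async/sync split documented above is the machinery behind lazy mode: the async variants push `[message, params, method]` triples onto the thread for later dispatch, while the sync variants hit the provider immediately. A toy rendering of that shape (illustration only; `ToyConversable` and its fake provider call are invented for this sketch):

```ruby
# Toy rendering of the Conversable pattern: queue a triple, or send now.
Message = Struct.new(:role, :content)

class ToyConversable
  attr_reader :messages

  def initialize
    @messages = []
  end

  # Lazy path: remember what to send and how to send it.
  def async_completion(prompt, params = {})
    role = params.delete(:role)
    @messages << [Message.new(role, prompt), params, :complete]
  end

  # Eager path: "call the provider" right away.
  def sync_completion(prompt, params = {})
    role = params[:role]
    reply = Message.new(:assistant, "reply to: #{prompt}") # fake provider call
    @messages.concat [Message.new(role, prompt), reply]
  end
end

bot = ToyConversable.new
bot.async_completion("Hello", role: :user)
p bot.messages.last # => [#<struct Message role=:user, content="Hello">, {}, :complete]
```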
data/lib/llm/multipart.rb CHANGED
@@ -27,20 +27,6 @@ class LLM::Multipart
      "multipart/form-data; boundary=#{@boundary}"
    end

-   ##
-   # Returns the multipart request body parts
-   # @return [Array<String>]
-   def parts
-     params.map do |key, value|
-       locals = {key: key.to_s.b, boundary: boundary.to_s.b}
-       if value.respond_to?(:path)
-         file_part(key, value, locals)
-       else
-         data_part(key, value, locals)
-       end
-     end
-   end
-
    ##
    # Returns the multipart request body
    # @return [String]
@@ -54,47 +40,63 @@ class LLM::Multipart

    attr_reader :params

-   def attributes(file)
-     {
-       filename: File.basename(file.path).b,
-       content_type: LLM::Mime[file].b
-     }
+   def file(locals, file)
+     locals = locals.merge(attributes(file))
+     build_file(locals) do |body|
+       IO.copy_stream(file.path, body)
+       body << "\r\n"
+     end
    end

-   def multipart_header(type:, locals:)
-     if type == :file
-       str = StringIO.new("".b)
-       str << "--#{locals[:boundary]}" \
+   def form(locals, value)
+     locals = locals.merge(value:)
+     build_form(locals) do |body|
+       body << value.to_s
+       body << "\r\n"
+     end
+   end
+
+   def build_file(locals)
+     StringIO.new("".b).tap do |io|
+       io << "--#{locals[:boundary]}" \
            "\r\n" \
            "Content-Disposition: form-data; name=\"#{locals[:key]}\";" \
            "filename=\"#{locals[:filename]}\"" \
            "\r\n" \
            "Content-Type: #{locals[:content_type]}" \
            "\r\n\r\n"
-     elsif type == :data
-       str = StringIO.new("".b)
-       str << "--#{locals[:boundary]}" \
+       yield(io)
+     end
+   end
+
+   def build_form(locals)
+     StringIO.new("".b).tap do |io|
+       io << "--#{locals[:boundary]}" \
            "\r\n" \
            "Content-Disposition: form-data; name=\"#{locals[:key]}\"" \
            "\r\n\r\n"
-     else
-       raise "unknown type: #{type}"
+       yield(io)
      end
    end

-   def file_part(key, file, locals)
-     locals = locals.merge(attributes(file))
-     multipart_header(type: :file, locals:).tap do |io|
-       IO.copy_stream(file.path, io)
-       io << "\r\n"
+   ##
+   # Returns the multipart request body parts
+   # @return [Array<String>]
+   def parts
+     params.map do |key, value|
+       locals = {key: key.to_s.b, boundary: boundary.to_s.b}
+       if value.respond_to?(:path)
+         file(locals, value)
+       else
+         form(locals, value)
+       end
      end
    end

-   def data_part(key, value, locals)
-     locals = locals.merge(value:)
-     multipart_header(type: :data, locals:).tap do |io|
-       io << value.to_s
-       io << "\r\n"
-     end
+   def attributes(file)
+     {
+       filename: File.basename(file.path).b,
+       content_type: LLM::Mime[file].b
+     }
    end
  end
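
The refactor above separates header framing (`build_file`/`build_form`) from part assembly (`file`/`form`), replacing the `type:` flag and its `raise` branch with two named builders. For readers without the full file, here is the multipart/form-data framing the class produces, as a self-contained sketch (boundary and field names are made up):

```ruby
require "stringio"

# Minimal multipart/form-data framing in the style of LLM::Multipart's
# form parts: boundary line, Content-Disposition header, value, CRLF.
BOUNDARY = "ruby-#{rand(36**8).to_s(36)}"

def form_part(key, value)
  StringIO.new("".b).tap do |io|
    io << "--#{BOUNDARY}\r\n" \
          "Content-Disposition: form-data; name=\"#{key}\"\r\n\r\n"
    io << value.to_s
    io << "\r\n"
  end
end

parts = [form_part("purpose", "assistants"), form_part("model", "whisper-1")]
body  = parts.map(&:string).join + "--#{BOUNDARY}--\r\n" # closing delimiter
puts body
```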
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module LLM
-   VERSION = "0.7.0"
+   VERSION = "0.7.1"
  end
data/llm.gemspec CHANGED
@@ -8,14 +8,15 @@ Gem::Specification.new do |spec|
    spec.authors = ["Antar Azri", "0x1eef"]
    spec.email = ["azantar@proton.me", "0x1eef@proton.me"]

-   spec.summary = "llm.rb is a lightweight Ruby library that provides a " \
-                  "common interface and set of functionality for multple " \
-                  "Large Language Models (LLMs). It is designed to be simple, " \
-                  "flexible, and easy to use."
+   spec.summary = "llm.rb is a zero-dependency Ruby toolkit for " \
+                  "Large Language Models that includes OpenAI, Gemini, " \
+                  "Anthropic, Ollama, and LlamaCpp. It’s fast, simple " \
+                  "and composable – with full support for chat, tool calling, audio, " \
+                  "images, files, and JSON Schema generation."
    spec.description = spec.summary
    spec.homepage = "https://github.com/llmrb/llm"
    spec.license = "0BSDL"
-   spec.required_ruby_version = ">= 3.0.0"
+   spec.required_ruby_version = ">= 3.2.0"

    spec.metadata["homepage_uri"] = spec.homepage
    spec.metadata["source_code_uri"] = "https://github.com/llmrb/llm"
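
Note the `required_ruby_version` bump from 3.0 to 3.2. The diff doesn't state a reason, but one plausible factor is the shorthand hash syntax the codebase now leans on — `locals.merge(value:)` in multipart.rb and `LLM::Chat.new(llm, schema:)` in the README — which requires Ruby 3.1 or newer:

```ruby
# Ruby 3.1+ hash shorthand: {value:} expands to {value: value},
# picking up the local variable (or method) of the same name.
value = "hello"
h = {value:}
h == {value: value} # => true
```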
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: llm.rb
  version: !ruby/object:Gem::Version
-   version: 0.7.0
+   version: 0.7.1
  platform: ruby
  authors:
  - Antar Azri
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-05-09 00:00:00.000000000 Z
+ date: 2025-05-11 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: webmock
@@ -151,9 +151,10 @@ dependencies:
      - - "~>"
        - !ruby/object:Gem::Version
          version: '2.8'
- description: llm.rb is a lightweight Ruby library that provides a common interface
-   and set of functionality for multple Large Language Models (LLMs). It is designed
-   to be simple, flexible, and easy to use.
+ description: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
+   includes OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple and
+   composable with full support for chat, tool calling, audio, images, files, and
+   JSON Schema generation.
  email:
  - azantar@proton.me
  - 0x1eef@proton.me
@@ -259,7 +260,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
-       version: 3.0.0
+       version: 3.2.0
  required_rubygems_version: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
@@ -269,7 +270,8 @@ requirements: []
  rubygems_version: 3.5.23
  signing_key:
  specification_version: 4
- summary: llm.rb is a lightweight Ruby library that provides a common interface and
-   set of functionality for multple Large Language Models (LLMs). It is designed to
-   be simple, flexible, and easy to use.
+ summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
+   OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple and composable
+   with full support for chat, tool calling, audio, images, files, and JSON Schema
+   generation.
  test_files: []