llm.rb 0.9.0 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +15 -22
- data/lib/llm/bot/conversable.rb +0 -2
- data/lib/llm/buffer.rb +16 -4
- data/lib/llm/providers/anthropic.rb +2 -2
- data/lib/llm/providers/gemini/images.rb +10 -1
- data/lib/llm/providers/gemini.rb +2 -2
- data/lib/llm/providers/openai.rb +2 -2
- data/lib/llm/version.rb +1 -1
- data/llm.gemspec +7 -5
- metadata +8 -8
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 15ddbff68b600d6a8926e872ed687edaff585ed075136415c8b8921438c5d32f
+  data.tar.gz: bf498f154df07de201f1529253559db03e850182afdf84eb6b99efb527920a2f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6256cf08682c5f9bf5aff9a6065aec57e23e8f68bc54acd302eee4f6c08628feddf438043e58898611be6ffa0d61c07f437da3c6cd16007d256c6fd4adee4218
+  data.tar.gz: a7c0510691a9aa08447dc65146a763a37ca1aefb59de9ee10c2eca691d725a971d9dcdc35dc1316ff473b3ff002ef1b5c2963601a8044ec70b2c2302132e4627
data/README.md
CHANGED
@@ -1,17 +1,16 @@
 ## About
 
 llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
-includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp.
-
-
-generation.
+includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The
+toolkit includes full support for chat, streaming, tool calling, audio,
+images, files, and JSON Schema generation.
 
 ## Features
 
 #### General
 - ✅ A single unified interface for multiple providers
 - 📦 Zero dependencies outside Ruby's standard library
-- 🚀
+- 🚀 Efficient API design that minimizes the request count
 
 #### Chat, Agents
 - 🧠 Stateless and stateful chat via completions and responses API
@@ -31,6 +30,12 @@ generation.
 
 ## Demos
 
+> The
+> [llmrb/llm-shell](https://github.com/llmrb/llm-shell)
+> project is built with llm.rb and its demos have been
+> included to provide a better idea of what llm.rb
+> is capable of.
+
 <details>
 <summary><b>1. Tools: "system" function</b></summary>
 <img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/toolcalls.gif">
@@ -115,13 +120,13 @@ msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
 > There Is More Than One Way To Do It (TIMTOWTDI) when you are
 > using llm.rb – and this is especially true when it
 > comes to streaming. See the streaming documentation in
-> [docs/](docs/STREAMING.md#
+> [docs/](docs/STREAMING.md#scopes) for more details.
 
 The following example streams the messages in a conversation
 as they are generated in real-time. This feature can be useful
-
-
-
+when you want to stream a conversation in real time, or when you
+want to avoid potential read timeouts during the generation of a
+response.
 
 The `stream` option can be set to an IO object, or the value `true`
 to enable streaming – and at the end of the request, `bot.chat`
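
To make the streaming hunk above concrete, here is a minimal sketch of the interface the README describes. The `stream` option and `bot.chat` appear in the README text itself; the exact constructor calls, and whether the option is passed at construction or per call, are assumptions based on the README's other examples:

    require "llm"

    # Stream tokens to standard output as they are generated.
    llm = LLM.openai(key: ENV["OPENAI_API_KEY"])  # assumed constructor signature
    bot = LLM::Bot.new(llm, stream: $stdout)      # stream: an IO object, or true
    bot.chat "Hello, world"                       # chunks print in real time
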
@@ -497,22 +502,10 @@ else there's the API reference. It covers classes and methods that the README gl
 over or doesn't cover at all. The API reference is available at
 [0x1eef.github.io/x/llm.rb](https://0x1eef.github.io/x/llm.rb).
 
-
 ### Guides
 
 The [docs/](docs/) directory contains some additional documentation that
-didn't quite make it into the README.
-the library follows, some strategies for memory management, and other
-provider-specific features.
-
-## See also
-
-**[llmrb/llm-shell](https://github.com/llmrb/llm-shell)**
-
-An extensible, developer-oriented command line utility that is powered by
-llm.rb and serves as a demonstration of the library's capabilities. The
-[demo](https://github.com/llmrb/llm-shell#demos) section has a number of GIF
-previews might be especially interesting.
+didn't quite make it into the README.
 
 ## Install
 
data/lib/llm/bot/conversable.rb
CHANGED
data/lib/llm/buffer.rb
CHANGED
@@ -2,7 +2,6 @@
 
 module LLM
   ##
-  # @private
   # {LLM::Buffer LLM::Buffer} provides an Enumerable object that
   # yields each message in a conversation on-demand, and only sends
  # a request to the LLM when a response is needed.
@@ -23,9 +22,13 @@ module LLM
    # Yields each message in the conversation thread
    # @raise (see LLM::Provider#complete)
    # @return [void]
-    def each
-
-
+    def each(...)
+      if block_given?
+        empty! unless @pending.empty?
+        @completed.each { yield(_1) }
+      else
+        enum_for(:each, ...)
+      end
    end
 
    ##
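
The rewritten `each` above makes the buffer usable without a block: `block_given?` routes blockless calls to `enum_for`, so the method now returns an Enumerator. A rough sketch of the resulting behavior, assuming `bot.messages` returns an `LLM::Buffer` as in the README:

    # With a block: pending requests are flushed (empty!), then each
    # completed message is yielded, as before.
    bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }

    # Without a block: an Enumerator, so Enumerable chains work.
    bot.messages.each.select { _1.role == "user" }
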
@@ -54,6 +57,15 @@ module LLM
    end
    alias_method :push, :<<
 
+    ##
+    # @param [Integer, #to_i] index
+    #  The message index
+    # @return [LLM::Message, nil]
+    #  Returns a message, or nil
+    def [](index)
+      @completed[index.to_i] || to_a[index.to_i]
+    end
+
    ##
    # @return [String]
    def inspect
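
The new `#[]` makes random access explicit about cost: a hit on `@completed` is cheap, while a miss falls back to `to_a`, which forces any pending requests to complete first. For illustration:

    bot.messages[0]    # served from @completed when already materialized
    bot.messages[-1]   # may send a request if messages are still pending
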
data/lib/llm/providers/anthropic.rb
CHANGED
@@ -76,10 +76,10 @@ module LLM
 
    ##
    # Returns the default model for chat completions
-    # @see https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table claude-
+    # @see https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table claude-sonnet-4-20250514
    # @return [String]
    def default_model
-      "claude-
+      "claude-sonnet-4-20250514"
    end
 
    private
data/lib/llm/providers/gemini/images.rb
CHANGED
@@ -43,7 +43,7 @@ class LLM::Gemini
    def create(prompt:, model: "gemini-2.0-flash-exp-image-generation", **params)
      req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
      body = JSON.dump({
-        contents: [{parts: {text: prompt}}],
+        contents: [{parts: [{text: create_prompt}, {text: prompt}]}],
        generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
      }.merge!(params))
      req.body = body
@@ -93,6 +93,15 @@ class LLM::Gemini
      @provider.instance_variable_get(:@key)
    end
 
+    def create_prompt
+      <<~PROMPT
+        Your task is to generate one or more image(s) from
+        text I will provide to you. Your response *MUST* include
+        at least one image, and your response *MUST NOT* include
+        any text or other content.
+      PROMPT
+    end
+
    [:response_parser, :headers, :execute, :set_body_stream].each do |m|
      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
    end
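
Taken together, the two hunks above prepend a fixed instruction part (`create_prompt`) to every image-generation request, steering Gemini toward returning image data rather than prose. A hedged usage sketch: `images.create(prompt:)` is shown in the diff, while the `LLM.gemini(key:)` constructor is assumed from the README:

    llm = LLM.gemini(key: ENV["GEMINI_API_KEY"])  # assumed constructor signature
    res = llm.images.create(prompt: "A dog flying a kite")
    # The request body now carries two text parts:
    #   [{text: create_prompt}, {text: "A dog flying a kite"}]
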
data/lib/llm/providers/gemini.rb
CHANGED
@@ -126,10 +126,10 @@ module LLM
 
    ##
    # Returns the default model for chat completions
-    # @see https://ai.google.dev/gemini-api/docs/models#gemini-
+    # @see https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash gemini-2.5-flash
    # @return [String]
    def default_model
-      "gemini-
+      "gemini-2.5-flash"
    end
 
    private
data/lib/llm/providers/openai.rb
CHANGED
@@ -121,10 +121,10 @@ module LLM
 
    ##
    # Returns the default model for chat completions
-    # @see https://platform.openai.com/docs/models/gpt-
+    # @see https://platform.openai.com/docs/models/gpt-4.1 gpt-4.1
    # @return [String]
    def default_model
-      "gpt-
+      "gpt-4.1"
    end
 
    private
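
The Anthropic, Gemini, and OpenAI hunks follow the same pattern: each provider's public `default_model` is bumped to a current-generation model, used whenever a request doesn't name a model explicitly. A sketch of the new defaults, with constructor signatures assumed from the README:

    # Default model values are taken from the hunks above;
    # the key: arguments are assumed.
    LLM.anthropic(key: ENV["ANTHROPIC_API_KEY"]).default_model #=> "claude-sonnet-4-20250514"
    LLM.gemini(key: ENV["GEMINI_API_KEY"]).default_model       #=> "gemini-2.5-flash"
    LLM.openai(key: ENV["OPENAI_API_KEY"]).default_model       #=> "gpt-4.1"
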
data/lib/llm/version.rb
CHANGED
data/llm.gemspec
CHANGED
@@ -8,11 +8,13 @@ Gem::Specification.new do |spec|
  spec.authors = ["Antar Azri", "0x1eef"]
  spec.email = ["azantar@proton.me", "0x1eef@proton.me"]
 
-  spec.summary =
-
-
-
-
+  spec.summary = <<~SUMMARY
+    llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
+    includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The
+    toolkit includes full support for chat, streaming, tool calling, audio,
+    images, files, and JSON Schema generation.
+  SUMMARY
+
  spec.description = spec.summary
  spec.homepage = "https://github.com/llmrb/llm"
  spec.license = "0BSDL"
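
The gemspec change swaps a flat summary string for Ruby's squiggly heredoc. `<<~` strips the common leading indentation from every line, so the indented block above yields a flush-left multi-line string, and `spec.description = spec.summary` reuses it verbatim. For illustration:

    text = <<~SUMMARY
      first line
      second line
    SUMMARY
    text #=> "first line\nsecond line\n"
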
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.9.0
+  version: 0.10.0
 platform: ruby
 authors:
 - Antar Azri
@@ -151,9 +151,9 @@ dependencies:
   - !ruby/object:Gem::Version
     version: '2.8'
 description: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
-  includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp.
-
-
+  includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The toolkit
+  includes full support for chat, streaming, tool calling, audio, images, files, and
+  JSON Schema generation.
 email:
 - azantar@proton.me
 - 0x1eef@proton.me
@@ -282,10 +282,10 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.7.1
 specification_version: 4
 summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
-  OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp.
-
-
+  OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The toolkit includes
+  full support for chat, streaming, tool calling, audio, images, files, and JSON Schema
+  generation.
 test_files: []