llm.rb 0.6.1 → 0.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +18 -77
- data/lib/json/schema/boolean.rb +1 -1
- data/lib/llm/function.rb +0 -1
- data/lib/llm/providers/anthropic/format.rb +3 -0
- data/lib/llm/providers/gemini/format.rb +2 -4
- data/lib/llm/providers/llamacpp.rb +43 -0
- data/lib/llm/providers/ollama/format.rb +1 -2
- data/lib/llm/providers/openai/format.rb +2 -4
- data/lib/llm/response/completion.rb +1 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +22 -13
- metadata +3 -2
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 0abd0e522f099e1e53a3b4d7d3e4648a7c42d2ce23e9c17b9e09f97803e1e31d
+  data.tar.gz: 2c7b8b38570b4cafb20c61e479f16bc9a7be3e81a651a189d6e15967da796fe3
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b309e59522e4f80a78d6a34fdf5b7aa0f06ab14ad79e845b92f5ad87537aba07532bb5b6d24bbb6d13386b32a0235ee6fc0308c7fe27496bb95da2016a27dc02
+  data.tar.gz: 61e2ecd8a53d600977f8a97592b63ed978d2659ed49513508aea2e3ab66c7fe163790eec45dc2a58ccd8aa54fca6fcb89eeb58243861c3a3238e19d86a2a00c7
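The checksummed archives live inside the published `.gem` file, which is itself a plain tar. A minimal Ruby sketch for reproducing the SHA256 values above, assuming `metadata.gz` and `data.tar.gz` have first been extracted with `tar -xf llm.rb-0.6.2.gem`:

```ruby
require "digest"

# Print the SHA256 of each archive; compare against checksums.yaml.
%w[metadata.gz data.tar.gz].each do |file|
  puts "#{file}: #{Digest::SHA256.file(file).hexdigest}"
end
```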
data/README.md
CHANGED

@@ -1,17 +1,17 @@
 ## About
 
-llm.rb is a zero-dependency Ruby toolkit for Large Language Models
-OpenAI, Gemini, Anthropic, and
-with full support for chat, tool calling, audio,
-JSON Schema generation.
+llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
+includes OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple
+and composable – with full support for chat, tool calling, audio,
+images, files, and JSON Schema generation.
 
 ## Features
 
 #### General
-- ✅
+- ✅ A single unified interface for multiple providers
 - 📦 Zero dependencies outside Ruby's standard library
-- 🔌 Model introspection and selection
 - 🚀 Optimized for performance and low memory usage
+- 🔌 Retrieve models dynamically for introspection and selection
 
 #### Chat, Agents
 - 🧠 Stateless and stateful chat via completions and responses API

@@ -46,6 +46,7 @@ llm = LLM.openai(key: "yourapikey")
 llm = LLM.gemini(key: "yourapikey")
 llm = LLM.anthropic(key: "yourapikey")
 llm = LLM.ollama(key: nil)
+llm = LLM.llamacpp(key: nil)
 llm = LLM.voyageai(key: "yourapikey")
 ```
 
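Every constructor in the block above returns a provider behind the same unified interface (the first feature bullet), so swapping backends is a one-line change. A small sketch, using only constructors the README itself shows:

```ruby
require "llm"

# Choose a backend at runtime; the rest of the program stays the same.
llm = case ENV["PROVIDER"]
      when "openai" then LLM.openai(key: ENV["KEY"])
      when "gemini" then LLM.gemini(key: ENV["KEY"])
      else LLM.ollama(key: nil)
      end
```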
|
@@ -144,9 +145,8 @@ composition of objects, the generation of a schema is delegated to another objec
 who is responsible for and an expert in the generation of JSON schemas. We will use
 the
 [llmrb/json-schema](https://github.com/llmrb/json-schema)
-library
-
-The interface is designed so you could drop in any other library in its place:
+library for the sake of the examples – the interface is designed so you
+could drop in any other library in its place:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -483,35 +483,9 @@ bot.chat "Hello #{model.id} :)"
 bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
 ```
 
-
+## Documentation
 
-
-
-When it comes to the generation of audio, images, and video memory consumption
-can be a potential problem. There are a few strategies in place to deal with this,
-and one lesser known strategy is to let a child process handle the memory cost
-by delegating media generation to a child process.
-
-Once a child process exits, any memory it had used is freed immediately and
-the parent process can continue to have a small memory footprint. In a sense
-it is similar to being able to use malloc + free from Ruby. The following example
-demonstrates how that might look like in practice:
-
-```ruby
-#!/usr/bin/env ruby
-require "llm"
-
-llm = LLM.gemini(key: ENV["KEY"])
-fork do
-  %w[dog cat sheep goat capybara].each do |animal|
-    res = llm.images.create(prompt: "a #{animal} on a rocket to the moon")
-    IO.copy_stream res.images[0], "#{animal}.png"
-  end
-end
-Process.wait
-```
-
-## API reference
+### API
 
 The README tries to provide a high-level overview of the library. For everything
 else there's the API reference. It covers classes and methods that the README glances
@@ -519,31 +493,11 @@ over or doesn't cover at all. The API reference is available at
 [0x1eef.github.io/x/llm.rb](https://0x1eef.github.io/x/llm.rb).
 
 
-###
-
-#### Gemini
-
-* [LLM::Gemini](https://0x1eef.github.io/x/llm.rb/LLM/Gemini.html)
-* [LLM::Gemini::Images](https://0x1eef.github.io/x/llm.rb/LLM/Gemini/Images.html)
-* [LLM::Gemini::Audio](https://0x1eef.github.io/x/llm.rb/LLM/Gemini/Audio.html)
-
-#### OpenAI
+### Guides
 
-
-
-
-
-#### Anthropic
-* [LLM::Anthropic](https://0x1eef.github.io/x/llm.rb/LLM/Anthropic.html)
-
-#### Ollama
-* [LLM::Ollama](https://0x1eef.github.io/x/llm.rb/LLM/Ollama.html)
-
-## Install
-
-llm.rb can be installed via rubygems.org:
-
-    gem install llm.rb
+The [docs/](docs/) directory contains some additional documentation that
+didn't quite make it into the README. It covers the design guidelines that
+the library follows, and some strategies for memory management.
 
 ## See also
 

@@ -554,24 +508,11 @@ llm.rb and serves as a demonstration of the library's capabilities. The
 [demo](https://github.com/llmrb/llm-shell#demos) section has a number of GIF
 previews might be especially interesting!
 
+## Install
 
-
-
-llm.rb provides a clean, dependency-free interface to Large Language Models,
-treating Ruby itself — not Rails or any specific framework — as the primary platform.
-It avoids hidden magic, complex metaprogramming, and heavy DSLs. It is intentionally
-simple and won't compromise on being a simple library, even if that means saying no to
-certain features.
-
-Instead, it embraces a general-purpose, object-oriented design that prioritizes
-explicitness, composability, and clarity. Code should be easy to follow, test, and adapt.
-For that reason we favor small, cooperating objects over deeply nested blocks — a pattern
-that often emerges in DSL-heavy libraries.
+llm.rb can be installed via rubygems.org:
 
-
-from global state or non-standard dependencies. While inspired by ideas from other ecosystems
-(especially Python) it is not a port of any other library — it is a Ruby library written
-by Rubyists who value borrowing good ideas from other languages and ecosystems.
+    gem install llm.rb
 
 ## License
 
data/lib/json/schema/boolean.rb
CHANGED

@@ -5,7 +5,7 @@ class JSON::Schema
   # The {JSON::Schema::Boolean JSON::Schema::Boolean} class represents a
   # boolean value in a JSON schema. It is a subclass of
   # {JSON::Schema::Leaf JSON::Schema::Leaf}.
-  class
+  class Boolean < Leaf
     def to_h
      super.merge!({type: "boolean"})
    end
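The restored line defines `Boolean` as a subclass of `Leaf` whose `to_h` merges the JSON-schema `type` into whatever the parent returns. A simplified, self-contained sketch of that pattern (stand-in classes, not the library's actual `Leaf`, which carries more state):

```ruby
# Simplified stand-ins for JSON::Schema::Leaf and JSON::Schema::Boolean
class Leaf
  def to_h
    {}
  end
end

class Boolean < Leaf
  # Merge this leaf's JSON-schema type into the parent's hash
  def to_h
    super.merge!({type: "boolean"})
  end
end

p Boolean.new.to_h #=> {:type=>"boolean"}
```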
data/lib/llm/function.rb
CHANGED

data/lib/llm/providers/gemini/format.rb
CHANGED

@@ -19,8 +19,7 @@ class LLM::Gemini
   private
 
   ##
-  # @param [
-  # The schema to format
+  # @param [Hash] params
   # @return [Hash]
   def format_schema(params)
     return {} unless params and params[:schema]

@@ -29,8 +28,7 @@ class LLM::Gemini
   end
 
   ##
-  # @param [
-  # The tools to format
+  # @param [Hash] params
   # @return [Hash]
   def format_tools(params)
     return {} unless params and params[:tools]&.any?
data/lib/llm/providers/llamacpp.rb
ADDED

@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # The LlamaCpp class implements a provider for
+  # [llama.cpp](https://github.com/ggml-org/llama.cpp)
+  # through the OpenAI-compatible API provided by the
+  # llama-server binary.
+  class LlamaCpp < OpenAI
+    ##
+    # @param (see LLM::Provider#initialize)
+    # @return [LLM::LlamaCpp]
+    def initialize(host: "localhost", port: 8080, ssl: false, **)
+      super
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def files
+      raise NotImplementedError
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def images
+      raise NotImplementedError
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def audio
+      raise NotImplementedError
+    end
+
+    ##
+    # Returns the default model for chat completions
+    # @see https://ollama.com/library llama3.2
+    # @return [String]
+    def default_model
+      "llama3.2"
+    end
+  end
+end
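A short usage sketch for the new provider, drawn only from what the file above defines (constructor defaults of localhost:8080 without SSL, `llama3.2` as the default model, and `NotImplementedError` for unsupported endpoints); it assumes a llama-server instance is already listening on that port:

```ruby
require "llm"

llm = LLM.llamacpp(key: nil)  # no API key needed for a local llama-server
llm.default_model             #=> "llama3.2"
llm.images                    # raises NotImplementedError (no images endpoint)
```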
data/lib/llm/providers/openai/format.rb
CHANGED

@@ -26,8 +26,7 @@ class LLM::OpenAI
   private
 
   ##
-  # @param [
-  # The schema to format
+  # @param [Hash] params
   # @return [Hash]
   def format_schema(params)
     return {} unless params and params[:schema]

@@ -41,8 +40,7 @@ class LLM::OpenAI
   end
 
   ##
-  # @param [
-  # The tools to format
+  # @param [Hash] params
   # @return [Hash]
   def format_tools(params)
     return {} unless params and params[:tools]&.any?
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
@@ -21,7 +21,7 @@ module LLM
   module_function
 
   ##
-  # @param
+  # @param (see LLM::Provider#initialize)
   # @return (see LLM::Anthropic#initialize)
   def anthropic(**)
     require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)

@@ -30,7 +30,7 @@ module LLM
   end
 
   ##
-  # @param
+  # @param (see LLM::Provider#initialize)
   # @return (see LLM::VoyageAI#initialize)
   def voyageai(**)
     require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)

@@ -38,7 +38,7 @@ module LLM
   end
 
   ##
-  # @param
+  # @param (see LLM::Provider#initialize)
   # @return (see LLM::Gemini#initialize)
   def gemini(**)
     require_relative "llm/providers/gemini" unless defined?(LLM::Gemini)

@@ -46,7 +46,7 @@ module LLM
   end
 
   ##
-  # @param
+  # @param (see LLM::Provider#initialize)
   # @return (see LLM::Ollama#initialize)
   def ollama(key: nil, **)
     require_relative "llm/providers/ollama" unless defined?(LLM::Ollama)

@@ -54,7 +54,16 @@ module LLM
   end
 
   ##
-  # @param
+  # @param key (see LLM::Provider#initialize)
+  # @return (see LLM::LlamaCpp#initialize)
+  def llamacpp(key: nil, **)
+    require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
+    require_relative "llm/providers/llamacpp" unless defined?(LLM::LlamaCpp)
+    LLM::LlamaCpp.new(key:, **)
+  end
+
+  ##
+  # @param key (see LLM::Provider#initialize)
   # @return (see LLM::OpenAI#initialize)
   def openai(**)
     require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)

@@ -64,15 +73,15 @@ module LLM
   ##
   # Define a function
   # @example
-  #
-  #
-  #
-  #
-  #
-  #
-  #
+  #   LLM.function(:system) do |fn|
+  #     fn.description "Run system command"
+  #     fn.params do |schema|
+  #       schema.object(command: schema.string.required)
+  #     end
+  #     fn.define do |params|
+  #       system(params.command)
+  #     end
   #   end
-  # end
   # @param [Symbol] name The name of the function
   # @param [Proc] b The block to define the function
   # @return [LLM::Function] The function object
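The restored `@example` reads straight through now: `fn.description` documents the tool, `fn.params` declares a JSON schema for its arguments, and `fn.define` supplies the implementation. A sketch of capturing the result (how the function is then attached to a chat is not part of this diff, so that step is left out):

```ruby
require "llm"

# Identical to the @example above; LLM.function returns an LLM::Function.
tool = LLM.function(:system) do |fn|
  fn.description "Run system command"
  fn.params do |schema|
    schema.object(command: schema.string.required)
  end
  fn.define do |params|
    system(params.command)
  end
end
```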
metadata
CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.6.1
+  version: 0.6.2
 platform: ruby
 authors:
 - Antar Azri

@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-05-
+date: 2025-05-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: webmock

@@ -202,6 +202,7 @@ files:
 - lib/llm/providers/gemini/models.rb
 - lib/llm/providers/gemini/response_parser.rb
 - lib/llm/providers/gemini/response_parser/completion_parser.rb
+- lib/llm/providers/llamacpp.rb
 - lib/llm/providers/ollama.rb
 - lib/llm/providers/ollama/error_handler.rb
 - lib/llm/providers/ollama/format.rb