llm.rb 4.0.0 → 4.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +2 -2
- data/README.md +226 -192
- data/lib/llm/agent.rb +226 -0
- data/lib/llm/bot.rb +57 -28
- data/lib/llm/error.rb +4 -0
- data/lib/llm/function/tracing.rb +19 -0
- data/lib/llm/function.rb +16 -3
- data/lib/llm/json_adapter.rb +1 -1
- data/lib/llm/message.rb +7 -0
- data/lib/llm/prompt.rb +85 -0
- data/lib/llm/provider.rb +74 -10
- data/lib/llm/providers/anthropic/error_handler.rb +27 -5
- data/lib/llm/providers/anthropic/files.rb +22 -16
- data/lib/llm/providers/anthropic/models.rb +4 -3
- data/lib/llm/providers/anthropic.rb +6 -5
- data/lib/llm/providers/deepseek.rb +3 -3
- data/lib/llm/providers/gemini/error_handler.rb +34 -12
- data/lib/llm/providers/gemini/files.rb +18 -13
- data/lib/llm/providers/gemini/images.rb +4 -3
- data/lib/llm/providers/gemini/models.rb +4 -3
- data/lib/llm/providers/gemini.rb +36 -13
- data/lib/llm/providers/llamacpp.rb +3 -3
- data/lib/llm/providers/ollama/error_handler.rb +28 -6
- data/lib/llm/providers/ollama/models.rb +4 -3
- data/lib/llm/providers/ollama.rb +9 -7
- data/lib/llm/providers/openai/audio.rb +10 -7
- data/lib/llm/providers/openai/error_handler.rb +41 -14
- data/lib/llm/providers/openai/files.rb +19 -14
- data/lib/llm/providers/openai/images.rb +10 -7
- data/lib/llm/providers/openai/models.rb +4 -3
- data/lib/llm/providers/openai/moderations.rb +4 -3
- data/lib/llm/providers/openai/responses.rb +10 -7
- data/lib/llm/providers/openai/vector_stores.rb +34 -23
- data/lib/llm/providers/openai.rb +9 -7
- data/lib/llm/providers/xai.rb +3 -3
- data/lib/llm/providers/zai.rb +2 -2
- data/lib/llm/schema/object.rb +2 -2
- data/lib/llm/schema.rb +16 -2
- data/lib/llm/server_tool.rb +3 -3
- data/lib/llm/session.rb +3 -0
- data/lib/llm/tracer/logger.rb +192 -0
- data/lib/llm/tracer/null.rb +49 -0
- data/lib/llm/tracer/telemetry.rb +255 -0
- data/lib/llm/tracer.rb +134 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +5 -3
- data/llm.gemspec +4 -1
- metadata +39 -3
- data/lib/llm/builder.rb +0 -61
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 567ae33357581ae1602a337ee53dbc86328b4b6c3ee5af5f0b86cf810c039e64
+  data.tar.gz: 4a3d332aad0a2f824966a850c6be36e48894a871d1831f70527e46df5614b207
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6c04ea5dcf20e9757b0bac1d0eb4cc27176fe966f4ad6fd0f5f06b708d8653522be25b454c9e831f2a5d30fa7676f04d289692fb94ae3bd06f98ab0576ccf7f3
+  data.tar.gz: 2f0d46d75e75382a601863fc0c10e4218efb3029c0650d021586007895d6647fd006eeffd7c6e7027204fcb75168be0e4f51b974d08df95397f1a233baa4239b
data/LICENSE
CHANGED
data/README.md
CHANGED
@@ -1,6 +1,11 @@
-
-
->
+<p align="center">
+  <a href="llm.rb"><img src="https://github.com/llmrb/llm.rb/raw/main/llm.png" width="200" height="200" border="0" alt="llm.rb"></a>
+</p>
+<p align="center">
+  <a href="https://0x1eef.github.io/x/llm.rb?rebuild=1"><img src="https://img.shields.io/badge/docs-0x1eef.github.io-blue.svg" alt="RubyDoc"></a>
+  <a href="https://opensource.org/license/0bsd"><img src="https://img.shields.io/badge/License-0BSD-orange.svg?" alt="License"></a>
+  <a href="https://github.com/llmrb/llm.rb/tags"><img src="https://img.shields.io/badge/version-4.2.0-green.svg?" alt="Version"></a>
+</p>

 ## About

@@ -9,104 +14,167 @@ includes OpenAI, Gemini, Anthropic, xAI (Grok), zAI, DeepSeek, Ollama,
 and LlamaCpp. The toolkit includes full support for chat, streaming,
 tool calling, audio, images, files, and structured outputs.

+And it is licensed under the [0BSD License](https://choosealicense.com/licenses/0bsd/) –
+one of the most permissive open source licenses, with minimal conditions for use,
+modification, and/or distribution. Attribution is appreciated, but not required
+by the license. Built with [good music](https://www.youtube.com/watch?v=SNvaqwTbn14)
+and a lot of ☕️.
+
 ## Quick start

 #### REPL

-
+The [LLM::Session](https://0x1eef.github.io/x/llm.rb/LLM/Session.html) class provides
+a session with an LLM provider that maintains conversation history and context across
+multiple requests. The following example implements a simple REPL loop, and the response
+is streamed to the terminal in real-time as it arrives from the provider. The provider
+happens to be OpenAI in this case but it could be any other provider, and `$stdout`
+could be any object that implements the `#<<` method:

 ```ruby
 #!/usr/bin/env ruby
 require "llm"

-llm = LLM.openai(key: ENV
-
+llm = LLM.openai(key: ENV["KEY"])
+ses = LLM::Session.new(llm, stream: $stdout)
 loop do
   print "> "
-
+  ses.talk(STDIN.gets)
   puts
 end
 ```

-####
+#### Schema
+
+The [LLM::Schema](https://0x1eef.github.io/x/llm.rb/LLM/Schema.html) class provides
+a simple DSL for describing the structure of a response that an LLM emits according
+to a JSON schema. The schema lets a client describe what JSON object an LLM should
+emit, and the LLM will abide by the schema to the best of its ability:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+require "pp"
+
+class Report < LLM::Schema
+  property :category, String, "Report category", required: true
+  property :summary, String, "Short summary", required: true
+  property :services, Array[String], "Impacted services", required: true
+  property :timestamp, String, "When it happened", optional: true
+end
+
+llm = LLM.openai(key: ENV["KEY"])
+ses = LLM::Session.new(llm, schema: Report)
+res = ses.talk("Structure this report: 'Database latency spiked at 10:42 UTC, causing 5% request timeouts for 12 minutes.'")
+pp res.messages.first(&:assistant?).content!
+
+##
+# {
+#  "category" => "Performance Incident",
+#  "summary" => "Database latency spiked, causing 5% request timeouts for 12 minutes.",
+#  "services" => ["Database"],
+#  "timestamp" => "2024-06-05T10:42:00Z"
+# }
+```

-
-> roles, but the examples in this README stick to `user` roles since they are
-> supported across all providers.
+#### Tools

-
+The [LLM::Tool](https://0x1eef.github.io/x/llm.rb/LLM/Tool.html) class lets you
+define callable tools for the model. Each tool is described to the LLM as a function
+it can invoke to fetch information or perform an action. The model decides when to
+call tools based on the conversation; when it does, llm.rb runs the tool and sends
+the result back on the next request. The following example implements a simple tool
+that runs shell commands:

 ```ruby
 #!/usr/bin/env ruby
 require "llm"

-
-
+class System < LLM::Tool
+  name "system"
+  description "Run a shell command"
+  param :command, String, "Command to execute", required: true

-
-
-
-  it.user "How many days were in that year?"
+  def call(command:)
+    {success: system(command)}
+  end
 end

-
-
+llm = LLM.openai(key: ENV["KEY"])
+ses = LLM::Session.new(llm, tools: [System])
+ses.talk("Run `date`.")
+ses.talk(ses.functions.map(&:call)) # report return value to the LLM
 ```

-####
+#### Agents

-
+The [LLM::Agent](https://0x1eef.github.io/x/llm.rb/LLM/Agent.html)
+class provides a class-level DSL for defining reusable, preconfigured
+assistants with defaults for model, tools, schema, and instructions.
+Instructions are injected only on the first request, and unlike
+[LLM::Session](https://0x1eef.github.io/x/llm.rb/LLM/Session.html),
+an [LLM::Agent](https://0x1eef.github.io/x/llm.rb/LLM/Agent.html)
+will automatically call tools when needed:

 ```ruby
 #!/usr/bin/env ruby
 require "llm"

-class
-
-
-
+class SystemAdmin < LLM::Agent
+  model "gpt-4.1"
+  instructions "You are a Linux system admin"
+  tools Shell
+  schema Result
 end

-llm = LLM.openai(key: ENV
-
-
-res = bot.chat bot.image_url(img.urls.first)
-data = res.choices.find(&:assistant?).content!
-
-puts "age: #{data["age"]}"
-puts "confidence: #{data["confidence"]}"
-puts "notes: #{data["notes"]}" if data["notes"]
+llm = LLM.openai(key: ENV["KEY"])
+agent = SystemAdmin.new(llm)
+res = agent.talk("Run 'date'")
 ```

-####
+#### Prompts

-
+The [LLM::Prompt](https://0x1eef.github.io/x/llm.rb/LLM/Prompt.html)
+class represents a single request composed of multiple messages.
+It is useful when a single turn needs more than one message, for example:
+system instructions plus one or more user messages, or a replay of
+prior context:

 ```ruby
 #!/usr/bin/env ruby
 require "llm"

-
-
-  description "Run a shell command"
-  param :command, String, "Command to execute", required: true
+llm = LLM.openai(key: ENV["KEY"])
+ses = LLM::Session.new(llm)

-
-
-
+prompt = ses.prompt do
+  system "Be concise and show your reasoning briefly."
+  user "If a train goes 60 mph for 1.5 hours, how far does it travel?"
+  user "Now double the speed for the same time."
 end

-
-
+ses.talk(prompt)
+```
+
+But prompts are not session-scoped. [LLM::Prompt](https://0x1eef.github.io/x/llm.rb/LLM/Prompt.html)
+is a first-class object that you can build and pass around independently of a session.
+This enables patterns where you compose a prompt in one part of your code,
+and execute it through a session elsewhere:

-
-
-
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(key: ENV["KEY"])
+ses = LLM::Session.new(llm)
+
+prompt = LLM::Prompt.new(llm) do
+  system "Be concise and show your reasoning briefly."
+  user "If a train goes 60 mph for 1.5 hours, how far does it travel?"
+  user "Now double the speed for the same time."
 end

-
-bot.chat(bot.functions.map(&:call))
-bot.messages.select(&:assistant?).each { |m| puts "[#{m.role}] #{m.content}" }
+ses.talk(prompt)
 ```

 ## Features
@@ -115,11 +183,18 @@ bot.messages.select(&:assistant?).each { |m| puts "[#{m.role}] #{m.content}" }
 - ✅ Unified API across providers
 - 📦 Zero runtime deps (stdlib-only)
 - 🧩 Pluggable JSON adapters (JSON, Oj, Yajl, etc)
--
+- 🧱 Builtin tracer API ([LLM::Tracer](https://0x1eef.github.io/x/llm.rb/LLM/Tracer.html))
+
+#### Optionals
+
+- ♻️ Optional persistent HTTP pool via net-http-persistent ([net-http-persistent](https://github.com/drbrain/net-http-persistent))
+- 📈 Optional telemetry support via OpenTelemetry ([opentelemetry-sdk](https://github.com/open-telemetry/opentelemetry-ruby))
+- 🪵 Optional logging support via Ruby's standard library ([ruby/logger](https://github.com/ruby/logger))

 #### Chat, Agents
 - 🧠 Stateless + stateful chat (completions + responses)
 - 🤖 Tool calling / function execution
+- 🔁 Agent tool-call auto-execution (bounded)
 - 🗂️ JSON Schema structured output
 - 📡 Streaming responses

@@ -230,115 +305,97 @@ res3 = llm.responses.create "message 3", previous_response_id: res2.response_id
 puts res3.output_text
 ```

-####
-
-The llm.rb library is thread-safe and can be used in a multi-threaded
-environments but it is important to keep in mind that the
-[LLM::Provider](https://0x1eef.github.io/x/llm.rb/LLM/Provider.html)
-and
-[LLM::Bot](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html)
-classes should be instantiated once per thread, and not shared
-between threads. Generally the library tries to avoid global or
-shared state but where it exists reentrant locks are used to
-ensure thread-safety.
-
-### Conversations
+#### Telemetry

-
+The llm.rb library includes telemetry support through its tracer API, and it
+can be used to trace LLM requests. It can be useful for debugging, monitoring,
+and observability. The primary use case in mind is integration with tools like
+[LangSmith](https://www.langsmith.com/).

-The
-[
-
-
-
-
-
+The telemetry implementation uses the [opentelemetry-sdk](https://github.com/open-telemetry/opentelemetry-ruby)
+and is based on the [gen-ai telemetry spec(s)](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/).
+This feature is optional, disabled by default, and the [opentelemetry-sdk](https://github.com/open-telemetry/opentelemetry-ruby)
+gem should be installed separately. Please also note that llm.rb will take care of
+loading and configuring the [opentelemetry-sdk](https://github.com/open-telemetry/opentelemetry-ruby)
+library for you, and llm.rb configures an in-memory exporter that doesn't have
+external dependencies by default:

 ```ruby
 #!/usr/bin/env ruby
 require "llm"
+require "pp"

-llm
-
-image_url = "https://upload.wikimedia.org/wikipedia/commons/9/97/The_Earth_seen_from_Apollo_17.jpg"
-image_path = "/tmp/llm-logo.png"
-pdf_path = "/tmp/llm-handbook.pdf"
-
-prompt = bot.build_prompt do
-  it.user ["Tell me about this image", bot.image_url(image_url)]
-  it.user ["Tell me about this image", bot.local_file(image_path)]
-  it.user ["Tell me about this PDF", bot.local_file(pdf_path)]
-end
-bot.chat(prompt)
-bot.messages.each { |m| puts "[#{m.role}] #{m.content}" }
-```
+llm = LLM.openai(key: ENV["KEY"])
+llm.tracer = LLM::Tracer::Telemetry.new(llm)

-
+ses = LLM::Session.new(llm)
+ses.talk "Hello world!"
+ses.talk "Adios."
+ses.tracer.spans.each { |span| pp span }
+```

-The
-
-
-
-
-
+The llm.rb library also supports export through the OpenTelemetry Protocol (OTLP).
+OTLP is a standard protocol for exporting telemetry data, and it is supported by
+multiple observability tools. By default the export is batched in the background,
+and happens automatically but short lived scripts might need to
+[explicitly flush](https://0x1eef.github.io/x/llm.rb/LLM/Tracer/Telemetry#flush!-instance_method)
+the exporter before they exit – otherwise some telemetry data could be lost:

 ```ruby
-#!/usr/bin/env ruby
-require "llm"
+#!/usr/bin/env ruby
+require "llm"
+require "opentelemetry-exporter-otlp"

-
-
-
-
-
-
-prompt = bot.build_prompt do
-  it.user ["Tell me about this image", bot.image_url(image_url)]
-  it.user ["Tell me about this image", bot.local_file(image_path)]
-  it.user ["Tell me about the PDF", bot.local_file(pdf_path)]
-end
-bot.chat(prompt)
-```
+endpoint = "https://api.smith.langchain.com/otel/v1/traces"
+exporter = OpenTelemetry::Exporter::OTLP::Exporter.new(endpoint:)
+
+llm = LLM.openai(key: ENV["KEY"])
+llm.tracer = LLM::Tracer::Telemetry.new(llm, exporter:)

-
+ses = LLM::Session.new(llm)
+ses.talk "hello"
+ses.talk "how are you?"

-
-
-
-
-
+at_exit do
+  # Helpful for short-lived scripts, otherwise the exporter
+  # might not have time to flush pending telemetry data
+  ses.tracer.flush!
+end
+```
+
+#### Logger
+
+The llm.rb library includes simple logging support through its
+tracer API, and Ruby's standard library ([ruby/logger](https://github.com/ruby/logger)).
+This feature is optional, disabled by default, and it can be useful for debugging and/or
+monitoring requests to LLM providers. The `path` or `io` options can be used to choose
+where logs are written to, and by default it is set to `$stdout`:

 ```ruby
 #!/usr/bin/env ruby
 require "llm"

-class Player < LLM::Schema
-  property :name, String, "The player's name", required: true
-  property :position, Array[Number], "The player's [x, y] position", required: true
-end
-
 llm = LLM.openai(key: ENV["KEY"])
-
-prompt = bot.build_prompt do
-  it.user "The player's name is Sam and their position is (7, 12)."
-  it.user "Return the player's name and position"
-end
+llm.tracer = LLM::Tracer::Logger.new(llm, io: $stdout)

-
-
-
+ses = LLM::Session.new(llm)
+ses.talk "Hello world!"
+ses.talk "Adios."
 ```

-
-
-#### Introduction
+#### Thread Safety

-
-it is
-
-
-[LLM::
+The llm.rb library is thread-safe and can be used in a multi-threaded
+environments but it is important to keep in mind that the
+[LLM::Provider](https://0x1eef.github.io/x/llm.rb/LLM/Provider.html)
+and
+[LLM::Session](https://0x1eef.github.io/x/llm.rb/LLM/Session.html)
+classes should be instantiated once per thread, and not shared
+between threads. Generally the library tries to avoid global or
+shared state but where it exists reentrant locks are used to
+ensure thread-safety.

+### Tools

 #### LLM::Function

@@ -346,13 +403,7 @@ The following example demonstrates [LLM::Function](https://0x1eef.github.io/x/ll
 and how it can define a local function (which happens to be a tool), and how
 a provider (such as OpenAI) can then detect when we should call the function.
 Its most notable feature is that it can act as a closure and has access to
-its surrounding scope, which can be useful in some situations
-
-The
-[LLM::Bot#functions](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html#functions-instance_method)
-method returns an array of functions that can be called after a `chat` interaction
-if the LLM detects a function should be called. You would then typically call these
-functions and send their results back to the LLM in a subsequent `chat` call:
+its surrounding scope, which can be useful in some situations:

 ```ruby
 #!/usr/bin/env ruby
@@ -373,14 +424,14 @@ tool = LLM.function(:system) do |fn|
   end
 end

-
-
+ses = LLM::Session.new(llm, tools: [tool])
+ses.talk "Your task is to run shell commands via a tool.", role: :user

-
-
+ses.talk "What is the current date?", role: :user
+ses.talk ses.functions.map(&:call) # report return value to the LLM

-
-
+ses.talk "What operating system am I running?", role: :user
+ses.talk ses.functions.map(&:call) # report return value to the LLM

 ##
 # {stderr: "", stdout: "Thu May 1 10:01:02 UTC 2025"}
@@ -420,14 +471,14 @@ class System < LLM::Tool
 end

 llm = LLM.openai(key: ENV["KEY"])
-
-
+ses = LLM::Session.new(llm, tools: [System])
+ses.talk "Your task is to run shell commands via a tool.", role: :user

-
-
+ses.talk "What is the current date?", role: :user
+ses.talk ses.functions.map(&:call) # report return value to the LLM

-
-
+ses.talk "What operating system am I running?", role: :user
+ses.talk ses.functions.map(&:call) # report return value to the LLM

 ##
 # {stderr: "", stdout: "Thu May 1 10:01:02 UTC 2025"}
@@ -450,53 +501,36 @@ it has been uploaded. The file (a specialized instance of
 require "llm"

 llm = LLM.openai(key: ENV["KEY"])
-
+ses = LLM::Session.new(llm)
 file = llm.files.create(file: "/tmp/llm-book.pdf")
-res =
-res.
+res = ses.talk ["Tell me about this file", file]
+res.messages.each { |m| puts "[#{m.role}] #{m.content}" }
 ```

 ### Prompts

 #### Multimodal

-
-
-
-
-
-
-provider.
-
-For instance, to specify an image URL, you would use
-`bot.image_url`. For a local file, `bot.local_file`. For an
-already uploaded file managed by the LLM provider's Files API,
-`bot.remote_file`. This approach ensures clarity and allows
-llm.rb to correctly format the input for each provider's
-specific requirements.
+LLMs are great with text, but many can also handle images, audio, video,
+and URLs. With llm.rb you pass those inputs by tagging them with one of
+the following methods. And for multipart prompts, we can pass an array
+where each element is a part of the input. See the example below for
+details, in the meantime here are the methods to know for multimodal
+inputs:

-
-
+* `ses.image_url` for an image URL
+* `ses.local_file` for a local file
+* `ses.remote_file` for a file already uploaded via the provider's Files API

 ```ruby
 #!/usr/bin/env ruby
 require "llm"

 llm = LLM.openai(key: ENV["KEY"])
-
-
-
-
-
-res1 = bot.chat ["Tell me about this image URL", bot.image_url(image_url)]
-res1.choices.each { |m| puts "[#{m.role}] #{m.content}" }
-
-file = llm.files.create(file: pdf_path)
-res2 = bot.chat ["Tell me about this PDF", bot.remote_file(file)]
-res2.choices.each { |m| puts "[#{m.role}] #{m.content}" }
-
-res3 = bot.chat ["Tell me about this image", bot.local_file(image_path)]
-res3.choices.each { |m| puts "[#{m.role}] #{m.content}" }
+ses = LLM::Session.new(llm)
+res = ses.talk ["Tell me about this image URL", ses.image_url(url)]
+res = ses.talk ["Tell me about this PDF", ses.remote_file(file)]
+res = ses.talk ["Tell me about this image", ses.local_file(path)]
 ```

 ### Audio
@@ -674,9 +708,9 @@ end
 ##
 # Select a model
 model = llm.models.all.find { |m| m.id == "gpt-3.5-turbo" }
-
-res =
-res.
+ses = LLM::Session.new(llm, model: model.id)
+res = ses.talk "Hello #{model.id} :)"
+res.messages.each { |m| puts "[#{m.role}] #{m.content}" }
 ```

 ## Install