llm.rb 4.1.0 → 4.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +2 -2
- data/README.md +186 -172
- data/lib/llm/agent.rb +49 -37
- data/lib/llm/bot.rb +57 -28
- data/lib/llm/function/tracing.rb +19 -0
- data/lib/llm/function.rb +16 -3
- data/lib/llm/json_adapter.rb +1 -1
- data/lib/llm/message.rb +7 -0
- data/lib/llm/prompt.rb +85 -0
- data/lib/llm/provider.rb +56 -10
- data/lib/llm/providers/anthropic/error_handler.rb +27 -5
- data/lib/llm/providers/anthropic/files.rb +22 -16
- data/lib/llm/providers/anthropic/models.rb +4 -3
- data/lib/llm/providers/anthropic.rb +6 -5
- data/lib/llm/providers/deepseek.rb +3 -3
- data/lib/llm/providers/gemini/error_handler.rb +34 -12
- data/lib/llm/providers/gemini/files.rb +18 -13
- data/lib/llm/providers/gemini/images.rb +4 -3
- data/lib/llm/providers/gemini/models.rb +4 -3
- data/lib/llm/providers/gemini.rb +9 -7
- data/lib/llm/providers/llamacpp.rb +3 -3
- data/lib/llm/providers/ollama/error_handler.rb +28 -6
- data/lib/llm/providers/ollama/models.rb +4 -3
- data/lib/llm/providers/ollama.rb +9 -7
- data/lib/llm/providers/openai/audio.rb +10 -7
- data/lib/llm/providers/openai/error_handler.rb +41 -14
- data/lib/llm/providers/openai/files.rb +19 -14
- data/lib/llm/providers/openai/images.rb +10 -7
- data/lib/llm/providers/openai/models.rb +4 -3
- data/lib/llm/providers/openai/moderations.rb +4 -3
- data/lib/llm/providers/openai/responses.rb +10 -7
- data/lib/llm/providers/openai/vector_stores.rb +34 -23
- data/lib/llm/providers/openai.rb +9 -7
- data/lib/llm/providers/xai.rb +3 -3
- data/lib/llm/providers/zai.rb +2 -2
- data/lib/llm/schema/object.rb +2 -2
- data/lib/llm/schema.rb +16 -2
- data/lib/llm/server_tool.rb +3 -3
- data/lib/llm/session.rb +3 -0
- data/lib/llm/tracer/logger.rb +192 -0
- data/lib/llm/tracer/null.rb +49 -0
- data/lib/llm/tracer/telemetry.rb +255 -0
- data/lib/llm/tracer.rb +134 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +4 -3
- data/llm.gemspec +4 -1
- metadata +38 -3
- data/lib/llm/builder.rb +0 -79
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 567ae33357581ae1602a337ee53dbc86328b4b6c3ee5af5f0b86cf810c039e64
+  data.tar.gz: 4a3d332aad0a2f824966a850c6be36e48894a871d1831f70527e46df5614b207
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6c04ea5dcf20e9757b0bac1d0eb4cc27176fe966f4ad6fd0f5f06b708d8653522be25b454c9e831f2a5d30fa7676f04d289692fb94ae3bd06f98ab0576ccf7f3
+  data.tar.gz: 2f0d46d75e75382a601863fc0c10e4218efb3029c0650d021586007895d6647fd006eeffd7c6e7027204fcb75168be0e4f51b974d08df95397f1a233baa4239b
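As an aside, the new SHA256 values above can be checked locally with Ruby's standard library. A minimal sketch, assuming the gem has already been fetched and unpacked so that `metadata.gz` and `data.tar.gz` sit in the current directory (the paths are illustrative, not part of the release):

```ruby
#!/usr/bin/env ruby
# Hedged sketch: verify the 4.2.0 checksums listed above using
# only Ruby's stdlib. Adjust the paths to wherever the unpacked
# gem contents actually live.
require "digest"

expected = {
  "metadata.gz" => "567ae33357581ae1602a337ee53dbc86328b4b6c3ee5af5f0b86cf810c039e64",
  "data.tar.gz" => "4a3d332aad0a2f824966a850c6be36e48894a871d1831f70527e46df5614b207"
}

expected.each do |file, sha256|
  actual = Digest::SHA256.file(file).hexdigest
  puts "#{file}: #{actual == sha256 ? 'OK' : 'MISMATCH'}"
end
```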
data/LICENSE
CHANGED
data/README.md
CHANGED
@@ -1,6 +1,11 @@
-
-
->
+<p align="center">
+  <a href="llm.rb"><img src="https://github.com/llmrb/llm.rb/raw/main/llm.png" width="200" height="200" border="0" alt="llm.rb"></a>
+</p>
+<p align="center">
+  <a href="https://0x1eef.github.io/x/llm.rb?rebuild=1"><img src="https://img.shields.io/badge/docs-0x1eef.github.io-blue.svg" alt="RubyDoc"></a>
+  <a href="https://opensource.org/license/0bsd"><img src="https://img.shields.io/badge/License-0BSD-orange.svg?" alt="License"></a>
+  <a href="https://github.com/llmrb/llm.rb/tags"><img src="https://img.shields.io/badge/version-4.2.0-green.svg?" alt="Version"></a>
+</p>
 
 ## About
 
@@ -9,30 +14,39 @@ includes OpenAI, Gemini, Anthropic, xAI (Grok), zAI, DeepSeek, Ollama,
 and LlamaCpp. The toolkit includes full support for chat, streaming,
 tool calling, audio, images, files, and structured outputs.
 
+And it is licensed under the [0BSD License](https://choosealicense.com/licenses/0bsd/) –
+one of the most permissive open source licenses, with minimal conditions for use,
+modification, and/or distribution. Attribution is appreciated, but not required
+by the license. Built with [good music](https://www.youtube.com/watch?v=SNvaqwTbn14)
+and a lot of ☕️.
+
 ## Quick start
 
 #### REPL
 
-The [LLM::
+The [LLM::Session](https://0x1eef.github.io/x/llm.rb/LLM/Session.html) class provides
 a session with an LLM provider that maintains conversation history and context across
-multiple requests. The following example implements a simple REPL loop
+multiple requests. The following example implements a simple REPL loop, and the response
+is streamed to the terminal in real-time as it arrives from the provider. The provider
+happens to be OpenAI in this case but it could be any other provider, and `$stdout`
+could be any object that implements the `#<<` method:
 
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
 
 llm = LLM.openai(key: ENV["KEY"])
-
+ses = LLM::Session.new(llm, stream: $stdout)
 loop do
   print "> "
-
+  ses.talk(STDIN.gets)
   puts
 end
 ```
 
 #### Schema
 
-The [LLM::Schema](https://0x1eef.github.io/x/llm.rb/LLM/
+The [LLM::Schema](https://0x1eef.github.io/x/llm.rb/LLM/Schema.html) class provides
 a simple DSL for describing the structure of a response that an LLM emits according
 to a JSON schema. The schema lets a client describe what JSON object an LLM should
 emit, and the LLM will abide by the schema to the best of its ability:
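The note in the hunk above that `$stdout` "could be any object that implements the `#<<` method" suggests custom stream sinks. A minimal sketch of one in plain Ruby; the class name and buffering behavior are illustrative, not part of llm.rb:

```ruby
# Hypothetical sink: accumulates streamed chunks while also
# mirroring them to the terminal. Anything with #<< works here.
class CapturingSink
  attr_reader :buffer

  def initialize
    @buffer = +""
  end

  def <<(chunk)
    @buffer << chunk.to_s # keep a copy of everything streamed
    $stdout << chunk      # and still echo it to the terminal
    self
  end
end

# Usage, per the session API shown in the diff above:
#   ses = LLM::Session.new(llm, stream: CapturingSink.new)
```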
@@ -40,21 +54,32 @@ emit, and the LLM will abide by the schema to the best of its ability:
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
+require "pp"
 
-class
-  property :
-  property :
-  property :
+class Report < LLM::Schema
+  property :category, String, "Report category", required: true
+  property :summary, String, "Short summary", required: true
+  property :services, Array[String], "Impacted services", required: true
+  property :timestamp, String, "When it happened", optional: true
 end
 
 llm = LLM.openai(key: ENV["KEY"])
-
-
+ses = LLM::Session.new(llm, schema: Report)
+res = ses.talk("Structure this report: 'Database latency spiked at 10:42 UTC, causing 5% request timeouts for 12 minutes.'")
+pp res.messages.first(&:assistant?).content!
+
+##
+# {
+#   "category" => "Performance Incident",
+#   "summary" => "Database latency spiked, causing 5% request timeouts for 12 minutes.",
+#   "services" => ["Database"],
+#   "timestamp" => "2024-06-05T10:42:00Z"
+# }
 ```
 
 #### Tools
 
-The [LLM::Tool](https://0x1eef.github.io/x/llm.rb/LLM/
+The [LLM::Tool](https://0x1eef.github.io/x/llm.rb/LLM/Tool.html) class lets you
 define callable tools for the model. Each tool is described to the LLM as a function
 it can invoke to fetch information or perform an action. The model decides when to
 call tools based on the conversation; when it does, llm.rb runs the tool and sends
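Since `content!` in the schema example above yields a plain Hash, downstream code can lift it into ordinary Ruby data. A small sketch under that assumption; the `ReportData` struct and helper are illustrative, not part of llm.rb:

```ruby
# Hypothetical consumer of the structured output shown above.
ReportData = Struct.new(:category, :summary, :services, :timestamp)

def to_report(hash)
  ReportData.new(*hash.values_at("category", "summary", "services", "timestamp"))
end

report = to_report(
  "category"  => "Performance Incident",
  "summary"   => "Database latency spiked, causing 5% request timeouts for 12 minutes.",
  "services"  => ["Database"],
  "timestamp" => "2024-06-05T10:42:00Z"
)
puts report.category # => "Performance Incident"
```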
@@ -76,19 +101,19 @@ class System < LLM::Tool
 end
 
 llm = LLM.openai(key: ENV["KEY"])
-
-
-
+ses = LLM::Session.new(llm, tools: [System])
+ses.talk("Run `date`.")
+ses.talk(ses.functions.map(&:call)) # report return value to the LLM
 ```
 
 #### Agents
 
-The [LLM::Agent](https://0x1eef.github.io/x/llm.rb/LLM/
+The [LLM::Agent](https://0x1eef.github.io/x/llm.rb/LLM/Agent.html)
 class provides a class-level DSL for defining reusable, preconfigured
 assistants with defaults for model, tools, schema, and instructions.
 Instructions are injected only on the first request, and unlike
-[LLM::
-an [LLM::Agent](https://0x1eef.github.io/x/llm.rb/LLM/
+[LLM::Session](https://0x1eef.github.io/x/llm.rb/LLM/Session.html),
+an [LLM::Agent](https://0x1eef.github.io/x/llm.rb/LLM/Agent.html)
 will automatically call tools when needed:
 
 ```ruby
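The `ses.talk(ses.functions.map(&:call))` step above is one round of the tool loop. A sketch of generalizing it, assuming (as the diff suggests) that `ses.functions` returns an array of pending calls that empties once results are reported back:

```ruby
# Hypothetical helper: keep running requested tools and reporting
# their return values until the model stops asking, with a bound
# to guard against runaway tool use.
def drain_tools(ses, max_rounds: 5)
  max_rounds.times do
    pending = ses.functions
    break if pending.empty?
    ses.talk(pending.map(&:call))
  end
end

# e.g. drain_tools(ses) after ses.talk("Run `date`.")
```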
@@ -104,28 +129,52 @@ end
 
 llm = LLM.openai(key: ENV["KEY"])
 agent = SystemAdmin.new(llm)
-res = agent.
+res = agent.talk("Run 'date'")
 ```
 
 #### Prompts
 
-The [LLM::
-
-
-
+The [LLM::Prompt](https://0x1eef.github.io/x/llm.rb/LLM/Prompt.html)
+class represents a single request composed of multiple messages.
+It is useful when a single turn needs more than one message, for example:
+system instructions plus one or more user messages, or a replay of
+prior context:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(key: ENV["KEY"])
+ses = LLM::Session.new(llm)
+
+prompt = ses.prompt do
+  system "Be concise and show your reasoning briefly."
+  user "If a train goes 60 mph for 1.5 hours, how far does it travel?"
+  user "Now double the speed for the same time."
+end
+
+ses.talk(prompt)
+```
+
+But prompts are not session-scoped. [LLM::Prompt](https://0x1eef.github.io/x/llm.rb/LLM/Prompt.html)
+is a first-class object that you can build and pass around independently of a session.
+This enables patterns where you compose a prompt in one part of your code,
+and execute it through a session elsewhere:
 
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
 
 llm = LLM.openai(key: ENV["KEY"])
-
-
-
-
-
+ses = LLM::Session.new(llm)
+
+prompt = LLM::Prompt.new(llm) do
+  system "Be concise and show your reasoning briefly."
+  user "If a train goes 60 mph for 1.5 hours, how far does it travel?"
+  user "Now double the speed for the same time."
 end
-
+
+ses.talk(prompt)
 ```
 
 ## Features
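Because `LLM::Prompt` is a first-class object, the composition site and the execution site can live in different parts of a codebase. A brief sketch built only on the API shown in the hunk above; the helper name is an assumption:

```ruby
# Hypothetical prompt factory: builds the same prompt shape as the
# README example for any question, to be executed by a session later.
def math_prompt(llm, question)
  LLM::Prompt.new(llm) do
    system "Be concise and show your reasoning briefly."
    user question
  end
end

# Elsewhere:
#   ses = LLM::Session.new(llm)
#   ses.talk math_prompt(llm, "What is 15% of 240?")
```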
@@ -134,7 +183,13 @@ bot.chat(prompt)
 - ✅ Unified API across providers
 - 📦 Zero runtime deps (stdlib-only)
 - 🧩 Pluggable JSON adapters (JSON, Oj, Yajl, etc)
--
+- 🧱 Builtin tracer API ([LLM::Tracer](https://0x1eef.github.io/x/llm.rb/LLM/Tracer.html))
+
+#### Optionals
+
+- ♻️ Optional persistent HTTP pool via net-http-persistent ([net-http-persistent](https://github.com/drbrain/net-http-persistent))
+- 📈 Optional telemetry support via OpenTelemetry ([opentelemetry-sdk](https://github.com/open-telemetry/opentelemetry-ruby))
+- 🪵 Optional logging support via Ruby's standard library ([ruby/logger](https://github.com/ruby/logger))
 
 #### Chat, Agents
 - 🧠 Stateless + stateful chat (completions + responses)
@@ -250,115 +305,97 @@ res3 = llm.responses.create "message 3", previous_response_id: res2.response_id
 puts res3.output_text
 ```
 
-####
-
-The llm.rb library is thread-safe and can be used in a multi-threaded
-environments but it is important to keep in mind that the
-[LLM::Provider](https://0x1eef.github.io/x/llm.rb/LLM/Provider.html)
-and
-[LLM::Bot](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html)
-classes should be instantiated once per thread, and not shared
-between threads. Generally the library tries to avoid global or
-shared state but where it exists reentrant locks are used to
-ensure thread-safety.
-
-### Conversations
+#### Telemetry
 
-
+The llm.rb library includes telemetry support through its tracer API, and it
+can be used to trace LLM requests. It can be useful for debugging, monitoring,
+and observability. The primary use case in mind is integration with tools like
+[LangSmith](https://www.langsmith.com/).
 
-The
-[
-
-
-
-
+The telemetry implementation uses the [opentelemetry-sdk](https://github.com/open-telemetry/opentelemetry-ruby)
+and is based on the [gen-ai telemetry spec(s)](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/).
+This feature is optional, disabled by default, and the [opentelemetry-sdk](https://github.com/open-telemetry/opentelemetry-ruby)
+gem should be installed separately. Please also note that llm.rb will take care of
+loading and configuring the [opentelemetry-sdk](https://github.com/open-telemetry/opentelemetry-ruby)
+library for you, and llm.rb configures an in-memory exporter that doesn't have
+external dependencies by default:
 
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
+require "pp"
 
-llm
-
-image_url = "https://upload.wikimedia.org/wikipedia/commons/9/97/The_Earth_seen_from_Apollo_17.jpg"
-image_path = "/tmp/llm-logo.png"
-pdf_path = "/tmp/llm-handbook.pdf"
-
-prompt = bot.build_prompt do
-  it.user ["Tell me about this image", bot.image_url(image_url)]
-  it.user ["Tell me about this image", bot.local_file(image_path)]
-  it.user ["Tell me about this PDF", bot.local_file(pdf_path)]
-end
-bot.chat(prompt)
-bot.messages.each { |m| puts "[#{m.role}] #{m.content}" }
-```
+llm = LLM.openai(key: ENV["KEY"])
+llm.tracer = LLM::Tracer::Telemetry.new(llm)
 
-
+ses = LLM::Session.new(llm)
+ses.talk "Hello world!"
+ses.talk "Adios."
+ses.tracer.spans.each { |span| pp span }
+```
 
-The
-
-
-
-
-
+The llm.rb library also supports export through the OpenTelemetry Protocol (OTLP).
+OTLP is a standard protocol for exporting telemetry data, and it is supported by
+multiple observability tools. By default the export is batched in the background,
+and happens automatically but short lived scripts might need to
+[explicitly flush](https://0x1eef.github.io/x/llm.rb/LLM/Tracer/Telemetry#flush!-instance_method)
+the exporter before they exit – otherwise some telemetry data could be lost:
 
 ```ruby
-#!/usr/bin/env ruby
-require "llm"
+#!/usr/bin/env ruby
+require "llm"
+require "opentelemetry-exporter-otlp"
 
-
-
-
-
-
-
-prompt = bot.build_prompt do
-  it.user ["Tell me about this image", bot.image_url(image_url)]
-  it.user ["Tell me about this image", bot.local_file(image_path)]
-  it.user ["Tell me about the PDF", bot.local_file(pdf_path)]
-end
-bot.chat(prompt)
-```
+endpoint = "https://api.smith.langchain.com/otel/v1/traces"
+exporter = OpenTelemetry::Exporter::OTLP::Exporter.new(endpoint:)
+
+llm = LLM.openai(key: ENV["KEY"])
+llm.tracer = LLM::Tracer::Telemetry.new(llm, exporter:)
 
-
+ses = LLM::Session.new(llm)
+ses.talk "hello"
+ses.talk "how are you?"
 
-
-
-
-
-
+at_exit do
+  # Helpful for short-lived scripts, otherwise the exporter
+  # might not have time to flush pending telemetry data
+  ses.tracer.flush!
+end
+```
+
+#### Logger
+
+The llm.rb library includes simple logging support through its
+tracer API, and Ruby's standard library ([ruby/logger](https://github.com/ruby/logger)).
+This feature is optional, disabled by default, and it can be useful for debugging and/or
+monitoring requests to LLM providers. The `path` or `io` options can be used to choose
+where logs are written to, and by default it is set to `$stdout`:
 
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
 
-class Player < LLM::Schema
-  property :name, String, "The player's name", required: true
-  property :position, Array[Number], "The player's [x, y] position", required: true
-end
-
 llm = LLM.openai(key: ENV["KEY"])
-
-prompt = bot.build_prompt do
-  it.system "The player's name is Sam and their position is (7, 12)."
-  it.user "Return the player's name and position"
-end
+llm.tracer = LLM::Tracer::Logger.new(llm, io: $stdout)
 
-
-
-
+ses = LLM::Session.new(llm)
+ses.talk "Hello world!"
+ses.talk "Adios."
 ```
 
-
-
-#### Introduction
+#### Thread Safety
 
-
-it is
-
-
-[LLM::
+The llm.rb library is thread-safe and can be used in a multi-threaded
+environments but it is important to keep in mind that the
+[LLM::Provider](https://0x1eef.github.io/x/llm.rb/LLM/Provider.html)
+and
+[LLM::Session](https://0x1eef.github.io/x/llm.rb/LLM/Session.html)
+classes should be instantiated once per thread, and not shared
+between threads. Generally the library tries to avoid global or
+shared state but where it exists reentrant locks are used to
+ensure thread-safety.
 
+### Tools
 
 #### LLM::Function
 
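The Thread Safety paragraph in the hunk above prescribes one provider and one session per thread. A minimal sketch of that pattern, using only the constructors shown in this diff:

```ruby
# Each thread builds its own LLM::Provider and LLM::Session rather
# than sharing them across threads, per the guidance above.
threads = 4.times.map do |i|
  Thread.new do
    llm = LLM.openai(key: ENV["KEY"]) # one provider per thread
    ses = LLM::Session.new(llm)       # one session per thread
    ses.talk "Hello from thread #{i}"
  end
end
threads.each(&:join)
```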
@@ -366,13 +403,7 @@ The following example demonstrates [LLM::Function](https://0x1eef.github.io/x/ll
 and how it can define a local function (which happens to be a tool), and how
 a provider (such as OpenAI) can then detect when we should call the function.
 Its most notable feature is that it can act as a closure and has access to
-its surrounding scope, which can be useful in some situations
-
-The
-[LLM::Bot#functions](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html#functions-instance_method)
-method returns an array of functions that can be called after a `chat` interaction
-if the LLM detects a function should be called. You would then typically call these
-functions and send their results back to the LLM in a subsequent `chat` call:
+its surrounding scope, which can be useful in some situations:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -393,14 +424,14 @@ tool = LLM.function(:system) do |fn|
   end
 end
 
-
-
+ses = LLM::Session.new(llm, tools: [tool])
+ses.talk "Your task is to run shell commands via a tool.", role: :user
 
-
-
+ses.talk "What is the current date?", role: :user
+ses.talk ses.functions.map(&:call) # report return value to the LLM
 
-
-
+ses.talk "What operating system am I running?", role: :user
+ses.talk ses.functions.map(&:call) # report return value to the LLM
 
 ##
 # {stderr: "", stdout: "Thu May 1 10:01:02 UTC 2025"}
@@ -440,14 +471,14 @@ class System < LLM::Tool
 end
 
 llm = LLM.openai(key: ENV["KEY"])
-
-
+ses = LLM::Session.new(llm, tools: [System])
+ses.talk "Your task is to run shell commands via a tool.", role: :user
 
-
-
+ses.talk "What is the current date?", role: :user
+ses.talk ses.functions.map(&:call) # report return value to the LLM
 
-
-
+ses.talk "What operating system am I running?", role: :user
+ses.talk ses.functions.map(&:call) # report return value to the LLM
 
 ##
 # {stderr: "", stdout: "Thu May 1 10:01:02 UTC 2025"}
@@ -470,53 +501,36 @@ it has been uploaded. The file (a specialized instance of
 require "llm"
 
 llm = LLM.openai(key: ENV["KEY"])
-
+ses = LLM::Session.new(llm)
 file = llm.files.create(file: "/tmp/llm-book.pdf")
-res =
-res.
+res = ses.talk ["Tell me about this file", file]
+res.messages.each { |m| puts "[#{m.role}] #{m.content}" }
 ```
 
 ### Prompts
 
 #### Multimodal
 
-
-
-
-
-
-
-provider.
-
-For instance, to specify an image URL, you would use
-`bot.image_url`. For a local file, `bot.local_file`. For an
-already uploaded file managed by the LLM provider's Files API,
-`bot.remote_file`. This approach ensures clarity and allows
-llm.rb to correctly format the input for each provider's
-specific requirements.
+LLMs are great with text, but many can also handle images, audio, video,
+and URLs. With llm.rb you pass those inputs by tagging them with one of
+the following methods. And for multipart prompts, we can pass an array
+where each element is a part of the input. See the example below for
+details, in the meantime here are the methods to know for multimodal
+inputs:
 
-
-
+* `ses.image_url` for an image URL
+* `ses.local_file` for a local file
+* `ses.remote_file` for a file already uploaded via the provider's Files API
 
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
 
 llm = LLM.openai(key: ENV["KEY"])
-
-
-
-
-
-res1 = bot.chat ["Tell me about this image URL", bot.image_url(image_url)]
-res1.choices.each { |m| puts "[#{m.role}] #{m.content}" }
-
-file = llm.files.create(file: pdf_path)
-res2 = bot.chat ["Tell me about this PDF", bot.remote_file(file)]
-res2.choices.each { |m| puts "[#{m.role}] #{m.content}" }
-
-res3 = bot.chat ["Tell me about this image", bot.local_file(image_path)]
-res3.choices.each { |m| puts "[#{m.role}] #{m.content}" }
+ses = LLM::Session.new(llm)
+res = ses.talk ["Tell me about this image URL", ses.image_url(url)]
+res = ses.talk ["Tell me about this PDF", ses.remote_file(file)]
+res = ses.talk ["Tell me about this image", ses.local_file(path)]
 ```
 
 ### Audio
@@ -694,9 +708,9 @@ end
 ##
 # Select a model
 model = llm.models.all.find { |m| m.id == "gpt-3.5-turbo" }
-
-res =
-res.
+ses = LLM::Session.new(llm, model: model.id)
+res = ses.talk "Hello #{model.id} :)"
+res.messages.each { |m| puts "[#{m.role}] #{m.content}" }
 ```
 
 ## Install