llm.rb 0.10.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +0 -0
- data/README.md +81 -122
- data/lib/llm/bot/builder.rb +2 -2
- data/lib/llm/bot/conversable.rb +0 -0
- data/lib/llm/bot/prompt/completion.rb +0 -0
- data/lib/llm/bot/prompt/respond.rb +0 -0
- data/lib/llm/bot.rb +9 -11
- data/lib/llm/buffer.rb +0 -0
- data/lib/llm/error.rb +0 -0
- data/lib/llm/event_handler.rb +0 -0
- data/lib/llm/eventstream/event.rb +0 -0
- data/lib/llm/eventstream/parser.rb +0 -0
- data/lib/llm/eventstream.rb +0 -0
- data/lib/llm/file.rb +18 -9
- data/lib/llm/function.rb +18 -13
- data/lib/llm/json/schema/array.rb +0 -0
- data/lib/llm/json/schema/boolean.rb +0 -0
- data/lib/llm/json/schema/integer.rb +0 -0
- data/lib/llm/json/schema/leaf.rb +0 -0
- data/lib/llm/json/schema/null.rb +0 -0
- data/lib/llm/json/schema/number.rb +0 -0
- data/lib/llm/json/schema/object.rb +0 -0
- data/lib/llm/json/schema/string.rb +0 -0
- data/lib/llm/json/schema/version.rb +0 -0
- data/lib/llm/json/schema.rb +0 -0
- data/lib/llm/message.rb +8 -0
- data/lib/llm/mime.rb +0 -0
- data/lib/llm/multipart.rb +0 -0
- data/lib/llm/object/builder.rb +0 -0
- data/lib/llm/object/kernel.rb +8 -0
- data/lib/llm/object.rb +7 -0
- data/lib/llm/provider.rb +9 -11
- data/lib/llm/providers/anthropic/error_handler.rb +0 -0
- data/lib/llm/providers/anthropic/format/completion_format.rb +10 -5
- data/lib/llm/providers/anthropic/format.rb +0 -0
- data/lib/llm/providers/anthropic/models.rb +2 -7
- data/lib/llm/providers/anthropic/response/completion.rb +39 -0
- data/lib/llm/providers/anthropic/stream_parser.rb +0 -0
- data/lib/llm/providers/anthropic.rb +3 -24
- data/lib/llm/providers/deepseek/format/completion_format.rb +3 -3
- data/lib/llm/providers/deepseek/format.rb +0 -0
- data/lib/llm/providers/deepseek.rb +6 -0
- data/lib/llm/providers/gemini/audio.rb +6 -10
- data/lib/llm/providers/gemini/error_handler.rb +0 -0
- data/lib/llm/providers/gemini/files.rb +11 -14
- data/lib/llm/providers/gemini/format/completion_format.rb +20 -5
- data/lib/llm/providers/gemini/format.rb +0 -0
- data/lib/llm/providers/gemini/images.rb +8 -7
- data/lib/llm/providers/gemini/models.rb +2 -8
- data/lib/llm/providers/gemini/{response_parser/completion_parser.rb → response/completion.rb} +10 -24
- data/lib/llm/providers/gemini/response/embedding.rb +8 -0
- data/lib/llm/providers/gemini/response/file.rb +11 -0
- data/lib/llm/providers/gemini/response/image.rb +26 -0
- data/lib/llm/providers/gemini/stream_parser.rb +0 -0
- data/lib/llm/providers/gemini.rb +5 -8
- data/lib/llm/providers/llamacpp.rb +6 -0
- data/lib/llm/providers/ollama/error_handler.rb +0 -0
- data/lib/llm/providers/ollama/format/completion_format.rb +8 -5
- data/lib/llm/providers/ollama/format.rb +0 -0
- data/lib/llm/providers/ollama/models.rb +2 -8
- data/lib/llm/providers/ollama/response/completion.rb +28 -0
- data/lib/llm/providers/ollama/response/embedding.rb +10 -0
- data/lib/llm/providers/ollama/stream_parser.rb +0 -0
- data/lib/llm/providers/ollama.rb +5 -8
- data/lib/llm/providers/openai/audio.rb +6 -6
- data/lib/llm/providers/openai/error_handler.rb +0 -0
- data/lib/llm/providers/openai/files.rb +14 -15
- data/lib/llm/providers/openai/format/completion_format.rb +11 -4
- data/lib/llm/providers/openai/format/moderation_format.rb +2 -2
- data/lib/llm/providers/openai/format/respond_format.rb +7 -4
- data/lib/llm/providers/openai/format.rb +0 -0
- data/lib/llm/providers/openai/images.rb +8 -7
- data/lib/llm/providers/openai/models.rb +2 -7
- data/lib/llm/providers/openai/moderations.rb +9 -11
- data/lib/llm/providers/openai/response/audio.rb +7 -0
- data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb} +15 -31
- data/lib/llm/providers/openai/response/embedding.rb +9 -0
- data/lib/llm/providers/openai/response/file.rb +7 -0
- data/lib/llm/providers/openai/response/image.rb +16 -0
- data/lib/llm/providers/openai/response/moderations.rb +34 -0
- data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb} +7 -28
- data/lib/llm/providers/openai/responses.rb +10 -9
- data/lib/llm/providers/openai/stream_parser.rb +0 -0
- data/lib/llm/providers/openai/vector_stores.rb +106 -0
- data/lib/llm/providers/openai.rb +14 -8
- data/lib/llm/response.rb +37 -13
- data/lib/llm/utils.rb +0 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +2 -12
- data/llm.gemspec +1 -1
- metadata +18 -29
- data/lib/llm/model.rb +0 -32
- data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +0 -51
- data/lib/llm/providers/anthropic/response_parser.rb +0 -24
- data/lib/llm/providers/gemini/response_parser.rb +0 -46
- data/lib/llm/providers/ollama/response_parser/completion_parser.rb +0 -42
- data/lib/llm/providers/ollama/response_parser.rb +0 -30
- data/lib/llm/providers/openai/response_parser.rb +0 -65
- data/lib/llm/providers/voyageai/error_handler.rb +0 -32
- data/lib/llm/providers/voyageai/response_parser.rb +0 -13
- data/lib/llm/providers/voyageai.rb +0 -44
- data/lib/llm/response/audio.rb +0 -13
- data/lib/llm/response/audio_transcription.rb +0 -14
- data/lib/llm/response/audio_translation.rb +0 -14
- data/lib/llm/response/completion.rb +0 -51
- data/lib/llm/response/download_file.rb +0 -15
- data/lib/llm/response/embedding.rb +0 -23
- data/lib/llm/response/file.rb +0 -42
- data/lib/llm/response/filelist.rb +0 -18
- data/lib/llm/response/image.rb +0 -29
- data/lib/llm/response/modellist.rb +0 -18
- data/lib/llm/response/moderationlist/moderation.rb +0 -47
- data/lib/llm/response/moderationlist.rb +0 -51
- data/lib/llm/response/respond.rb +0 -56
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ce6a4e56ce25f397337733009249edb930cade1641bd84d7939ef0d349c6e92a
+  data.tar.gz: b9506c346dd19af655f342e9b6a64aed25cd3f4ecffb6bb32cb36e1221e71c6a
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a4a53b8e5aeaae2cd26f5303366ce1974ecfdb4176a76c5aa01f46cb9997d10a402de782b0b14f97bf3c65dc4c200b976c1279a037ce29ae44ab64fd1799ffd9
+  data.tar.gz: 96128a756147d8cad13e85f91247fdb43b6bf5c469348318c27f27518670c61b6466546d65bf1f553b086b2a42b15a5225e0ac5355b680ba46214b32bc20d754
data/LICENSE
CHANGED
File without changes
data/README.md
CHANGED
@@ -10,7 +10,7 @@ images, files, and JSON Schema generation.
 #### General
 - ✅ A single unified interface for multiple providers
 - 📦 Zero dependencies outside Ruby's standard library
-- 🚀 Efficient API design that minimizes the
+- 🚀 Efficient API design that minimizes the number of requests made

 #### Chat, Agents
 - 🧠 Stateless and stateful chat via completions and responses API
@@ -27,29 +27,7 @@ images, files, and JSON Schema generation.
 #### Miscellaneous
 - 🧮 Text embeddings and vector support
 - 🔌 Retrieve models dynamically for introspection and selection
-
-## Demos
-
-> The
-> [llmrb/llm-shell](https://github.com/llmrb/llm-shell)
-> project is built with llm.rb and its demos have been
-> included to provide a better idea of what llm.rb
-> is capable of.
-
-<details>
-<summary><b>1. Tools: "system" function</b></summary>
-<img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/toolcalls.gif">
-</details>
-
-<details>
-<summary><b>2. Files: import at runtime</b></summary>
-<img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-runtime.gif">
-</details>
-
-<details>
-<summary><b>3. Files: import at boot time</b></summary>
-<img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-boottime.gif">
-</details>
+- 🧱 Includes support for OpenAI's responses, moderations, and vector stores APIs

 ## Examples

@@ -72,7 +50,6 @@ llm = LLM.openai(key: "yourapikey")
 llm = LLM.gemini(key: "yourapikey")
 llm = LLM.anthropic(key: "yourapikey")
 llm = LLM.deepseek(key: "yourapikey")
-llm = LLM.voyageai(key: "yourapikey")

 ##
 # local providers
@@ -91,24 +68,24 @@ llm = LLM.llamacpp(key: nil)

 The following example creates an instance of
 [LLM::Bot](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html)
-
-sent to the provider on-demand.
-
-and avoids unneccessary requests until an attempt to iterate over
+and enters into a conversation where messages are buffered and
+sent to the provider on-demand. The implementation is designed to
+buffer messages by waiting until an attempt to iterate over
 [LLM::Bot#messages](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html#messages-instance_method)
-is made:
+is made before sending a request to the LLM:

 ```ruby
 #!/usr/bin/env ruby
 require "llm"

-llm = LLM.openai(key: ENV["
+llm = LLM.openai(key: ENV["OPENAI_SECRET"])
 bot = LLM::Bot.new(llm)
+url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
 msgs = bot.chat do |prompt|
-  prompt.system
-  prompt.user "Tell me
-  prompt.user "Tell me
-  prompt.user "
+  prompt.system "Your task is to answer all user queries"
+  prompt.user ["Tell me about this URL", URI(url)]
+  prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "r")]
+  prompt.user "Is the URL and PDF similar to each other?"
 end

 # At this point, we execute a single request
@@ -123,27 +100,24 @@ msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
 > [docs/](docs/STREAMING.md#scopes) for more details.

 The following example streams the messages in a conversation
-as they are generated in real-time.
-
-
-response
-
-The `stream` option can be set to an IO object, or the value `true`
-to enable streaming – and at the end of the request, `bot.chat`
-returns the same response as the non-streaming version which allows
-you to process a response in the same way:
+as they are generated in real-time. The `stream` option can
+be set to an IO object, or the value `true` to enable streaming
+– and at the end of the request, `bot.chat` returns the
+same response as the non-streaming version which allows you
+to process a response in the same way:

 ```ruby
 #!/usr/bin/env ruby
 require "llm"

-llm = LLM.openai(key: ENV["
+llm = LLM.openai(key: ENV["OPENAI_SECRET"])
 bot = LLM::Bot.new(llm)
+url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
 bot.chat(stream: $stdout) do |prompt|
-  prompt.system "
-  prompt.user "Tell me
-  prompt.user "Tell me
-  prompt.user "
+  prompt.system "Your task is to answer all user queries"
+  prompt.user ["Tell me about this URL", URI(url)]
+  prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "r")]
+  prompt.user "Is the URL and PDF similar to each other?"
 end.to_a
 ```

@@ -202,11 +176,7 @@ The
 method returns an array of functions that can be called after sending a message and
 it will only be populated if the LLM detects a function should be called. Each function
 corresponds to an element in the "tools" array. The array is emptied after a function call,
-and potentially repopulated on the next message
-
-The following example defines an agent that can run system commands based on natural language,
-and it is only intended to be a fun demo of tool calling - it is not recommended to run
-arbitrary commands from a LLM without sanitizing the input first :) Without further ado:
+and potentially repopulated on the next message:

 ```ruby
 #!/usr/bin/env ruby
@@ -218,10 +188,10 @@ tool = LLM.function(:system) do |fn|
   fn.params do |schema|
     schema.object(command: schema.string.required)
   end
-  fn.define do |
+  fn.define do |command:|
     ro, wo = IO.pipe
     re, we = IO.pipe
-    Process.wait Process.spawn(
+    Process.wait Process.spawn(command, out: wo, err: we)
     [wo,we].each(&:close)
     {stderr: re.read, stdout: ro.read}
   end
@@ -241,6 +211,60 @@ bot.chat bot.functions.map(&:call) # report return value to the LLM
 # {stderr: "", stdout: "FreeBSD"}
 ```

+### Files
+
+#### Create
+
+The OpenAI and Gemini providers provide a Files API where a client can upload files
+that can be referenced from a prompt, and with other APIs as well. The following
+example uses the OpenAI provider to describe the contents of a PDF file after
+it has been uploaded. The file (a specialized instance of
+[LLM::Response](https://0x1eef.github.io/x/llm.rb/LLM/Response.html)
+) is given as part of a prompt that is understood by llm.rb:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(key: ENV["KEY"])
+bot = LLM::Bot.new(llm)
+file = llm.files.create(file: "/books/goodread.pdf")
+bot.chat(["Tell me about this file", file])
+bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
+```
+
+### Prompts
+
+#### Multimodal
+
+It is generally a given that an LLM will understand text but they can also
+understand and generate other types of media as well: audio, images, video,
+and even URLs. The object given as a prompt in llm.rb can be a string to
+represent text, a URI object to represent a URL, an LLM::Response object
+to represent a file stored with the LLM, and so on. These are objects you
+can throw at the prompt and have them be understood automatically.
+
+A prompt can also have multiple parts, and in that case, an array is given
+as a prompt. Each element is considered to part of the prompt:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(key: ENV["KEY"])
+bot = LLM::Bot.new(llm)
+
+bot.chat ["Tell me about this URL", URI("https://example.com/path/to/image.png")]
+[bot.messages.find(&:assistant?)].each { print "[#{_1.role}] ", _1.content, "\n" }
+
+file = llm.files.create(file: "/books/goodread.pdf")
+bot.chat ["Tell me about this PDF", file]
+[bot.messages.find(&:assistant?)].each { print "[#{_1.role}] ", _1.content, "\n" }
+
+bot.chat ["Tell me about this image", File.open("/images/nemothefish.png", "r")]
+[bot.messages.find(&:assistant?)].each { print "[#{_1.role}] ", _1.content, "\n" }
+```
+
 ### Audio

 #### Speech
@@ -368,71 +392,6 @@ res.urls.each.with_index do |url, index|
 end
 ```

-### Files
-
-#### Create
-
-Most LLM providers provide a Files API where you can upload files
-that can be referenced from a prompt and llm.rb has first-class support
-for this feature. The following example uses the OpenAI provider to describe
-the contents of a PDF file after it has been uploaded. The file (an instance
-of [LLM::Response::File](https://0x1eef.github.io/x/llm.rb/LLM/Response/File.html))
-is passed directly to the chat method, and generally any object a prompt supports
-can be given to the chat method:
-
-
-```ruby
-#!/usr/bin/env ruby
-require "llm"
-
-llm = LLM.openai(key: ENV["KEY"])
-bot = LLM::Bot.new(llm)
-file = llm.files.create(file: "/documents/openbsd_is_awesome.pdf")
-bot.chat(file)
-bot.chat("What is this file about?")
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
-##
-# [assistant] This file is about OpenBSD, a free and open-source Unix-like operating system
-# based on the Berkeley Software Distribution (BSD). It is known for its
-# emphasis on security, code correctness, and code simplicity. The file
-# contains information about the features, installation, and usage of OpenBSD.
-```
-
-### Prompts
-
-#### Multimodal
-
-Generally all providers accept text prompts but some providers can
-also understand URLs, and various file types (eg images, audio, video,
-etc). The llm.rb approach to multimodal prompts is to let you pass `URI`
-objects to describe links, `LLM::File` | `LLM::Response::File` objects
-to describe files, `String` objects to describe text blobs, or an array
-of the aforementioned objects to describe multiple objects in a single
-prompt. Each object is a first class citizen that can be passed directly
-to a prompt:
-
-```ruby
-#!/usr/bin/env ruby
-require "llm"
-
-llm = LLM.openai(key: ENV["KEY"])
-bot = LLM::Bot.new(llm)
-
-bot.chat [URI("https://example.com/path/to/image.png"), "Describe the image in the link"]
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
-file = llm.files.create(file: "/documents/openbsd_is_awesome.pdf")
-bot.chat [file, "What is this file about?"]
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
-bot.chat [LLM.File("/images/puffy.png"), "What is this image about?"]
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
-bot.chat [LLM.File("/images/beastie.png"), "What is this image about?"]
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-```
-
 ### Embeddings

 #### Text
@@ -458,7 +417,7 @@ print res.embeddings.size, "\n"
 print res.embeddings[0].size, "\n"

 ##
-# LLM::Response
+# LLM::Response
 # 3
 # 1536
 ```
data/lib/llm/bot/builder.rb
CHANGED
@@ -9,7 +9,7 @@ class LLM::Bot
   ##
   # @param [String] prompt The prompt
   # @param [Hash] params
-  # @return [LLM::Response
+  # @return [LLM::Response]
   def create_response!(prompt, params)
     @provider.responses.create(
       prompt,
@@ -20,7 +20,7 @@ class LLM::Bot
   ##
   # @param [String] prompt The prompt
   # @param [Hash] params
-  # @return [LLM::Response
+  # @return [LLM::Response]
   def create_completion!(prompt, params)
     @provider.complete(
       prompt,
data/lib/llm/bot/conversable.rb
CHANGED
File without changes

data/lib/llm/bot/prompt/completion.rb
CHANGED
File without changes

data/lib/llm/bot/prompt/respond.rb
CHANGED
File without changes
data/lib/llm/bot.rb
CHANGED
@@ -2,10 +2,10 @@

 module LLM
   ##
-  # {LLM::Bot LLM::Bot} provides
+  # {LLM::Bot LLM::Bot} provides an object that can maintain a
   # a conversation. A conversation can use the chat completions API
-  # that all LLM providers support or the responses API that
-  #
+  # that all LLM providers support or the responses API that currently
+  # only OpenAI supports.
   #
   # @example example #1
   #   #!/usr/bin/env ruby
@@ -14,10 +14,9 @@ module LLM
   #   llm = LLM.openai(ENV["KEY"])
   #   bot = LLM::Bot.new(llm)
   #   msgs = bot.chat do |prompt|
-  #     prompt.
-  #     prompt.user "
-  #     prompt.user "
-  #     prompt.user "Why did the chicken cross the road ?"
+  #     prompt.user "What programming language should I learn next ?"
+  #     prompt.user "Can you recommend a good book ?"
+  #     prompt.user "Can you suggest a fun project to practice ?"
   #   end
   #   msgs.each { print "[#{_1.role}]", _1.content, "\n" }
   #
@@ -27,10 +26,9 @@ module LLM
   #
   #   llm = LLM.openai(ENV["KEY"])
   #   bot = LLM::Bot.new(llm)
-  #   bot.chat "
-  #   bot.chat "
-  #   bot.chat "
-  #   bot.chat "Why did the chicken cross the road ?", role: :user
+  #   bot.chat "What programming language should I learn next ?", role: :user
+  #   bot.chat "Can you recommend a good book ?", role: :user
+  #   bot.chat "Can you suggest a fun project to practice ?", role: :user
   #   bot.messages.each { print "[#{_1.role}]", _1.content, "\n" }
   class Bot
     require_relative "bot/prompt/completion"
data/lib/llm/buffer.rb
CHANGED
File without changes

data/lib/llm/error.rb
CHANGED
File without changes

data/lib/llm/event_handler.rb
CHANGED
File without changes

data/lib/llm/eventstream/event.rb
CHANGED
File without changes

data/lib/llm/eventstream/parser.rb
CHANGED
File without changes

data/lib/llm/eventstream.rb
CHANGED
File without changes
data/lib/llm/file.rb
CHANGED
@@ -29,12 +29,19 @@ class LLM::File
   end

   ##
-  # @return [
+  # @return [Boolean]
   # Returns true if the file is an image
   def image?
     mime_type.start_with?("image/")
   end

+  ##
+  # @return [Boolean]
+  # Returns true if the file is a PDF document
+  def pdf?
+    mime_type == "application/pdf"
+  end
+
   ##
   # @return [Integer]
   # Returns the size of the file in bytes
@@ -68,14 +75,16 @@ class LLM::File
 end

 ##
-# @param [String]
-# The path to
+# @param [String, File, LLM::Response] obj
+# The path to the file, or an existing file reference
 # @return [LLM::File]
-def LLM.File(
-  case
-  when
-
-
-
+def LLM.File(obj)
+  case obj
+  when File
+    obj.close unless obj.closed?
+    LLM.File(obj.path)
+  when LLM::File, LLM::Response then obj
+  when String then LLM::File.new(obj)
+  else raise TypeError, "don't know how to handle #{obj.class} objects"
   end
 end
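
The reworked LLM.File coercion above accepts a path, an open File handle (which is closed and re-read by path), or an object that is already a file reference. A small usage sketch of the behavior shown in the hunk (the fixture path is illustrative):

```ruby
require "llm"

file = LLM.File("spec/fixtures/documents/freebsd.sysctl.pdf")
file.pdf?   #=> true, via the new #pdf? predicate
file.image? #=> false

# An open File handle is coerced through its path; as a side
# effect the handle is closed before the path is re-read.
handle = File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "r")
LLM.File(handle) #=> LLM::File
handle.closed?   #=> true

LLM.File(42) # raises TypeError
```
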
data/lib/llm/function.rb
CHANGED
@@ -10,15 +10,15 @@
 #   fn.params do |schema|
 #     schema.object(command: schema.string.required)
 #   end
-#   fn.define do |
-#     {success: Kernel.system(
+#   fn.define do |command:|
+#     {success: Kernel.system(command)}
 #   end
 # end
 #
 # @example example #2
 #   class System
-#     def call(
-#       {success: Kernel.system(
+#     def call(command:)
+#       {success: Kernel.system(command)}
 #     end
 #   end
 #
@@ -33,6 +33,11 @@ class LLM::Function
   class Return < Struct.new(:id, :value)
   end

+  ##
+  # Returns the function ID
+  # @return [String, nil]
+  attr_accessor :id
+
   ##
   # Returns the function name
   # @return [String]
@@ -43,11 +48,6 @@ class LLM::Function
   # @return [Array, nil]
   attr_accessor :arguments

-  ##
-  # Returns the function ID
-  # @return [String, nil]
-  attr_accessor :id
-
   ##
   # @param [String] name The function name
   # @yieldparam [LLM::Function] self The function object
@@ -61,10 +61,14 @@ class LLM::Function

   ##
   # Set the function description
-  # @param [String]
+  # @param [String] desc The function description
   # @return [void]
-  def description(
-
+  def description(desc = nil)
+    if desc
+      @description = desc
+    else
+      @description
+    end
   end

   ##
@@ -87,7 +91,8 @@ class LLM::Function
   # Call the function
   # @return [LLM::Function::Return] The result of the function call
   def call
-
+    runner = ((Class === @runner) ? @runner.new : @runner)
+    Return.new(id, runner.call(**arguments))
   ensure
     @called = true
   end
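
Two changes stand out above: tool arguments are now splatted into the runner as keyword arguments, and the runner may be a class, which #call instantiates on demand (the Class === @runner check). A sketch assembled from the updated doc comments:

```ruby
require "llm"

# Block-based runner: arguments arrive as keyword arguments.
tool = LLM.function(:system) do |fn|
  fn.description "Run a shell command" # the new getter/setter form
  fn.params do |schema|
    schema.object(command: schema.string.required)
  end
  fn.define { |command:| {success: Kernel.system(command)} }
end

# Class-based runner: LLM::Function#call builds an instance and
# invokes #call with the same keyword arguments.
class System
  def call(command:)
    {success: Kernel.system(command)}
  end
end
```
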
data/lib/llm/json/schema/array.rb
CHANGED
File without changes

data/lib/llm/json/schema/boolean.rb
CHANGED
File without changes

data/lib/llm/json/schema/integer.rb
CHANGED
File without changes
data/lib/llm/json/schema/leaf.rb
CHANGED
File without changes

data/lib/llm/json/schema/null.rb
CHANGED
File without changes

data/lib/llm/json/schema/number.rb
CHANGED
File without changes

data/lib/llm/json/schema/object.rb
CHANGED
File without changes

data/lib/llm/json/schema/string.rb
CHANGED
File without changes

data/lib/llm/json/schema/version.rb
CHANGED
File without changes

data/lib/llm/json/schema.rb
CHANGED
File without changes
data/lib/llm/message.rb
CHANGED
@@ -109,6 +109,14 @@ module LLM
       tool_calls.any?
     end

+    ##
+    # @return [Boolean]
+    # Returns true when the message represents a function return
+    def tool_return?
+      LLM::Function::Return === content ||
+        [*content].grep(LLM::Function::Return).any?
+    end
+
     ##
     # Returns a string representation of the message
     # @return [String]
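
The new LLM::Message#tool_return? predicate complements #tool_call?: it is true when the content is a single LLM::Function::Return, or an array containing one. A hedged sketch of where it fits in the tool-call flow from the README:

```ruby
# After the LLM requests a tool call and the functions are executed:
bot.chat bot.functions.map(&:call) # report return values to the LLM
bot.messages.any?(&:tool_return?)  #=> true, for the reporting message
```
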
data/lib/llm/mime.rb
CHANGED
File without changes

data/lib/llm/multipart.rb
CHANGED
File without changes

data/lib/llm/object/builder.rb
CHANGED
File without changes
data/lib/llm/object/kernel.rb
CHANGED
@@ -12,6 +12,10 @@ class LLM::Object
       ::Kernel.instance_method(:instance_of?).bind(self).call(...)
     end

+    def extend(...)
+      ::Kernel.instance_method(:extend).bind(self).call(...)
+    end
+
     def method(...)
       ::Kernel.instance_method(:method).bind(self).call(...)
     end
@@ -41,5 +45,9 @@ class LLM::Object
       "#<#{self.class}:0x#{object_id.to_s(16)} properties=#{to_h.inspect}>"
     end
     alias_method :to_s, :inspect
+
+    def pretty_print(q)
+      q.text(inspect)
+    end
   end
 end
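
LLM::Object is a blank-slate object, so Kernel methods must be re-bound by hand; the additions above do that for #extend and hook #pretty_print into the pp machinery so it emits the same compact form as #inspect. A small illustrative sketch (obj stands for any LLM::Object instance, e.g. a parsed API response):

```ruby
require "pp"

pp obj # prints e.g. #<LLM::Object:0x... properties={...}>
```
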
data/lib/llm/object.rb
CHANGED
data/lib/llm/provider.rb
CHANGED
@@ -44,7 +44,7 @@ class LLM::Provider
   #  Other embedding parameters
   # @raise [NotImplementedError]
   #  When the method is not implemented by a subclass
-  # @return [LLM::Response
+  # @return [LLM::Response]
   def embed(input, model: nil, **params)
     raise NotImplementedError
   end
@@ -68,7 +68,7 @@ class LLM::Provider
   # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
   # @raise [NotImplementedError]
   #  When the method is not implemented by a subclass
-  # @return [LLM::Response
+  # @return [LLM::Response]
   def complete(prompt, params = {})
     raise NotImplementedError
   end
@@ -174,6 +174,13 @@ class LLM::Provider
     raise NotImplementedError
   end

+  ##
+  # @return [LLM::OpenAI::VectorStore]
+  #  Returns an interface to the vector stores API
+  def vector_stores
+    raise NotImplementedError
+  end
+
   ##
   # @return [String]
   #  Returns the role of the assistant in the conversation.
@@ -222,15 +229,6 @@ class LLM::Provider
     raise NotImplementedError
   end

-  ##
-  # @return [Module]
-  #  Returns the module responsible for parsing a successful LLM response
-  # @raise [NotImplementedError]
-  #  (see LLM::Provider#complete)
-  def response_parser
-    raise NotImplementedError
-  end
-
   ##
   # @return [Class]
   #  Returns the class responsible for handling an unsuccessful LLM response
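
Two themes of the 0.11.0 refactor are visible in this file: the per-provider response_parser hook is removed (superseded by the LLM::Response subclasses added under providers/*/response/), and a vector_stores entry point is introduced, which only the OpenAI provider implements in this release (see data/lib/llm/providers/openai/vector_stores.rb in the file list). A hedged sketch:

```ruby
require "llm"

# Implemented by the OpenAI provider in 0.11.0:
llm = LLM.openai(key: ENV["OPENAI_SECRET"])
llm.vector_stores #=> interface to OpenAI's vector stores API

# Providers that don't override the hook inherit the base behavior:
LLM.ollama(key: nil).vector_stores # raises NotImplementedError
```
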
data/lib/llm/providers/anthropic/error_handler.rb
CHANGED
File without changes