llm.rb 0.8.0 → 0.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +64 -51
- data/lib/llm/{chat → bot}/builder.rb +1 -1
- data/lib/llm/bot/conversable.rb +29 -0
- data/lib/llm/{chat → bot}/prompt/completion.rb +14 -4
- data/lib/llm/{chat → bot}/prompt/respond.rb +16 -5
- data/lib/llm/{chat.rb → bot.rb} +48 -66
- data/lib/llm/buffer.rb +7 -3
- data/lib/llm/error.rb +22 -22
- data/lib/llm/event_handler.rb +44 -0
- data/lib/llm/eventstream/event.rb +69 -0
- data/lib/llm/eventstream/parser.rb +88 -0
- data/lib/llm/eventstream.rb +8 -0
- data/lib/llm/function.rb +9 -12
- data/lib/llm/object/builder.rb +8 -9
- data/lib/llm/object/kernel.rb +1 -1
- data/lib/llm/object.rb +7 -1
- data/lib/llm/provider.rb +61 -26
- data/lib/llm/providers/anthropic/error_handler.rb +3 -3
- data/lib/llm/providers/anthropic/models.rb +3 -7
- data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +3 -3
- data/lib/llm/providers/anthropic/response_parser.rb +1 -0
- data/lib/llm/providers/anthropic/stream_parser.rb +66 -0
- data/lib/llm/providers/anthropic.rb +9 -4
- data/lib/llm/providers/gemini/error_handler.rb +4 -4
- data/lib/llm/providers/gemini/files.rb +12 -15
- data/lib/llm/providers/gemini/images.rb +4 -8
- data/lib/llm/providers/gemini/models.rb +3 -7
- data/lib/llm/providers/gemini/stream_parser.rb +69 -0
- data/lib/llm/providers/gemini.rb +19 -11
- data/lib/llm/providers/ollama/error_handler.rb +3 -3
- data/lib/llm/providers/ollama/format/completion_format.rb +1 -1
- data/lib/llm/providers/ollama/models.rb +3 -7
- data/lib/llm/providers/ollama/stream_parser.rb +44 -0
- data/lib/llm/providers/ollama.rb +13 -6
- data/lib/llm/providers/openai/audio.rb +5 -9
- data/lib/llm/providers/openai/error_handler.rb +3 -3
- data/lib/llm/providers/openai/files.rb +12 -15
- data/lib/llm/providers/openai/images.rb +8 -11
- data/lib/llm/providers/openai/models.rb +3 -7
- data/lib/llm/providers/openai/moderations.rb +3 -7
- data/lib/llm/providers/openai/response_parser/completion_parser.rb +3 -3
- data/lib/llm/providers/openai/response_parser.rb +3 -0
- data/lib/llm/providers/openai/responses.rb +10 -12
- data/lib/llm/providers/openai/stream_parser.rb +77 -0
- data/lib/llm/providers/openai.rb +11 -7
- data/lib/llm/providers/voyageai/error_handler.rb +3 -3
- data/lib/llm/providers/voyageai.rb +1 -1
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +4 -2
- data/llm.gemspec +7 -5
- metadata +32 -27
- data/lib/llm/chat/conversable.rb +0 -53
- /data/lib/{json → llm/json}/schema/array.rb +0 -0
- /data/lib/{json → llm/json}/schema/boolean.rb +0 -0
- /data/lib/{json → llm/json}/schema/integer.rb +0 -0
- /data/lib/{json → llm/json}/schema/leaf.rb +0 -0
- /data/lib/{json → llm/json}/schema/null.rb +0 -0
- /data/lib/{json → llm/json}/schema/number.rb +0 -0
- /data/lib/{json → llm/json}/schema/object.rb +0 -0
- /data/lib/{json → llm/json}/schema/string.rb +0 -0
- /data/lib/{json → llm/json}/schema/version.rb +0 -0
- /data/lib/{json → llm/json}/schema.rb +0 -0
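The most visible change in 0.9.x is the rename of LLM::Chat to LLM::Bot (see the {chat → bot} paths above) together with first-class streaming. The sketch below is pieced together from the README changes later in this diff; the key and provider choice are placeholders rather than part of the diff itself:

```ruby
#!/usr/bin/env ruby
# Sketch based on the README diff below (not authoritative):
# the bot class is now LLM::Bot, and chat accepts a :stream option
# that writes chunks to an IO object as they arrive.
require "llm"

llm = LLM.openai(key: ENV["KEY"])   # any supported provider works
bot = LLM::Bot.new(llm)             # renamed from LLM::Chat in 0.8.x
bot.chat(stream: $stdout) do |prompt|
  prompt.system "You are my math assistant."
  prompt.user "Tell me the answer to 5 + 15"
end.to_a
```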
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 20812e4c8cfc7ebee81190054e7483b00b24e0f0f567f630bfb4dc0ac962193f
+  data.tar.gz: 4ff26fa74520b29da3b6aa10331d5cd618e13d4df53cb9f0c5b7a7691f5fb42e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4ed6e3f0426fc0967cb59cc14ae0d0afe552a93e2a29b8d173bf000341b77800ea31ef9088d45a696d353ac0e7f58cc1b3d7f86c3c15d757ae73efb33523b56d
+  data.tar.gz: 136e14863ef92264e270f3b6616a9660298cdc477a2a15db1b89b9360be1ce2be3dde0ee66d97be68d57c5bf68f4f0fc9764657187fbf2ae226f1a5ed6579189
data/README.md
CHANGED
@@ -1,21 +1,22 @@
 ## About
 
 llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
-includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp.
-
-
+includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The
+toolkit includes full support for chat, streaming, tool calling, audio,
+images, files, and JSON Schema generation.
 
 ## Features
 
 #### General
 - ✅ A single unified interface for multiple providers
 - 📦 Zero dependencies outside Ruby's standard library
-- 🚀
+- 🚀 Efficient API design that minimizes the request count
 
 #### Chat, Agents
 - 🧠 Stateless and stateful chat via completions and responses API
 - 🤖 Tool calling and function execution
 - 🗂️ JSON Schema support for structured, validated responses
+- 📡 Streaming support for real-time response updates
 
 #### Media
 - 🗣️ Text-to-speech, transcription, and translation
@@ -31,17 +32,17 @@ tool calling, audio, images, files, and JSON Schema generation.
 
 <details>
 <summary><b>1. Tools: "system" function</b></summary>
-<img src="share/llm-shell/examples/toolcalls.gif">
+<img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/toolcalls.gif">
 </details>
 
 <details>
 <summary><b>2. Files: import at runtime</b></summary>
-<img src="share/llm-shell/examples/files-runtime.gif">
+<img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-runtime.gif">
 </details>
 
 <details>
 <summary><b>3. Files: import at boot time</b></summary>
-<img src="share/llm-shell/examples/files-boottime.gif">
+<img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-boottime.gif">
 </details>
 
 ## Examples
@@ -60,7 +61,7 @@ using an API key (if required) and an optional set of configuration options via
 require "llm"
 
 ##
-#
+# remote providers
 llm = LLM.openai(key: "yourapikey")
 llm = LLM.gemini(key: "yourapikey")
 llm = LLM.anthropic(key: "yourapikey")
@@ -79,24 +80,24 @@ llm = LLM.llamacpp(key: nil)
 
 > This example uses the stateless chat completions API that all
 > providers support. A similar example for OpenAI's stateful
-> responses API is available in the [docs/](docs/OPENAI.md)
+> responses API is available in the [docs/](docs/OPENAI.md#responses)
 > directory.
 
-The following example
-[LLM::
-
-sent to the provider
-
-
-
-
+The following example creates an instance of
+[LLM::Bot](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html)
+by entering into a conversation where messages are buffered and
+sent to the provider on-demand. This is the default behavior
+because it can reduce the number of requests sent to a provider,
+and avoids unneccessary requests until an attempt to iterate over
+[LLM::Bot#messages](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html#messages-instance_method)
+is made:
 
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
 
 llm = LLM.openai(key: ENV["KEY"])
-bot = LLM::
+bot = LLM::Bot.new(llm)
 msgs = bot.chat do |prompt|
   prompt.system File.read("./share/llm/prompts/system.txt")
   prompt.user "Tell me the answer to 5 + 15"
@@ -106,21 +107,38 @@ end
 
 # At this point, we execute a single request
 msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
+```
 
-
-
-
-
-
-#
-
-
-
-
-
-
-
-
+#### Streaming
+
+> There Is More Than One Way To Do It (TIMTOWTDI) when you are
+> using llm.rb – and this is especially true when it
+> comes to streaming. See the streaming documentation in
+> [docs/](docs/STREAMING.md#flexibility) for more details.
+
+The following example streams the messages in a conversation
+as they are generated in real-time. This feature can be useful
+in case you want to see the contents of a message as it is
+generated, or in case you want to avoid potential read timeouts
+during the generation of a response.
+
+The `stream` option can be set to an IO object, or the value `true`
+to enable streaming – and at the end of the request, `bot.chat`
+returns the same response as the non-streaming version which allows
+you to process a response in the same way:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(key: ENV["KEY"])
+bot = LLM::Bot.new(llm)
+bot.chat(stream: $stdout) do |prompt|
+  prompt.system "You are my math assistant."
+  prompt.user "Tell me the answer to 5 + 15"
+  prompt.user "Tell me the answer to (5 + 15) * 2"
+  prompt.user "Tell me the answer to ((5 + 15) * 2) / 10"
+end.to_a
 ```
 
 ### Schema
@@ -130,12 +148,7 @@ msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
 All LLM providers except Anthropic and DeepSeek allow a client to describe
 the structure of a response that a LLM emits according to a schema that is
 described by JSON. The schema lets a client describe what JSON object (or value)
-an LLM should emit, and the LLM will abide by the schema
-See also: [JSON Schema website](https://json-schema.org/overview/what-is-jsonschema).
-We will use the
-[llmrb/json-schema](https://github.com/llmrb/json-schema)
-library for the sake of the examples – the interface is designed so you
-could drop in any other library in its place:
+an LLM should emit, and the LLM will abide by the schema:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -145,14 +158,14 @@ require "llm"
 # Objects
 llm = LLM.openai(key: ENV["KEY"])
 schema = llm.schema.object(answer: llm.schema.integer.required)
-bot = LLM::
+bot = LLM::Bot.new(llm, schema:)
 bot.chat "Does the earth orbit the sun?", role: :user
 bot.messages.find(&:assistant?).content! # => {probability: 1}
 
 ##
 # Enums
 schema = llm.schema.object(fruit: llm.schema.string.enum("Apple", "Orange", "Pineapple"))
-bot = LLM::
+bot = LLM::Bot.new(llm, schema:)
 bot.chat "Your favorite fruit is Pineapple", role: :system
 bot.chat "What fruit is your favorite?", role: :user
 bot.messages.find(&:assistant?).content! # => {fruit: "Pineapple"}
@@ -160,7 +173,7 @@ bot.messages.find(&:assistant?).content! # => {fruit: "Pineapple"}
 ##
 # Arrays
 schema = llm.schema.object(answers: llm.schema.array(llm.schema.integer.required))
-bot = LLM::
+bot = LLM::Bot.new(llm, schema:)
 bot.chat "Answer all of my questions", role: :system
 bot.chat "Tell me the answer to ((5 + 5) / 2)", role: :user
 bot.chat "Tell me the answer to ((5 + 5) / 2) * 2", role: :user
@@ -172,14 +185,14 @@ bot.messages.find(&:assistant?).content! # => {answers: [5, 10, 11]}
 
 #### Functions
 
-
-
-
-
-
+All providers support a powerful feature known as tool calling, and although
+it is a little complex to understand at first, it can be powerful for building
+agents. The following example demonstrates how we can define a local function
+(which happens to be a tool), and a provider (such as OpenAI) can then detect
+when we should call the function.
 
 The
-[LLM::
+[LLM::Bot#functions](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html#functions-instance_method)
 method returns an array of functions that can be called after sending a message and
 it will only be populated if the LLM detects a function should be called. Each function
 corresponds to an element in the "tools" array. The array is emptied after a function call,
@@ -208,7 +221,7 @@ tool = LLM.function(:system) do |fn|
   end
 end
 
-bot = LLM::
+bot = LLM::Bot.new(llm, tools: [tool])
 bot.chat "Your task is to run shell commands via a tool.", role: :system
 
 bot.chat "What is the current date?", role: :user
@@ -367,7 +380,7 @@ can be given to the chat method:
 require "llm"
 
 llm = LLM.openai(key: ENV["KEY"])
-bot = LLM::
+bot = LLM::Bot.new(llm)
 file = llm.files.create(file: "/documents/openbsd_is_awesome.pdf")
 bot.chat(file)
 bot.chat("What is this file about?")
@@ -398,7 +411,7 @@ to a prompt:
 require "llm"
 
 llm = LLM.openai(key: ENV["KEY"])
-bot = LLM::
+bot = LLM::Bot.new(llm)
 
 bot.chat [URI("https://example.com/path/to/image.png"), "Describe the image in the link"]
 bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
@@ -469,7 +482,7 @@ end
 ##
 # Select a model
 model = llm.models.all.find { |m| m.id == "gpt-3.5-turbo" }
-bot = LLM::
+bot = LLM::Bot.new(llm, model:)
 bot.chat "Hello #{model.id} :)"
 bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
 ```
data/lib/llm/bot/conversable.rb
ADDED
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+class LLM::Bot
+  ##
+  # @private
+  module Conversable
+    private
+
+    ##
+    # Queues a response to be sent to the provider.
+    # @param [String] prompt The prompt
+    # @param [Hash] params
+    # @return [void]
+    def async_response(prompt, params = {})
+      role = params.delete(:role)
+      @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
+    end
+
+    ##
+    # Queues a completion to be sent to the provider.
+    # @param [String] prompt The prompt
+    # @param [Hash] params
+    # @return [void]
+    def async_completion(prompt, params = {})
+      role = params.delete(:role)
+      @messages.push [LLM::Message.new(role, prompt), @params.merge(params), :complete]
+    end
+  end
+end
data/lib/llm/{chat → bot}/prompt/completion.rb
RENAMED
@@ -1,20 +1,30 @@
 # frozen_string_literal: true
 
-module LLM::
-  class Completion < Struct.new(:bot)
+module LLM::Bot::Prompt
+  class Completion < Struct.new(:bot, :defaults)
+    ##
+    # @param [LLM::Bot] bot
+    # @param [Hash] defaults
+    # @return [LLM::Bot::Prompt::Completion]
+    def initialize(bot, defaults)
+      super(bot, defaults || {})
+    end
+
     ##
     # @param [String] prompt
     # @param [Hash] params (see LLM::Provider#complete)
-    # @return [LLM::
+    # @return [LLM::Bot]
     def system(prompt, params = {})
+      params = defaults.merge(params)
       bot.chat prompt, params.merge(role: :system)
     end
 
     ##
     # @param [String] prompt
     # @param [Hash] params (see LLM::Provider#complete)
-    # @return [LLM::
+    # @return [LLM::Bot]
     def user(prompt, params = {})
+      params = defaults.merge(params)
       bot.chat prompt, params.merge(role: :user)
     end
   end
data/lib/llm/{chat → bot}/prompt/respond.rb
RENAMED
@@ -1,28 +1,39 @@
 # frozen_string_literal: true
 
-module LLM::
-  class Respond < Struct.new(:bot)
+module LLM::Bot::Prompt
+  class Respond < Struct.new(:bot, :defaults)
+    ##
+    # @param [LLM::Bot] bot
+    # @param [Hash] defaults
+    # @return [LLM::Bot::Prompt::Completion]
+    def initialize(bot, defaults)
+      super(bot, defaults || {})
+    end
+
     ##
     # @param [String] prompt
     # @param [Hash] params (see LLM::Provider#complete)
-    # @return [LLM::
+    # @return [LLM::Bot]
     def system(prompt, params = {})
+      params = defaults.merge(params)
       bot.respond prompt, params.merge(role: :system)
     end
 
     ##
     # @param [String] prompt
     # @param [Hash] params (see LLM::Provider#complete)
-    # @return [LLM::
+    # @return [LLM::Bot]
     def developer(prompt, params = {})
+      params = defaults.merge(params)
       bot.respond prompt, params.merge(role: :developer)
     end
 
     ##
     # @param [String] prompt
     # @param [Hash] params (see LLM::Provider#complete)
-    # @return [LLM::
+    # @return [LLM::Bot]
     def user(prompt, params = {})
+      params = defaults.merge(params)
       bot.respond prompt, params.merge(role: :user)
     end
   end
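Both prompt helpers gained a `defaults` slot: in block form, the params passed to `LLM::Bot#chat` or `#respond` are handed to `Prompt::Completion` / `Prompt::Respond` and merged into every `prompt.system` / `prompt.user` call. A small sketch of that behavior, assuming an `llm` provider object is already configured:

```ruby
# Sketch: block-level params become per-message defaults
# (see the "params = defaults.merge(params)" lines added above).
bot = LLM::Bot.new(llm)
bot.chat(stream: $stdout) do |prompt|
  prompt.system "You are my math assistant."  # inherits stream: $stdout
  prompt.user "Tell me the answer to 5 + 15"  # inherits stream: $stdout
end
```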
data/lib/llm/{chat.rb → bot.rb}
RENAMED
@@ -2,47 +2,48 @@
 
 module LLM
   ##
-  # {LLM::
-  #
-  #
-  #
+  # {LLM::Bot LLM::Bot} provides a bot object that can maintain a
+  # a conversation. A conversation can use the chat completions API
+  # that all LLM providers support or the responses API that a select
+  # few LLM providers support.
   #
-  # @example
+  # @example example #1
   #   #!/usr/bin/env ruby
   #   require "llm"
   #
   #   llm = LLM.openai(ENV["KEY"])
-  #   bot = LLM::
+  #   bot = LLM::Bot.new(llm)
   #   msgs = bot.chat do |prompt|
   #     prompt.system "Answer the following questions."
   #     prompt.user "What is 5 + 7 ?"
   #     prompt.user "Why is the sky blue ?"
   #     prompt.user "Why did the chicken cross the road ?"
   #   end
-  #   msgs.
+  #   msgs.each { print "[#{_1.role}]", _1.content, "\n" }
   #
-  # @example
+  # @example example #2
   #   #!/usr/bin/env ruby
   #   require "llm"
   #
   #   llm = LLM.openai(ENV["KEY"])
-  #   bot = LLM::
+  #   bot = LLM::Bot.new(llm)
   #   bot.chat "Answer the following questions.", role: :system
   #   bot.chat "What is 5 + 7 ?", role: :user
   #   bot.chat "Why is the sky blue ?", role: :user
   #   bot.chat "Why did the chicken cross the road ?", role: :user
-  #   bot.messages.
-  class
-    require_relative "
-    require_relative "
-    require_relative "
-    require_relative "
+  #   bot.messages.each { print "[#{_1.role}]", _1.content, "\n" }
+  class Bot
+    require_relative "bot/prompt/completion"
+    require_relative "bot/prompt/respond"
+    require_relative "bot/conversable"
+    require_relative "bot/builder"
 
     include Conversable
     include Builder
 
     ##
-    #
+    # Returns an Enumerable for the messages in a conversation
+    # @return [LLM::Buffer<LLM::Message>]
     attr_reader :messages
 
     ##
@@ -58,72 +59,68 @@ module LLM
     def initialize(provider, params = {})
       @provider = provider
       @params = {model: provider.default_model, schema: nil}.compact.merge!(params)
-      @
-      @messages = [].extend(Array)
+      @messages = LLM::Buffer.new(provider)
     end
 
     ##
     # Maintain a conversation via the chat completions API
-    # @
-    #
-    #
-    #
-    #
+    # @overload def chat(prompt, params = {})
+    #   @param prompt (see LLM::Provider#complete)
+    #   @param params The params
+    #   @return [LLM::Bot]
+    #     Returns self
+    # @overload def chat(prompt, params, &block)
+    #   @param prompt (see LLM::Provider#complete)
+    #   @param params The params
+    #   @yield prompt Yields a prompt
+    #   @return [LLM::Buffer]
+    #     Returns messages
     def chat(prompt = nil, params = {})
       if block_given?
-
+        params = prompt
+        yield Prompt::Completion.new(self, params)
         messages
       elsif prompt.nil?
         raise ArgumentError, "wrong number of arguments (given 0, expected 1)"
       else
         params = {role: :user}.merge!(params)
-        tap {
+        tap { async_completion(prompt, params) }
       end
     end
 
     ##
     # Maintain a conversation via the responses API
-    # @
-    #
-    #
-    #
-    #
+    # @overload def respond(prompt, params = {})
+    #   @param prompt (see LLM::Provider#complete)
+    #   @param params The params
+    #   @return [LLM::Bot]
+    #     Returns self
+    # @overload def respond(prompt, params, &block)
+    #   @note Not all LLM providers support this API
+    #   @param prompt (see LLM::Provider#complete)
+    #   @param params The params
+    #   @yield prompt Yields a prompt
+    #   @return [LLM::Buffer]
+    #     Returns messages
     def respond(prompt = nil, params = {})
       if block_given?
-
+        params = prompt
+        yield Prompt::Respond.new(self, params)
         messages
       elsif prompt.nil?
         raise ArgumentError, "wrong number of arguments (given 0, expected 1)"
       else
         params = {role: :user}.merge!(params)
-        tap {
+        tap { async_response(prompt, params) }
       end
     end
 
-    ##
-    # Enables lazy mode for the conversation.
-    # @return [LLM::Chat]
-    def lazy
-      tap do
-        next if lazy?
-        @lazy = true
-        @messages = LLM::Buffer.new(@provider)
-      end
-    end
-
-    ##
-    # @return [Boolean]
-    # Returns true if the conversation is lazy
-    def lazy?
-      @lazy
-    end
-
     ##
     # @return [String]
     def inspect
       "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
         "@provider=#{@provider.class}, @params=#{@params.inspect}, " \
-        "@messages=#{@messages.inspect}
+        "@messages=#{@messages.inspect}>"
     end
 
     ##
@@ -135,20 +132,5 @@ module LLM
         .flat_map(&:functions)
         .select(&:pending?)
     end
-
-    private
-
-    ##
-    # @private
-    module Array
-      def find(...)
-        reverse_each.find(...)
-      end
-
-      def unread
-        reject(&:read?)
-      end
-    end
-    private_constant :Array
   end
 end
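With `#lazy` and `#lazy?` removed, every `LLM::Bot` now buffers messages in an `LLM::Buffer` and only performs a request when the buffer is read. A sketch of the resulting flow, again assuming a configured `llm` object:

```ruby
# Sketch: buffering is now the only mode (no .lazy call required).
bot = LLM::Bot.new(llm)
bot.chat "What is 5 + 7 ?", role: :user   # queued, no HTTP request yet
bot.messages.each do |message|            # iteration drains the buffer
  print "[#{message.role}] ", message.content, "\n"
end
```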
data/lib/llm/buffer.rb
CHANGED
@@ -23,9 +23,13 @@ module LLM
     # Yields each message in the conversation thread
     # @raise (see LLM::Provider#complete)
     # @return [void]
-    def each
-
-
+    def each(...)
+      if block_given?
+        empty! unless @pending.empty?
+        @completed.each { yield(_1) }
+      else
+        enum_for(:each, ...)
+      end
     end
 
     ##
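Since `LLM::Buffer#each` now returns an `Enumerator` when called without a block, messages can be consumed incrementally or chained through `Enumerable` methods. A sketch, assuming `bot` from the previous example:

```ruby
# Sketch: block-less #each returns an Enumerator (via enum_for), so the
# pending messages are only flushed when the enumerator is consumed.
enum = bot.messages.each
enum.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
```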
data/lib/llm/error.rb
CHANGED
@@ -8,34 +8,34 @@ module LLM
       block_given? ? yield(self) : nil
       super
     end
+  end
 
+  ##
+  # The superclass of all HTTP protocol errors
+  class ResponseError < Error
     ##
-    #
-
-
-    # @return [Net::HTTPResponse]
-    # Returns the response associated with an error
-    attr_accessor :response
+    # @return [Net::HTTPResponse]
+    # Returns the response associated with an error
+    attr_accessor :response
 
-
-
-  end
+    def message
+      [super, response.body].join("\n")
     end
+  end
 
-
-
-
+  ##
+  # HTTPUnauthorized
+  UnauthorizedError = Class.new(ResponseError)
 
-
-
-
+  ##
+  # HTTPTooManyRequests
+  RateLimitError = Class.new(ResponseError)
 
-
-
-
+  ##
+  # When an given an input object that is not understood
+  FormatError = Class.new(Error)
 
-
-
-
-  end
+  ##
+  # When given a prompt object that is not understood
+  PromptError = Class.new(FormatError)
 end