llm.rb 0.6.2 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +47 -77
- data/lib/llm/chat/builder.rb +23 -0
- data/lib/llm/chat/conversable.rb +33 -0
- data/lib/llm/chat/prompt/completion.rb +21 -0
- data/lib/llm/chat/prompt/respond.rb +29 -0
- data/lib/llm/chat.rb +49 -57
- data/lib/llm/function.rb +68 -5
- data/lib/llm/version.rb +1 -1
- metadata +6 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a7175b2fe81c74e007dd41db2e0fe1bd3f3639bed375af25da0f8ed2778ea2b5
+  data.tar.gz: 1c752e61cb288fed412b342b66279e7dfdb0337705e33af3e2a1deb1d408b8d0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9af91ba96e63b2c43c7f6a836db5fed48da19ba1f2bdbb48894cc71fb940eca261930fec6b8fd9a6f641fc9c69402de9cdd6fd7f9cad9a7035b69ddad04de65a
+  data.tar.gz: b3f8af44ebb2522aba58621d19a19424805471f6f9a6f8ec834ebe417e6aa08e9f0233ef19a12695e829d44ea61b164ae57697379aebdf24450b829fdc04ac25
data/README.md
CHANGED
@@ -2,7 +2,7 @@
 
 llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
 includes OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple
-and composable – with full support for chat, tool calling, audio,
+and composable – with full support for chat, tool calling, audio,
 images, files, and JSON Schema generation.
 
 ## Features
@@ -27,6 +27,23 @@ images, files, and JSON Schema generation.
 #### Embeddings
 - 🧮 Text embeddings and vector support
 
+## Demos
+
+<details>
+<summary><b>1. Tools: "system" function</b></summary>
+<img src="share/llm-shell/examples/toolcalls.gif">
+</details>
+
+<details>
+<summary><b>2. Files: import at boot time</b></summary>
+<img src="share/llm-shell/examples/files-boottime.gif">
+</details>
+
+<details>
+<summary><b>3. Files: import at runtime</b></summary>
+<img src="share/llm-shell/examples/files-runtime.gif">
+</details>
+
 ## Examples
 
 ### Providers
@@ -54,6 +71,11 @@ llm = LLM.voyageai(key: "yourapikey")
 
 #### Completions
 
+> This example uses the stateless chat completions API that all
+> providers support. A similar example for OpenAI's stateful
+> responses API is available in the [docs/](docs/OPENAI_RESPONSES.md)
+> directory.
+
 The following example enables lazy mode for a
 [LLM::Chat](https://0x1eef.github.io/x/llm.rb/LLM/Chat.html)
 object by entering into a "lazy" conversation where messages are buffered and
@@ -67,13 +89,15 @@ all LLM providers support:
 #!/usr/bin/env ruby
 require "llm"
 
-llm
-bot
-bot.chat
-
-
-
-
+llm = LLM.openai(key: ENV["KEY"])
+bot = LLM::Chat.new(llm).lazy
+msgs = bot.chat do |prompt|
+  prompt.system File.read("./share/llm/prompts/system.txt")
+  prompt.user "Tell me the answer to 5 + 15"
+  prompt.user "Tell me the answer to (5 + 15) * 2"
+  prompt.user "Tell me the answer to ((5 + 15) * 2) / 10"
+end
+msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
 
 ##
 # [system] You are my math assistant.
@@ -91,46 +115,6 @@ bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
 # The answer to ((5 + 15) * 2) / 10 is 4.
 ```
 
-#### Responses
-
-The responses API is a recent addition
-[provided by OpenAI](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses)
-that lets a client store message state on their servers – and in turn
-a client can avoid maintaining state manually as well as avoid sending
-the entire conversation with each request that is made. Although it is
-primarily supported by OpenAI at the moment, we might see other providers
-support it in the future. For now
-[llm.rb supports the responses API](https://0x1eef.github.io/x/llm.rb/LLM/OpenAI/Responses.html)
-for the OpenAI provider:
-
-```ruby
-#!/usr/bin/env ruby
-require "llm"
-
-llm = LLM.openai(key: ENV["KEY"])
-bot = LLM::Chat.new(llm).lazy
-bot.respond File.read("./share/llm/prompts/system.txt"), role: :developer
-bot.respond "Tell me the answer to 5 + 15", role: :user
-bot.respond "Tell me the answer to (5 + 15) * 2", role: :user
-bot.respond "Tell me the answer to ((5 + 15) * 2) / 10", role: :user
-bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
-
-##
-# [developer] You are my math assistant.
-# I will provide you with (simple) equations.
-# You will provide answers in the format "The answer to <equation> is <answer>".
-# I will provide you a set of messages. Reply to all of them.
-# A message is considered unanswered if there is no corresponding assistant response.
-#
-# [user] Tell me the answer to 5 + 15
-# [user] Tell me the answer to (5 + 15) * 2
-# [user] Tell me the answer to ((5 + 15) * 2) / 10
-#
-# [assistant] The answer to 5 + 15 is 20.
-# The answer to (5 + 15) * 2 is 40.
-# The answer to ((5 + 15) * 2) / 10 is 4.
-```
-
 ### Schema
 
 #### Structured
@@ -139,13 +123,9 @@ All LLM providers except Anthropic allow a client to describe the structure
 of a response that a LLM emits according to a schema that is described by JSON.
 The schema lets a client describe what JSON object (or value) an LLM should emit,
 and the LLM will abide by the schema. See also: [JSON Schema website](https://json-schema.org/overview/what-is-jsonschema).
-
-True to the llm.rb spirit of doing one thing well, and solving problems through the
-composition of objects, the generation of a schema is delegated to another object
-who is responsible for and an expert in the generation of JSON schemas. We will use
-the
+We will use the
 [llmrb/json-schema](https://github.com/llmrb/json-schema)
-library for the sake of the examples – the interface is designed so you
+library for the sake of the examples – the interface is designed so you
 could drop in any other library in its place:
 
 ```ruby
@@ -153,11 +133,11 @@ could drop in any other library in its place:
 require "llm"
 
 llm = LLM.openai(key: ENV["KEY"])
-schema = llm.schema.object({
+schema = llm.schema.object({fruit: llm.schema.string.enum("Apple", "Orange", "Pineapple")})
 bot = LLM::Chat.new(llm, schema:)
-bot.chat "
-bot.chat "What
-bot.messages.find(&:assistant?).content! # => {
+bot.chat "Your favorite fruit is Pineapple", role: :system
+bot.chat "What fruit is your favorite?", role: :user
+bot.messages.find(&:assistant?).content! # => {fruit: "Pineapple"}
 
 schema = llm.schema.object({answer: llm.schema.integer.required})
 bot = LLM::Chat.new(llm, schema:)
@@ -228,8 +208,7 @@ Some but not all providers implement audio generation capabilities that
 can create speech from text, transcribe audio to text, or translate
 audio to text (usually English). The following example uses the OpenAI provider
 to create an audio file from a text prompt. The audio is then moved to
-`${HOME}/hello.mp3` as the final step
-documentation for more information on how to use the audio generation API:
+`${HOME}/hello.mp3` as the final step:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -245,8 +224,7 @@ IO.copy_stream res.audio, File.join(Dir.home, "hello.mp3")
 The following example transcribes an audio file to text. The audio file
 (`${HOME}/hello.mp3`) was theoretically created in the previous example,
 and the result is printed to the console. The example uses the OpenAI
-provider to transcribe the audio file
-documentation for more information on how to use the audio transcription API:
+provider to transcribe the audio file:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -264,9 +242,7 @@ print res.text, "\n" # => "Hello world."
 The following example translates an audio file to text. In this example
 the audio file (`${HOME}/bomdia.mp3`) is theoretically in Portuguese,
 and it is translated to English. The example uses the OpenAI provider,
-and at the time of writing, it can only translate to English
-consult the provider's documentation for more information on how to use
-the audio translation API:
+and at the time of writing, it can only translate to English:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -308,11 +284,7 @@ end
 The following example is focused on editing a local image with the aid
 of a prompt. The image (`/images/cat.png`) is returned to us with the cat
 now wearing a hat. The image is then moved to `${HOME}/catwithhat.png` as
-the final step
-
-Results and quality may vary, consider prompt adjustments if the results
-are not as expected, and consult the provider's documentation
-for more information on how to use the image editing API:
+the final step:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -336,8 +308,7 @@ end
 The following example is focused on creating variations of a local image.
 The image (`/images/cat.png`) is returned to us with five different variations.
 The images are then moved to `${HOME}/catvariation0.png`, `${HOME}/catvariation1.png`
-and so on as the final step
-on how to use the image variations API:
+and so on as the final step:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -458,10 +429,8 @@ print res.embeddings[0].size, "\n"
 Almost all LLM providers provide a models endpoint that allows a client to
 query the list of models that are available to use. The list is dynamic,
 maintained by LLM providers, and it is independent of a specific llm.rb release.
-True to the llm.rb spirit of small, composable objects that cooperate with
-each other, a
 [LLM::Model](https://0x1eef.github.io/x/llm.rb/LLM/Model.html)
-
+objects can be used instead of a string that describes a model name (although
 either works). Let's take a look at an example:
 
 ```ruby
@@ -497,7 +466,8 @@ over or doesn't cover at all. The API reference is available at
 
 The [docs/](docs/) directory contains some additional documentation that
 didn't quite make it into the README. It covers the design guidelines that
-the library follows,
+the library follows, some strategies for memory management, and other
+provider-specific features.
 
 ## See also
 
@@ -506,7 +476,7 @@ the library follows, and some strategies for memory management.
 An extensible, developer-oriented command line utility that is powered by
 llm.rb and serves as a demonstration of the library's capabilities. The
 [demo](https://github.com/llmrb/llm-shell#demos) section has a number of GIF
-previews might be especially interesting
+previews might be especially interesting.
 
 ## Install
 
data/lib/llm/chat/builder.rb
ADDED
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+class LLM::Chat
+  ##
+  # @private
+  module Builder
+    private
+
+    def create_response!(prompt, params)
+      @provider.responses.create(
+        prompt,
+        @params.merge(params.merge(@response ? {previous_response_id: @response.id} : {}))
+      )
+    end
+
+    def create_completion!(prompt, params)
+      @provider.complete(
+        prompt,
+        @params.merge(params.merge(messages:))
+      )
+    end
+  end
+end
data/lib/llm/chat/conversable.rb
ADDED
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+class LLM::Chat
+  ##
+  # @private
+  module Conversable
+    private
+
+    def async_response(prompt, params = {})
+      role = params.delete(:role)
+      @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
+    end
+
+    def sync_response(prompt, params = {})
+      role = params[:role]
+      @response = create_response!(prompt, params)
+      @messages.concat [Message.new(role, prompt), @response.outputs[0]]
+    end
+
+    def async_completion(prompt, params = {})
+      role = params.delete(:role)
+      @messages.push [LLM::Message.new(role, prompt), @params.merge(params), :complete]
+    end
+
+    def sync_completion(prompt, params = {})
+      role = params[:role]
+      completion = create_completion!(prompt, params)
+      @messages.concat [Message.new(role, prompt), completion.choices[0]]
+    end
+
+    include LLM
+  end
+end
data/lib/llm/chat/prompt/completion.rb
ADDED
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+module LLM::Chat::Prompt
+  class Completion < Struct.new(:bot)
+    ##
+    # @param [String] prompt
+    # @param [Hash] params (see LLM::Provider#complete)
+    # @return [LLM::Chat]
+    def system(prompt, params = {})
+      bot.chat prompt, params.merge(role: :system)
+    end
+
+    ##
+    # @param [String] prompt
+    # @param [Hash] params (see LLM::Provider#complete)
+    # @return [LLM::Chat]
+    def user(prompt, params = {})
+      bot.chat prompt, params.merge(role: :user)
+    end
+  end
+end
data/lib/llm/chat/prompt/respond.rb
ADDED
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+module LLM::Chat::Prompt
+  class Respond < Struct.new(:bot)
+    ##
+    # @param [String] prompt
+    # @param [Hash] params (see LLM::Provider#complete)
+    # @return [LLM::Chat]
+    def system(prompt, params = {})
+      bot.respond prompt, params.merge(role: :system)
+    end
+
+    ##
+    # @param [String] prompt
+    # @param [Hash] params (see LLM::Provider#complete)
+    # @return [LLM::Chat]
+    def developer(prompt, params = {})
+      bot.respond prompt, params.merge(role: :developer)
+    end
+
+    ##
+    # @param [String] prompt
+    # @param [Hash] params (see LLM::Provider#complete)
+    # @return [LLM::Chat]
+    def user(prompt, params = {})
+      bot.respond prompt, params.merge(role: :user)
+    end
+  end
+end
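
Taken together, the four new files split LLM::Chat's internals into small roles: `Prompt::Completion` exposes `system`/`user` helpers (and `Prompt::Respond` adds `developer`) that call back into the bot with the role merged in, `Conversable` either buffers a message (lazy mode) or sends it right away, and `Builder` performs the underlying provider request. Below is a minimal sketch of how these pieces surface through the public API, assuming the OpenAI provider and the `ENV["KEY"]` convention used in the README above; the prompt strings are illustrative, not taken from the gem.

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
bot = LLM::Chat.new(llm).lazy

# The block form yields a Prompt::Completion struct; each helper below
# forwards to bot.chat with the matching role merged into the params.
msgs = bot.chat do |prompt|
  prompt.system "You are a terse assistant."
  prompt.user "Summarize the llm.rb gem in one sentence."
end

# In lazy mode the messages are only buffered (async_completion); the
# request happens when the buffer is drained, e.g. by iterating over it.
msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
```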
data/lib/llm/chat.rb
CHANGED
@@ -11,14 +11,36 @@ module LLM
   #   #!/usr/bin/env ruby
   #   require "llm"
   #
+  #   llm = LLM.openai(ENV["KEY"])
+  #   bot = LLM::Chat.new(llm).lazy
+  #   msgs = bot.chat do |prompt|
+  #     prompt.system "Answer the following questions."
+  #     prompt.user "What is 5 + 7 ?"
+  #     prompt.user "Why is the sky blue ?"
+  #     prompt.user "Why did the chicken cross the road ?"
+  #   end
+  #   msgs.map { print "[#{_1.role}]", _1.content, "\n" }
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
   #   llm = LLM.openai(ENV["KEY"])
   #   bot = LLM::Chat.new(llm).lazy
-  #   bot.chat
-  #   bot.chat
-  #   bot.chat
-  #   bot.chat
+  #   bot.chat "Answer the following questions.", role: :system
+  #   bot.chat "What is 5 + 7 ?", role: :user
+  #   bot.chat "Why is the sky blue ?", role: :user
+  #   bot.chat "Why did the chicken cross the road ?", role: :user
   #   bot.messages.map { print "[#{_1.role}]", _1.content, "\n" }
   class Chat
+    require_relative "chat/prompt/completion"
+    require_relative "chat/prompt/respond"
+    require_relative "chat/conversable"
+    require_relative "chat/builder"
+
+    include Conversable
+    include Builder
+
     ##
     # @return [Array<LLM::Message>]
     attr_reader :messages
@@ -44,18 +66,18 @@ module LLM
     # Maintain a conversation via the chat completions API
     # @param prompt (see LLM::Provider#complete)
     # @param params (see LLM::Provider#complete)
-    # @
-
-
-
-
-
-
+    # @yieldparam [LLM::Chat::CompletionPrompt] prompt Yields a prompt
+    # @return [LLM::Chat, Array<LLM::Message>, LLM::Buffer]
+    #  Returns self unless given a block, otherwise returns messages
+    def chat(prompt = nil, params = {})
+      if block_given?
+        yield Prompt::Completion.new(self)
+        messages
+      elsif prompt.nil?
+        raise ArgumentError, "wrong number of arguments (given 0, expected 1)"
       else
-
-
-        @messages.concat [Message.new(role, prompt), completion.choices[0]]
-        self
+        params = {role: :user}.merge!(params)
+        tap { lazy? ? async_completion(prompt, params) : sync_completion(prompt, params) }
       end
     end
 
@@ -64,36 +86,20 @@ module LLM
     # @note Not all LLM providers support this API
     # @param prompt (see LLM::Provider#complete)
     # @param params (see LLM::Provider#complete)
-    # @return [LLM::Chat]
-
-
-      if
-
-
-
+    # @return [LLM::Chat, Array<LLM::Message>, LLM::Buffer]
+    #  Returns self unless given a block, otherwise returns messages
+    def respond(prompt = nil, params = {})
+      if block_given?
+        yield Prompt::Respond.new(self)
+        messages
+      elsif prompt.nil?
+        raise ArgumentError, "wrong number of arguments (given 0, expected 1)"
       else
-
-
-        @messages.concat [Message.new(role, prompt), @response.outputs[0]]
-        self
+        params = {role: :user}.merge!(params)
+        tap { lazy? ? async_response(prompt, params) : sync_response(prompt, params) }
       end
     end
 
-    ##
-    # The last message in the conversation.
-    # @note
-    #  The `read_response` and `recent_message` methods are aliases of
-    #  the `last_message` method, and you can choose the name that best
-    #  fits your context or code style.
-    # @param [#to_s] role
-    #  The role of the last message.
-    # @return [LLM::Message]
-    def last_message(role: @provider.assistant_role)
-      messages.reverse_each.find { _1.role == role.to_s }
-    end
-    alias_method :recent_message, :last_message
-    alias_method :read_response, :last_message
-
     ##
     # Enables lazy mode for the conversation.
     # @return [LLM::Chat]
@@ -121,13 +127,13 @@ module LLM
     end
 
     ##
-    # Returns an array of functions that
+    # Returns an array of functions that can be called
     # @return [Array<LLM::Function>]
     def functions
       messages
         .select(&:assistant?)
         .flat_map(&:functions)
-        .
+        .select(&:pending?)
     end
 
     private
@@ -144,19 +150,5 @@ module LLM
       end
     end
     private_constant :Array
-
-    def respond!(prompt, params)
-      @provider.responses.create(
-        prompt,
-        @params.merge(params.merge(@response ? {previous_response_id: @response.id} : {}))
-      )
-    end
-
-    def complete!(prompt, params)
-      @provider.complete(
-        prompt,
-        @params.merge(params.merge(messages:))
-      )
-    end
   end
 end
data/lib/llm/function.rb
CHANGED
@@ -1,5 +1,37 @@
 # frozen_string_literal: true
 
+##
+# The {LLM::Function LLM::Function} class represents a function that can
+# be called by an LLM. It comes in two forms: a Proc-based function,
+# or a Class-based function.
+#
+# @example
+#   # Proc-based
+#   LLM.function(:system) do |fn|
+#     fn.description "Runs system commands, emits their output"
+#     fn.params do |schema|
+#       schema.object(command: schema.string.required)
+#     end
+#     fn.define do |params|
+#       Kernel.system(params.command)
+#     end
+#   end
+#
+# @example
+#   # Class-based
+#   class System
+#     def call(params)
+#       Kernel.system(params.command)
+#     end
+#   end
+#
+#   LLM.function(:system) do |fn|
+#     fn.description "Runs system commands, emits their output"
+#     fn.params do |schema|
+#       schema.object(command: schema.string.required)
+#     end
+#     fn.register(System)
+#   end
 class LLM::Function
   class Return < Struct.new(:id, :value)
   end
@@ -25,6 +57,8 @@ class LLM::Function
   def initialize(name, &b)
     @name = name
     @schema = JSON::Schema.new
+    @called = false
+    @cancelled = false
     yield(self)
   end
 
@@ -45,21 +79,36 @@ class LLM::Function
 
   ##
   # Set the function implementation
-  # @param [Proc] b The function implementation
+  # @param [Proc, Class] b The function implementation
   # @return [void]
-  def define(&b)
-    @runner = b
+  def define(klass = nil, &b)
+    @runner = klass || b
   end
+  alias_method :register, :define
 
   ##
   # Call the function
-  # @return [
+  # @return [LLM::Function::Return] The result of the function call
   def call
-    Return.new id, @runner.call(arguments)
+    Return.new id, (Class === @runner) ? @runner.new.call(arguments) : @runner.call(arguments)
   ensure
     @called = true
   end
 
+  ##
+  # Returns a value that communicates that the function call was cancelled
+  # @example
+  #   llm = LLM.openai(key: ENV["KEY"])
+  #   bot = LLM::Chat.new(llm, tools: [fn1, fn2])
+  #   bot.chat "I want to run the functions"
+  #   bot.chat bot.functions.map(&:cancel)
+  # @return [LLM::Function::Return]
+  def cancel(reason: "function call cancelled")
+    Return.new(id, {cancelled: true, reason:})
+  ensure
+    @cancelled = true
+  end
+
   ##
   # Returns true when a function has been called
   # @return [Boolean]
@@ -67,6 +116,20 @@ class LLM::Function
     @called
   end
 
+  ##
+  # Returns true when a function has been cancelled
+  # @return [Boolean]
+  def cancelled?
+    @cancelled
+  end
+
+  ##
+  # Returns true when a function has neither been called nor cancelled
+  # @return [Boolean]
+  def pending?
+    !@called && !@cancelled
+  end
+
   ##
   # @return [Hash]
   def format(provider)
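
The LLM::Function changes above add a simple lifecycle: a function starts out pending and stops being pending once it is either called or cancelled, which is what LLM::Chat#functions now relies on to return only unhandled tool calls. They also add a class-based form, where `register` (an alias of `define`) accepts a class whose instances respond to `call`. The sketch below shows that flow, assuming the OpenAI provider and `ENV["KEY"]` as elsewhere in this release; the command-runner tool mirrors the doc comments above, and the prompt text is illustrative.

```ruby
#!/usr/bin/env ruby
require "llm"

# Class-based tool: LLM::Function#call instantiates the class and
# invokes #call with the parsed arguments.
class System
  def call(params)
    Kernel.system(params.command)
  end
end

fn = LLM.function(:system) do |f|
  f.description "Runs system commands, emits their output"
  f.params { |schema| schema.object(command: schema.string.required) }
  f.register(System) # new alias of #define
end

llm = LLM.openai(key: ENV["KEY"])
bot = LLM::Chat.new(llm, tools: [fn])
bot.chat "Run the uptime command", role: :user

# bot.functions now returns only pending calls (neither called nor cancelled).
# Run them and feed the results back to the bot ...
bot.chat bot.functions.map(&:call)
# ... or decline them instead with: bot.chat bot.functions.map(&:cancel)
```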
data/lib/llm/version.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.6.2
+  version: 0.7.0
 platform: ruby
 authors:
 - Antar Azri
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-05-
+date: 2025-05-09 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: webmock
@@ -176,6 +176,10 @@ files:
 - lib/llm.rb
 - lib/llm/buffer.rb
 - lib/llm/chat.rb
+- lib/llm/chat/builder.rb
+- lib/llm/chat/conversable.rb
+- lib/llm/chat/prompt/completion.rb
+- lib/llm/chat/prompt/respond.rb
 - lib/llm/core_ext/ostruct.rb
 - lib/llm/error.rb
 - lib/llm/file.rb