llm.rb 4.0.0 → 4.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +65 -45
- data/lib/llm/agent.rb +214 -0
- data/lib/llm/bot.rb +1 -1
- data/lib/llm/builder.rb +22 -4
- data/lib/llm/error.rb +4 -0
- data/lib/llm/provider.rb +18 -0
- data/lib/llm/providers/gemini.rb +27 -6
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +1 -0
- metadata +2 -1
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: cc70b8eb2d7ce82b3959d2b7dc795a89511a1962ed443a5344bb00ef55863033
|
|
4
|
+
data.tar.gz: a9245348fccc085710ae28097b9ce9c0ec9ce8e8f5ea4e23f97a9bde5fc50fee
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: b1a0e67e1d938792da4cf52ff6b05dba568b71c77d28ef18c11510c7f0c37b21d5514f659ae6997193774755aede0bd5af4a1239247fc396b8a4815258723eb6
|
|
7
|
+
data.tar.gz: 87bfee8769ba983ffccef6bfb276922501e8cc68b2b4f2be6857408739b7307403c120de7db1b83c612a6af61e30860366419abb790662feb679ddf6f1234102
|
data/README.md
CHANGED
|
@@ -13,13 +13,15 @@ tool calling, audio, images, files, and structured outputs.
|
|
|
13
13
|
|
|
14
14
|
#### REPL
|
|
15
15
|
|
|
16
|
-
|
|
16
|
+
The [LLM::Bot](https://0x1eef.github.io/x/llm.rb/LLM/LLM/Bot.html) class provides
|
|
17
|
+
a session with an LLM provider that maintains conversation history and context across
|
|
18
|
+
multiple requests. The following example implements a simple REPL loop:
|
|
17
19
|
|
|
18
20
|
```ruby
|
|
19
21
|
#!/usr/bin/env ruby
|
|
20
22
|
require "llm"
|
|
21
23
|
|
|
22
|
-
llm = LLM.openai(key: ENV
|
|
24
|
+
llm = LLM.openai(key: ENV["KEY"])
|
|
23
25
|
bot = LLM::Bot.new(llm, stream: $stdout)
|
|
24
26
|
loop do
|
|
25
27
|
print "> "
|
|
@@ -28,34 +30,12 @@ loop do
|
|
|
28
30
|
end
|
|
29
31
|
```
|
|
30
32
|
|
|
31
|
-
#### Prompts
|
|
32
|
-
|
|
33
|
-
> ℹ️ **Tip:** Some providers (such as OpenAI) support `system` and `developer`
|
|
34
|
-
> roles, but the examples in this README stick to `user` roles since they are
|
|
35
|
-
> supported across all providers.
|
|
36
|
-
|
|
37
|
-
A prompt builder that produces a chain of messages that can be sent in one request:
|
|
38
|
-
|
|
39
|
-
```ruby
|
|
40
|
-
#!/usr/bin/env ruby
|
|
41
|
-
require "llm"
|
|
42
|
-
|
|
43
|
-
llm = LLM.openai(key: ENV.fetch("KEY"))
|
|
44
|
-
bot = LLM::Bot.new(llm)
|
|
45
|
-
|
|
46
|
-
prompt = bot.build_prompt do
|
|
47
|
-
it.user "Answer concisely."
|
|
48
|
-
it.user "Was 2024 a leap year?"
|
|
49
|
-
it.user "How many days were in that year?"
|
|
50
|
-
end
|
|
51
|
-
|
|
52
|
-
res = bot.chat(prompt)
|
|
53
|
-
res.choices.each { |m| puts "[#{m.role}] #{m.content}" }
|
|
54
|
-
```
|
|
55
|
-
|
|
56
33
|
#### Schema
|
|
57
34
|
|
|
58
|
-
|
|
35
|
+
The [LLM::Schema](https://0x1eef.github.io/x/llm.rb/LLM/LLM/Schema.html) class provides
|
|
36
|
+
a simple DSL for describing the structure of a response that an LLM emits according
|
|
37
|
+
to a JSON schema. The schema lets a client describe what JSON object an LLM should
|
|
38
|
+
emit, and the LLM will abide by the schema to the best of its ability:
|
|
59
39
|
|
|
60
40
|
```ruby
|
|
61
41
|
#!/usr/bin/env ruby
|
|
@@ -67,20 +47,19 @@ class Estimation < LLM::Schema
|
|
|
67
47
|
property :notes, String, "Short notes", optional: true
|
|
68
48
|
end
|
|
69
49
|
|
|
70
|
-
llm = LLM.openai(key: ENV
|
|
50
|
+
llm = LLM.openai(key: ENV["KEY"])
|
|
71
51
|
bot = LLM::Bot.new(llm, schema: Estimation)
|
|
72
|
-
|
|
73
|
-
res = bot.chat bot.image_url(img.urls.first)
|
|
74
|
-
data = res.choices.find(&:assistant?).content!
|
|
75
|
-
|
|
76
|
-
puts "age: #{data["age"]}"
|
|
77
|
-
puts "confidence: #{data["confidence"]}"
|
|
78
|
-
puts "notes: #{data["notes"]}" if data["notes"]
|
|
52
|
+
bot.chat("Estimate age and confidence for a man in his 30s.")
|
|
79
53
|
```
|
|
80
54
|
|
|
81
55
|
#### Tools
|
|
82
56
|
|
|
83
|
-
|
|
57
|
+
The [LLM::Tool](https://0x1eef.github.io/x/llm.rb/LLM/LLM/Tool.html) class lets you
|
|
58
|
+
define callable tools for the model. Each tool is described to the LLM as a function
|
|
59
|
+
it can invoke to fetch information or perform an action. The model decides when to
|
|
60
|
+
call tools based on the conversation; when it does, llm.rb runs the tool and sends
|
|
61
|
+
the result back on the next request. The following example implements a simple tool
|
|
62
|
+
that runs shell commands:
|
|
84
63
|
|
|
85
64
|
```ruby
|
|
86
65
|
#!/usr/bin/env ruby
|
|
@@ -96,17 +75,57 @@ class System < LLM::Tool
|
|
|
96
75
|
end
|
|
97
76
|
end
|
|
98
77
|
|
|
99
|
-
llm
|
|
100
|
-
bot
|
|
78
|
+
llm = LLM.openai(key: ENV["KEY"])
|
|
79
|
+
bot = LLM::Bot.new(llm, tools: [System])
|
|
80
|
+
bot.chat("Run `date`.")
|
|
81
|
+
bot.chat(bot.functions.map(&:call)) # report return value to the LLM
|
|
82
|
+
```
|
|
101
83
|
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
84
|
+
#### Agents
|
|
85
|
+
|
|
86
|
+
The [LLM::Agent](https://0x1eef.github.io/x/llm.rb/LLM/LLM/Agent.html)
|
|
87
|
+
class provides a class-level DSL for defining reusable, preconfigured
|
|
88
|
+
assistants with defaults for model, tools, schema, and instructions.
|
|
89
|
+
Instructions are injected only on the first request, and unlike
|
|
90
|
+
[LLM::Bot](https://0x1eef.github.io/x/llm.rb/LLM/LLM/Bot.html),
|
|
91
|
+
an [LLM::Agent](https://0x1eef.github.io/x/llm.rb/LLM/LLM/Agent.html)
|
|
92
|
+
will automatically call tools when needed:
|
|
93
|
+
|
|
94
|
+
```ruby
|
|
95
|
+
#!/usr/bin/env ruby
|
|
96
|
+
require "llm"
|
|
97
|
+
|
|
98
|
+
class SystemAdmin < LLM::Agent
|
|
99
|
+
model "gpt-4.1"
|
|
100
|
+
instructions "You are a Linux system admin"
|
|
101
|
+
tools Shell
|
|
102
|
+
schema Result
|
|
105
103
|
end
|
|
106
104
|
|
|
105
|
+
llm = LLM.openai(key: ENV["KEY"])
|
|
106
|
+
agent = SystemAdmin.new(llm)
|
|
107
|
+
res = agent.chat("Run 'date'")
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
#### Prompts
|
|
111
|
+
|
|
112
|
+
The [LLM::Bot#build_prompt](https://0x1eef.github.io/x/llm.rb/LLM/LLM/Bot.html#build_prompt-instance_method)
|
|
113
|
+
method provides a simple DSL for building a chain of messages that
|
|
114
|
+
can be sent in a single request. A conversation with an LLM consists
|
|
115
|
+
of messages that have a role (e.g. system, user) and content:
|
|
116
|
+
|
|
117
|
+
```ruby
|
|
118
|
+
#!/usr/bin/env ruby
|
|
119
|
+
require "llm"
|
|
120
|
+
|
|
121
|
+
llm = LLM.openai(key: ENV["KEY"])
|
|
122
|
+
bot = LLM::Bot.new(llm)
|
|
123
|
+
prompt = bot.build_prompt do
|
|
124
|
+
it.system "Answer concisely."
|
|
125
|
+
it.user "Was 2024 a leap year?"
|
|
126
|
+
it.user "How many days were in that year?"
|
|
127
|
+
end
|
|
107
128
|
bot.chat(prompt)
|
|
108
|
-
bot.chat(bot.functions.map(&:call))
|
|
109
|
-
bot.messages.select(&:assistant?).each { |m| puts "[#{m.role}] #{m.content}" }
|
|
110
129
|
```
|
|
111
130
|
|
|
112
131
|
## Features
|
|
@@ -120,6 +139,7 @@ bot.messages.select(&:assistant?).each { |m| puts "[#{m.role}] #{m.content}" }
|
|
|
120
139
|
#### Chat, Agents
|
|
121
140
|
- 🧠 Stateless + stateful chat (completions + responses)
|
|
122
141
|
- 🤖 Tool calling / function execution
|
|
142
|
+
- 🔁 Agent tool-call auto-execution (bounded)
|
|
123
143
|
- 🗂️ JSON Schema structured output
|
|
124
144
|
- 📡 Streaming responses
|
|
125
145
|
|
|
@@ -320,7 +340,7 @@ end
|
|
|
320
340
|
llm = LLM.openai(key: ENV["KEY"])
|
|
321
341
|
bot = LLM::Bot.new(llm, schema: Player)
|
|
322
342
|
prompt = bot.build_prompt do
|
|
323
|
-
it.
|
|
343
|
+
it.system "The player's name is Sam and their position is (7, 12)."
|
|
324
344
|
it.user "Return the player's name and position"
|
|
325
345
|
end
|
|
326
346
|
|
data/lib/llm/agent.rb
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module LLM
|
|
4
|
+
##
|
|
5
|
+
# {LLM::Agent LLM::Agent} provides a class-level DSL for defining
|
|
6
|
+
# reusable, preconfigured assistants with defaults for model,
|
|
7
|
+
# tools, schema, and instructions.
|
|
8
|
+
#
|
|
9
|
+
# @note
|
|
10
|
+
# Unlike {LLM::Bot LLM::Bot}, this class will automatically run
|
|
11
|
+
# tool calls for you.
|
|
12
|
+
#
|
|
13
|
+
# @note
|
|
14
|
+
# Instructions are injected only on the first request.
|
|
15
|
+
#
|
|
16
|
+
# @note
|
|
17
|
+
# This idea originally came from RubyLLM and was adapted to llm.rb.
|
|
18
|
+
#
|
|
19
|
+
# @example
|
|
20
|
+
# class SystemAdmin < LLM::Agent
|
|
21
|
+
# model "gpt-4.1-nano"
|
|
22
|
+
# instructions "You are a Linux system admin"
|
|
23
|
+
# tools Shell
|
|
24
|
+
# schema Result
|
|
25
|
+
# end
|
|
26
|
+
#
|
|
27
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
|
28
|
+
# agent = SystemAdmin.new(llm)
|
|
29
|
+
# agent.chat("Run 'date'")
|
|
30
|
+
class Agent
|
|
31
|
+
##
|
|
32
|
+
# Set or get the default model
|
|
33
|
+
# @param [String, nil] model
|
|
34
|
+
# The model identifier
|
|
35
|
+
# @return [String, nil]
|
|
36
|
+
# Returns the current model when no argument is provided
|
|
37
|
+
def self.model(model = nil)
|
|
38
|
+
return @model if model.nil?
|
|
39
|
+
@model = model
|
|
40
|
+
end
|
|
41
|
+
|
|
42
|
+
##
|
|
43
|
+
# Set or get the default tools
|
|
44
|
+
# @param [Array<LLM::Function>, nil] tools
|
|
45
|
+
# One or more tools
|
|
46
|
+
# @return [Array<LLM::Function>]
|
|
47
|
+
# Returns the current tools when no argument is provided
|
|
48
|
+
def self.tools(*tools)
|
|
49
|
+
return @tools || [] if tools.empty?
|
|
50
|
+
@tools = tools.flatten
|
|
51
|
+
end
|
|
52
|
+
|
|
53
|
+
##
|
|
54
|
+
# Set or get the default schema
|
|
55
|
+
# @param [#to_json, nil] schema
|
|
56
|
+
# The schema
|
|
57
|
+
# @return [#to_json, nil]
|
|
58
|
+
# Returns the current schema when no argument is provided
|
|
59
|
+
def self.schema(schema = nil)
|
|
60
|
+
return @schema if schema.nil?
|
|
61
|
+
@schema = schema
|
|
62
|
+
end
|
|
63
|
+
|
|
64
|
+
##
|
|
65
|
+
# Set or get the default instructions
|
|
66
|
+
# @param [String, nil] instructions
|
|
67
|
+
# The system instructions
|
|
68
|
+
# @return [String, nil]
|
|
69
|
+
# Returns the current instructions when no argument is provided
|
|
70
|
+
def self.instructions(instructions = nil)
|
|
71
|
+
return @instructions if instructions.nil?
|
|
72
|
+
@instructions = instructions
|
|
73
|
+
end
|
|
74
|
+
|
|
75
|
+
##
|
|
76
|
+
# @param [LLM::Provider] provider
|
|
77
|
+
# A provider
|
|
78
|
+
# @param [Hash] params
|
|
79
|
+
# The parameters to maintain throughout the conversation.
|
|
80
|
+
# Any parameter the provider supports can be included and
|
|
81
|
+
# not only those listed here.
|
|
82
|
+
# @option params [String] :model Defaults to the provider's default model
|
|
83
|
+
# @option params [Array<LLM::Function>, nil] :tools Defaults to nil
|
|
84
|
+
# @option params [#to_json, nil] :schema Defaults to nil
|
|
85
|
+
def initialize(provider, params = {})
|
|
86
|
+
defaults = {model: self.class.model, tools: self.class.tools, schema: self.class.schema}.compact
|
|
87
|
+
@provider = provider
|
|
88
|
+
@bot = LLM::Bot.new(provider, defaults.merge(params))
|
|
89
|
+
@instructions_applied = false
|
|
90
|
+
end
|
|
91
|
+
|
|
92
|
+
##
|
|
93
|
+
# Maintain a conversation via the chat completions API.
|
|
94
|
+
# This method immediately sends a request to the LLM and returns the response.
|
|
95
|
+
#
|
|
96
|
+
# @param prompt (see LLM::Provider#complete)
|
|
97
|
+
# @param [Hash] params The params passed to the provider, including optional :stream, :tools, :schema etc.
|
|
98
|
+
# @option params [Integer] :max_tool_rounds The maximum number of tool call iterations (default 10)
|
|
99
|
+
# @return [LLM::Response] Returns the LLM's response for this turn.
|
|
100
|
+
# @example
|
|
101
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
|
102
|
+
# agent = LLM::Agent.new(llm)
|
|
103
|
+
# response = agent.chat("Hello, what is your name?")
|
|
104
|
+
# puts response.choices[0].content
|
|
105
|
+
def chat(prompt, params = {})
|
|
106
|
+
i, max = 0, Integer(params.delete(:max_tool_rounds) || 10)
|
|
107
|
+
res = @bot.chat(apply_instructions(prompt), params)
|
|
108
|
+
until @bot.functions.empty?
|
|
109
|
+
raise LLM::ToolLoopError, "pending tool calls remain" if i >= max
|
|
110
|
+
res = @bot.chat @bot.functions.map(&:call), params
|
|
111
|
+
i += 1
|
|
112
|
+
end
|
|
113
|
+
@instructions_applied = true
|
|
114
|
+
res
|
|
115
|
+
end
|
|
116
|
+
|
|
117
|
+
##
|
|
118
|
+
# Maintain a conversation via the responses API.
|
|
119
|
+
# This method immediately sends a request to the LLM and returns the response.
|
|
120
|
+
#
|
|
121
|
+
# @note Not all LLM providers support this API
|
|
122
|
+
# @param prompt (see LLM::Provider#complete)
|
|
123
|
+
# @param [Hash] params The params passed to the provider, including optional :stream, :tools, :schema etc.
|
|
124
|
+
# @option params [Integer] :max_tool_rounds The maximum number of tool call iterations (default 10)
|
|
125
|
+
# @return [LLM::Response] Returns the LLM's response for this turn.
|
|
126
|
+
# @example
|
|
127
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
|
128
|
+
# agent = LLM::Agent.new(llm)
|
|
129
|
+
# res = agent.respond("What is the capital of France?")
|
|
130
|
+
# puts res.output_text
|
|
131
|
+
def respond(prompt, params = {})
|
|
132
|
+
i, max = 0, Integer(params.delete(:max_tool_rounds) || 10)
|
|
133
|
+
res = @bot.respond(apply_instructions(prompt), params)
|
|
134
|
+
until @bot.functions.empty?
|
|
135
|
+
raise LLM::ToolLoopError, "pending tool calls remain" if i >= max
|
|
136
|
+
res = @bot.respond @bot.functions.map(&:call), params
|
|
137
|
+
i += 1
|
|
138
|
+
end
|
|
139
|
+
@instructions_applied = true
|
|
140
|
+
res
|
|
141
|
+
end
|
|
142
|
+
|
|
143
|
+
##
|
|
144
|
+
# @return [LLM::Buffer<LLM::Message>]
|
|
145
|
+
def messages
|
|
146
|
+
@bot.messages
|
|
147
|
+
end
|
|
148
|
+
|
|
149
|
+
##
|
|
150
|
+
# @return [Array<LLM::Function>]
|
|
151
|
+
def functions
|
|
152
|
+
@bot.functions
|
|
153
|
+
end
|
|
154
|
+
|
|
155
|
+
##
|
|
156
|
+
# @return [LLM::Object]
|
|
157
|
+
def usage
|
|
158
|
+
@bot.usage
|
|
159
|
+
end
|
|
160
|
+
|
|
161
|
+
##
|
|
162
|
+
# @return [LLM::Builder]
|
|
163
|
+
def build_prompt(&)
|
|
164
|
+
@bot.build_prompt(&)
|
|
165
|
+
end
|
|
166
|
+
|
|
167
|
+
##
|
|
168
|
+
# @param [String] url
|
|
169
|
+
# The URL
|
|
170
|
+
# @return [LLM::Object]
|
|
171
|
+
# Returns a tagged object
|
|
172
|
+
def image_url(url)
|
|
173
|
+
@bot.image_url(url)
|
|
174
|
+
end
|
|
175
|
+
|
|
176
|
+
##
|
|
177
|
+
# @param [String] path
|
|
178
|
+
# The path
|
|
179
|
+
# @return [LLM::Object]
|
|
180
|
+
# Returns a tagged object
|
|
181
|
+
def local_file(path)
|
|
182
|
+
@bot.local_file(path)
|
|
183
|
+
end
|
|
184
|
+
|
|
185
|
+
##
|
|
186
|
+
# @param [LLM::Response] res
|
|
187
|
+
# The response
|
|
188
|
+
# @return [LLM::Object]
|
|
189
|
+
# Returns a tagged object
|
|
190
|
+
def remote_file(res)
|
|
191
|
+
@bot.remote_file(res)
|
|
192
|
+
end
|
|
193
|
+
|
|
194
|
+
private
|
|
195
|
+
|
|
196
|
+
def apply_instructions(prompt)
|
|
197
|
+
instr = self.class.instructions
|
|
198
|
+
return prompt unless instr
|
|
199
|
+
if LLM::Builder === prompt
|
|
200
|
+
messages = prompt.to_a
|
|
201
|
+
builder = LLM::Builder.new(@provider) do |builder|
|
|
202
|
+
builder.system instr unless @instructions_applied
|
|
203
|
+
messages.each { |msg| builder.chat(msg.content, role: msg.role) }
|
|
204
|
+
end
|
|
205
|
+
builder.tap(&:call)
|
|
206
|
+
else
|
|
207
|
+
build_prompt do
|
|
208
|
+
_1.system instr unless @instructions_applied
|
|
209
|
+
_1.user prompt
|
|
210
|
+
end
|
|
211
|
+
end
|
|
212
|
+
end
|
|
213
|
+
end
|
|
214
|
+
end
|
data/lib/llm/bot.rb
CHANGED
data/lib/llm/builder.rb
CHANGED
|
@@ -4,6 +4,9 @@
|
|
|
4
4
|
# The {LLM::Builder LLM::Builder} class can build a collection
|
|
5
5
|
# of messages that can be sent in a single request.
|
|
6
6
|
#
|
|
7
|
+
# @note
|
|
8
|
+
# This API is not meant to be used directly.
|
|
9
|
+
#
|
|
7
10
|
# @example
|
|
8
11
|
# llm = LLM.openai(key: ENV["KEY"])
|
|
9
12
|
# bot = LLM::Bot.new(llm)
|
|
@@ -16,7 +19,8 @@ class LLM::Builder
|
|
|
16
19
|
##
|
|
17
20
|
# @param [Proc] evaluator
|
|
18
21
|
# The evaluator
|
|
19
|
-
def initialize(&evaluator)
|
|
22
|
+
def initialize(provider, &evaluator)
|
|
23
|
+
@provider = provider
|
|
20
24
|
@buffer = []
|
|
21
25
|
@evaluator = evaluator
|
|
22
26
|
end
|
|
@@ -33,7 +37,13 @@ class LLM::Builder
|
|
|
33
37
|
# @param [Symbol] role
|
|
34
38
|
# The role (e.g. user, system)
|
|
35
39
|
# @return [void]
|
|
36
|
-
def chat(content, role:
|
|
40
|
+
def chat(content, role: @provider.user_role)
|
|
41
|
+
role = case role.to_sym
|
|
42
|
+
when :system then @provider.system_role
|
|
43
|
+
when :user then @provider.user_role
|
|
44
|
+
when :developer then @provider.developer_role
|
|
45
|
+
else role
|
|
46
|
+
end
|
|
37
47
|
@buffer << LLM::Message.new(role, content)
|
|
38
48
|
end
|
|
39
49
|
|
|
@@ -42,7 +52,7 @@ class LLM::Builder
|
|
|
42
52
|
# The message content
|
|
43
53
|
# @return [void]
|
|
44
54
|
def user(content)
|
|
45
|
-
chat(content, role:
|
|
55
|
+
chat(content, role: @provider.user_role)
|
|
46
56
|
end
|
|
47
57
|
|
|
48
58
|
##
|
|
@@ -50,7 +60,15 @@ class LLM::Builder
|
|
|
50
60
|
# The message content
|
|
51
61
|
# @return [void]
|
|
52
62
|
def system(content)
|
|
53
|
-
chat(content, role:
|
|
63
|
+
chat(content, role: @provider.system_role)
|
|
64
|
+
end
|
|
65
|
+
|
|
66
|
+
##
|
|
67
|
+
# @param [String] content
|
|
68
|
+
# The message content
|
|
69
|
+
# @return [void]
|
|
70
|
+
def developer(content)
|
|
71
|
+
chat(content, role: @provider.developer_role)
|
|
54
72
|
end
|
|
55
73
|
|
|
56
74
|
##
|
data/lib/llm/error.rb
CHANGED
data/lib/llm/provider.rb
CHANGED
|
@@ -234,6 +234,24 @@ class LLM::Provider
|
|
|
234
234
|
raise NotImplementedError
|
|
235
235
|
end
|
|
236
236
|
|
|
237
|
+
##
|
|
238
|
+
# @return [Symbol]
|
|
239
|
+
def user_role
|
|
240
|
+
:user
|
|
241
|
+
end
|
|
242
|
+
|
|
243
|
+
##
|
|
244
|
+
# @return [Symbol]
|
|
245
|
+
def system_role
|
|
246
|
+
:system
|
|
247
|
+
end
|
|
248
|
+
|
|
249
|
+
##
|
|
250
|
+
# @return [Symbol]
|
|
251
|
+
def developer_role
|
|
252
|
+
:developer
|
|
253
|
+
end
|
|
254
|
+
|
|
237
255
|
private
|
|
238
256
|
|
|
239
257
|
attr_reader :client, :base_uri, :host, :port, :timeout, :ssl
|
data/lib/llm/providers/gemini.rb
CHANGED
|
@@ -103,12 +103,6 @@ module LLM
|
|
|
103
103
|
LLM::Gemini::Models.new(self)
|
|
104
104
|
end
|
|
105
105
|
|
|
106
|
-
##
|
|
107
|
-
# @return (see LLM::Provider#assistant_role)
|
|
108
|
-
def assistant_role
|
|
109
|
-
"model"
|
|
110
|
-
end
|
|
111
|
-
|
|
112
106
|
##
|
|
113
107
|
# Returns the default model for chat completions
|
|
114
108
|
# @see https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash gemini-2.5-flash
|
|
@@ -141,6 +135,33 @@ module LLM
|
|
|
141
135
|
ResponseAdapter.adapt(complete(query, tools: [server_tools[:google_search]]), type: :web_search)
|
|
142
136
|
end
|
|
143
137
|
|
|
138
|
+
##
|
|
139
|
+
# @return [Symbol]
|
|
140
|
+
# Returns the provider's user role
|
|
141
|
+
def user_role
|
|
142
|
+
:user
|
|
143
|
+
end
|
|
144
|
+
|
|
145
|
+
##
|
|
146
|
+
# @return [Symbol]
|
|
147
|
+
# Returns the provider's system role
|
|
148
|
+
def system_role
|
|
149
|
+
:user
|
|
150
|
+
end
|
|
151
|
+
|
|
152
|
+
##
|
|
153
|
+
# @return [Symbol]
|
|
154
|
+
# Returns the provider's developer role
|
|
155
|
+
def developer_role
|
|
156
|
+
:user
|
|
157
|
+
end
|
|
158
|
+
|
|
159
|
+
##
|
|
160
|
+
# @return (see LLM::Provider#assistant_role)
|
|
161
|
+
def assistant_role
|
|
162
|
+
"model"
|
|
163
|
+
end
|
|
164
|
+
|
|
144
165
|
private
|
|
145
166
|
|
|
146
167
|
def headers
|
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
metadata
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: llm.rb
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 4.
|
|
4
|
+
version: 4.1.0
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- Antar Azri
|
|
@@ -178,6 +178,7 @@ files:
|
|
|
178
178
|
- LICENSE
|
|
179
179
|
- README.md
|
|
180
180
|
- lib/llm.rb
|
|
181
|
+
- lib/llm/agent.rb
|
|
181
182
|
- lib/llm/bot.rb
|
|
182
183
|
- lib/llm/buffer.rb
|
|
183
184
|
- lib/llm/builder.rb
|