llm.rb 4.20.2 → 4.21.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +21 -0
- data/README.md +86 -24
- data/lib/llm/agent.rb +13 -1
- data/lib/llm/context.rb +7 -0
- data/lib/llm/sequel/agent.rb +107 -0
- data/lib/llm/skill.rb +116 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +1 -0
- data/lib/sequel/plugins/agent.rb +8 -0
- metadata +4 -1
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f0bca66b2bd8873cf39abb3be19dc99ca20d558e40ef3e9f475bf1f33faef6b6
+  data.tar.gz: c73a2c5093e7e09557242919feb5a377f25b0fa8a11249a9f346673ad7d3a921
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2a00191aaab47702a794f9fa86d782f21832be2a7ef309bd558aa482100d7c66ddbdf3320e89c80af2942c6e33295f10d387702130162fbac7cc98fd9b24c9a8
+  data.tar.gz: a6709f6fd265af673da771f635f34c68e28e490405700c1a59b18253391dbbcae09ce677a4251994d898a851ec08dc598c5ff858e516e25b1206948f509abf67
data/CHANGELOG.md
CHANGED

@@ -2,8 +2,29 @@
 
 ## Unreleased
 
+Changes since `v4.21.0`.
+
+## v4.21.0
+
 Changes since `v4.20.2`.
 
+This release expands higher-level composition in llm.rb. It adds Sequel agent
+persistence through `plugin :agent` and introduces directory-backed skills
+that load from `SKILL.md`, resolve named tools, and plug directly into
+`LLM::Context` and `LLM::Agent`.
+
+### Change
+
+* **Add `plugin :agent` for Sequel models** <br>
+  Add Sequel support for `plugin :agent`, similar to ActiveRecord's
+  `acts_as_agent`, so models can wrap `LLM::Agent` with built-in
+  persistence.
+
+* **Load directory-backed skills through `LLM::Context` and `LLM::Agent`** <br>
+  Add `skills:` to `LLM::Context` and `skills ...` to `LLM::Agent` so
+  directories with `SKILL.md` can be loaded, resolved into tools, and run
+  through the normal llm.rb tool path.
+
 ## v4.20.2
 
 Changes since `v4.20.1`.
data/README.md
CHANGED

@@ -4,22 +4,28 @@
 <p align="center">
 <a href="https://0x1eef.github.io/x/llm.rb?rebuild=1"><img src="https://img.shields.io/badge/docs-0x1eef.github.io-blue.svg" alt="RubyDoc"></a>
 <a href="https://opensource.org/license/0bsd"><img src="https://img.shields.io/badge/License-0BSD-orange.svg?" alt="License"></a>
-<a href="https://github.com/llmrb/llm.rb/tags"><img src="https://img.shields.io/badge/version-4.
+<a href="https://github.com/llmrb/llm.rb/tags"><img src="https://img.shields.io/badge/version-4.21.0-green.svg?" alt="Version"></a>
 </p>
 
 ## About
 
 llm.rb is a lightweight runtime for building capable AI systems in Ruby.
+<br>
+
+It is also the most capable AI Ruby runtime that exists _today_, and that claim is
+backed up by research. Maybe it won't always be true, and that would be good news too -
+because it would mean the Ruby ecosystem is getting stronger.
 
-
-contexts, agents, tools, MCP servers, streaming, schemas, files, and
-state, so real systems can be built out of one coherent execution
-of a pile of adapters.
+llm.rb is not just an API wrapper: it gives you one runtime for providers,
+contexts, agents, tools, skills, MCP servers, streaming, schemas, files, and
+persisted state, so real systems can be built out of one coherent execution
+model instead of a pile of adapters.
 
-
-
+llm.rb is designed for Ruby, and although it works great in Rails, it is not tightly
+coupled to it. It runs on the standard library by default (zero dependencies),
+loads optional pieces only when needed, includes built-in ActiveRecord support through
 `acts_as_llm` and `acts_as_agent`, includes built-in Sequel support through
-`plugin :llm`, and is designed for engineers who want control over
+`plugin :llm` and `plugin :agent`, and is designed for engineers who want control over
 long-lived, tool-capable, stateful AI workflows instead of just
 request/response helpers.
 
@@ -101,13 +107,18 @@ same context object.
   integration stack.
 - **ActiveRecord and Sequel persistence are built in** <br>
   llm.rb includes built-in ActiveRecord support through `acts_as_llm` and
-  `acts_as_agent`, plus built-in Sequel support through `plugin :llm
+  `acts_as_agent`, plus built-in Sequel support through `plugin :llm` and
+  `plugin :agent`.
   Use `acts_as_llm` when you want to wrap `LLM::Context`, `acts_as_agent`
-  when you want to wrap `LLM::Agent`,
-
-  integrations support `provider:` and `context:` hooks,
-  :string` for text columns or `format: :jsonb` for native
-  storage when ORM JSON typecasting support is enabled.
+  when you want to wrap `LLM::Agent`, `plugin :llm` when you want a
+  `LLM::Context` on a Sequel model, or `plugin :agent` when you want an
+  `LLM::Agent`. These integrations support `provider:` and `context:` hooks,
+  plus `format: :string` for text columns or `format: :jsonb` for native
+  PostgreSQL JSON storage when ORM JSON typecasting support is enabled.
+- **ORM models can become persistent agents** <br>
+  Turn an ActiveRecord or Sequel model into an agent-capable model with
+  built-in persistence, stored on the same table, with `jsonb` support when
+  your ORM and database support native JSON columns.
 - **Persistent HTTP pooling is shared process-wide** <br>
   When enabled, separate
   [`LLM::Provider`](https://0x1eef.github.io/x/llm.rb/LLM/Provider.html)
@@ -126,6 +137,11 @@ same context object.
 - **Tools are explicit** <br>
   Run local tools, provider-native tools, and MCP tools through the same path
   with fewer special cases.
+- **Skills are just tools loaded from directories** <br>
+  Point llm.rb at directories with a `SKILL.md`, resolve named tools through
+  the registry, and run those skills through `LLM::Context` or `LLM::Agent`
+  without creating a second execution model. If you are familiar with skills
+  in Claude or Codex, llm.rb supports the same general idea.
 - **Providers are normalized, not flattened** <br>
   Share one API surface across providers without losing access to provider-
   specific capabilities where they matter.
@@ -165,6 +181,7 @@ same context object.
 - **Run Tools While Streaming** — overlap model output with tool latency
 - **Concurrent Execution** — threads, async tasks, and fibers
 - **Agents** — reusable assistants with tool auto-execution
+- **Skills** — directory-backed capabilities loaded from `SKILL.md`
 - **Structured Outputs** — JSON Schema-based responses
 - **Responses API** — stateful response workflows where providers support them
 - **MCP Support** — stdio and HTTP MCP clients with prompt and tool support
@@ -189,7 +206,7 @@ gem install llm.rb
 
 #### REPL
 
-This example uses [`LLM::Context`](https://0x1eef.github.io/x/llm.rb/LLM/Context.html) directly for an interactive REPL. <br> See the [deepdive](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) for more examples.
+This example uses [`LLM::Context`](https://0x1eef.github.io/x/llm.rb/LLM/Context.html) directly for an interactive REPL. <br> See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
 
 ```ruby
 require "llm"
@@ -206,7 +223,7 @@ end
 
 #### Streaming
 
-This example uses [`LLM::Stream`](https://0x1eef.github.io/x/llm.rb/LLM/Stream.html) directly so visible output and tool execution can happen together. <br> See the [deepdive](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) for more examples.
+This example uses [`LLM::Stream`](https://0x1eef.github.io/x/llm.rb/LLM/Stream.html) directly so visible output and tool execution can happen together. <br> See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
 
 ```ruby
 require "llm"
@@ -238,9 +255,37 @@ ctx.talk("Run `date` and `uname -a`.")
 ctx.talk(ctx.wait(:thread)) while ctx.functions.any?
 ```
 
+#### Reasoning
+
+This example uses [`LLM::Stream`](https://0x1eef.github.io/x/llm.rb/LLM/Stream.html) with the OpenAI Responses API so reasoning output is streamed separately from visible assistant output. See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
+
+```ruby
+require "llm"
+
+class Stream < LLM::Stream
+  def on_content(content)
+    $stdout << content
+  end
+
+  def on_reasoning_content(content)
+    $stderr << content
+  end
+end
+
+llm = LLM.openai(key: ENV["KEY"])
+ctx = LLM::Context.new(
+  llm,
+  model: "gpt-5.4-mini",
+  mode: :responses,
+  reasoning: {effort: "medium"},
+  stream: Stream.new
+)
+ctx.talk("Solve 17 * 19 and show your work.")
+```
+
 #### Request Cancellation
 
-Need to cancel a stream? llm.rb has you covered through [`LLM::Context#interrupt!`](https://0x1eef.github.io/x/llm.rb/LLM/Context.html#interrupt-21-instance_method). <br> See the [deepdive](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) for more examples.
+Need to cancel a stream? llm.rb has you covered through [`LLM::Context#interrupt!`](https://0x1eef.github.io/x/llm.rb/LLM/Context.html#interrupt-21-instance_method). <br> See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
 
 ```ruby
 require "llm"
@@ -260,7 +305,7 @@ worker.join
 
 #### Sequel (ORM)
 
-The `plugin :llm` integration wraps [`LLM::Context`](https://0x1eef.github.io/x/llm.rb/LLM/Context.html) on a `Sequel::Model` and keeps tool execution explicit. <br> See the [deepdive](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) for more examples.
+The `plugin :llm` integration wraps [`LLM::Context`](https://0x1eef.github.io/x/llm.rb/LLM/Context.html) on a `Sequel::Model` and keeps tool execution explicit. <br> See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
 
 ```ruby
 require "llm"
@@ -280,7 +325,7 @@ puts ctx.talk("What is my favorite language?").content
 #### ActiveRecord (ORM): acts_as_llm
 
 The `acts_as_llm` method wraps [`LLM::Context`](https://0x1eef.github.io/x/llm.rb/LLM/Context.html) and
-provides full control over tool execution. <br> See the [deepdive](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) for more examples.
+provides full control over tool execution. <br> See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
 
 ```ruby
 require "llm"
@@ -300,7 +345,7 @@ puts ctx.talk("What is my favorite language?").content
 #### ActiveRecord (ORM): acts_as_agent
 
 The `acts_as_agent` method wraps [`LLM::Agent`](https://0x1eef.github.io/x/llm.rb/LLM/Agent.html) and
-manages tool execution for you. <br> See the [deepdive](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) for more examples.
+manages tool execution for you. <br> See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
 
 ```ruby
 require "llm"
@@ -329,7 +374,7 @@ puts ticket.talk("How do I rotate my API key?").content
 
 #### Agent
 
-This example uses [`LLM::Agent`](https://0x1eef.github.io/x/llm.rb/LLM/Agent.html) directly and lets the agent manage tool execution. <br> See the [deepdive](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) for more examples.
+This example uses [`LLM::Agent`](https://0x1eef.github.io/x/llm.rb/LLM/Agent.html) directly and lets the agent manage tool execution. <br> See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
 
 ```ruby
 require "llm"
@@ -346,9 +391,26 @@ agent = ShellAgent.new(llm)
 puts agent.talk("What time is it on this system?").content
 ```
 
+#### Skills
+
+This example uses [`LLM::Agent`](https://0x1eef.github.io/x/llm.rb/LLM/Agent.html) with directory-backed skills so `SKILL.md` capabilities run through the normal tool path. If you have used skills in Claude or Codex, this is the same kind of building block. <br> See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
+
+```ruby
+require "llm"
+
+class Agent < LLM::Agent
+  model "gpt-5.4-mini"
+  instructions "You are a concise release assistant."
+  skills "./skills/release", "./skills/review"
+end
+
+llm = LLM.openai(key: ENV["KEY"])
+puts Agent.new(llm).talk("Use the review skill.").content
+```
+
 #### MCP
 
-This example uses [`LLM::MCP`](https://0x1eef.github.io/x/llm.rb/LLM/MCP.html) over HTTP so remote GitHub MCP tools run through the same `LLM::Context` tool path as local tools.
+This example uses [`LLM::MCP`](https://0x1eef.github.io/x/llm.rb/LLM/MCP.html) over HTTP so remote GitHub MCP tools run through the same `LLM::Context` tool path as local tools. See the [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) or [deepdive (markdown)](resources/deepdive.md) for more examples.
 
 ```ruby
 require "llm"
@@ -379,8 +441,8 @@ how capable the runtime can be in a real application:
 
 ## Resources
 
-- [deepdive](https://0x1eef.github.io/x/llm.rb/file.deepdive.html)
-  examples guide.
+- [deepdive (web)](https://0x1eef.github.io/x/llm.rb/file.deepdive.html) and
+  [deepdive (markdown)](resources/deepdive.md) are the examples guide.
 - [relay](https://github.com/llmrb/relay) shows a real application built on
   top of llm.rb.
 - [doc site](https://0x1eef.github.io/x/llm.rb?rebuild=1) has the API docs.
data/lib/llm/agent.rb
CHANGED

@@ -59,6 +59,17 @@ module LLM
       @tools = tools.flatten
     end
 
+    ##
+    # Set or get the default skills
+    # @param [Array<String>, nil] skills
+    #  One or more skill directories
+    # @return [Array<String>, nil]
+    #  Returns the current skills when no argument is provided
+    def self.skills(*skills)
+      return @skills if skills.empty?
+      @skills = skills.flatten
+    end
+
     ##
     # Set or get the default schema
     # @param [#to_json, nil] schema
@@ -110,10 +121,11 @@ module LLM
     # not only those listed here.
     # @option params [String] :model Defaults to the provider's default model
     # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
+    # @option params [Array<String>, nil] :skills Defaults to nil
     # @option params [#to_json, nil] :schema Defaults to nil
     # @option params [Symbol, Array<Symbol>, nil] :concurrency Defaults to the agent class concurrency
     def initialize(llm, params = {})
-      defaults = {model: self.class.model, tools: self.class.tools, schema: self.class.schema}.compact
+      defaults = {model: self.class.model, tools: self.class.tools, skills: self.class.skills, schema: self.class.schema}.compact
       @concurrency = params.delete(:concurrency) || self.class.concurrency
       @llm = llm
       @ctx = LLM::Context.new(llm, defaults.merge(params))
data/lib/llm/context.rb
CHANGED

@@ -64,10 +64,13 @@ module LLM
     # @option params [Symbol] :mode Defaults to :completions
     # @option params [String] :model Defaults to the provider's default model
     # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
+    # @option params [Array<String>, nil] :skills Defaults to nil
    def initialize(llm, params = {})
       @llm = llm
       @mode = params.delete(:mode) || :completions
+      tools = [*params.delete(:tools), *load_skills(params.delete(:skills))]
       @params = {model: llm.default_model, schema: nil}.compact.merge!(params)
+      @params[:tools] = tools unless tools.empty?
       @messages = LLM::Buffer.new(llm)
     end
 
@@ -345,6 +348,10 @@ module LLM
       stream.extra[:tracer] = tracer
       stream.extra[:model] = model
     end
+
+    def load_skills(skills)
+      [*skills].map { LLM::Skill.load(_1).to_tool(llm) }
+    end
   end
 
   # Backward-compatible alias
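In `initialize` above, explicit `:tools` and directory-backed `:skills` are spliced into a single tools array, so a skill-backed tool is indistinguishable from a hand-written one downstream. A small sketch of the new option, assuming `./skills/review` exists and contains a `SKILL.md` (the path and prompt are illustrative):

```ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
ctx = LLM::Context.new(llm, skills: ["./skills/review"])
res = ctx.talk("Use the review skill.")
# Tool execution on LLM::Context stays explicit (see the README examples
# above); the skill's tool call flows through the same function path as
# any other tool call.
puts res.content
```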
data/lib/llm/sequel/agent.rb
ADDED

@@ -0,0 +1,107 @@
+# frozen_string_literal: true
+
+module LLM::Sequel
+  ##
+  # Sequel plugin for persisting {LLM::Agent LLM::Agent} state.
+  #
+  # This wrapper reuses the same record-backed runtime surface as
+  # {LLM::Sequel::Plugin}, but builds an {LLM::Agent LLM::Agent} instead of an
+  # {LLM::Context LLM::Context}. Agent defaults such as model, tools, schema,
+  # instructions, and concurrency are configured on the model class and
+  # forwarded to an internal agent subclass.
+  module Agent
+    EMPTY_HASH = LLM::Sequel::Plugin::EMPTY_HASH
+    DEFAULT_USAGE_COLUMNS = LLM::Sequel::Plugin::DEFAULT_USAGE_COLUMNS
+    DEFAULTS = LLM::Sequel::Plugin::DEFAULTS
+
+    def self.apply(model, **)
+      model.extend ClassMethods
+      model.include LLM::Sequel::Plugin::InstanceMethods
+      model.include InstanceMethods
+    end
+
+    def self.configure(model, options = EMPTY_HASH, &block)
+      options = DEFAULTS.merge(options)
+      usage_columns = DEFAULT_USAGE_COLUMNS.merge(options[:usage_columns] || EMPTY_HASH)
+      model.instance_variable_set(
+        :@llm_agent_options,
+        options.merge(usage_columns: usage_columns.freeze).freeze
+      )
+      model.instance_exec(&block) if block
+    end
+
+    module ClassMethods
+      def llm_plugin_options
+        @llm_agent_options || Agent::DEFAULTS
+      end
+
+      def model(model = nil)
+        return agent.model if model.nil?
+        agent.model(model)
+      end
+
+      def tools(*tools)
+        return agent.tools if tools.empty?
+        agent.tools(*tools)
+      end
+
+      def schema(schema = nil)
+        return agent.schema if schema.nil?
+        agent.schema(schema)
+      end
+
+      def instructions(instructions = nil)
+        return agent.instructions if instructions.nil?
+        agent.instructions(instructions)
+      end
+
+      def concurrency(concurrency = nil)
+        return agent.concurrency if concurrency.nil?
+        agent.concurrency(concurrency)
+      end
+
+      def agent
+        @agent ||= Class.new(LLM::Agent)
+      end
+    end
+
+    module InstanceMethods
+      private
+
+      def ctx
+        @ctx ||= begin
+          options = self.class.llm_plugin_options
+          params = resolve_options(options[:context]).dup
+          params[:model] ||= self[columns[:model_column]]
+          ctx = self.class.agent.new(llm, params.compact)
+          data = self[columns[:data_column]]
+          if data.nil? || data == ""
+            ctx
+          else
+            case options[:format]
+            when :string then ctx.restore(string: data)
+            when :json, :jsonb then ctx.restore(data:)
+            else raise ArgumentError, "Unknown format: #{options[:format].inspect}"
+            end
+          end
+        end
+      end
+
+      def resolve_option(option)
+        case option
+        when Proc then instance_exec(&option)
+        when Symbol then send(option)
+        when Hash then option.dup
+        else option
+        end
+      end
+
+      def resolve_options(option)
+        case option
+        when Proc, Symbol, Hash then resolve_option(option)
+        else Agent::EMPTY_HASH.dup
+        end
+      end
+    end
+  end
+end
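A hypothetical end-to-end sketch of `plugin :agent`. The `provider:` and `format:` options come from the README section above; the table name, column layout, and the lambda form of `provider:` are assumptions, since the plugin's default column names live in `LLM::Sequel::Plugin::DEFAULTS`, which is outside this diff:

```ruby
require "llm"
require "sequel"

# Assumes a tickets table with the columns the plugin expects
# (see LLM::Sequel::Plugin::DEFAULTS for the real defaults).
class Ticket < Sequel::Model
  plugin :agent,
    provider: -> { LLM.openai(key: ENV["KEY"]) }, # provider: hook, per the README
    format: :string                               # serialize agent state into a text column
  model "gpt-5.4-mini"
  instructions "You are a support agent."
end

ticket = Ticket.new
# Mirrors the acts_as_agent example from the README; on later loads, the
# ctx method above restores the saved state from the data column.
puts ticket.talk("How do I rotate my API key?").content
```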
data/lib/llm/skill.rb
ADDED

@@ -0,0 +1,116 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # {LLM::Skill LLM::Skill} represents a directory-backed packaged capability.
+  # A skill directory must contain a `SKILL.md` file with YAML frontmatter.
+  # Skills can expose themselves as normal {LLM::Tool LLM::Tool} classes through
+  # {#to_tool}. This keeps skills on the same execution path as local tools.
+  class Skill
+    ##
+    # Load a skill from a directory.
+    # @param [String, Pathname] path
+    # @return [LLM::Skill]
+    def self.load(path)
+      new(path).tap(&:load!)
+    end
+
+    ##
+    # Returns the skill directory.
+    # @return [String]
+    attr_reader :path
+
+    ##
+    # Returns the skill name.
+    # @return [String]
+    attr_reader :name
+
+    ##
+    # Returns the skill description.
+    # @return [String]
+    attr_reader :description
+
+    ##
+    # Returns the skill instructions.
+    # @return [String]
+    attr_reader :instructions
+
+    ##
+    # Returns the skill frontmatter.
+    # @return [LLM::Object]
+    attr_reader :frontmatter
+
+    ##
+    # Returns the skill tools.
+    # @return [Array<Class<LLM::Tool>>]
+    attr_reader :tools
+
+    def initialize(path)
+      @path = path.to_s
+      @name = ::File.basename(@path)
+      @description = "Skill: #{@name}"
+      @instructions = ""
+      @frontmatter = LLM::Object.from({})
+      @tools = []
+    end
+
+    ##
+    # Load and parse the skill.
+    # @return [LLM::Skill]
+    def load!
+      path = ::File.join(@path, "SKILL.md")
+      parse(::File.read(path))
+      self
+    end
+
+    ##
+    # Execute the skill by wrapping it in a small agent with the skill
+    # instructions. The provider is bound explicitly by the caller.
+    # @param [LLM::Provider] llm
+    # @param [Hash] input
+    # @return [Hash]
+    def call(llm, **)
+      instructions = self.instructions
+      tools = self.tools
+      agent = Class.new(LLM::Agent) do
+        instructions instructions
+        tools(*tools)
+      end.new(llm)
+      res = agent.talk(instructions)
+      {content: res.content}
+    end
+
+    ##
+    # Expose the skill as a normal LLM::Tool. The provider is bound explicitly
+    # when the tool class is built.
+    # @param [LLM::Provider] llm
+    # @return [Class<LLM::Tool>]
+    def to_tool(llm)
+      skill = self
+      Class.new(LLM::Tool) do
+        name skill.name
+        description skill.description
+
+        define_method(:call) do |**input|
+          skill.call(llm, **input)
+        end
+      end
+    end
+
+    private
+
+    def parse(content)
+      match = content.match(/\A---\s*\n(.*?)\n---\s*\n?(.*)\z/m)
+      unless match
+        @instructions = content
+        return
+      end
+      require "yaml" unless defined?(::YAML)
+      @frontmatter = LLM::Object.from(YAML.safe_load(match[1]) || {})
+      @name = @frontmatter.name || @name
+      @description = @frontmatter.description || @description
+      @tools = [*@frontmatter.tools].map { LLM::Tool.find_by_name!(_1) }
+      @instructions = match[2]
+    end
+  end
+end
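Given `Skill#parse` above, a `SKILL.md` is YAML frontmatter (`name`, `description`, and optionally `tools`, resolved through `LLM::Tool.find_by_name!`) followed by a markdown body that becomes the instructions. A small round-trip sketch under those rules; the directory, skill name, and description are illustrative:

```ruby
require "llm"
require "fileutils"

# Write a minimal SKILL.md: frontmatter between --- markers, then the body.
FileUtils.mkdir_p "skills/review"
File.write "skills/review/SKILL.md", <<~SKILL
  ---
  name: review
  description: Review a diff and summarize risk
  ---
  You review code changes and produce a short, prioritized risk summary.
SKILL

skill = LLM::Skill.load("skills/review")
skill.name         #=> "review"
skill.description  #=> "Review a diff and summarize risk"
skill.instructions #=> the body after the frontmatter
```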
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
metadata
CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 4.
+  version: 4.21.0
 platform: ruby
 authors:
 - Antar Azri
@@ -371,9 +371,11 @@ files:
 - lib/llm/schema/parser.rb
 - lib/llm/schema/string.rb
 - lib/llm/schema/version.rb
+- lib/llm/sequel/agent.rb
 - lib/llm/sequel/plugin.rb
 - lib/llm/server_tool.rb
 - lib/llm/session.rb
+- lib/llm/skill.rb
 - lib/llm/stream.rb
 - lib/llm/stream/queue.rb
 - lib/llm/tool.rb
@@ -386,6 +388,7 @@ files:
 - lib/llm/usage.rb
 - lib/llm/utils.rb
 - lib/llm/version.rb
+- lib/sequel/plugins/agent.rb
 - lib/sequel/plugins/llm.rb
 - llm.gemspec
 homepage: https://github.com/llmrb/llm.rb