llm.rb 0.3.3 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +41 -1
- data/lib/json/schema/array.rb +22 -0
- data/lib/json/schema/boolean.rb +9 -0
- data/lib/json/schema/integer.rb +21 -0
- data/lib/json/schema/leaf.rb +40 -0
- data/lib/json/schema/null.rb +9 -0
- data/lib/json/schema/number.rb +21 -0
- data/lib/json/schema/object.rb +26 -0
- data/lib/json/schema/string.rb +9 -0
- data/lib/json/schema.rb +73 -0
- data/lib/llm/chat.rb +4 -2
- data/lib/llm/message.rb +7 -0
- data/lib/llm/provider.rb +25 -9
- data/lib/llm/providers/anthropic.rb +0 -1
- data/lib/llm/providers/gemini.rb +13 -3
- data/lib/llm/providers/ollama.rb +10 -3
- data/lib/llm/providers/openai/responses.rb +6 -4
- data/lib/llm/providers/openai.rb +16 -3
- data/lib/llm/version.rb +1 -1
- data/spec/llm/conversation_spec.rb +67 -1
- metadata +10 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3d8311b461c49095ba393be6d3456bf7c3032a07e4480f4fd3e4cd9133f2b6e7
+  data.tar.gz: 5f9ce812a4a27e0982ea5072bca3f0494317ee0274987e57df792cc0e0d2fcfc
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1d860cabe75a0718e0c06d686127d6d24c65b4dc8967aaf490b892bfad10bf88268791e9138b26e2d1c646a22e2eda7e74ad48e1fa31e9cb68c42de7ec53ac12
+  data.tar.gz: 8c2e14a316e87560e5d5dc3cdb416d151172ae12e2244bb813ff1bba61c582474607bd6920f82fa5f080e628b2dac319dd20d4f74eff55940f9c88ab7544b327
data/README.md
CHANGED
@@ -110,6 +110,46 @@ bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
 # The answer to ((5 + 15) * 2) / 10 is 4.
 ```
 
+### Schema
+
+#### Structured
+
+All LLM providers except Anthropic allow a client to describe the structure
+of a response that an LLM emits according to a schema described by JSON.
+The schema lets a client describe what JSON object (or value) an LLM should emit,
+and the LLM will abide by the schema. See also: [JSON Schema website](https://json-schema.org/overview/what-is-jsonschema).
+
+True to the llm.rb spirit of doing one thing well, and of solving problems through
+the composition of objects, the generation of a schema is delegated to another
+object that is responsible for, and an expert in, the generation of JSON schemas.
+We will use the [llmrb/json-schema](https://github.com/llmrb/json-schema) library
+for the examples. It is an optional dependency that is loaded on demand, and for
+the time being it does not need to be installed separately. The interface is
+designed so you could drop in any other library in its place:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(ENV["KEY"])
+schema = llm.schema.object({os: llm.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD").required})
+bot = LLM::Chat.new(llm, schema:)
+bot.chat "You secretly love NetBSD", :system
+bot.chat "What operating system is the best?", :user
+bot.messages.find(&:assistant?).content! # => {os: "NetBSD"}
+
+schema = llm.schema.object({answer: llm.schema.integer.required})
+bot = LLM::Chat.new(llm, schema:)
+bot.chat "Tell me the answer to ((5 + 5) / 2)", :user
+bot.messages.find(&:assistant?).content! # => {answer: 5}
+
+schema = llm.schema.object({probability: llm.schema.number.required})
+bot = LLM::Chat.new(llm, schema:)
+bot.chat "Does the earth orbit the sun?", :user
+bot.messages.find(&:assistant?).content! # => {probability: 1}
+```
+
 ### Audio
 
 #### Speech
@@ -326,7 +366,7 @@ also understand URLs, and various file types (eg images, audio, video,
 etc). The llm.rb approach to multimodal prompts is to let you pass `URI`
 objects to describe links, `LLM::File` / `LLM::Response::File` objects
 to describe files, `String` objects to describe text blobs, or an array
-of the
+of the aforementioned objects to describe multiple objects in a single
 prompt. Each object is a first class citizen that can be passed directly
 to a prompt.
 
data/lib/json/schema/array.rb
ADDED
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+class JSON::Schema
+  class Array < Leaf
+    def initialize(items, **rest)
+      @items = items
+      super(**rest)
+    end
+
+    def to_h
+      super.merge!({type: "array", items:})
+    end
+
+    def to_json(options = {})
+      to_h.to_json(options)
+    end
+
+    private
+
+    attr_reader :items
+  end
+end
data/lib/json/schema/integer.rb
ADDED
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+class JSON::Schema
+  class Integer < Leaf
+    def min(i)
+      tap { @minimum = i }
+    end
+
+    def max(i)
+      tap { @maximum = i }
+    end
+
+    def to_h
+      super.merge!({
+        type: "integer",
+        minimum: @minimum,
+        maximum: @maximum
+      }).compact
+    end
+  end
+end
data/lib/json/schema/leaf.rb
ADDED
@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+class JSON::Schema
+  class Leaf
+    def initialize
+      @description = nil
+      @default = nil
+      @enum = nil
+      @required = nil
+    end
+
+    def description(str)
+      tap { @description = str }
+    end
+
+    def default(value)
+      tap { @default = value }
+    end
+
+    def enum(*values)
+      tap { @enum = values }
+    end
+
+    def required
+      tap { @required = true }
+    end
+
+    def to_h
+      {description: @description, default: @default, enum: @enum}.compact
+    end
+
+    def to_json(options = {})
+      to_h.to_json(options)
+    end
+
+    def required?
+      @required
+    end
+  end
+end
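A note on the builder style: every setter in `Leaf` wraps its assignment in `tap`, so each call returns the receiver and constraints chain fluently. A minimal sketch of the effect, using only the methods shown above:

```ruby
leaf = JSON::Schema::Leaf.new
         .description("A BSD operating system")
         .default("NetBSD")
# nil-valued attributes are dropped by #compact:
leaf.to_h # => {description: "A BSD operating system", default: "NetBSD"}
```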
data/lib/json/schema/number.rb
ADDED
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+class JSON::Schema
+  class Number < Leaf
+    def min(i)
+      tap { @minimum = i }
+    end
+
+    def max(i)
+      tap { @maximum = i }
+    end
+
+    def to_h
+      super.merge!({
+        type: "number",
+        minimum: @minimum,
+        maximum: @maximum
+      }).compact
+    end
+  end
+end
data/lib/json/schema/object.rb
ADDED
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+class JSON::Schema
+  class Object < Leaf
+    def initialize(properties, **rest)
+      @properties = properties
+      super(**rest)
+    end
+
+    def to_h
+      super.merge!({type: "object", properties:, required:})
+    end
+
+    def to_json(options = {})
+      to_h.to_json(options)
+    end
+
+    private
+
+    attr_reader :properties
+
+    def required
+      @properties.filter_map { _2.required? ? _1 : nil }
+    end
+  end
+end
data/lib/json/schema.rb
ADDED
@@ -0,0 +1,73 @@
+# frozen_string_literal: true
+
+module JSON
+end unless defined?(JSON)
+
+class JSON::Schema
+  require_relative "schema/leaf"
+  require_relative "schema/object"
+  require_relative "schema/array"
+  require_relative "schema/string"
+  require_relative "schema/number"
+  require_relative "schema/integer"
+  require_relative "schema/boolean"
+  require_relative "schema/null"
+
+  ##
+  # Returns an object
+  # @param properties [Hash] A hash of properties
+  # @param rest [Hash] Any other options
+  # @return [JSON::Schema::Object]
+  def object(properties, **rest)
+    Object.new(properties, **rest)
+  end
+
+  ##
+  # Returns an array
+  # @param items [Array] An array of items
+  # @param rest [Hash] Any other options
+  # @return [JSON::Schema::Array]
+  def array(items, **rest)
+    Array.new(items, **rest)
+  end
+
+  ##
+  # Returns a string
+  # @param rest [Hash] Any other options
+  # @return [JSON::Schema::String]
+  def string(...)
+    String.new(...)
+  end
+
+  ##
+  # Returns a number
+  # @param rest [Hash] Any other options
+  # @return [JSON::Schema::Number] a number
+  def number(...)
+    Number.new(...)
+  end
+
+  ##
+  # Returns an integer
+  # @param rest [Hash] Any other options
+  # @return [JSON::Schema::Integer]
+  def integer(...)
+    Integer.new(...)
+  end
+
+  ##
+  # Returns a boolean
+  # @param rest [Hash] Any other options
+  # @return [JSON::Schema::Boolean]
+  def boolean(...)
+    Boolean.new(...)
+  end
+
+  ##
+  # Returns null
+  # @param rest [Hash] Any other options
+  # @return [JSON::Schema::Null]
+  def null(...)
+    Null.new(...)
+  end
+end
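Taken together, the builder composes leaf objects into a schema document, and calling `to_json` on the root serializes the nested leaves recursively (each leaf's `to_json` is invoked by the JSON generator). A sketch of direct usage; the load path and the exact key order of the emitted JSON are assumptions:

```ruby
#!/usr/bin/env ruby
require "json"
require "json/schema" # assumes the gem's lib/ directory is on the load path

schema = JSON::Schema.new
puts schema.object({
  os: schema.string.enum("OpenBSD", "FreeBSD", "NetBSD").required,
  uptime: schema.integer.min(0)
}).to_json
# Prints something along the lines of:
# {"type":"object","properties":{"os":{"enum":["OpenBSD","FreeBSD","NetBSD"],"type":"string"},
#  "uptime":{"type":"integer","minimum":0}},"required":["os"]}
```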
data/lib/llm/chat.rb
CHANGED
@@ -27,13 +27,15 @@ module LLM
     ##
     # @param [LLM::Provider] provider
     #  A provider
+    # @param [#to_json] schema
+    #  The JSON schema to maintain throughout the conversation
     # @param [String] model
     #  The model to maintain throughout the conversation
     # @param [Hash] params
     #  Other parameters to maintain throughout the conversation
-    def initialize(provider, model: provider.default_model, **params)
+    def initialize(provider, model: provider.default_model, schema: nil, **params)
       @provider = provider
-      @params = params.merge!(model:)
+      @params = params.merge!(model:, schema:)
       @lazy = false
       @messages = []
     end
data/lib/llm/message.rb
CHANGED
data/lib/llm/provider.rb
CHANGED
@@ -64,12 +64,14 @@ class LLM::Provider
   #  The role of the prompt (e.g. :user, :system)
   # @param [String] model
   #  The model to use for the completion
+  # @param [#to_json, nil] schema
+  #  The schema that describes the expected response format
   # @param [Hash] params
   #  Other completion parameters
   # @raise [NotImplementedError]
   #  When the method is not implemented by a subclass
   # @return [LLM::Response::Completion]
-  def complete(prompt, role = :user, model: default_model, **params)
+  def complete(prompt, role = :user, model: default_model, schema: nil, **params)
     raise NotImplementedError
   end
 
@@ -81,12 +83,13 @@ class LLM::Provider
   # @param prompt (see LLM::Provider#complete)
   # @param role (see LLM::Provider#complete)
   # @param model (see LLM::Provider#complete)
+  # @param schema (see LLM::Provider#complete)
   # @param [Hash] params
   #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def chat(prompt, role = :user, model: default_model, **params)
-    LLM::Chat.new(self, **params.merge(model:)).lazy.chat(prompt, role)
+  def chat(prompt, role = :user, model: default_model, schema: nil, **params)
+    LLM::Chat.new(self, **params.merge(model:, schema:)).lazy.chat(prompt, role)
   end
 
   ##
@@ -97,12 +100,13 @@ class LLM::Provider
   # @param prompt (see LLM::Provider#complete)
   # @param role (see LLM::Provider#complete)
   # @param model (see LLM::Provider#complete)
+  # @param schema (see LLM::Provider#complete)
   # @param [Hash] params
   #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def chat!(prompt, role = :user, model: default_model, **params)
-    LLM::Chat.new(self, **params.merge(model:)).chat(prompt, role)
+  def chat!(prompt, role = :user, model: default_model, schema: nil, **params)
+    LLM::Chat.new(self, **params.merge(model:, schema:)).chat(prompt, role)
   end
 
   ##
@@ -113,12 +117,13 @@ class LLM::Provider
   # @param prompt (see LLM::Provider#complete)
   # @param role (see LLM::Provider#complete)
   # @param model (see LLM::Provider#complete)
+  # @param schema (see LLM::Provider#complete)
   # @param [Hash] params
   #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def respond(prompt, role = :user, model: default_model, **params)
-    LLM::Chat.new(self, **params.merge(model:)).lazy.respond(prompt, role)
+  def respond(prompt, role = :user, model: default_model, schema: nil, **params)
+    LLM::Chat.new(self, **params.merge(model:, schema:)).lazy.respond(prompt, role)
   end
 
   ##
@@ -129,12 +134,13 @@ class LLM::Provider
   # @param prompt (see LLM::Provider#complete)
   # @param role (see LLM::Provider#complete)
   # @param model (see LLM::Provider#complete)
+  # @param schema (see LLM::Provider#complete)
   # @param [Hash] params
   #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def respond!(prompt, role = :user, model: default_model, **params)
-    LLM::Chat.new(self, **params.merge(model:)).respond(prompt, role)
+  def respond!(prompt, role = :user, model: default_model, schema: nil, **params)
+    LLM::Chat.new(self, **params.merge(model:, schema:)).respond(prompt, role)
   end
 
@@ -191,6 +197,16 @@ class LLM::Provider
     raise NotImplementedError
   end
 
+  ##
+  # Returns an object that can generate a JSON schema
+  # @return [JSON::Schema]
+  def schema
+    @schema ||= begin
+      require_relative "../json/schema"
+      JSON::Schema.new
+    end
+  end
+
   private
 
   ##
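Since every convenience method now threads `schema:` into `LLM::Chat`, structured output is available from the provider-level helpers as well. A sketch, assuming an OpenAI key in `ENV["KEY"]` and mirroring the README's usage pattern:

```ruby
require "llm"

llm = LLM.openai(ENV["KEY"])
schema = llm.schema.object({os: llm.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD").required})
# llm.chat returns a lazy LLM::Chat; reading messages triggers the request
bot = llm.chat("What operating system is the best?", schema:)
bot.messages.find(&:assistant?).content! # => {os: "..."}
```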
data/lib/llm/providers/anthropic.rb
CHANGED
@@ -55,7 +55,6 @@ module LLM
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
       body = JSON.dump({messages: format(messages)}.merge!(params))
       set_body_stream(req, StringIO.new(body))
-
       res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
data/lib/llm/providers/gemini.rb
CHANGED
@@ -67,20 +67,20 @@ module LLM
     # @param prompt (see LLM::Provider#complete)
     # @param role (see LLM::Provider#complete)
     # @param model (see LLM::Provider#complete)
+    # @param schema (see LLM::Provider#complete)
     # @param params (see LLM::Provider#complete)
     # @example (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
     # @raise [LLM::Error::PromptError]
     #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model: default_model, **params)
+    def complete(prompt, role = :user, model: default_model, schema: nil, **params)
       model.respond_to?(:id) ? model.id : model
       path = ["/v1beta/models/#{model}", "generateContent?key=#{@secret}"].join(":")
       req = Net::HTTP::Post.new(path, headers)
       messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
-      body = JSON.dump({contents: format(messages)})
+      body = JSON.dump({contents: format(messages)}.merge!(expand_schema(schema)))
       set_body_stream(req, StringIO.new(body))
-
       res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
@@ -136,6 +136,16 @@ module LLM
       }
     end
 
+    def expand_schema(schema)
+      return {} unless schema
+      {
+        "generationConfig" => {
+          "response_mime_type" => "application/json",
+          "response_schema" => schema
+        }
+      }
+    end
+
    def response_parser
      LLM::Gemini::ResponseParser
    end
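For context, `expand_schema` here targets Gemini's JSON mode: the schema travels in the request body under `generationConfig`, alongside a JSON response MIME type. A sketch of the body `complete` would build; the schema and contents literals are illustrative:

```ruby
require "json"

schema = {type: "object", properties: {os: {type: "string"}}} # illustrative
contents = [{role: "user", parts: [{text: "What operating system is the best?"}]}]
# Mirrors the merge performed in Gemini#complete above
body = JSON.dump({contents:}.merge!(
  "generationConfig" => {
    "response_mime_type" => "application/json",
    "response_schema" => schema
  }
))
```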
data/lib/llm/providers/ollama.rb
CHANGED
@@ -60,13 +60,15 @@ module LLM
     # @raise [LLM::Error::PromptError]
     #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model: default_model, **params)
-      params = {model:, stream: false}
+    def complete(prompt, role = :user, model: default_model, schema: nil, **params)
+      params = {model:, stream: false}
+        .merge!(expand_schema(schema))
+        .merge!(params)
+        .compact
       req = Net::HTTP::Post.new("/api/chat", headers)
       messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
       body = JSON.dump({messages: format(messages)}.merge!(params))
       set_body_stream(req, StringIO.new(body))
-
       res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
@@ -102,6 +104,11 @@ module LLM
       }
     end
 
+    def expand_schema(schema)
+      return {} unless schema
+      {format: schema}
+    end
+
    def response_parser
      LLM::Ollama::ResponseParser
    end
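Ollama accepts a JSON schema directly as the top-level `format` field of a chat request, which is why `expand_schema` is a one-liner here. A sketch of the merged params; the model name and schema literal are illustrative:

```ruby
schema = {type: "object", properties: {os: {type: "string"}}} # illustrative
params = {model: "llama3.2", stream: false}
  .merge!({format: schema})
  .compact
# params => {model: "llama3.2", stream: false, format: {type: "object", ...}}
```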
data/lib/llm/providers/openai/responses.rb
CHANGED
@@ -52,13 +52,15 @@ class LLM::OpenAI
     # @raise [LLM::Error::PromptError]
     #  When given an object a provider does not understand
     # @return [LLM::Response::Output]
-    def create(prompt, role = :user, model: @provider.default_model, **params)
-      params = {model:}
+    def create(prompt, role = :user, model: @provider.default_model, schema: nil, **params)
+      params = {model:}
+        .merge!(expand_schema(schema))
+        .merge!(params)
+        .compact
       req = Net::HTTP::Post.new("/v1/responses", headers)
       messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
       body = JSON.dump({input: format(messages, :response)}.merge!(params))
       set_body_stream(req, StringIO.new(body))
-
       res = request(http, req)
       LLM::Response::Output.new(res).extend(response_parser)
     end
@@ -96,7 +98,7 @@ class LLM::OpenAI
       @provider.instance_variable_get(:@http)
     end
 
-    [:response_parser, :headers, :request, :set_body_stream].each do |m|
+    [:response_parser, :headers, :request, :set_body_stream, :expand_schema].each do |m|
       define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
     end
   end
data/lib/llm/providers/openai.rb
CHANGED
@@ -44,19 +44,22 @@ module LLM
     # @param prompt (see LLM::Provider#complete)
     # @param role (see LLM::Provider#complete)
     # @param model (see LLM::Provider#complete)
+    # @param schema (see LLM::Provider#complete)
     # @param params (see LLM::Provider#complete)
     # @example (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
     # @raise [LLM::Error::PromptError]
     #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model: default_model, **params)
-      params = {model:}
+    def complete(prompt, role = :user, model: default_model, schema: nil, **params)
+      params = {model:}
+        .merge!(expand_schema(schema))
+        .merge!(params)
+        .compact
       req = Net::HTTP::Post.new("/v1/chat/completions", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
       body = JSON.dump({messages: format(messages, :complete)}.merge!(params))
       set_body_stream(req, StringIO.new(body))
-
       res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
@@ -131,5 +134,15 @@ module LLM
     def error_handler
       LLM::OpenAI::ErrorHandler
     end
+
+    def expand_schema(schema)
+      return {} unless schema
+      {
+        response_format: {
+          type: "json_schema",
+          json_schema: {name: "JSONSchema", schema:}
+        }
+      }
+    end
   end
 end
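On OpenAI's side the schema is wrapped for the structured-outputs API: a `response_format` of type `json_schema`. A sketch of what `expand_schema` merges into the completion params; the model name and schema literal are illustrative:

```ruby
schema = {type: "object", properties: {os: {type: "string"}}, required: ["os"]} # illustrative
params = {model: "gpt-4o-mini"}
  .merge!(response_format: {type: "json_schema", json_schema: {name: "JSONSchema", schema:}})
  .compact
```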
data/lib/llm/version.rb
CHANGED
data/spec/llm/conversation_spec.rb
CHANGED
@@ -27,7 +27,7 @@ RSpec.describe "LLM::Chat: non-lazy" do
     bot = nil
     inputs.zip(outputs).each_with_index do |(input, output), index|
       expect(provider).to receive(:complete)
-        .with(input.content, instance_of(Symbol), messages:, model: provider.default_model)
+        .with(input.content, instance_of(Symbol), messages:, model: provider.default_model, schema: nil)
         .and_return(OpenStruct.new(choices: [output]))
       bot = index.zero? ? provider.chat!(input.content, :system) : bot.chat(input.content)
       messages.concat([input, output])
@@ -192,4 +192,70 @@ RSpec.describe "LLM::Chat: lazy" do
       end
     end
   end
+
+  context "when given a schema as JSON" do
+    context "with openai" do
+      let(:provider) { LLM.openai(token) }
+      let(:conversation) { described_class.new(provider, schema:).lazy }
+
+      context "when given a schema",
+        vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_schema_netbsd"} do
+        subject(:message) { conversation.recent_message.content! }
+        let(:schema) { provider.schema.object({os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")}) }
+
+        before do
+          conversation.chat "You secretly love NetBSD", :system
+          conversation.chat "What operating system is the best?", :user
+        end
+
+        it "formats the response" do
+          is_expected.to eq("os" => "NetBSD")
+        end
+      end
+    end
+
+    context "with gemini" do
+      let(:provider) { LLM.gemini(token) }
+      let(:conversation) { described_class.new(provider, schema:).lazy }
+
+      context "when given a schema",
+        vcr: {cassette_name: "gemini/lazy_conversation/completions/successful_response_schema_netbsd"} do
+        subject(:message) { conversation.recent_message.content! }
+        let(:schema) { provider.schema.object({os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")}) }
+
+        before do
+          conversation.chat "You secretly love NetBSD", :user
+          conversation.chat "What operating system is the best?", :user
+        end
+
+        it "formats the response" do
+          is_expected.to eq("os" => "NetBSD")
+        end
+      end
+    end
+
+    context "with ollama" do
+      let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
+      let(:conversation) { described_class.new(provider, schema:).lazy }
+
+      context "when given a schema",
+        vcr: {cassette_name: "ollama/lazy_conversation/completions/successful_response_schema_netbsd"} do
+        subject(:message) { conversation.recent_message.content! }
+        let(:schema) do
+          provider.schema.object({
+            os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD").required
+          })
+        end
+
+        before do
+          conversation.chat "You secretly love NetBSD", :system
+          conversation.chat "What operating system is the best?", :user
+        end
+
+        it "formats the response" do
+          is_expected.to eq("os" => "NetBSD")
+        end
+      end
+    end
+  end
 end
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.3.3
+  version: 0.4.0
 platform: ruby
 authors:
 - Antar Azri
@@ -148,6 +148,15 @@ extensions: []
 extra_rdoc_files: []
 files:
 - README.md
+- lib/json/schema.rb
+- lib/json/schema/array.rb
+- lib/json/schema/boolean.rb
+- lib/json/schema/integer.rb
+- lib/json/schema/leaf.rb
+- lib/json/schema/null.rb
+- lib/json/schema/number.rb
+- lib/json/schema/object.rb
+- lib/json/schema/string.rb
 - lib/llm.rb
 - lib/llm/buffer.rb
 - lib/llm/chat.rb
|