llm.rb 0.3.2 → 0.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +40 -7
- data/lib/llm/chat.rb +5 -3
- data/lib/llm/core_ext/ostruct.rb +1 -1
- data/lib/llm/file.rb +8 -1
- data/lib/llm/model.rb +27 -2
- data/lib/llm/provider.rb +20 -28
- data/lib/llm/providers/anthropic/format.rb +19 -6
- data/lib/llm/providers/anthropic/models.rb +62 -0
- data/lib/llm/providers/anthropic.rb +23 -8
- data/lib/llm/providers/gemini/format.rb +6 -1
- data/lib/llm/providers/gemini/images.rb +3 -3
- data/lib/llm/providers/gemini/models.rb +69 -0
- data/lib/llm/providers/gemini/response_parser.rb +1 -5
- data/lib/llm/providers/gemini.rb +20 -5
- data/lib/llm/providers/ollama/format.rb +11 -3
- data/lib/llm/providers/ollama/models.rb +66 -0
- data/lib/llm/providers/ollama.rb +23 -8
- data/lib/llm/providers/openai/audio.rb +0 -2
- data/lib/llm/providers/openai/format.rb +6 -1
- data/lib/llm/providers/openai/images.rb +1 -1
- data/lib/llm/providers/openai/models.rb +62 -0
- data/lib/llm/providers/openai/response_parser.rb +1 -5
- data/lib/llm/providers/openai/responses.rb +10 -6
- data/lib/llm/providers/openai.rb +24 -7
- data/lib/llm/response/modellist.rb +18 -0
- data/lib/llm/response.rb +1 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +2 -1
- data/spec/anthropic/completion_spec.rb +36 -0
- data/spec/anthropic/models_spec.rb +21 -0
- data/spec/gemini/images_spec.rb +4 -12
- data/spec/gemini/models_spec.rb +21 -0
- data/spec/llm/conversation_spec.rb +5 -3
- data/spec/ollama/models_spec.rb +20 -0
- data/spec/openai/completion_spec.rb +19 -0
- data/spec/openai/images_spec.rb +2 -6
- data/spec/openai/models_spec.rb +21 -0
- metadata +11 -6
- data/share/llm/models/anthropic.yml +0 -35
- data/share/llm/models/gemini.yml +0 -35
- data/share/llm/models/ollama.yml +0 -155
- data/share/llm/models/openai.yml +0 -46
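
The headline change in this release is a models API for each provider (Anthropic, Gemini, Ollama, OpenAI), exposed as provider.models and returning an LLM::Response::ModelList, alongside per-provider default models and image responses that behave like IO objects. A minimal usage sketch, pieced together from the doc comments added in this diff (the ENV key name is illustrative):

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.openai(ENV["KEY"])
    llm.models.all.each do |model|
      print "id: ", model.id, "\n"
    end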
data/lib/llm/providers/ollama/models.rb
ADDED
@@ -0,0 +1,66 @@
+# frozen_string_literal: true
+
+class LLM::Ollama
+  ##
+  # The {LLM::Ollama::Models LLM::Ollama::Models} class provides a model
+  # object for interacting with [Ollama's models API](https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models).
+  # The models API allows a client to query Ollama for a list of models
+  # that are available for use with the Ollama API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.ollama(nil)
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    include LLM::Utils
+
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Ollama::Models]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.ollama(nil)
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models Ollama docs
+    # @see https://ollama.com/library Ollama library
+    # @param [Hash] params Other parameters (see Ollama docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response::ModelList]
+    def all(**params)
+      query = URI.encode_www_form(params)
+      req = Net::HTTP::Get.new("/api/tags?#{query}", headers)
+      res = request(http, req)
+      LLM::Response::ModelList.new(res).tap { |modellist|
+        models = modellist.body["models"].map do |model|
+          model = model.transform_keys { snakecase(_1) }
+          LLM::Model.from_hash(model).tap { _1.provider = @provider }
+        end
+        modellist.models = models
+      }
+    end
+
+    private
+
+    def http
+      @provider.instance_variable_get(:@http)
+    end
+
+    [:headers, :request].each do |m|
+      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    end
+  end
+end
data/lib/llm/providers/ollama.rb
CHANGED
@@ -22,6 +22,7 @@ module LLM
     require_relative "ollama/error_handler"
     require_relative "ollama/response_parser"
     require_relative "ollama/format"
+    require_relative "ollama/models"
     include Format

     HOST = "localhost"
@@ -56,16 +57,28 @@ module LLM
     # @param params (see LLM::Provider#complete)
     # @example (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::Error::PromptError]
+    #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model:
-    params
-    req
+    def complete(prompt, role = :user, model: default_model, **params)
+      params = {model:, stream: false}.merge!(params)
+      req = Net::HTTP::Post.new("/api/chat", headers)
       messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
-
-
+      body = JSON.dump({messages: format(messages)}.merge!(params))
+      set_body_stream(req, StringIO.new(body))
+
+      res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end

+    ##
+    # Provides an interface to Ollama's models API
+    # @see https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models Ollama docs
+    # @return [LLM::Ollama::Models]
+    def models
+      LLM::Ollama::Models.new(self)
+    end
+
     ##
     # @return (see LLM::Provider#assistant_role)
     def assistant_role
@@ -73,9 +86,11 @@ module LLM
     end

     ##
-    #
-
-
+    # Returns the default model for chat completions
+    # @see https://ollama.com/library llama3.2
+    # @return [String]
+    def default_model
+      "llama3.2"
     end

     private
data/lib/llm/providers/openai/format.rb
CHANGED
@@ -42,7 +42,12 @@ class LLM::OpenAI
       when URI
         [{type: :image_url, image_url: {url: content.to_s}}]
       when LLM::File
-
+        file = content
+        if file.image?
+          [{type: :image_url, image_url: {url: file.to_data_uri}}]
+        else
+          [{type: :file, file: {filename: file.basename, file_data: file.to_data_uri}}]
+        end
       when LLM::Response::File
         [{type: :file, file: {file_id: content.id}}]
       when String
data/lib/llm/providers/openai/images.rb
CHANGED
@@ -24,7 +24,7 @@ class LLM::OpenAI
   #  llm = LLM.openai(ENV["KEY"])
   #  res = llm.images.create prompt: "A dog on a rocket to the moon",
   #                          response_format: "b64_json"
-  #
+  #  IO.copy_stream res.images[0], "rocket.png"
   class Images
     ##
     # Returns a new Images object
data/lib/llm/providers/openai/models.rb
ADDED
@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+class LLM::OpenAI
+  ##
+  # The {LLM::OpenAI::Models LLM::OpenAI::Models} class provides a model
+  # object for interacting with [OpenAI's models API](https://platform.openai.com/docs/api-reference/models/list).
+  # The models API allows a client to query OpenAI for a list of models
+  # that are available for use with the OpenAI API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.openai(ENV["KEY"])
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::OpenAI::Files]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.openai(ENV["KEY"])
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
+    # @param [Hash] params Other parameters (see OpenAI docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response::FileList]
+    def all(**params)
+      query = URI.encode_www_form(params)
+      req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
+      res = request(http, req)
+      LLM::Response::ModelList.new(res).tap { |modellist|
+        models = modellist.body["data"].map do |model|
+          LLM::Model.from_hash(model).tap { _1.provider = @provider }
+        end
+        modellist.models = models
+      }
+    end
+
+    private
+
+    def http
+      @provider.instance_variable_get(:@http)
+    end
+
+    [:headers, :request, :set_body_stream].each do |m|
+      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    end
+  end
+end
data/lib/llm/providers/openai/response_parser.rb
CHANGED
@@ -69,11 +69,7 @@ class LLM::OpenAI
         urls: body["data"].filter_map { _1["url"] },
         images: body["data"].filter_map do
           next unless _1["b64_json"]
-
-          mime_type: nil,
-          encoded: _1["b64_json"],
-          binary: _1["b64_json"].unpack1("m0")
-          )
+          StringIO.new(_1["b64_json"].unpack1("m0"))
         end
       }
     end
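
With the parser change above, each element of res.images is now an IO-like StringIO holding the decoded image bytes, rather than a struct carrying encoded and binary fields. The updated doc comment in images.rb (earlier in this diff) shows the intended usage:

    res = llm.images.create prompt: "A dog on a rocket to the moon",
                            response_format: "b64_json"
    IO.copy_stream res.images[0], "rocket.png"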
data/lib/llm/providers/openai/responses.rb
CHANGED
@@ -49,13 +49,17 @@ class LLM::OpenAI
     # @param model (see LLM::Provider#complete)
     # @param [Hash] params Response params
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::Error::PromptError]
+    #  When given an object a provider does not understand
     # @return [LLM::Response::Output]
-    def create(prompt, role = :user, model:
-    params
-    req
+    def create(prompt, role = :user, model: @provider.default_model, **params)
+      params = {model:}.merge!(params)
+      req = Net::HTTP::Post.new("/v1/responses", headers)
       messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
-
-
+      body = JSON.dump({input: format(messages, :response)}.merge!(params))
+      set_body_stream(req, StringIO.new(body))
+
+      res = request(http, req)
       LLM::Response::Output.new(res).extend(response_parser)
     end

@@ -92,7 +96,7 @@ class LLM::OpenAI
       @provider.instance_variable_get(:@http)
     end

-    [:response_parser, :headers, :request].each do |m|
+    [:response_parser, :headers, :request, :set_body_stream].each do |m|
       define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
     end
   end
data/lib/llm/providers/openai.rb
CHANGED
@@ -12,6 +12,7 @@ module LLM
     require_relative "openai/images"
     require_relative "openai/audio"
     require_relative "openai/files"
+    require_relative "openai/models"
     include Format

     HOST = "api.openai.com"
@@ -46,13 +47,17 @@ module LLM
     # @param params (see LLM::Provider#complete)
     # @example (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::Error::PromptError]
+    #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model:
-    params
-    req
+    def complete(prompt, role = :user, model: default_model, **params)
+      params = {model:}.merge!(params)
+      req = Net::HTTP::Post.new("/v1/chat/completions", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-
-
+      body = JSON.dump({messages: format(messages, :complete)}.merge!(params))
+      set_body_stream(req, StringIO.new(body))
+
+      res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end

@@ -88,14 +93,26 @@ module LLM
       LLM::OpenAI::Files.new(self)
     end

+    ##
+    # Provides an interface to OpenAI's models API
+    # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
+    # @return [LLM::OpenAI::Models]
+    def models
+      LLM::OpenAI::Models.new(self)
+    end
+
     ##
     # @return (see LLM::Provider#assistant_role)
     def assistant_role
       "assistant"
     end

-
-
+    ##
+    # Returns the default model for chat completions
+    # @see https://platform.openai.com/docs/models/gpt-4o-mini gpt-4o-mini
+    # @return [String]
+    def default_model
+      "gpt-4o-mini"
     end

     private
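
With default_model in place, complete and responses.create can be called without naming a model: OpenAI falls back to gpt-4o-mini and Ollama to llama3.2. A brief sketch, assuming an OpenAI key (the explicit override on the last line is illustrative):

    llm = LLM.openai(ENV["KEY"])
    llm.complete("Hello", :user)                  # uses gpt-4o-mini
    llm.complete("Hello", :user, model: "gpt-4o") # any other model can still be passed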
data/lib/llm/response/modellist.rb
ADDED
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # The {LLM::Response::ModelList LLM::Response::ModelList} class represents a
+  # list of model objects that are returned by a provider. It is an Enumerable
+  # object, and can be used to iterate over the model objects in a way that is
+  # similar to an array. Each element is an instance of OpenStruct.
+  class Response::ModelList < Response
+    include Enumerable
+
+    attr_accessor :models
+
+    def each(&)
+      @models.each(&)
+    end
+  end
+end
data/lib/llm/response.rb
CHANGED
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
@@ -1,6 +1,8 @@
 # frozen_string_literal: true

 module LLM
+  require "stringio"
+  require_relative "llm/core_ext/ostruct"
   require_relative "llm/version"
   require_relative "llm/utils"
   require_relative "llm/error"
@@ -13,7 +15,6 @@ module LLM
   require_relative "llm/provider"
   require_relative "llm/chat"
   require_relative "llm/buffer"
-  require_relative "llm/core_ext/ostruct"

   module_function

data/spec/anthropic/completion_spec.rb
CHANGED
@@ -42,6 +42,42 @@ RSpec.describe "LLM::Anthropic: completions" do
     end
   end

+  context "when given a URI to an image",
+          vcr: {cassette_name: "anthropic/completions/successful_response_uri_image"} do
+    subject { response.choices[0].content.downcase[0..2] }
+    let(:response) do
+      anthropic.complete([
+        "Is this image the flag of brazil ? ",
+        "Answer with yes or no. ",
+        "Nothing else.",
+        uri
+      ], :user)
+    end
+    let(:uri) { URI("https://upload.wikimedia.org/wikipedia/en/thumb/0/05/Flag_of_Brazil.svg/250px-Flag_of_Brazil.svg.png") }
+
+    it "describes the image" do
+      is_expected.to eq("yes")
+    end
+  end
+
+  context "when given a local reference to an image",
+          vcr: {cassette_name: "anthropic/completions/successful_response_file_image"} do
+    subject { response.choices[0].content.downcase[0..2] }
+    let(:response) do
+      anthropic.complete([
+        "Is this image a representation of a blue book ?",
+        "Answer with yes or no.",
+        "Nothing else.",
+        file
+      ], :user)
+    end
+    let(:file) { LLM::File("spec/fixtures/images/bluebook.png") }
+
+    it "describes the image" do
+      is_expected.to eq("yes")
+    end
+  end
+
   context "when given an unauthorized response",
     vcr: {cassette_name: "anthropic/completions/unauthorized_response"} do
     subject(:response) { anthropic.complete("Hello", :user) }
data/spec/anthropic/models_spec.rb
ADDED
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::Anthropic::Models" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.anthropic(token) }
+
+  context "when given a successful list operation",
+          vcr: {cassette_name: "anthropic/models/successful_list"} do
+    subject { provider.models.all }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::ModelList)
+    end
+
+    it "returns a list of models" do
+      expect(subject.models).to all(be_a(LLM::Model))
+    end
+  end
+end
data/spec/gemini/images_spec.rb
CHANGED
@@ -14,12 +14,8 @@ RSpec.describe "LLM::Gemini::Images" do
       expect(response).to be_instance_of(LLM::Response::Image)
     end

-    it "returns an
-      expect(response.images[0]
-    end
-
-    it "returns a binary string" do
-      expect(response.images[0].binary).to be_instance_of(String)
+    it "returns an IO-like object" do
+      expect(response.images[0]).to be_instance_of(StringIO)
     end
   end

@@ -36,12 +32,8 @@ RSpec.describe "LLM::Gemini::Images" do
       expect(response).to be_instance_of(LLM::Response::Image)
     end

-    it "returns
-      expect(response.images[0]
-    end
-
-    it "returns a url" do
-      expect(response.images[0].binary).to be_instance_of(String)
+    it "returns an IO-like object" do
+      expect(response.images[0]).to be_instance_of(StringIO)
     end
   end
 end
data/spec/gemini/models_spec.rb
ADDED
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::Gemini::Models" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.gemini(token) }
+
+  context "when given a successful list operation",
+          vcr: {cassette_name: "gemini/models/successful_list"} do
+    subject { provider.models.all }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::ModelList)
+    end
+
+    it "returns a list of models" do
+      expect(subject.models).to all(be_a(LLM::Model))
+    end
+  end
+end
data/spec/llm/conversation_spec.rb
CHANGED
@@ -27,7 +27,7 @@ RSpec.describe "LLM::Chat: non-lazy" do
     bot = nil
     inputs.zip(outputs).each_with_index do |(input, output), index|
       expect(provider).to receive(:complete)
-        .with(input.content, instance_of(Symbol), messages:)
+        .with(input.content, instance_of(Symbol), messages:, model: provider.default_model)
         .and_return(OpenStruct.new(choices: [output]))
       bot = index.zero? ? provider.chat!(input.content, :system) : bot.chat(input.content)
       messages.concat([input, output])
@@ -117,7 +117,8 @@ RSpec.describe "LLM::Chat: lazy" do

     context "when given a specific model",
             vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_o3_mini"} do
-      let(:
+      let(:model) { provider.models.all.find { _1.id == "o3-mini" } }
+      let(:conversation) { described_class.new(provider, model:).lazy }

       it "maintains the model throughout a conversation" do
         conversation.chat(prompt, :system)
@@ -179,7 +180,8 @@ RSpec.describe "LLM::Chat: lazy" do

     context "when given a specific model",
             vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response_o3_mini"} do
-      let(:
+      let(:model) { provider.models.all.find { _1.id == "o3-mini" } }
+      let(:conversation) { described_class.new(provider, model:).lazy }

       it "maintains the model throughout a conversation" do
         conversation.respond(prompt, :developer)
data/spec/ollama/models_spec.rb
ADDED
@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::Ollama::Models" do
+  let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
+
+  context "when given a successful list operation",
+          vcr: {cassette_name: "ollama/models/successful_list"} do
+    subject { provider.models.all }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::ModelList)
+    end
+
+    it "returns a list of models" do
+      expect(subject.models).to all(be_a(LLM::Model))
+    end
+  end
+end
data/spec/openai/completion_spec.rb
CHANGED
@@ -64,6 +64,25 @@ RSpec.describe "LLM::OpenAI: completions" do
     end
   end

+  context "when asked to describe an audio file",
+          vcr: {cassette_name: "openai/completions/describe_pdf_document"} do
+    let(:file) { LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf") }
+    let(:response) do
+      openai.complete([
+        "This PDF document describes sysctl nodes on FreeBSD",
+        "Answer yes or no.",
+        "Nothing else",
+        file
+      ], :user)
+    end
+
+    subject { response.choices[0].content.downcase[0..2] }
+
+    it "is successful" do
+      is_expected.to eq("yes")
+    end
+  end
+
   context "when given a 'bad request' response",
     vcr: {cassette_name: "openai/completions/bad_request"} do
     subject(:response) { openai.complete(URI("/foobar.exe"), :user) }
data/spec/openai/images_spec.rb
CHANGED
@@ -40,12 +40,8 @@ RSpec.describe "LLM::OpenAI::Images" do
       expect(response.images).to be_instance_of(Array)
     end

-    it "returns an
-      expect(response.images[0]
-    end
-
-    it "returns an binary string" do
-      expect(response.images[0].binary).to be_instance_of(String)
+    it "returns an IO-like object" do
+      expect(response.images[0]).to be_instance_of(StringIO)
     end
   end

data/spec/openai/models_spec.rb
ADDED
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Models" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful list operation",
+          vcr: {cassette_name: "openai/models/successful_list"} do
+    subject { provider.models.all }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::ModelList)
+    end
+
+    it "returns a list of models" do
+      expect(subject.models).to all(be_a(LLM::Model))
+    end
+  end
+end