llm.rb 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +21 -0
  3. data/README.md +146 -0
  4. data/lib/llm/conversation.rb +38 -0
  5. data/lib/llm/core_ext/ostruct.rb +37 -0
  6. data/lib/llm/error.rb +28 -0
  7. data/lib/llm/file.rb +66 -0
  8. data/lib/llm/http_client.rb +29 -0
  9. data/lib/llm/lazy_conversation.rb +39 -0
  10. data/lib/llm/message.rb +55 -0
  11. data/lib/llm/message_queue.rb +47 -0
  12. data/lib/llm/provider.rb +114 -0
  13. data/lib/llm/providers/anthropic/error_handler.rb +32 -0
  14. data/lib/llm/providers/anthropic/format.rb +31 -0
  15. data/lib/llm/providers/anthropic/response_parser.rb +29 -0
  16. data/lib/llm/providers/anthropic.rb +63 -0
  17. data/lib/llm/providers/gemini/error_handler.rb +43 -0
  18. data/lib/llm/providers/gemini/format.rb +31 -0
  19. data/lib/llm/providers/gemini/response_parser.rb +31 -0
  20. data/lib/llm/providers/gemini.rb +64 -0
  21. data/lib/llm/providers/ollama/error_handler.rb +32 -0
  22. data/lib/llm/providers/ollama/format.rb +28 -0
  23. data/lib/llm/providers/ollama/response_parser.rb +18 -0
  24. data/lib/llm/providers/ollama.rb +51 -0
  25. data/lib/llm/providers/openai/error_handler.rb +32 -0
  26. data/lib/llm/providers/openai/format.rb +28 -0
  27. data/lib/llm/providers/openai/response_parser.rb +35 -0
  28. data/lib/llm/providers/openai.rb +62 -0
  29. data/lib/llm/response/completion.rb +50 -0
  30. data/lib/llm/response/embedding.rb +23 -0
  31. data/lib/llm/response.rb +24 -0
  32. data/lib/llm/version.rb +5 -0
  33. data/lib/llm.rb +47 -0
  34. data/llm.gemspec +40 -0
  35. data/spec/anthropic/completion_spec.rb +76 -0
  36. data/spec/gemini/completion_spec.rb +80 -0
  37. data/spec/gemini/embedding_spec.rb +33 -0
  38. data/spec/llm/conversation_spec.rb +56 -0
  39. data/spec/llm/lazy_conversation_spec.rb +110 -0
  40. data/spec/ollama/completion_spec.rb +52 -0
  41. data/spec/ollama/embedding_spec.rb +15 -0
  42. data/spec/openai/completion_spec.rb +99 -0
  43. data/spec/openai/embedding_spec.rb +33 -0
  44. data/spec/readme_spec.rb +64 -0
  45. data/spec/setup.rb +29 -0
  46. metadata +194 -0
data/lib/llm.rb ADDED
@@ -0,0 +1,47 @@
+ # frozen_string_literal: true
+
+ module LLM
+   require_relative "llm/version"
+   require_relative "llm/error"
+   require_relative "llm/message"
+   require_relative "llm/response"
+   require_relative "llm/file"
+   require_relative "llm/provider"
+   require_relative "llm/conversation"
+   require_relative "llm/lazy_conversation"
+   require_relative "llm/core_ext/ostruct"
+
+   module_function
+
+   ##
+   # @param secret (see LLM::Anthropic#initialize)
+   # @return (see LLM::Anthropic#initialize)
+   def anthropic(secret, **)
+     require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)
+     LLM::Anthropic.new(secret, **)
+   end
+
+   ##
+   # @param secret (see LLM::Gemini#initialize)
+   # @return (see LLM::Gemini#initialize)
+   def gemini(secret, **)
+     require_relative "llm/providers/gemini" unless defined?(LLM::Gemini)
+     LLM::Gemini.new(secret, **)
+   end
+
+   ##
+   # @param secret (see LLM::Ollama#initialize)
+   # @return (see LLM::Ollama#initialize)
+   def ollama(secret)
+     require_relative "llm/providers/ollama" unless defined?(LLM::Ollama)
+     LLM::Ollama.new(secret)
+   end
+
+   ##
+   # @param secret (see LLM::OpenAI#initialize)
+   # @return (see LLM::OpenAI#initialize)
+   def openai(secret, **)
+     require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
+     LLM::OpenAI.new(secret, **)
+   end
+ end
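These module functions are the gem's entry points: each one requires its provider file on first use, then constructs the provider with the given secret. A minimal usage sketch, assuming keys are read from the environment (the ENV variable names here are illustrative, not part of the gem):

    require "llm"

    # Each constructor loads its provider on demand.
    llm = LLM.openai(ENV["OPENAI_SECRET"])        # => LLM::OpenAI
    llm = LLM.anthropic(ENV["ANTHROPIC_SECRET"])  # => LLM::Anthropic
    llm = LLM.ollama("")                          # local server; no key required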
data/llm.gemspec ADDED
@@ -0,0 +1,40 @@
+ # frozen_string_literal: true
+
+ require_relative "lib/llm/version"
+
+ Gem::Specification.new do |spec|
+   spec.name = "llm.rb"
+   spec.version = LLM::VERSION
+   spec.authors = ["Antar Azri", "0x1eef"]
+   spec.email = ["azantar@proton.me", "0x1eef@proton.me"]
+
+   spec.summary = "llm.rb is a lightweight Ruby library that provides a " \
+                  "common interface and set of functionality for multiple " \
+                  "Large Language Models (LLMs). It is designed to be simple, " \
+                  "flexible, and easy to use."
+   spec.description = spec.summary
+   spec.homepage = "https://github.com/llmrb/llm"
+   spec.license = "MIT"
+   spec.required_ruby_version = ">= 3.0.0"
+
+   spec.metadata["homepage_uri"] = spec.homepage
+   spec.metadata["source_code_uri"] = "https://github.com/llmrb/llm"
+   spec.metadata["changelog_uri"] = "https://github.com/llmrb/llm/blob/main/CHANGELOG.md"
+
+   spec.files = Dir[
+     "README.md", "LICENSE.txt",
+     "lib/*.rb", "lib/**/*.rb",
+     "spec/*.rb", "spec/**/*.rb",
+     "llm.gemspec"
+   ]
+   spec.require_paths = ["lib"]
+
+   spec.add_runtime_dependency "net-http", "~> 0.6.0"
+   spec.add_runtime_dependency "json"
+
+   spec.add_development_dependency "webmock", "~> 3.24.0"
+   spec.add_development_dependency "yard", "~> 0.9.37"
+   spec.add_development_dependency "kramdown", "~> 2.4"
+   spec.add_development_dependency "webrick", "~> 1.8"
+   spec.add_development_dependency "test-cmd.rb", "~> 0.12.0"
+ end
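Because spec.files is computed with Dir[] at build time, everything under lib/ and spec/ ships in the package. A quick way to inspect what the globs match, runnable from a checkout of the repository:

    require "rubygems"

    # Evaluate the gemspec and list the packaged lib files.
    spec = Gem::Specification.load("llm.gemspec")
    spec.files.grep(%r{\Alib/})  # => ["lib/llm.rb", "lib/llm/version.rb", ...]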
data/spec/anthropic/completion_spec.rb ADDED
@@ -0,0 +1,76 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Anthropic: completions" do
+   subject(:anthropic) { LLM.anthropic("") }
+
+   before(:each, :success) do
+     stub_request(:post, "https://api.anthropic.com/v1/messages")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 200,
+         body: fixture("anthropic/completions/ok_completion.json"),
+         headers: {"Content-Type" => "application/json"}
+       )
+   end
+
+   before(:each, :unauthorized) do
+     stub_request(:post, "https://api.anthropic.com/v1/messages")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 403,
+         body: fixture("anthropic/completions/unauthorized_completion.json"),
+         headers: {"Content-Type" => "application/json"}
+       )
+   end
+
+   context "when given a successful response", :success do
+     subject(:response) { anthropic.complete("Hello, world", :user) }
+
+     it "returns a completion" do
+       expect(response).to be_a(LLM::Response::Completion)
+     end
+
+     it "returns a model" do
+       expect(response.model).to eq("claude-3-5-sonnet-20240620")
+     end
+
+     it "includes token usage" do
+       expect(response).to have_attributes(
+         prompt_tokens: 2095,
+         completion_tokens: 503,
+         total_tokens: 2598
+       )
+     end
+
+     context "with a choice" do
+       subject(:choice) { response.choices[0] }
+
+       it "has choices" do
+         expect(choice).to have_attributes(
+           role: "assistant",
+           content: "Hi! My name is Claude."
+         )
+       end
+
+       it "includes the response" do
+         expect(choice.extra[:completion]).to eq(response)
+       end
+     end
+   end
+
+   context "when given an unauthorized response", :unauthorized do
+     subject(:response) { anthropic.complete("Hello", :user) }
+
+     it "raises an error" do
+       expect { response }.to raise_error(LLM::Error::Unauthorized)
+     end
+
+     it "includes the response" do
+       response
+     rescue LLM::Error::Unauthorized => ex
+       expect(ex.response).to be_kind_of(Net::HTTPResponse)
+     end
+   end
+ end
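This spec pins down the completion surface: complete(content, role) returns an LLM::Response::Completion whose choices are messages and whose token counts come from the provider's usage data. A sketch of the same calls against the live API, assuming a real key (the ENV name is illustrative; the fixture values above are canned):

    llm = LLM.anthropic(ENV["ANTHROPIC_SECRET"])
    res = llm.complete("Hello, world", :user)
    res.model               # e.g. "claude-3-5-sonnet-20240620"
    res.choices[0].content  # the assistant's reply
    res.total_tokens        # prompt_tokens + completion_tokens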
data/spec/gemini/completion_spec.rb ADDED
@@ -0,0 +1,80 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Gemini: completions" do
+   subject(:gemini) { LLM.gemini("") }
+
+   before(:each, :success) do
+     stub_request(:post, "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 200,
+         body: fixture("gemini/completions/ok_completion.json"),
+         headers: {"Content-Type" => "application/json"}
+       )
+   end
+
+   before(:each, :unauthorized) do
+     stub_request(:post, "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 400,
+         body: fixture("gemini/completions/unauthorized_completion.json"),
+         headers: {"Content-Type" => "application/json"}
+       )
+   end
+
+   context "when given a successful response", :success do
+     subject(:response) { gemini.complete(LLM::Message.new("user", "Hello!")) }
+
+     it "returns a completion" do
+       expect(response).to be_a(LLM::Response::Completion)
+     end
+
+     it "returns a model" do
+       expect(response.model).to eq("gemini-1.5-flash-001")
+     end
+
+     it "includes token usage" do
+       expect(response).to have_attributes(
+         prompt_tokens: 2,
+         completion_tokens: 10,
+         total_tokens: 12
+       )
+     end
+
+     context "with a choice" do
+       subject(:choice) { response.choices[0] }
+
+       it "has choices" do
+         expect(response).to be_a(LLM::Response::Completion).and have_attributes(
+           choices: [
+             have_attributes(
+               role: "model",
+               content: "Hello! How can I help you today? \n"
+             )
+           ]
+         )
+       end
+
+       it "includes the response" do
+         expect(choice.extra[:completion]).to eq(response)
+       end
+     end
+   end
+
+   context "when given an unauthorized response", :unauthorized do
+     subject(:response) { gemini.complete(LLM::Message.new("user", "Hello!")) }
+
+     it "raises an error" do
+       expect { response }.to raise_error(LLM::Error::Unauthorized)
+     end
+
+     it "includes a response" do
+       response
+     rescue LLM::Error::Unauthorized => ex
+       expect(ex.response).to be_kind_of(Net::HTTPResponse)
+     end
+   end
+ end
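Note that complete also accepts a prepared LLM::Message, as this spec exercises, so callers can build messages up front instead of passing content and role separately. A sketch, with an illustrative ENV name:

    msg = LLM::Message.new("user", "Hello!")
    res = LLM.gemini(ENV["GEMINI_SECRET"]).complete(msg)
    res.choices[0].role  # => "model" (Gemini's name for the assistant role)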
data/spec/gemini/embedding_spec.rb ADDED
@@ -0,0 +1,33 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Gemini: embeddings" do
+   let(:gemini) { LLM.gemini("") }
+
+   before(:each, :success) do
+     stub_request(:post, "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:embedContent?key=")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 200,
+         body: fixture("gemini/embeddings/hello_world_embedding.json"),
+         headers: {"Content-Type" => "application/json"}
+       )
+   end
+
+   context "when given a successful response", :success do
+     subject(:response) { gemini.embed("Hello, world") }
+
+     it "returns an embedding" do
+       expect(response).to be_instance_of(LLM::Response::Embedding)
+     end
+
+     it "returns a model" do
+       expect(response.model).to eq("text-embedding-004")
+     end
+
+     it "has embeddings" do
+       expect(response.embeddings).to be_instance_of(Array)
+     end
+   end
+ end
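The embedding path mirrors completions: embed(text) returns an LLM::Response::Embedding exposing the model name and an array of vectors. A sketch against the live API, assuming a real key in an illustrative ENV variable:

    res = LLM.gemini(ENV["GEMINI_SECRET"]).embed("Hello, world")
    res.model       # => "text-embedding-004"
    res.embeddings  # => Array of embedding vectors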
data/spec/llm/conversation_spec.rb ADDED
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ RSpec.describe LLM::Conversation do
+   shared_examples "a multi-turn conversation" do
+     context "when given a thread of messages" do
+       let(:inputs) do
+         [
+           LLM::Message.new(:system, "Provide concise, short answers about The Netherlands"),
+           LLM::Message.new(:user, "What is the capital of The Netherlands?"),
+           LLM::Message.new(:user, "How many people live in the capital?")
+         ]
+       end
+
+       let(:outputs) do
+         [
+           LLM::Message.new(:assistant, "Ok, got it"),
+           LLM::Message.new(:assistant, "The capital of The Netherlands is Amsterdam"),
+           LLM::Message.new(:assistant, "The population of Amsterdam is about 900,000")
+         ]
+       end
+
+       let(:messages) { [] }
+
+       it "maintains a conversation" do
+         bot = nil
+         inputs.zip(outputs).each_with_index do |(input, output), index|
+           expect(provider).to receive(:complete)
+             .with(input.content, instance_of(Symbol), messages:)
+             .and_return(OpenStruct.new(choices: [output]))
+           bot = index.zero? ? provider.chat!(input.content, :system) : bot.chat(input.content)
+           messages.concat([input, output])
+         end
+       end
+     end
+   end
+
+   context "with openai" do
+     subject(:provider) { LLM.openai("") }
+     include_examples "a multi-turn conversation"
+   end
+
+   context "with gemini" do
+     subject(:provider) { LLM.gemini("") }
+     include_examples "a multi-turn conversation"
+   end
+
+   context "with anthropic" do
+     subject(:provider) { LLM.anthropic("") }
+     include_examples "a multi-turn conversation"
+   end
+
+   context "with ollama" do
+     subject(:provider) { LLM.ollama("") }
+     include_examples "a multi-turn conversation"
+   end
+ end
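The shared example encodes the conversation contract for all four providers: chat! on a provider opens an LLM::Conversation, chat on the conversation appends a turn, and every underlying complete call receives the accumulated history via the messages: keyword. A sketch of the same flow outside the spec, with an illustrative ENV name:

    bot = LLM.openai(ENV["OPENAI_SECRET"]).chat!("Provide concise answers", :system)
    bot.chat("What is the capital of The Netherlands?")
    bot.chat("How many people live in the capital?")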
data/spec/llm/lazy_conversation_spec.rb ADDED
@@ -0,0 +1,110 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe LLM::LazyConversation do
+   context "with gemini" do
+     let(:provider) { LLM.gemini("") }
+     let(:conversation) { described_class.new(provider) }
+
+     context "when given a thread of messages" do
+       subject(:message) { conversation.messages.to_a[-1] }
+
+       before do
+         stub_request(:post, "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=")
+           .with(
+             headers: {"Content-Type" => "application/json"},
+             body: request_fixture("gemini/completions/ok_completion.json")
+           )
+           .to_return(
+             status: 200,
+             body: response_fixture("gemini/completions/ok_completion.json"),
+             headers: {"Content-Type" => "application/json"}
+           )
+       end
+
+       before do
+         conversation.chat "Hello"
+         conversation.chat "I have a question"
+         conversation.chat "How are you?"
+       end
+
+       it "maintains a conversation" do
+         expect(message).to have_attributes(
+           role: "model",
+           content: "Hello! How can I help you today? \n"
+         )
+       end
+     end
+   end
+
+   context "with openai" do
+     let(:provider) { LLM.openai("") }
+     let(:conversation) { described_class.new(provider) }
+
+     context "when given a thread of messages" do
+       subject(:message) { conversation.messages.to_a[-1] }
+
+       before do
+         stub_request(:post, "https://api.openai.com/v1/chat/completions")
+           .with(
+             headers: {"Content-Type" => "application/json"},
+             body: request_fixture("openai/completions/ok_completion.json")
+           )
+           .to_return(
+             status: 200,
+             body: response_fixture("openai/completions/ok_completion.json"),
+             headers: {"Content-Type" => "application/json"}
+           )
+       end
+
+       before do
+         conversation.chat "Hello"
+         conversation.chat "I have a question"
+         conversation.chat "How are you?"
+       end
+
+       it "maintains a conversation" do
+         expect(message).to have_attributes(
+           role: "assistant",
+           content: "Hello! How can I assist you today?"
+         )
+       end
+     end
+   end
+
+   context "with ollama" do
+     let(:provider) { LLM.ollama("") }
+     let(:conversation) { described_class.new(provider) }
+
+     context "when given a thread of messages" do
+       subject(:message) { conversation.messages.to_a[-1] }
+
+       before do
+         stub_request(:post, "http://localhost:11434/api/chat")
+           .with(
+             headers: {"Content-Type" => "application/json"},
+             body: request_fixture("ollama/completions/ok_completion.json")
+           )
+           .to_return(
+             status: 200,
+             body: response_fixture("ollama/completions/ok_completion.json"),
+             headers: {"Content-Type" => "application/json"}
+           )
+       end
+
+       before do
+         conversation.chat "Hello"
+         conversation.chat "I have a question"
+         conversation.chat "How are you?"
+       end
+
+       it "maintains a conversation" do
+         expect(message).to have_attributes(
+           role: "assistant",
+           content: "Hello! How are you today?"
+         )
+       end
+     end
+   end
+ end
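In contrast to LLM::Conversation, the lazy variant (as its name and these stubs suggest) queues outgoing messages and defers the HTTP request until the message list is enumerated, which is why each example chats three times before reading messages.to_a[-1]. A sketch of that usage, with an illustrative ENV name:

    convo = LLM::LazyConversation.new(LLM.openai(ENV["OPENAI_SECRET"]))
    convo.chat "Hello"
    convo.chat "I have a question"
    convo.messages.to_a[-1].content  # messages are sent on enumeration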
data/spec/ollama/completion_spec.rb ADDED
@@ -0,0 +1,52 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Ollama: completions" do
+   subject(:ollama) { LLM.ollama("") }
+
+   before(:each, :success) do
+     stub_request(:post, "http://localhost:11434/api/chat")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 200,
+         body: fixture("ollama/completions/ok_completion.json"),
+         headers: {"Content-Type" => "application/json"}
+       )
+   end
+
+   context "when given a successful response", :success do
+     subject(:response) { ollama.complete("Hello!", :user) }
+
+     it "returns a completion" do
+       expect(response).to be_a(LLM::Response::Completion)
+     end
+
+     it "returns a model" do
+       expect(response.model).to eq("llama3.2")
+     end
+
+     it "includes token usage" do
+       expect(response).to have_attributes(
+         prompt_tokens: 26,
+         completion_tokens: 298,
+         total_tokens: 324
+       )
+     end
+
+     context "with a choice" do
+       subject(:choice) { response.choices[0] }
+
+       it "has choices" do
+         expect(choice).to have_attributes(
+           role: "assistant",
+           content: "Hello! How are you today?"
+         )
+       end
+
+       it "includes the response" do
+         expect(choice.extra[:completion]).to eq(response)
+       end
+     end
+   end
+ end
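Ollama is the one provider that talks to a local server rather than a hosted API, which is why the specs construct it with an empty secret and stub localhost:11434. A sketch against a locally running Ollama instance:

    llm = LLM.ollama("")                 # no API key; assumes a local server on :11434
    res = llm.complete("Hello!", :user)
    res.model  # e.g. "llama3.2"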
data/spec/ollama/embedding_spec.rb ADDED
@@ -0,0 +1,15 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Ollama: embeddings" do
+   let(:ollama) { LLM.ollama("") }
+
+   context "when given a successful response", :success do
+     subject(:response) { ollama.embed("Hello, world") }
+
+     it "raises NotImplementedError" do
+       expect { response }.to raise_error(NotImplementedError)
+     end
+   end
+ end
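Embeddings are not wired up for Ollama in this release: embed raises NotImplementedError, so code that supports multiple providers should be prepared to rescue it:

    begin
      LLM.ollama("").embed("Hello, world")
    rescue NotImplementedError
      # fall back to a provider with embedding support
    end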
data/spec/openai/completion_spec.rb ADDED
@@ -0,0 +1,99 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::OpenAI: completions" do
+   subject(:openai) { LLM.openai("") }
+
+   before(:each, :success) do
+     stub_request(:post, "https://api.openai.com/v1/chat/completions")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 200,
+         body: fixture("openai/completions/ok_completion.json"),
+         headers: {"Content-Type" => "application/json"}
+       )
+   end
+
+   before(:each, :unauthorized) do
+     stub_request(:post, "https://api.openai.com/v1/chat/completions")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 401,
+         body: fixture("openai/completions/unauthorized_completion.json"),
+         headers: {"Content-Type" => "application/json"}
+       )
+   end
+
+   before(:each, :bad_request) do
+     stub_request(:post, "https://api.openai.com/v1/chat/completions")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 400,
+         body: fixture("openai/completions/badrequest_completion.json")
+       )
+   end
+
+   context "when given a successful response", :success do
+     subject(:response) { openai.complete("Hello!", :user) }
+
+     it "returns a completion" do
+       expect(response).to be_a(LLM::Response::Completion)
+     end
+
+     it "returns a model" do
+       expect(response.model).to eq("gpt-4o-mini-2024-07-18")
+     end
+
+     it "includes token usage" do
+       expect(response).to have_attributes(
+         prompt_tokens: 9,
+         completion_tokens: 9,
+         total_tokens: 18
+       )
+     end
+
+     context "with a choice" do
+       subject(:choice) { response.choices[0] }
+
+       it "has choices" do
+         expect(choice).to have_attributes(
+           role: "assistant",
+           content: "Hello! How can I assist you today?"
+         )
+       end
+
+       it "includes the response" do
+         expect(choice.extra[:completion]).to eq(response)
+       end
+     end
+   end
+
+   context "when given an unauthorized response", :unauthorized do
+     subject(:response) { openai.complete(LLM::Message.new(:user, "Hello!")) }
+
+     it "raises an error" do
+       expect { response }.to raise_error(LLM::Error::Unauthorized)
+     end
+
+     it "includes the response" do
+       response
+     rescue LLM::Error::Unauthorized => ex
+       expect(ex.response).to be_kind_of(Net::HTTPResponse)
+     end
+   end
+
+   context "when given a 'bad request' response", :bad_request do
+     subject(:response) { openai.complete(URI("/foobar.exe"), :user) }
+
+     it "raises an error" do
+       expect { response }.to raise_error(LLM::Error::BadResponse)
+     end
+
+     it "includes the response" do
+       response
+     rescue LLM::Error => ex
+       expect(ex.response).to be_instance_of(Net::HTTPBadRequest)
+     end
+   end
+ end
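The three contexts above outline the error model: failed requests raise subclasses of LLM::Error (Unauthorized when the key is rejected, BadResponse for invalid requests), and the raised error carries the underlying Net::HTTPResponse. A sketch of defensive use, with an illustrative ENV name:

    begin
      LLM.openai(ENV["OPENAI_SECRET"]).complete("Hello!", :user)
    rescue LLM::Error::Unauthorized => ex
      ex.response  # => Net::HTTPResponse carrying the provider's error body
    end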
data/spec/openai/embedding_spec.rb ADDED
@@ -0,0 +1,33 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::OpenAI: embeddings" do
+   let(:openai) { LLM.openai("") }
+
+   before(:each, :success) do
+     stub_request(:post, "https://api.openai.com/v1/embeddings")
+       .with(headers: {"Content-Type" => "application/json"})
+       .to_return(
+         status: 200,
+         body: fixture("openai/embeddings/hello_world_embedding.json"),
+         headers: {"Content-Type" => "application/json"}
+       )
+   end
+
+   context "when given a successful response", :success do
+     subject(:response) { openai.embed("Hello, world") }
+
+     it "returns an embedding" do
+       expect(response).to be_instance_of(LLM::Response::Embedding)
+     end
+
+     it "returns a model" do
+       expect(response.model).to eq("text-embedding-3-small")
+     end
+
+     it "has embeddings" do
+       expect(response.embeddings).to be_instance_of(Array)
+     end
+   end
+ end