lluminary 0.2.2 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 187dc38fab8e09fd65ffb2db359e914f16dc79ace8ff9d058de01f547db0921c
|
4
|
+
data.tar.gz: 6a9a08f01d36966529c3c97e1877834df7a59e41347f68ffcba4c37a56597a4e
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 202c4b2f46a3a0f35e978406055781fe3c85d390ca5e520955b8fad80734dbe6434a9034767f149141876a28a69e50e2636c0064d1119ede7e2314df7e49bac1
|
7
|
+
data.tar.gz: f0c17df74edfc0db4b7eb97917355f6c32da8761fa294ddd030f86bdc0540f208ae5220700b2d1a752ad25a1665367f7bfc6f02e51df3fe76b4ed79a51bef46a
|
@@ -0,0 +1,19 @@
|
|
1
|
+
# frozen_string_literal: true

module Lluminary
  module Models
    module Google
      # Model definition for Google's Gemini 2.0 Flash, served through
      # the :google provider's OpenAI-compatible endpoint.
      class Gemini20Flash < Lluminary::Models::Base
        NAME = "gemini-2.0-flash"

        # Returns the canonical model identifier string sent to the API.
        def name
          NAME
        end

        # This model is only available via the :google provider.
        def compatible_with?(provider)
          provider == :google
        end
      end
    end
  end
end
|
@@ -0,0 +1,65 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "openai"
|
4
|
+
require "json"
|
5
|
+
require_relative "../provider_error"
|
6
|
+
|
7
|
+
# This is a quick and dirty implementation of a provider that works with Google's AI studio.
|
8
|
+
# It does not currently support Vertex. Plans are to eventually create a separate gem, similar
# to `gemini-ai`, that can work with either AI Studio or Vertex. For now, this just uses the
|
10
|
+
# OpenAI compatible endpoint.
|
11
|
+
module Lluminary
  module Providers
    # Provider backed by Google's AI Studio via its OpenAI-compatible
    # endpoint. Vertex is not supported (see the file header note).
    class Google < Base
      NAME = :google
      DEFAULT_MODEL = Models::Google::Gemini20Flash

      attr_reader :client, :config

      # @param config_overrides [Hash] provider configuration; :api_key
      #   authenticates against AI Studio, :model may override DEFAULT_MODEL.
      def initialize(**config_overrides)
        super
        @config = { model: DEFAULT_MODEL }.merge(config)
        @client =
          ::OpenAI::Client.new(
            access_token: config[:api_key],
            api_version: "",
            uri_base: "https://generativelanguage.googleapis.com/v1beta/openai"
          )
      end

      # Sends the prompt as a single user message and requests a JSON
      # object response.
      #
      # @param prompt [String] the fully rendered prompt text
      # @param _task [Object] unused; present for provider interface parity
      # @return [Hash] { raw:, parsed: } where :parsed is nil when the
      #   content is missing or is not valid JSON.
      def call(prompt, _task)
        response =
          client.chat(
            parameters: {
              # Use the model's public accessor instead of reaching into its
              # class constant (consistency fix; same value as NAME).
              model: model.name,
              messages: [{ role: "user", content: prompt }],
              response_format: {
                type: "json_object"
              }
            }
          )

        content = response.dig("choices", 0, "message", "content")

        {
          raw: content,
          parsed:
            begin
              JSON.parse(content) if content
            rescue JSON::ParserError
              nil
            end
        }
      end

      # Lazily instantiates the configured model class.
      def model
        @model ||= config[:model].new
      end

      # Lists available model IDs with the "models/" prefix stripped,
      # e.g. "models/gemini-2.0-flash" -> "gemini-2.0-flash".
      def models
        # Use the `client` reader for consistency with #call, and avoid
        # shadowing the #model method with the block parameter.
        response = client.models.list
        response["data"].map { |entry| entry["id"].split("/").last }
      end
    end
  end
end
|
data/lib/lluminary/task.rb
CHANGED
@@ -3,9 +3,6 @@ require "ostruct"
|
|
3
3
|
require "json"
|
4
4
|
require_relative "schema"
|
5
5
|
require_relative "validation_error"
|
6
|
-
require_relative "models/base"
|
7
|
-
require_relative "models/openai/gpt35_turbo"
|
8
|
-
require_relative "models/bedrock/anthropic_claude_instant_v1"
|
9
6
|
|
10
7
|
module Lluminary
|
11
8
|
# Base class for all Lluminary tasks.
|
@@ -25,18 +22,21 @@ module Lluminary
|
|
25
22
|
def use_provider(provider_name, **config)
|
26
23
|
provider_class =
|
27
24
|
case provider_name
|
25
|
+
when :anthropic
|
26
|
+
require_relative "providers/anthropic"
|
27
|
+
Providers::Anthropic
|
28
|
+
when :bedrock
|
29
|
+
require_relative "providers/bedrock"
|
30
|
+
Providers::Bedrock
|
31
|
+
when :google
|
32
|
+
require_relative "providers/google"
|
33
|
+
Providers::Google
|
28
34
|
when :openai
|
29
35
|
require_relative "providers/openai"
|
30
36
|
Providers::OpenAI
|
31
37
|
when :test
|
32
38
|
require_relative "providers/test"
|
33
39
|
Providers::Test
|
34
|
-
when :bedrock
|
35
|
-
require_relative "providers/bedrock"
|
36
|
-
Providers::Bedrock
|
37
|
-
when :anthropic
|
38
|
-
require_relative "providers/anthropic"
|
39
|
-
Providers::Anthropic
|
40
40
|
else
|
41
41
|
raise ArgumentError, "Unknown provider: #{provider_name}"
|
42
42
|
end
|
@@ -0,0 +1,31 @@
|
|
1
|
+
# frozen_string_literal: true

require "spec_helper"

RSpec.describe Lluminary::Models::Google::Gemini20Flash do
  subject(:model) { described_class.new }

  describe "#NAME" do
    it "has the correct model name" do
      expect(described_class::NAME).to eq("gemini-2.0-flash")
    end
  end

  describe "#compatible_with?" do
    it "returns true for google provider" do
      expect(model.compatible_with?(:google)).to be true
    end

    it "returns false for other providers" do
      # Any provider other than :google is incompatible.
      %i[openai bedrock anthropic].each do |other_provider|
        expect(model.compatible_with?(other_provider)).to be false
      end
    end
  end

  describe "#name" do
    it "returns the model name" do
      expect(model.name).to eq("gemini-2.0-flash")
    end
  end
end
|
@@ -0,0 +1,84 @@
|
|
1
|
+
# frozen_string_literal: true
require "spec_helper"

# Specs for the Google provider, which talks to AI Studio through the
# OpenAI-compatible endpoint; the underlying OpenAI::Client is stubbed.
RSpec.describe Lluminary::Providers::Google do
  let(:config) { { api_key: "test-key" } }
  let(:provider) { described_class.new(**config) }

  describe "#client" do
    it "returns the OpenAI client instance" do
      expect(provider.client).to be_a(OpenAI::Client)
    end
  end

  describe "#models" do
    # Mimics the list payload returned by the OpenAI-compatible models
    # endpoint: IDs carry a "models/" prefix that #models must strip.
    let(:mock_models_response) do
      {
        "object" => "list",
        "data" => [
          {
            "id" => "models/gemini-2.0-flash",
            "object" => "model",
            "owned_by" => "google"
          },
          {
            "id" => "models/gemini-1.5-pro",
            "object" => "model",
            "owned_by" => "google"
          }
        ]
      }
    end

    before do
      # Stub the nested models client so no HTTP request is made.
      allow_any_instance_of(OpenAI::Client).to receive(:models).and_return(
        double("ModelsClient", list: mock_models_response)
      )
    end

    it "returns an array of model IDs as strings with the 'models/' prefix removed" do
      expect(provider.models).to eq(%w[gemini-2.0-flash gemini-1.5-pro])
    end
  end

  describe "#call" do
    let(:prompt) { "Test prompt" }
    let(:task) { "Test task" }
    # Chat-completion shaped payload; #call digs out the message content.
    let(:mock_response) do
      {
        "choices" => [
          { "message" => { "content" => '{"summary": "Test response"}' } }
        ]
      }
    end

    before do
      allow_any_instance_of(OpenAI::Client).to receive(:chat).and_return(
        mock_response
      )
    end

    it "returns a hash with raw and parsed response" do
      response = provider.call(prompt, task)
      expect(response).to eq(
        {
          raw: '{"summary": "Test response"}',
          parsed: {
            "summary" => "Test response"
          }
        }
      )
    end

    context "when the response is not valid JSON" do
      # Overrides the outer mock_response with unparseable content.
      let(:mock_response) do
        { "choices" => [{ "message" => { "content" => "not valid json" } }] }
      end

      it "returns raw response with nil parsed value" do
        response = provider.call(prompt, task)
        # JSON::ParserError is rescued; only :parsed collapses to nil.
        expect(response).to eq({ raw: "not valid json", parsed: nil })
      end
    end
  end
end
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: lluminary
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.2.
|
4
|
+
version: 0.2.3
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Doug Hughes
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2025-05-
|
11
|
+
date: 2025-05-24 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: activemodel
|
@@ -216,11 +216,13 @@ files:
|
|
216
216
|
- lib/lluminary/models/bedrock/amazon_nova_pro_v1.rb
|
217
217
|
- lib/lluminary/models/bedrock/anthropic_claude_instant_v1.rb
|
218
218
|
- lib/lluminary/models/bedrock/base.rb
|
219
|
+
- lib/lluminary/models/google/gemini_20_flash.rb
|
219
220
|
- lib/lluminary/models/openai/gpt35_turbo.rb
|
220
221
|
- lib/lluminary/provider_error.rb
|
221
222
|
- lib/lluminary/providers/anthropic.rb
|
222
223
|
- lib/lluminary/providers/base.rb
|
223
224
|
- lib/lluminary/providers/bedrock.rb
|
225
|
+
- lib/lluminary/providers/google.rb
|
224
226
|
- lib/lluminary/providers/openai.rb
|
225
227
|
- lib/lluminary/providers/test.rb
|
226
228
|
- lib/lluminary/result.rb
|
@@ -243,9 +245,11 @@ files:
|
|
243
245
|
- spec/lluminary/models/base_spec.rb
|
244
246
|
- spec/lluminary/models/bedrock/amazon_nova_pro_v1_spec.rb
|
245
247
|
- spec/lluminary/models/bedrock/anthropic_claude_instant_v1_spec.rb
|
248
|
+
- spec/lluminary/models/google/gemini_20_flash_spec.rb
|
246
249
|
- spec/lluminary/models/openai/gpt35_turbo_spec.rb
|
247
250
|
- spec/lluminary/providers/anthropic_spec.rb
|
248
251
|
- spec/lluminary/providers/bedrock_spec.rb
|
252
|
+
- spec/lluminary/providers/google_spec.rb
|
249
253
|
- spec/lluminary/providers/openai_spec.rb
|
250
254
|
- spec/lluminary/providers/test_spec.rb
|
251
255
|
- spec/lluminary/result_spec.rb
|