lluminary 0.2.1 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: cacc0edbbc80b69f4c95d6b9888707651dffc5189432920b1c814abed095ea01
4
- data.tar.gz: 647f143f0be6053b7ca80868e2d6bd5058cfee8fae0d0e12a51ad59f8e8d5233
3
+ metadata.gz: '015093c0cffd9d6e752bfd3bd6b03b16fb3740f33f1e7b11285908a4be4f3317'
4
+ data.tar.gz: 4120b07419eee36bfe7d9b00cf2224ae35739ac21be9e31f3b45cb531e88b2f8
5
5
  SHA512:
6
- metadata.gz: 63dce0789e7b175ad324d7de8f7bbd4c90da575e314f14cbd3c4ab9c6b3cab5b942cd279a15b545ac322f267bda221efd3988c6b7e9c7f2f930f1de0c1af87ac
7
- data.tar.gz: 30a523b37b850f6603e466e05c49ceb8eeb03fb8ee81c3b237b97b175ace6082e0e97ace99b210e2807fb1d9feab2520a4b95753d5690e7fb46d02df1faa0389
6
+ metadata.gz: d7424d0cdfbace6684373aa11dbc3d73d5ae2e4b7dddcb2f28eff13d5ac07e5a69672fca7da7fed0ad02be92faceb45d80bc5d339906e584e5d2732584e6a0c9
7
+ data.tar.gz: e529bd7a10437c74240b8849880108268c43b6ff6312c047b0b4809b75c80d0e800cbe2d7dc51b7fea09bdf6acf1e4af716defdd9937bf81cce8eb1b4b3bb63b
@@ -0,0 +1,17 @@
1
+ # frozen_string_literal: true
2
+
3
+ # require_relative "../base"
4
+
5
+ module Lluminary
6
+ module Models
7
+ module Anthropic
8
+ class Claude35Sonnet < Lluminary::Models::Base
9
+ NAME = "claude-3-5-sonnet-latest"
10
+
11
+ def compatible_with?(provider_name)
12
+ provider_name == :anthropic
13
+ end
14
+ end
15
+ end
16
+ end
17
+ end
@@ -0,0 +1,54 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "anthropic"
4
+ require "json"
5
+ require_relative "../provider_error"
6
+
7
+ module Lluminary
8
+ module Providers
9
+ # Provider for Anthropic's models.
10
+ # Implements the Base provider interface for Anthropic's API.
11
+ class Anthropic < Base
12
+ NAME = :anthropic
13
+ DEFAULT_MODEL = Models::Anthropic::Claude35Sonnet
14
+
15
+ attr_reader :client, :config
16
+
17
+ def initialize(**config_overrides)
18
+ super
19
+ @config = { model: DEFAULT_MODEL }.merge(config)
20
+ @client = ::Anthropic::Client.new(api_key: config[:api_key])
21
+ end
22
+
23
+ def call(prompt, _task)
24
+ message =
25
+ client.messages.create(
26
+ max_tokens: 1024, # TODO: make this configurable
27
+ messages: [{ role: "user", content: prompt }],
28
+ model: model.class::NAME
29
+ )
30
+
31
+ content = message.content.first.text
32
+
33
+ {
34
+ raw: content,
35
+ parsed:
36
+ begin
37
+ JSON.parse(content) if content
38
+ rescue JSON::ParserError
39
+ nil
40
+ end
41
+ }
42
+ end
43
+
44
+ def model
45
+ @model ||= config[:model].new
46
+ end
47
+
48
+ def models
49
+ response = @client.models.list
50
+ response.data.map { |model| model.id }
51
+ end
52
+ end
53
+ end
54
+ end
@@ -34,6 +34,9 @@ module Lluminary
34
34
  when :bedrock
35
35
  require_relative "providers/bedrock"
36
36
  Providers::Bedrock
37
+ when :anthropic
38
+ require_relative "providers/anthropic"
39
+ Providers::Anthropic
37
40
  else
38
41
  raise ArgumentError, "Unknown provider: #{provider_name}"
39
42
  end
data/lib/lluminary.rb CHANGED
@@ -3,10 +3,16 @@
3
3
  require_relative "lluminary/version"
4
4
  require_relative "lluminary/result"
5
5
  require_relative "lluminary/task"
6
- # automatically require all providers
7
- Dir[File.join(__dir__, "lluminary/providers/*.rb")].each { |file| require file }
8
- # automatically require all models
6
+ # require base model first
7
+ require_relative "lluminary/models/base"
8
+ # automatically require all models first
9
9
  Dir[File.join(__dir__, "lluminary/models/**/*.rb")].each { |file| require file }
10
+ # require base provider first
11
+ require_relative "lluminary/providers/base"
12
+ # then require all other providers
13
+ Dir[File.join(__dir__, "lluminary/providers/*.rb")].each do |file|
14
+ require file unless file.end_with?("base.rb")
15
+ end
10
16
  require_relative "lluminary/config"
11
17
 
12
18
  # Lluminary is a framework for building and running LLM-powered tasks.
@@ -0,0 +1,104 @@
1
+ # frozen_string_literal: true
2
+ require "spec_helper"
3
+ require "lluminary/providers/anthropic"
4
+
5
+ RSpec.describe Lluminary::Providers::Anthropic do
6
+ let(:config) { { api_key: "test-key" } }
7
+ let(:provider) { described_class.new(**config) }
8
+
9
+ describe "#client" do
10
+ it "returns the Anthropic client instance" do
11
+ expect(provider.client).to be_a(Anthropic::Client)
12
+ end
13
+ end
14
+
15
+ describe "#models" do
16
+ let(:mock_models_response) do
17
+ mock_model_info_1 = double("ModelInfo", id: "claude-3-5-sonnet-latest")
18
+ mock_model_info_2 = double("ModelInfo", id: "claude-3-haiku-20240307")
19
+
20
+ double(
21
+ "Page",
22
+ data: [mock_model_info_1, mock_model_info_2],
23
+ has_more: false,
24
+ first_id: "claude-3-5-sonnet-latest",
25
+ last_id: "claude-3-haiku-20240307"
26
+ )
27
+ end
28
+
29
+ before do
30
+ models_client = double("ModelsClient")
31
+ allow_any_instance_of(Anthropic::Client).to receive(:models).and_return(
32
+ models_client
33
+ )
34
+ allow(models_client).to receive(:list).and_return(mock_models_response)
35
+ end
36
+
37
+ it "returns an array of model IDs as strings" do
38
+ expect(provider.models).to eq(
39
+ %w[claude-3-5-sonnet-latest claude-3-haiku-20240307]
40
+ )
41
+ end
42
+ end
43
+
44
+ describe "#call" do
45
+ let(:prompt) { "Test prompt" }
46
+ let(:task) { "Test task" }
47
+ let(:mock_response) do
48
+ OpenStruct.new(
49
+ content: [OpenStruct.new(text: '{"summary": "Test response"}')]
50
+ )
51
+ end
52
+
53
+ before do
54
+ messages_client = double("MessagesClient")
55
+ allow_any_instance_of(Anthropic::Client).to receive(:messages).and_return(
56
+ messages_client
57
+ )
58
+ allow(messages_client).to receive(:create).and_return(mock_response)
59
+ end
60
+
61
+ it "returns a hash with raw and parsed response" do
62
+ response = provider.call(prompt, task)
63
+ expect(response).to eq(
64
+ {
65
+ raw: '{"summary": "Test response"}',
66
+ parsed: {
67
+ "summary" => "Test response"
68
+ }
69
+ }
70
+ )
71
+ end
72
+
73
+ context "when the response is not valid JSON" do
74
+ let(:mock_response) do
75
+ OpenStruct.new(content: [OpenStruct.new(text: "not valid json")])
76
+ end
77
+
78
+ it "returns raw response with nil parsed value" do
79
+ response = provider.call(prompt, task)
80
+ expect(response).to eq({ raw: "not valid json", parsed: nil })
81
+ end
82
+ end
83
+ end
84
+
85
+ describe "#model" do
86
+ it "returns the default model when not specified" do
87
+ expect(provider.model).to be_a(
88
+ Lluminary::Models::Anthropic::Claude35Sonnet
89
+ )
90
+ end
91
+
92
+ it "returns the specified model when provided in config" do
93
+ model_class = double("ModelClass")
94
+ model_instance = double("ModelInstance")
95
+
96
+ allow(model_class).to receive(:new).and_return(model_instance)
97
+
98
+ custom_provider =
99
+ described_class.new(model: model_class, api_key: "test-key")
100
+
101
+ expect(custom_provider.model).to eq(model_instance)
102
+ end
103
+ end
104
+ end
@@ -19,6 +19,9 @@ RSpec.describe Lluminary::Task do
19
19
  end
20
20
 
21
21
  let(:task_with_test) { Class.new(described_class) { use_provider :test } }
22
+ let(:task_with_anthropic) do
23
+ Class.new(described_class) { use_provider :anthropic, api_key: "test" }
24
+ end
22
25
 
23
26
  describe ".call" do
24
27
  it "returns a result with a raw response from the provider" do
@@ -44,6 +47,12 @@ RSpec.describe Lluminary::Task do
44
47
  task_class.provider = custom_provider
45
48
  expect(task_class.provider).to eq(custom_provider)
46
49
  end
50
+
51
+ it "with :anthropic provider sets the Anthropic provider" do
52
+ expect(task_with_anthropic.provider).to be_a(
53
+ Lluminary::Providers::Anthropic
54
+ )
55
+ end
47
56
  end
48
57
 
49
58
  describe ".use_provider" do
@@ -76,6 +85,15 @@ RSpec.describe Lluminary::Task do
76
85
  )
77
86
  end
78
87
 
88
+ it "with :anthropic instantiates Anthropic provider with config" do
89
+ task_class.use_provider(:anthropic, api_key: "test")
90
+ expect(task_class.provider).to be_a(Lluminary::Providers::Anthropic)
91
+ expect(task_class.provider.config).to include(
92
+ api_key: "test",
93
+ model: Lluminary::Models::Anthropic::Claude35Sonnet
94
+ )
95
+ end
96
+
79
97
  it "raises ArgumentError for unknown provider" do
80
98
  expect { task_class.use_provider(:unknown) }.to raise_error(
81
99
  ArgumentError,
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: lluminary
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.2.1
4
+ version: 0.2.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Doug Hughes
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2025-05-04 00:00:00.000000000 Z
11
+ date: 2025-05-16 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: activemodel
@@ -30,6 +30,20 @@ dependencies:
30
30
  - - "<"
31
31
  - !ruby/object:Gem::Version
32
32
  version: '9'
33
+ - !ruby/object:Gem::Dependency
34
+ name: anthropic-sdk-beta
35
+ requirement: !ruby/object:Gem::Requirement
36
+ requirements:
37
+ - - "~>"
38
+ - !ruby/object:Gem::Version
39
+ version: 0.1.0.pre.beta.6
40
+ type: :runtime
41
+ prerelease: false
42
+ version_requirements: !ruby/object:Gem::Requirement
43
+ requirements:
44
+ - - "~>"
45
+ - !ruby/object:Gem::Version
46
+ version: 0.1.0.pre.beta.6
33
47
  - !ruby/object:Gem::Dependency
34
48
  name: aws-sdk-bedrock
35
49
  requirement: !ruby/object:Gem::Requirement
@@ -157,33 +171,33 @@ dependencies:
157
171
  - !ruby/object:Gem::Version
158
172
  version: '1.50'
159
173
  - !ruby/object:Gem::Dependency
160
- name: syntax_tree
174
+ name: simplecov
161
175
  requirement: !ruby/object:Gem::Requirement
162
176
  requirements:
163
177
  - - "~>"
164
178
  - !ruby/object:Gem::Version
165
- version: '6.2'
179
+ version: 0.22.0
166
180
  type: :development
167
181
  prerelease: false
168
182
  version_requirements: !ruby/object:Gem::Requirement
169
183
  requirements:
170
184
  - - "~>"
171
185
  - !ruby/object:Gem::Version
172
- version: '6.2'
186
+ version: 0.22.0
173
187
  - !ruby/object:Gem::Dependency
174
- name: simplecov
188
+ name: syntax_tree
175
189
  requirement: !ruby/object:Gem::Requirement
176
190
  requirements:
177
191
  - - "~>"
178
192
  - !ruby/object:Gem::Version
179
- version: 0.22.0
193
+ version: '6.2'
180
194
  type: :development
181
195
  prerelease: false
182
196
  version_requirements: !ruby/object:Gem::Requirement
183
197
  requirements:
184
198
  - - "~>"
185
199
  - !ruby/object:Gem::Version
186
- version: 0.22.0
200
+ version: '6.2'
187
201
  description: 'Lluminary is a framework for building applications that leverage Large
188
202
  Language Models. It provides a structured way to define tasks, manage prompts, and
189
203
  handle LLM interactions.
@@ -197,12 +211,14 @@ extra_rdoc_files: []
197
211
  files:
198
212
  - lib/lluminary.rb
199
213
  - lib/lluminary/config.rb
214
+ - lib/lluminary/models/anthropic/claude_3_5_sonnet.rb
200
215
  - lib/lluminary/models/base.rb
201
216
  - lib/lluminary/models/bedrock/amazon_nova_pro_v1.rb
202
217
  - lib/lluminary/models/bedrock/anthropic_claude_instant_v1.rb
203
218
  - lib/lluminary/models/bedrock/base.rb
204
219
  - lib/lluminary/models/openai/gpt35_turbo.rb
205
220
  - lib/lluminary/provider_error.rb
221
+ - lib/lluminary/providers/anthropic.rb
206
222
  - lib/lluminary/providers/base.rb
207
223
  - lib/lluminary/providers/bedrock.rb
208
224
  - lib/lluminary/providers/openai.rb
@@ -211,8 +227,6 @@ files:
211
227
  - lib/lluminary/schema.rb
212
228
  - lib/lluminary/schema_model.rb
213
229
  - lib/lluminary/task.rb
214
- - lib/lluminary/tasks/describe_openai_model.rb
215
- - lib/lluminary/tasks/identify_and_describe_open_ai_models.rb
216
230
  - lib/lluminary/validation_error.rb
217
231
  - lib/lluminary/version.rb
218
232
  - spec/examples/analyze_text_spec.rb
@@ -230,6 +244,7 @@ files:
230
244
  - spec/lluminary/models/bedrock/amazon_nova_pro_v1_spec.rb
231
245
  - spec/lluminary/models/bedrock/anthropic_claude_instant_v1_spec.rb
232
246
  - spec/lluminary/models/openai/gpt35_turbo_spec.rb
247
+ - spec/lluminary/providers/anthropic_spec.rb
233
248
  - spec/lluminary/providers/bedrock_spec.rb
234
249
  - spec/lluminary/providers/openai_spec.rb
235
250
  - spec/lluminary/providers/test_spec.rb
@@ -1,61 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module Lluminary
4
- module Tasks
5
- class DescribeOpenAiModel < Lluminary::Task
6
- use_provider :openai
7
-
8
- input_schema do
9
- string :model, description: "The OpenAI model to describe"
10
- end
11
-
12
- # {
13
- # "id": "gpt-4o-2024-11-20",
14
- # "family": "gpt-4o",
15
- # "variant": "standard",
16
- # "release_date": "2024-11-20",
17
- # "status": "GA",
18
- # "inputs": {"text": true, "image": true, "audio": false},
19
- # "outputs": {"text": true, "audio": false}
20
- # }
21
-
22
- output_schema do
23
- hash :model_description, description: "The description of the model" do
24
- string :id,
25
- description:
26
- "The full OpenAI API model ID being described. EG: 'gpt-4o-2024-11-20'"
27
- string :family,
28
- description:
29
- "The OpenAI model family. EG: 'gpt-4o' or 'gpt-4.1-mini'"
30
- string :variant, description: "The OpenAI model variant"
31
- string :release_date,
32
- description: "The model's release date, if known."
33
- string :status,
34
- description: "The OpenAI model status. EG: GA or preview"
35
- hash :inputs, description: "The model's inputs" do
36
- boolean :text, description: "Whether the model can process text"
37
- boolean :image, description: "Whether the model can process images"
38
- boolean :audio, description: "Whether the model can process audio"
39
- string :other_inputs,
40
- description: "Other inputs the model can process"
41
- end
42
- hash :outputs, description: "The model's outputs" do
43
- boolean :text, description: "Whether the model can output text"
44
- boolean :image, description: "Whether the model can output images"
45
- boolean :audio, description: "Whether the model can output audio"
46
- string :other_outputs,
47
- description: "Other outputs the model can return"
48
- end
49
- end
50
- end
51
-
52
- def task_prompt
53
- <<~PROMPT
54
- You are an expert in OpenAI models. You will be given a model ID and asked to describe the model using structured data.
55
-
56
- Model ID: #{input.model}
57
- PROMPT
58
- end
59
- end
60
- end
61
- end
@@ -1,51 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module Lluminary
4
- module Tasks
5
- class IdentifyAndDescribeOpenAiModels < Lluminary::Task
6
- use_provider :bedrock, model: Lluminary::Models::Bedrock::AmazonNovaProV1
7
-
8
- input_schema do
9
- array :models, description: "List of OpenAI models" do
10
- string
11
- end
12
- end
13
-
14
- output_schema do
15
- array :root_models,
16
- description: "List of root models and their versions" do
17
- hash do
18
- string :name,
19
- description:
20
- "The root name of the model. For example, 'gpt-4' or 'gpt-4o'"
21
- array :versions,
22
- description:
23
- "List of versions of the root model. For example, '0125-preview' or '0613' or '2024-04-09'" do
24
- string
25
- end
26
- end
27
- end
28
- end
29
-
30
- def task_prompt
31
- <<~PROMPT
32
- You are an expert in OpenAI models. You will be given a list of OpenAI models and asked to group them together by the "root" model type and capability and list the various versions of the root model.
33
-
34
- Keep in mind that some "root" models have names with the same root name but different capabilities. For example, "gpt-4o" and "gpt-4o-audio" are distinct models, since they have different capabilities and each has their own versions.
35
-
36
- "gpt-4.5-preview" and "gpt-4.5-preview-2025-02-27" are examples of the "gpt-4.5" root model. There are two versions of the "gpt-4.5" root model: "preview" and "preview-2025-02-27".
37
-
38
- Given the following list of models, please group them together by the "root" model type and list their versions.
39
-
40
- Your response will be used to generate code that will make use of the models and their versions.
41
-
42
- It's critical that you represent every model and version from the following list in your response. Any model or version that is missed will be excluded from subsequent code generation and that will make them very, very sad. We don't want any sad models.
43
-
44
- DO NOT include any other models or versions in your response other than those from ones listed below. Use your expertise in OpenAI models to distinguish between different "root" models and their versions.
45
-
46
- Models: #{models.join(", ")}
47
- PROMPT
48
- end
49
- end
50
- end
51
- end