lluminary 0.2.1 → 0.2.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: cacc0edbbc80b69f4c95d6b9888707651dffc5189432920b1c814abed095ea01
-  data.tar.gz: 647f143f0be6053b7ca80868e2d6bd5058cfee8fae0d0e12a51ad59f8e8d5233
+  metadata.gz: 187dc38fab8e09fd65ffb2db359e914f16dc79ace8ff9d058de01f547db0921c
+  data.tar.gz: 6a9a08f01d36966529c3c97e1877834df7a59e41347f68ffcba4c37a56597a4e
 SHA512:
-  metadata.gz: 63dce0789e7b175ad324d7de8f7bbd4c90da575e314f14cbd3c4ab9c6b3cab5b942cd279a15b545ac322f267bda221efd3988c6b7e9c7f2f930f1de0c1af87ac
-  data.tar.gz: 30a523b37b850f6603e466e05c49ceb8eeb03fb8ee81c3b237b97b175ace6082e0e97ace99b210e2807fb1d9feab2520a4b95753d5690e7fb46d02df1faa0389
+  metadata.gz: 202c4b2f46a3a0f35e978406055781fe3c85d390ca5e520955b8fad80734dbe6434a9034767f149141876a28a69e50e2636c0064d1119ede7e2314df7e49bac1
+  data.tar.gz: f0c17df74edfc0db4b7eb97917355f6c32da8761fa294ddd030f86bdc0540f208ae5220700b2d1a752ad25a1665367f7bfc6f02e51df3fe76b4ed79a51bef46a
data/lib/lluminary/models/anthropic/claude_3_5_sonnet.rb ADDED
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+# require_relative "../base"
+
+module Lluminary
+  module Models
+    module Anthropic
+      class Claude35Sonnet < Lluminary::Models::Base
+        NAME = "claude-3-5-sonnet-latest"
+
+        def compatible_with?(provider_name)
+          provider_name == :anthropic
+        end
+      end
+    end
+  end
+end
data/lib/lluminary/models/google/gemini_20_flash.rb ADDED
@@ -0,0 +1,19 @@
+# frozen_string_literal: true
+
+module Lluminary
+  module Models
+    module Google
+      class Gemini20Flash < Lluminary::Models::Base
+        NAME = "gemini-2.0-flash"
+
+        def compatible_with?(provider_name)
+          provider_name == :google
+        end
+
+        def name
+          NAME
+        end
+      end
+    end
+  end
+end
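
Taken together, the two model files above define the pattern a model class follows: subclass Lluminary::Models::Base, pin the provider-facing model ID in NAME, and answer compatible_with?. A hypothetical sketch of one more model in that shape (Gemini15Pro is illustrative only and not shipped in 0.2.3, though its ID appears in the mocked spec data further down):

# Hypothetical additional model following the same pattern; Gemini15Pro
# is illustrative and not part of this release.
module Lluminary
  module Models
    module Google
      class Gemini15Pro < Lluminary::Models::Base
        NAME = "gemini-1.5-pro"

        def compatible_with?(provider_name)
          provider_name == :google
        end

        def name
          NAME
        end
      end
    end
  end
end
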
data/lib/lluminary/providers/anthropic.rb ADDED
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+require "anthropic"
+require "json"
+require_relative "../provider_error"
+
+module Lluminary
+  module Providers
+    # Provider for Anthropic's models.
+    # Implements the Base provider interface for Anthropic's API.
+    class Anthropic < Base
+      NAME = :anthropic
+      DEFAULT_MODEL = Models::Anthropic::Claude35Sonnet
+
+      attr_reader :client, :config
+
+      def initialize(**config_overrides)
+        super
+        @config = { model: DEFAULT_MODEL }.merge(config)
+        @client = ::Anthropic::Client.new(api_key: config[:api_key])
+      end
+
+      def call(prompt, _task)
+        message =
+          client.messages.create(
+            max_tokens: 1024, # TODO: make this configurable
+            messages: [{ role: "user", content: prompt }],
+            model: model.class::NAME
+          )
+
+        content = message.content.first.text
+
+        {
+          raw: content,
+          parsed:
+            begin
+              JSON.parse(content) if content
+            rescue JSON::ParserError
+              nil
+            end
+        }
+      end
+
+      def model
+        @model ||= config[:model].new
+      end
+
+      def models
+        response = @client.models.list
+        response.data.map { |model| model.id }
+      end
+    end
+  end
+end
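
For orientation, a minimal usage sketch of the provider added above, assuming a placeholder API key in the environment; #call performs a live request and returns the { raw:, parsed: } pair visible in the code:

require "lluminary"

# The API key is a placeholder; the default model comes from the merged config.
provider =
  Lluminary::Providers::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"])

puts provider.model.class::NAME # => "claude-3-5-sonnet-latest"

result = provider.call('Reply with JSON: {"greeting":"hi"}', nil)
result[:raw]    # raw text of the first content block
result[:parsed] # Hash if the reply parsed as JSON, otherwise nil
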
data/lib/lluminary/providers/google.rb ADDED
@@ -0,0 +1,65 @@
+# frozen_string_literal: true
+
+require "openai"
+require "json"
+require_relative "../provider_error"
+
+# This is a quick and dirty implementation of a provider that works with Google's AI Studio.
+# It does not currently support Vertex. Plans are to eventually create a separate gem, similar
+# to `gemini-ai`, that can work with either AI Studio or Vertex. For now, this just uses the
+# OpenAI-compatible endpoint.
+module Lluminary
+  module Providers
+    class Google < Base
+      NAME = :google
+      DEFAULT_MODEL = Models::Google::Gemini20Flash
+
+      attr_reader :client, :config
+
+      def initialize(**config_overrides)
+        super
+        @config = { model: DEFAULT_MODEL }.merge(config)
+        @client =
+          ::OpenAI::Client.new(
+            access_token: config[:api_key],
+            api_version: "",
+            uri_base: "https://generativelanguage.googleapis.com/v1beta/openai"
+          )
+      end
+
+      def call(prompt, _task)
+        response =
+          client.chat(
+            parameters: {
+              model: model.class::NAME,
+              messages: [{ role: "user", content: prompt }],
+              response_format: {
+                type: "json_object"
+              }
+            }
+          )
+
+        content = response.dig("choices", 0, "message", "content")
+
+        {
+          raw: content,
+          parsed:
+            begin
+              JSON.parse(content) if content
+            rescue JSON::ParserError
+              nil
+            end
+        }
+      end
+
+      def model
+        @model ||= config[:model].new
+      end
+
+      def models
+        response = @client.models.list
+        response["data"].map { |model| model["id"].split("/").last }
+      end
+    end
+  end
+end
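
A similar hedged sketch for the Google provider: the client is an OpenAI::Client pointed at Google's OpenAI-compatible endpoint configured in initialize, so usage mirrors the Anthropic example; the key is again a placeholder:

require "lluminary"

# Placeholder AI Studio key; requests go to the uri_base set in initialize.
provider = Lluminary::Providers::Google.new(api_key: ENV["GOOGLE_API_KEY"])

puts provider.model.class::NAME # => "gemini-2.0-flash"

# The default model can be overridden through config, as with Anthropic:
custom =
  Lluminary::Providers::Google.new(
    api_key: ENV["GOOGLE_API_KEY"],
    model: Lluminary::Models::Google::Gemini20Flash
  )
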
data/lib/lluminary/task.rb CHANGED
@@ -3,9 +3,6 @@ require "ostruct"
 require "json"
 require_relative "schema"
 require_relative "validation_error"
-require_relative "models/base"
-require_relative "models/openai/gpt35_turbo"
-require_relative "models/bedrock/anthropic_claude_instant_v1"
 
 module Lluminary
   # Base class for all Lluminary tasks.
@@ -25,15 +22,21 @@ module Lluminary
     def use_provider(provider_name, **config)
       provider_class =
         case provider_name
+        when :anthropic
+          require_relative "providers/anthropic"
+          Providers::Anthropic
+        when :bedrock
+          require_relative "providers/bedrock"
+          Providers::Bedrock
+        when :google
+          require_relative "providers/google"
+          Providers::Google
         when :openai
          require_relative "providers/openai"
           Providers::OpenAI
         when :test
           require_relative "providers/test"
           Providers::Test
-        when :bedrock
-          require_relative "providers/bedrock"
-          Providers::Bedrock
         else
           raise ArgumentError, "Unknown provider: #{provider_name}"
         end
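
With the new case branches, a task can now select these providers by symbol. A hypothetical task definition under that assumption, using the schema DSL seen elsewhere in the gem (the api_key value is a placeholder):

# Hypothetical task illustrating the new :anthropic branch.
class SummarizeText < Lluminary::Task
  use_provider :anthropic, api_key: ENV["ANTHROPIC_API_KEY"]

  input_schema do
    string :text, description: "The text to summarize"
  end

  output_schema do
    string :summary, description: "A one-sentence summary"
  end

  def task_prompt
    "Summarize the following text in one sentence: #{input.text}"
  end
end

# SummarizeText.call(text: "...") would route through Providers::Anthropic.
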
data/lib/lluminary.rb CHANGED
@@ -3,10 +3,16 @@
 require_relative "lluminary/version"
 require_relative "lluminary/result"
 require_relative "lluminary/task"
-# automatically require all providers
-Dir[File.join(__dir__, "lluminary/providers/*.rb")].each { |file| require file }
-# automatically require all models
+# require base model first
+require_relative "lluminary/models/base"
+# automatically require all models first
 Dir[File.join(__dir__, "lluminary/models/**/*.rb")].each { |file| require file }
+# require base provider first
+require_relative "lluminary/providers/base"
+# then require all other providers
+Dir[File.join(__dir__, "lluminary/providers/*.rb")].each do |file|
+  require file unless file.end_with?("base.rb")
+end
 require_relative "lluminary/config"
 
 # Lluminary is a framework for building and running LLM-powered tasks.
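
The reordering matters because Ruby resolves a superclass constant when the subclass file is evaluated, so models/base.rb must be loaded before any model subclass. A self-contained illustration of the failure mode (Demo and SomeModel are hypothetical stand-ins, not part of the gem):

# Self-contained illustration (Demo stands in for Lluminary):
module Demo
  module Models
  end
end

begin
  # Evaluating a subclass before its base class is loaded fails at
  # definition time; requiring base.rb first prevents exactly this.
  module Demo
    module Models
      class SomeModel < Demo::Models::Base
      end
    end
  end
rescue NameError => e
  puts e.message # => "uninitialized constant Demo::Models::Base"
end
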
data/spec/lluminary/models/google/gemini_20_flash_spec.rb ADDED
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+RSpec.describe Lluminary::Models::Google::Gemini20Flash do
+  subject(:model) { described_class.new }
+
+  describe "#NAME" do
+    it "has the correct model name" do
+      expect(described_class::NAME).to eq("gemini-2.0-flash")
+    end
+  end
+
+  describe "#compatible_with?" do
+    it "returns true for google provider" do
+      expect(model.compatible_with?(:google)).to be true
+    end
+
+    it "returns false for other providers" do
+      expect(model.compatible_with?(:openai)).to be false
+      expect(model.compatible_with?(:bedrock)).to be false
+      expect(model.compatible_with?(:anthropic)).to be false
+    end
+  end
+
+  describe "#name" do
+    it "returns the model name" do
+      expect(model.name).to eq("gemini-2.0-flash")
+    end
+  end
+end
data/spec/lluminary/providers/anthropic_spec.rb ADDED
@@ -0,0 +1,104 @@
+# frozen_string_literal: true
+require "spec_helper"
+require "lluminary/providers/anthropic"
+
+RSpec.describe Lluminary::Providers::Anthropic do
+  let(:config) { { api_key: "test-key" } }
+  let(:provider) { described_class.new(**config) }
+
+  describe "#client" do
+    it "returns the Anthropic client instance" do
+      expect(provider.client).to be_a(Anthropic::Client)
+    end
+  end
+
+  describe "#models" do
+    let(:mock_models_response) do
+      mock_model_info_1 = double("ModelInfo", id: "claude-3-5-sonnet-latest")
+      mock_model_info_2 = double("ModelInfo", id: "claude-3-haiku-20240307")
+
+      double(
+        "Page",
+        data: [mock_model_info_1, mock_model_info_2],
+        has_more: false,
+        first_id: "claude-3-5-sonnet-latest",
+        last_id: "claude-3-haiku-20240307"
+      )
+    end
+
+    before do
+      models_client = double("ModelsClient")
+      allow_any_instance_of(Anthropic::Client).to receive(:models).and_return(
+        models_client
+      )
+      allow(models_client).to receive(:list).and_return(mock_models_response)
+    end
+
+    it "returns an array of model IDs as strings" do
+      expect(provider.models).to eq(
+        %w[claude-3-5-sonnet-latest claude-3-haiku-20240307]
+      )
+    end
+  end
+
+  describe "#call" do
+    let(:prompt) { "Test prompt" }
+    let(:task) { "Test task" }
+    let(:mock_response) do
+      OpenStruct.new(
+        content: [OpenStruct.new(text: '{"summary": "Test response"}')]
+      )
+    end
+
+    before do
+      messages_client = double("MessagesClient")
+      allow_any_instance_of(Anthropic::Client).to receive(:messages).and_return(
+        messages_client
+      )
+      allow(messages_client).to receive(:create).and_return(mock_response)
+    end
+
+    it "returns a hash with raw and parsed response" do
+      response = provider.call(prompt, task)
+      expect(response).to eq(
+        {
+          raw: '{"summary": "Test response"}',
+          parsed: {
+            "summary" => "Test response"
+          }
+        }
+      )
+    end
+
+    context "when the response is not valid JSON" do
+      let(:mock_response) do
+        OpenStruct.new(content: [OpenStruct.new(text: "not valid json")])
+      end
+
+      it "returns raw response with nil parsed value" do
+        response = provider.call(prompt, task)
+        expect(response).to eq({ raw: "not valid json", parsed: nil })
+      end
+    end
+  end
+
+  describe "#model" do
+    it "returns the default model when not specified" do
+      expect(provider.model).to be_a(
+        Lluminary::Models::Anthropic::Claude35Sonnet
+      )
+    end
+
+    it "returns the specified model when provided in config" do
+      model_class = double("ModelClass")
+      model_instance = double("ModelInstance")
+
+      allow(model_class).to receive(:new).and_return(model_instance)
+
+      custom_provider =
+        described_class.new(model: model_class, api_key: "test-key")
+
+      expect(custom_provider.model).to eq(model_instance)
+    end
+  end
+end
data/spec/lluminary/providers/google_spec.rb ADDED
@@ -0,0 +1,84 @@
+# frozen_string_literal: true
+require "spec_helper"
+
+RSpec.describe Lluminary::Providers::Google do
+  let(:config) { { api_key: "test-key" } }
+  let(:provider) { described_class.new(**config) }
+
+  describe "#client" do
+    it "returns the OpenAI client instance" do
+      expect(provider.client).to be_a(OpenAI::Client)
+    end
+  end
+
+  describe "#models" do
+    let(:mock_models_response) do
+      {
+        "object" => "list",
+        "data" => [
+          {
+            "id" => "models/gemini-2.0-flash",
+            "object" => "model",
+            "owned_by" => "google"
+          },
+          {
+            "id" => "models/gemini-1.5-pro",
+            "object" => "model",
+            "owned_by" => "google"
+          }
+        ]
+      }
+    end
+
+    before do
+      allow_any_instance_of(OpenAI::Client).to receive(:models).and_return(
+        double("ModelsClient", list: mock_models_response)
+      )
+    end
+
+    it "returns an array of model IDs as strings with the 'models/' prefix removed" do
+      expect(provider.models).to eq(%w[gemini-2.0-flash gemini-1.5-pro])
+    end
+  end
+
+  describe "#call" do
+    let(:prompt) { "Test prompt" }
+    let(:task) { "Test task" }
+    let(:mock_response) do
+      {
+        "choices" => [
+          { "message" => { "content" => '{"summary": "Test response"}' } }
+        ]
+      }
+    end
+
+    before do
+      allow_any_instance_of(OpenAI::Client).to receive(:chat).and_return(
+        mock_response
+      )
+    end
+
+    it "returns a hash with raw and parsed response" do
+      response = provider.call(prompt, task)
+      expect(response).to eq(
+        {
+          raw: '{"summary": "Test response"}',
+          parsed: {
+            "summary" => "Test response"
+          }
+        }
+      )
+    end
+
+    context "when the response is not valid JSON" do
+      let(:mock_response) do
+        { "choices" => [{ "message" => { "content" => "not valid json" } }] }
+      end
+
+      it "returns raw response with nil parsed value" do
+        response = provider.call(prompt, task)
+        expect(response).to eq({ raw: "not valid json", parsed: nil })
+      end
+    end
+  end
+end
data/spec/lluminary/task_spec.rb CHANGED
@@ -19,6 +19,9 @@ RSpec.describe Lluminary::Task do
   end
 
   let(:task_with_test) { Class.new(described_class) { use_provider :test } }
+  let(:task_with_anthropic) do
+    Class.new(described_class) { use_provider :anthropic, api_key: "test" }
+  end
 
   describe ".call" do
     it "returns a result with a raw response from the provider" do
@@ -44,6 +47,12 @@ RSpec.describe Lluminary::Task do
       task_class.provider = custom_provider
       expect(task_class.provider).to eq(custom_provider)
     end
+
+    it "with :anthropic provider sets the Anthropic provider" do
+      expect(task_with_anthropic.provider).to be_a(
+        Lluminary::Providers::Anthropic
+      )
+    end
   end
 
   describe ".use_provider" do
@@ -76,6 +85,15 @@ RSpec.describe Lluminary::Task do
       )
     end
 
+    it "with :anthropic instantiates Anthropic provider with config" do
+      task_class.use_provider(:anthropic, api_key: "test")
+      expect(task_class.provider).to be_a(Lluminary::Providers::Anthropic)
+      expect(task_class.provider.config).to include(
+        api_key: "test",
+        model: Lluminary::Models::Anthropic::Claude35Sonnet
+      )
+    end
+
     it "raises ArgumentError for unknown provider" do
       expect { task_class.use_provider(:unknown) }.to raise_error(
        ArgumentError,
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: lluminary
 version: !ruby/object:Gem::Version
-  version: 0.2.1
+  version: 0.2.3
 platform: ruby
 authors:
 - Doug Hughes
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-05-04 00:00:00.000000000 Z
+date: 2025-05-24 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activemodel
@@ -30,6 +30,20 @@ dependencies:
     - - "<"
       - !ruby/object:Gem::Version
         version: '9'
+- !ruby/object:Gem::Dependency
+  name: anthropic-sdk-beta
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.1.0.pre.beta.6
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.1.0.pre.beta.6
 - !ruby/object:Gem::Dependency
   name: aws-sdk-bedrock
   requirement: !ruby/object:Gem::Requirement
@@ -157,33 +171,33 @@ dependencies:
     - !ruby/object:Gem::Version
       version: '1.50'
 - !ruby/object:Gem::Dependency
-  name: syntax_tree
+  name: simplecov
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: '6.2'
+        version: 0.22.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '6.2'
+        version: 0.22.0
 - !ruby/object:Gem::Dependency
-  name: simplecov
+  name: syntax_tree
   requirement: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.22.0
+        version: '6.2'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.22.0
+        version: '6.2'
 description: 'Lluminary is a framework for building applications that leverage Large
   Language Models. It provides a structured way to define tasks, manage prompts, and
   handle LLM interactions.
@@ -197,22 +211,24 @@ extra_rdoc_files: []
 files:
 - lib/lluminary.rb
 - lib/lluminary/config.rb
+- lib/lluminary/models/anthropic/claude_3_5_sonnet.rb
 - lib/lluminary/models/base.rb
 - lib/lluminary/models/bedrock/amazon_nova_pro_v1.rb
 - lib/lluminary/models/bedrock/anthropic_claude_instant_v1.rb
 - lib/lluminary/models/bedrock/base.rb
+- lib/lluminary/models/google/gemini_20_flash.rb
 - lib/lluminary/models/openai/gpt35_turbo.rb
 - lib/lluminary/provider_error.rb
+- lib/lluminary/providers/anthropic.rb
 - lib/lluminary/providers/base.rb
 - lib/lluminary/providers/bedrock.rb
+- lib/lluminary/providers/google.rb
 - lib/lluminary/providers/openai.rb
 - lib/lluminary/providers/test.rb
 - lib/lluminary/result.rb
 - lib/lluminary/schema.rb
 - lib/lluminary/schema_model.rb
 - lib/lluminary/task.rb
-- lib/lluminary/tasks/describe_openai_model.rb
-- lib/lluminary/tasks/identify_and_describe_open_ai_models.rb
 - lib/lluminary/validation_error.rb
 - lib/lluminary/version.rb
 - spec/examples/analyze_text_spec.rb
@@ -229,8 +245,11 @@ files:
 - spec/lluminary/models/base_spec.rb
 - spec/lluminary/models/bedrock/amazon_nova_pro_v1_spec.rb
 - spec/lluminary/models/bedrock/anthropic_claude_instant_v1_spec.rb
+- spec/lluminary/models/google/gemini_20_flash_spec.rb
 - spec/lluminary/models/openai/gpt35_turbo_spec.rb
+- spec/lluminary/providers/anthropic_spec.rb
 - spec/lluminary/providers/bedrock_spec.rb
+- spec/lluminary/providers/google_spec.rb
 - spec/lluminary/providers/openai_spec.rb
 - spec/lluminary/providers/test_spec.rb
 - spec/lluminary/result_spec.rb
data/lib/lluminary/tasks/describe_openai_model.rb DELETED
@@ -1,61 +0,0 @@
-# frozen_string_literal: true
-
-module Lluminary
-  module Tasks
-    class DescribeOpenAiModel < Lluminary::Task
-      use_provider :openai
-
-      input_schema do
-        string :model, description: "The OpenAI model to describe"
-      end
-
-      # {
-      #   "id": "gpt-4o-2024-11-20",
-      #   "family": "gpt-4o",
-      #   "variant": "standard",
-      #   "release_date": "2024-11-20",
-      #   "status": "GA",
-      #   "inputs": {"text": true, "image": true, "audio": false},
-      #   "outputs": {"text": true, "audio": false}
-      # }
-
-      output_schema do
-        hash :model_description, description: "The description of the model" do
-          string :id,
-                 description:
-                   "The full OpenAI API model ID being described. EG: 'gpt-4o-2024-11-20'"
-          string :family,
-                 description:
-                   "The OpenAI model family. EG: 'gpt-4o' or 'gpt-4.1-mini'"
-          string :variant, description: "The OpenAI model variant"
-          string :release_date,
-                 description: "The model's release date, if known."
-          string :status,
-                 description: "The OpenAI model status. EG: GA or preview"
-          hash :inputs, description: "The model's inputs" do
-            boolean :text, description: "Whether the model can process text"
-            boolean :image, description: "Whether the model can process images"
-            boolean :audio, description: "Whether the model can process audio"
-            string :other_inputs,
-                   description: "Other inputs the model can process"
-          end
-          hash :outputs, description: "The model's outputs" do
-            boolean :text, description: "Whether the model can output text"
-            boolean :image, description: "Whether the model can output images"
-            boolean :audio, description: "Whether the model can output audio"
-            string :other_outputs,
-                   description: "Other outputs the model can return"
-          end
-        end
-      end
-
-      def task_prompt
-        <<~PROMPT
-          You are an expert in OpenAI models. You will be given a model ID and asked to describe the model using structured data.
-
-          Model ID: #{input.model}
-        PROMPT
-      end
-    end
-  end
-end
data/lib/lluminary/tasks/identify_and_describe_open_ai_models.rb DELETED
@@ -1,51 +0,0 @@
-# frozen_string_literal: true
-
-module Lluminary
-  module Tasks
-    class IdentifyAndDescribeOpenAiModels < Lluminary::Task
-      use_provider :bedrock, model: Lluminary::Models::Bedrock::AmazonNovaProV1
-
-      input_schema do
-        array :models, description: "List of OpenAI models" do
-          string
-        end
-      end
-
-      output_schema do
-        array :root_models,
-              description: "List of root models and their versions" do
-          hash do
-            string :name,
-                   description:
-                     "The root name of the model. For example, 'gpt-4' or 'gpt-4o'"
-            array :versions,
-                  description:
-                    "List of versions of the root model. For example, '0125-preview' or '0613' or '2024-04-09'" do
-              string
-            end
-          end
-        end
-      end
-
-      def task_prompt
-        <<~PROMPT
-          You are an expert in OpenAI models. You will be given a list of OpenAI models and asked to group them together by the "root" model type and capability and list the various versions of the root model.
-
-          Keep in mind that some "root" models have names with the same root name but different capabilities. For example, "gpt-4o" and "gpt-4o-audio" are distinct models, since they have different capabilities and each has their own versions.
-
-          "gpt-4.5-preview" and "gpt-4.5-preview-2025-02-27" are examples of the "gpt-4.5" root model. There are two versions of the "gpt-4.5" root model: "preview" and "preview-2025-02-27".
-
-          Given the following list of models, please group them together by the "root" model type and list their versions.
-
-          Your response will be used to generate code that will make use of the models and their versions.
-
-          It's critical that you represent every model and version from the following list in your response. Any model or version that is missed will be excluded from subsequent code generation and that will make them very, very sad. We don't want any sad models.
-
-          DO NOT include any other models or versions in your response other than those listed below. Use your expertise in OpenAI models to distinguish between different "root" models and their versions.
-
-          Models: #{models.join(", ")}
-        PROMPT
-      end
-    end
-  end
-end