lluminary 0.2.0 → 0.2.2
This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/lluminary/models/anthropic/claude_3_5_sonnet.rb +17 -0
- data/lib/lluminary/models/base.rb +11 -0
- data/lib/lluminary/providers/anthropic.rb +54 -0
- data/lib/lluminary/schema.rb +11 -7
- data/lib/lluminary/schema_model.rb +16 -2
- data/lib/lluminary/task.rb +40 -0
- data/lib/lluminary.rb +9 -3
- data/spec/lluminary/models/base_spec.rb +32 -0
- data/spec/lluminary/providers/anthropic_spec.rb +104 -0
- data/spec/lluminary/schema_model_spec.rb +259 -0
- data/spec/lluminary/schema_spec.rb +80 -241
- data/spec/lluminary/task_custom_validation_spec.rb +262 -0
- data/spec/lluminary/task_spec.rb +18 -0
- data/spec/spec_helper.rb +3 -0
- metadata +34 -2
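
The headline additions in this release are a native Anthropic provider (data/lib/lluminary/providers/anthropic.rb) backed by a Claude 3.5 Sonnet model class, and support for custom validation methods in input and output schemas (schema.rb, schema_model.rb, task.rb). Below is a minimal sketch of how the new provider is wired into a task, based on the spec changes shown in this diff; the task name, prompt, and ENV lookup are illustrative assumptions, not part of the gem.

# Hedged sketch: defining a task against the new :anthropic provider.
# The specs below pass `use_provider :anthropic, api_key: "test"`; here the
# key is read from the environment purely for illustration.
class SummarizeTask < Lluminary::Task
  use_provider :anthropic, api_key: ENV["ANTHROPIC_API_KEY"]

  input_schema { string :text }
  output_schema { string :summary }

  def task_prompt
    "Summarize: #{text}"
  end
end
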
data/spec/lluminary/task_custom_validation_spec.rb
ADDED
@@ -0,0 +1,262 @@
+# frozen_string_literal: true
+require "spec_helper"
+
+RSpec.describe "Task custom validations" do
+  # Test task classes
+  class TaskWithInputValidation < Lluminary::Task
+    use_provider :test
+
+    input_schema do
+      string :text
+      integer :min_length, description: "Minimum word length to count"
+
+      validate :validate_input_min_length
+    end
+
+    output_schema { integer :word_count }
+
+    def validate_input_min_length
+      min_length_value = @input.attributes["min_length"]
+      if min_length_value && min_length_value <= 0
+        @input.errors.add(:min_length, "must be positive")
+      end
+    end
+
+    def task_prompt
+      "Count words in: #{text}"
+    end
+  end
+
+  class TaskWithOutputValidation < Lluminary::Task
+    use_provider :test
+
+    input_schema { string :text }
+
+    output_schema do
+      string :sentiment,
+             description: "Sentiment of the text (positive, negative, neutral)"
+      integer :confidence, description: "Confidence score from 0-100"
+
+      validate :validate_confidence_range
+    end
+
+    def validate_confidence_range
+      confidence_value = @output.attributes["confidence"]
+      if confidence_value && (confidence_value < 0 || confidence_value > 100)
+        @output.errors.add(:confidence, "must be between 0 and 100")
+      end
+    end
+
+    def task_prompt
+      "Analyze sentiment of: #{text}"
+    end
+  end
+
+  class TaskWithBothValidations < Lluminary::Task
+    use_provider :test
+
+    input_schema do
+      string :text
+      array :hashtags, description: "Hashtags to analyze" do
+        string
+      end
+
+      validate :validate_hashtags
+    end
+
+    output_schema do
+      array :relevant_hashtags do
+        string
+      end
+      hash :analysis do
+        string :top_hashtag
+        integer :count
+      end
+
+      validate :validate_top_hashtag
+    end
+
+    def validate_hashtags
+      hashtags_value = @input.attributes["hashtags"]
+      if hashtags_value && hashtags_value.any? &&
+           !hashtags_value.all? { |h| h.start_with?("#") }
+        @input.errors.add(:hashtags, "must all start with # symbol")
+      end
+    end
+
+    def validate_top_hashtag
+      top_hashtag = @output.attributes.dig("analysis", "top_hashtag")
+      relevant_hashtags = @output.attributes["relevant_hashtags"]
+      if top_hashtag && relevant_hashtags &&
+           !relevant_hashtags.include?(top_hashtag)
+        @output.errors.add(
+          :analysis,
+          "top hashtag must be in the relevant_hashtags list"
+        )
+      end
+    end
+
+    def task_prompt
+      "Analyze hashtags in: #{text}"
+    end
+  end
+
+  # Override Test provider for predictable responses
+  class TestProvider < Lluminary::Providers::Test
+    def initialize(response_data = nil)
+      @response_data = response_data || {}
+    end
+
+    def call(prompt, task)
+      if @response_data[task.class.name]
+        @response_data[task.class.name]
+      else
+        { raw: "{}", parsed: {} }
+      end
+    end
+  end
+
+  describe "input validations" do
+    before do
+      # Reset the provider to use our test provider
+      TaskWithInputValidation.provider = TestProvider.new
+    end
+
+    it "validates input with custom methods" do
+      task = TaskWithInputValidation.new(text: "Hello world", min_length: 0)
+      expect(task.valid?).to be false
+      expect(task.input.errors.full_messages).to include(
+        "Min length must be positive"
+      )
+    end
+
+    it "accepts valid input" do
+      task = TaskWithInputValidation.new(text: "Hello world", min_length: 3)
+      expect(task.valid?).to be true
+      expect(task.input.errors.full_messages).to be_empty
+    end
+
+    it "rejects invalid input in call" do
+      result = TaskWithInputValidation.call(text: "Hello world", min_length: -5)
+      expect(result.input.valid?).to be false
+      expect(result.output).to be_nil
+    end
+  end
+
+  describe "output validations" do
+    before do
+      # Setup test provider with custom responses
+      responses = {
+        "TaskWithOutputValidation" => {
+          raw: '{"sentiment": "positive", "confidence": 150}',
+          parsed: {
+            "sentiment" => "positive",
+            "confidence" => 150 # Invalid: over 100
+          }
+        }
+      }
+      TaskWithOutputValidation.provider = TestProvider.new(responses)
+    end
+
+    it "validates output with custom methods" do
+      result = TaskWithOutputValidation.call(text: "I love this product!")
+      expect(result.output.valid?).to be false
+      expect(result.output.errors.full_messages).to include(
+        "Confidence must be between 0 and 100"
+      )
+    end
+
+    it "works with valid output" do
+      # Patch the provider with valid data for this test
+      valid_responses = {
+        "TaskWithOutputValidation" => {
+          raw: '{"sentiment": "positive", "confidence": 95}',
+          parsed: {
+            "sentiment" => "positive",
+            "confidence" => 95 # Valid: between 0-100
+          }
+        }
+      }
+      TaskWithOutputValidation.provider = TestProvider.new(valid_responses)
+
+      result = TaskWithOutputValidation.call(text: "I love this product!")
+      expect(result.output.valid?).to be true
+      expect(result.output.errors.full_messages).to be_empty
+      expect(result.output.sentiment).to eq("positive")
+      expect(result.output.confidence).to eq(95)
+    end
+  end
+
+  describe "both input and output validations" do
+    before do
+      # Setup test provider with custom responses
+      responses = {
+        "TaskWithBothValidations" => {
+          raw:
+            '{"relevant_hashtags": ["#ruby", "#rails"], "analysis": {"top_hashtag": "#javascript", "count": 5}}',
+          parsed: {
+            "relevant_hashtags" => %w[#ruby #rails],
+            "analysis" => {
+              "top_hashtag" => "#javascript", # Invalid: not in relevant_hashtags
+              "count" => 5
+            }
+          }
+        }
+      }
+      TaskWithBothValidations.provider = TestProvider.new(responses)
+    end
+
+    it "validates input with custom methods" do
+      task =
+        TaskWithBothValidations.new(
+          text: "Hello world",
+          hashtags: %w[ruby rails]
+        )
+      expect(task.valid?).to be false
+      expect(task.input.errors.full_messages).to include(
+        "Hashtags must all start with # symbol"
+      )
+    end
+
+    it "validates output with custom methods" do
+      # Input is valid for this test
+      result =
+        TaskWithBothValidations.call(
+          text: "Hello world",
+          hashtags: %w[#ruby #rails]
+        )
+      expect(result.output.valid?).to be false
+      expect(result.output.errors.full_messages).to include(
+        "Analysis top hashtag must be in the relevant_hashtags list"
+      )
+    end
+
+    it "works with valid input and output" do
+      # Patch the provider with valid data for this test
+      valid_responses = {
+        "TaskWithBothValidations" => {
+          raw:
+            '{"relevant_hashtags": ["#ruby", "#rails"], "analysis": {"top_hashtag": "#ruby", "count": 5}}',
+          parsed: {
+            "relevant_hashtags" => %w[#ruby #rails],
+            "analysis" => {
+              "top_hashtag" => "#ruby", # Valid: in relevant_hashtags
+              "count" => 5
+            }
+          }
+        }
+      }
+      TaskWithBothValidations.provider = TestProvider.new(valid_responses)
+
+      result =
+        TaskWithBothValidations.call(
+          text: "Hello world",
+          hashtags: %w[#ruby #rails]
+        )
+      expect(result.input.valid?).to be true
+      expect(result.output.valid?).to be true
+      expect(result.output.relevant_hashtags).to eq(%w[#ruby #rails])
+      expect(result.output.analysis["top_hashtag"]).to eq("#ruby")
+    end
+  end
+end
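
Outside of RSpec, the same custom-validation flow can be exercised directly. A minimal sketch, assuming the TaskWithInputValidation class above is loaded; the return values mirror the expectations in the spec.

# Hedged sketch of the call-site behavior asserted above.
result = TaskWithInputValidation.call(text: "Hello world", min_length: -5)
result.input.valid?                # => false (validate_input_min_length added an error)
result.input.errors.full_messages  # => ["Min length must be positive"]
result.output                      # => nil when the input is invalid
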
data/spec/lluminary/task_spec.rb
CHANGED
@@ -19,6 +19,9 @@ RSpec.describe Lluminary::Task do
   end

   let(:task_with_test) { Class.new(described_class) { use_provider :test } }
+  let(:task_with_anthropic) do
+    Class.new(described_class) { use_provider :anthropic, api_key: "test" }
+  end

   describe ".call" do
     it "returns a result with a raw response from the provider" do
@@ -44,6 +47,12 @@ RSpec.describe Lluminary::Task do
       task_class.provider = custom_provider
       expect(task_class.provider).to eq(custom_provider)
     end
+
+    it "with :anthropic provider sets the Anthropic provider" do
+      expect(task_with_anthropic.provider).to be_a(
+        Lluminary::Providers::Anthropic
+      )
+    end
   end

   describe ".use_provider" do
@@ -76,6 +85,15 @@ RSpec.describe Lluminary::Task do
       )
     end

+    it "with :anthropic instantiates Anthropic provider with config" do
+      task_class.use_provider(:anthropic, api_key: "test")
+      expect(task_class.provider).to be_a(Lluminary::Providers::Anthropic)
+      expect(task_class.provider.config).to include(
+        api_key: "test",
+        model: Lluminary::Models::Anthropic::Claude35Sonnet
+      )
+    end
+
     it "raises ArgumentError for unknown provider" do
       expect { task_class.use_provider(:unknown) }.to raise_error(
         ArgumentError,
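
The new .use_provider expectations also pin down the provider's default configuration: configuring :anthropic selects Lluminary::Models::Anthropic::Claude35Sonnet as the model. A hedged sketch of inspecting that config follows; MyTask is an illustrative name, and hash-style access to config is assumed from the include matcher above.

# Hedged sketch based on the expectations above.
class MyTask < Lluminary::Task
  use_provider :anthropic, api_key: "test"
end

MyTask.provider.class            # => Lluminary::Providers::Anthropic
MyTask.provider.config[:api_key] # => "test"
MyTask.provider.config[:model]   # => Lluminary::Models::Anthropic::Claude35Sonnet
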
data/spec/spec_helper.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: lluminary
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.2.2
 platform: ruby
 authors:
 - Doug Hughes
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-
+date: 2025-05-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activemodel
@@ -30,6 +30,20 @@ dependencies:
     - - "<"
       - !ruby/object:Gem::Version
         version: '9'
+- !ruby/object:Gem::Dependency
+  name: anthropic-sdk-beta
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.1.0.pre.beta.6
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.1.0.pre.beta.6
 - !ruby/object:Gem::Dependency
   name: aws-sdk-bedrock
   requirement: !ruby/object:Gem::Requirement
@@ -156,6 +170,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '1.50'
+- !ruby/object:Gem::Dependency
+  name: simplecov
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.22.0
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.22.0
 - !ruby/object:Gem::Dependency
   name: syntax_tree
   requirement: !ruby/object:Gem::Requirement
@@ -183,12 +211,14 @@ extra_rdoc_files: []
 files:
 - lib/lluminary.rb
 - lib/lluminary/config.rb
+- lib/lluminary/models/anthropic/claude_3_5_sonnet.rb
 - lib/lluminary/models/base.rb
 - lib/lluminary/models/bedrock/amazon_nova_pro_v1.rb
 - lib/lluminary/models/bedrock/anthropic_claude_instant_v1.rb
 - lib/lluminary/models/bedrock/base.rb
 - lib/lluminary/models/openai/gpt35_turbo.rb
 - lib/lluminary/provider_error.rb
+- lib/lluminary/providers/anthropic.rb
 - lib/lluminary/providers/base.rb
 - lib/lluminary/providers/bedrock.rb
 - lib/lluminary/providers/openai.rb
@@ -214,12 +244,14 @@ files:
 - spec/lluminary/models/bedrock/amazon_nova_pro_v1_spec.rb
 - spec/lluminary/models/bedrock/anthropic_claude_instant_v1_spec.rb
 - spec/lluminary/models/openai/gpt35_turbo_spec.rb
+- spec/lluminary/providers/anthropic_spec.rb
 - spec/lluminary/providers/bedrock_spec.rb
 - spec/lluminary/providers/openai_spec.rb
 - spec/lluminary/providers/test_spec.rb
 - spec/lluminary/result_spec.rb
 - spec/lluminary/schema_model_spec.rb
 - spec/lluminary/schema_spec.rb
+- spec/lluminary/task_custom_validation_spec.rb
 - spec/lluminary/task_spec.rb
 - spec/spec_helper.rb
 homepage: https://github.com/dhughes/lluminary
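
For consumers, the practical effect of the metadata change is one new runtime dependency, the prerelease Anthropic SDK; simplecov is development-only. A hedged Gemfile sketch:

# Hedged sketch: bundling lluminary 0.2.2 also resolves anthropic-sdk-beta
# (~> 0.1.0.pre.beta.6) transitively, per the gemspec diff above.
source "https://rubygems.org"

gem "lluminary", "~> 0.2.2"
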