lluminary 0.1.4 → 0.2.1
This diff shows the changes between publicly released versions of the package as they appear in their respective registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/lib/lluminary/models/base.rb +177 -66
- data/lib/lluminary/schema.rb +37 -7
- data/lib/lluminary/schema_model.rb +149 -65
- data/lib/lluminary/task.rb +37 -0
- data/lib/lluminary/tasks/describe_openai_model.rb +61 -0
- data/lib/lluminary/tasks/identify_and_describe_open_ai_models.rb +51 -0
- data/spec/examples/character_profiler_spec.rb +85 -0
- data/spec/lluminary/models/base_spec.rb +933 -100
- data/spec/lluminary/schema_model_spec.rb +259 -0
- data/spec/lluminary/schema_spec.rb +228 -134
- data/spec/lluminary/task_custom_validation_spec.rb +262 -0
- data/spec/spec_helper.rb +3 -0
- metadata +20 -2
data/spec/lluminary/task_custom_validation_spec.rb ADDED

@@ -0,0 +1,262 @@
+# frozen_string_literal: true
+require "spec_helper"
+
+RSpec.describe "Task custom validations" do
+  # Test task classes
+  class TaskWithInputValidation < Lluminary::Task
+    use_provider :test
+
+    input_schema do
+      string :text
+      integer :min_length, description: "Minimum word length to count"
+
+      validate :validate_input_min_length
+    end
+
+    output_schema { integer :word_count }
+
+    def validate_input_min_length
+      min_length_value = @input.attributes["min_length"]
+      if min_length_value && min_length_value <= 0
+        @input.errors.add(:min_length, "must be positive")
+      end
+    end
+
+    def task_prompt
+      "Count words in: #{text}"
+    end
+  end
+
+  class TaskWithOutputValidation < Lluminary::Task
+    use_provider :test
+
+    input_schema { string :text }
+
+    output_schema do
+      string :sentiment,
+             description: "Sentiment of the text (positive, negative, neutral)"
+      integer :confidence, description: "Confidence score from 0-100"
+
+      validate :validate_confidence_range
+    end
+
+    def validate_confidence_range
+      confidence_value = @output.attributes["confidence"]
+      if confidence_value && (confidence_value < 0 || confidence_value > 100)
+        @output.errors.add(:confidence, "must be between 0 and 100")
+      end
+    end
+
+    def task_prompt
+      "Analyze sentiment of: #{text}"
+    end
+  end
+
+  class TaskWithBothValidations < Lluminary::Task
+    use_provider :test
+
+    input_schema do
+      string :text
+      array :hashtags, description: "Hashtags to analyze" do
+        string
+      end
+
+      validate :validate_hashtags
+    end
+
+    output_schema do
+      array :relevant_hashtags do
+        string
+      end
+      hash :analysis do
+        string :top_hashtag
+        integer :count
+      end
+
+      validate :validate_top_hashtag
+    end
+
+    def validate_hashtags
+      hashtags_value = @input.attributes["hashtags"]
+      if hashtags_value && hashtags_value.any? &&
+           !hashtags_value.all? { |h| h.start_with?("#") }
+        @input.errors.add(:hashtags, "must all start with # symbol")
+      end
+    end
+
+    def validate_top_hashtag
+      top_hashtag = @output.attributes.dig("analysis", "top_hashtag")
+      relevant_hashtags = @output.attributes["relevant_hashtags"]
+      if top_hashtag && relevant_hashtags &&
+           !relevant_hashtags.include?(top_hashtag)
+        @output.errors.add(
+          :analysis,
+          "top hashtag must be in the relevant_hashtags list"
+        )
+      end
+    end
+
+    def task_prompt
+      "Analyze hashtags in: #{text}"
+    end
+  end
+
+  # Override Test provider for predictable responses
+  class TestProvider < Lluminary::Providers::Test
+    def initialize(response_data = nil)
+      @response_data = response_data || {}
+    end
+
+    def call(prompt, task)
+      if @response_data[task.class.name]
+        @response_data[task.class.name]
+      else
+        { raw: "{}", parsed: {} }
+      end
+    end
+  end
+
+  describe "input validations" do
+    before do
+      # Reset the provider to use our test provider
+      TaskWithInputValidation.provider = TestProvider.new
+    end
+
+    it "validates input with custom methods" do
+      task = TaskWithInputValidation.new(text: "Hello world", min_length: 0)
+      expect(task.valid?).to be false
+      expect(task.input.errors.full_messages).to include(
+        "Min length must be positive"
+      )
+    end
+
+    it "accepts valid input" do
+      task = TaskWithInputValidation.new(text: "Hello world", min_length: 3)
+      expect(task.valid?).to be true
+      expect(task.input.errors.full_messages).to be_empty
+    end
+
+    it "rejects invalid input in call" do
+      result = TaskWithInputValidation.call(text: "Hello world", min_length: -5)
+      expect(result.input.valid?).to be false
+      expect(result.output).to be_nil
+    end
+  end
+
+  describe "output validations" do
+    before do
+      # Setup test provider with custom responses
+      responses = {
+        "TaskWithOutputValidation" => {
+          raw: '{"sentiment": "positive", "confidence": 150}',
+          parsed: {
+            "sentiment" => "positive",
+            "confidence" => 150 # Invalid: over 100
+          }
+        }
+      }
+      TaskWithOutputValidation.provider = TestProvider.new(responses)
+    end
+
+    it "validates output with custom methods" do
+      result = TaskWithOutputValidation.call(text: "I love this product!")
+      expect(result.output.valid?).to be false
+      expect(result.output.errors.full_messages).to include(
+        "Confidence must be between 0 and 100"
+      )
+    end
+
+    it "works with valid output" do
+      # Patch the provider with valid data for this test
+      valid_responses = {
+        "TaskWithOutputValidation" => {
+          raw: '{"sentiment": "positive", "confidence": 95}',
+          parsed: {
+            "sentiment" => "positive",
+            "confidence" => 95 # Valid: between 0-100
+          }
+        }
+      }
+      TaskWithOutputValidation.provider = TestProvider.new(valid_responses)
+
+      result = TaskWithOutputValidation.call(text: "I love this product!")
+      expect(result.output.valid?).to be true
+      expect(result.output.errors.full_messages).to be_empty
+      expect(result.output.sentiment).to eq("positive")
+      expect(result.output.confidence).to eq(95)
+    end
+  end
+
+  describe "both input and output validations" do
+    before do
+      # Setup test provider with custom responses
+      responses = {
+        "TaskWithBothValidations" => {
+          raw:
+            '{"relevant_hashtags": ["#ruby", "#rails"], "analysis": {"top_hashtag": "#javascript", "count": 5}}',
+          parsed: {
+            "relevant_hashtags" => %w[#ruby #rails],
+            "analysis" => {
+              "top_hashtag" => "#javascript", # Invalid: not in relevant_hashtags
+              "count" => 5
+            }
+          }
+        }
+      }
+      TaskWithBothValidations.provider = TestProvider.new(responses)
+    end
+
+    it "validates input with custom methods" do
+      task =
+        TaskWithBothValidations.new(
+          text: "Hello world",
+          hashtags: %w[ruby rails]
+        )
+      expect(task.valid?).to be false
+      expect(task.input.errors.full_messages).to include(
+        "Hashtags must all start with # symbol"
+      )
+    end
+
+    it "validates output with custom methods" do
+      # Input is valid for this test
+      result =
+        TaskWithBothValidations.call(
+          text: "Hello world",
+          hashtags: %w[#ruby #rails]
+        )
+      expect(result.output.valid?).to be false
+      expect(result.output.errors.full_messages).to include(
+        "Analysis top hashtag must be in the relevant_hashtags list"
+      )
+    end
+
+    it "works with valid input and output" do
+      # Patch the provider with valid data for this test
+      valid_responses = {
+        "TaskWithBothValidations" => {
+          raw:
+            '{"relevant_hashtags": ["#ruby", "#rails"], "analysis": {"top_hashtag": "#ruby", "count": 5}}',
+          parsed: {
+            "relevant_hashtags" => %w[#ruby #rails],
+            "analysis" => {
+              "top_hashtag" => "#ruby", # Valid: in relevant_hashtags
+              "count" => 5
+            }
+          }
+        }
+      }
+      TaskWithBothValidations.provider = TestProvider.new(valid_responses)
+
+      result =
+        TaskWithBothValidations.call(
+          text: "Hello world",
+          hashtags: %w[#ruby #rails]
+        )
+      expect(result.input.valid?).to be true
+      expect(result.output.valid?).to be true
+      expect(result.output.relevant_hashtags).to eq(%w[#ruby #rails])
+      expect(result.output.analysis["top_hashtag"]).to eq("#ruby")
+    end
+  end
+end
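The spec above is the main consumer of this release's headline change: `validate :method_name` can now be declared inside `input_schema`/`output_schema` blocks, registering a task instance method that adds errors to `@input` or `@output`. For orientation, here is a minimal sketch of that DSL distilled from the spec; the `WordCounter` class and its prompt are illustrative and not part of the package, while the DSL calls themselves (`use_provider`, `input_schema`, `validate`, `task_prompt`) come straight from the diff:

# Minimal sketch of the custom-validation DSL exercised by the spec above.
# WordCounter is a hypothetical task used only for illustration.
class WordCounter < Lluminary::Task
  use_provider :test

  input_schema do
    string :text
    integer :min_length

    # Registers #validate_min_length to run during input validation.
    validate :validate_min_length
  end

  output_schema { integer :word_count }

  def validate_min_length
    value = @input.attributes["min_length"]
    @input.errors.add(:min_length, "must be positive") if value && value <= 0
  end

  def task_prompt
    "Count words in: #{text}"
  end
end

As the "rejects invalid input in call" example shows, a failed custom input validation short-circuits `call`: `result.output` is `nil` and the errors surface on `result.input.errors`.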
data/spec/spec_helper.rb CHANGED

metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: lluminary
 version: !ruby/object:Gem::Version
-  version: 0.1.4
+  version: 0.2.1
 platform: ruby
 authors:
 - Doug Hughes
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-04
+date: 2025-05-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activemodel
@@ -170,6 +170,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '6.2'
+- !ruby/object:Gem::Dependency
+  name: simplecov
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.22.0
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.22.0
 description: 'Lluminary is a framework for building applications that leverage Large
   Language Models. It provides a structured way to define tasks, manage prompts, and
   handle LLM interactions.
@@ -197,9 +211,12 @@ files:
 - lib/lluminary/schema.rb
 - lib/lluminary/schema_model.rb
 - lib/lluminary/task.rb
+- lib/lluminary/tasks/describe_openai_model.rb
+- lib/lluminary/tasks/identify_and_describe_open_ai_models.rb
 - lib/lluminary/validation_error.rb
 - lib/lluminary/version.rb
 - spec/examples/analyze_text_spec.rb
+- spec/examples/character_profiler_spec.rb
 - spec/examples/color_analyzer_spec.rb
 - spec/examples/content_analyzer_spec.rb
 - spec/examples/historical_event_analyzer_spec.rb
@@ -219,6 +236,7 @@ files:
 - spec/lluminary/result_spec.rb
 - spec/lluminary/schema_model_spec.rb
 - spec/lluminary/schema_spec.rb
+- spec/lluminary/task_custom_validation_spec.rb
 - spec/lluminary/task_spec.rb
 - spec/spec_helper.rb
 homepage: https://github.com/dhughes/lluminary
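The only dependency change is the new simplecov development dependency. This metadata is generated from the gemspec, which is not itself part of the diff; the entry above would correspond to a declaration roughly like the following sketch:

# Hypothetical excerpt from lluminary.gemspec (not shown in this diff);
# most required fields (version, summary, files, ...) are omitted here.
Gem::Specification.new do |spec|
  spec.name = "lluminary"
  spec.add_development_dependency "simplecov", "~> 0.22.0"
end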