lex-preference-learning 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Gemfile +11 -0
- data/lex-preference-learning.gemspec +29 -0
- data/lib/legion/extensions/preference_learning/actors/decay.rb +41 -0
- data/lib/legion/extensions/preference_learning/client.rb +24 -0
- data/lib/legion/extensions/preference_learning/helpers/constants.rb +31 -0
- data/lib/legion/extensions/preference_learning/helpers/option.rb +74 -0
- data/lib/legion/extensions/preference_learning/helpers/preference_engine.rb +117 -0
- data/lib/legion/extensions/preference_learning/runners/preference_learning.rb +80 -0
- data/lib/legion/extensions/preference_learning/version.rb +9 -0
- data/lib/legion/extensions/preference_learning.rb +15 -0
- data/spec/legion/extensions/preference_learning/client_spec.rb +17 -0
- data/spec/legion/extensions/preference_learning/helpers/constants_spec.rb +67 -0
- data/spec/legion/extensions/preference_learning/helpers/option_spec.rb +104 -0
- data/spec/legion/extensions/preference_learning/helpers/preference_engine_spec.rb +151 -0
- data/spec/legion/extensions/preference_learning/runners/preference_learning_spec.rb +86 -0
- data/spec/spec_helper.rb +20 -0
- metadata +77 -0
checksums.yaml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
---
|
|
2
|
+
SHA256:
|
|
3
|
+
metadata.gz: 18319db7d0f4c66978913f108382a6bd615682687f175c43797c540ca357f5b3
|
|
4
|
+
data.tar.gz: 83f4100aad3f4ef92f8a5f8d50671c4f4e0d75dcc6ae64460794df468c10b84d
|
|
5
|
+
SHA512:
|
|
6
|
+
metadata.gz: 9b567fb54f9e788f1d260bb31b3a6d2e1e05ab1510f0c94bbc2eebc4a7ce40827b779db3b024183be14dcbd2fc8e1370c3607020b38983f1659d8ee14c623407
|
|
7
|
+
data.tar.gz: 3cdb684473150ae3b33ba406725c1259aca185e1439ffdb01b285ea327541edffb9b3534a1601e61057408265d39b3c102a0f79463e1a4966056143598df7d0c
|
data/Gemfile
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# frozen_string_literal: true

require_relative 'lib/legion/extensions/preference_learning/version'

# Gem specification for the lex-preference-learning Legion extension.
Gem::Specification.new do |spec|
  spec.name = 'lex-preference-learning'
  spec.version = Legion::Extensions::PreferenceLearning::VERSION
  spec.authors = ['Esity']
  spec.email = ['matthewdiverson@gmail.com']

  spec.summary = 'LEX Preference Learning'
  spec.description = 'Learns and models preferences from choices and feedback for brain-modeled agentic AI'
  spec.homepage = 'https://github.com/LegionIO/lex-preference-learning'
  spec.license = 'MIT'
  # Runtime code uses Ruby 3.4 features (e.g. the implicit `it` block param).
  spec.required_ruby_version = '>= 3.4'

  spec.metadata['homepage_uri'] = spec.homepage
  spec.metadata['source_code_uri'] = 'https://github.com/LegionIO/lex-preference-learning'
  spec.metadata['documentation_uri'] = 'https://github.com/LegionIO/lex-preference-learning'
  spec.metadata['changelog_uri'] = 'https://github.com/LegionIO/lex-preference-learning'
  spec.metadata['bug_tracker_uri'] = 'https://github.com/LegionIO/lex-preference-learning/issues'
  spec.metadata['rubygems_mfa_required'] = 'true'

  # Ship lib + specs plus the build files themselves.
  spec.files = Dir.chdir(File.expand_path(__dir__)) do
    Dir.glob('{lib,spec}/**/*') + %w[lex-preference-learning.gemspec Gemfile]
  end
  spec.require_paths = ['lib']
  spec.add_development_dependency 'legion-gaia'
end
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'legion/extensions/actors/every'

module Legion
  module Extensions
    module PreferenceLearning
      module Actor
        # Periodic actor that drives the preference decay cycle.
        #
        # Scheduled every 300 seconds; the actual work lives in
        # Runners::PreferenceLearning#update_preference_learning.
        class Decay < Legion::Extensions::Actors::Every
          # Runner module that owns the decay logic.
          def runner_class = Legion::Extensions::PreferenceLearning::Runners::PreferenceLearning

          # Runner method invoked on each tick.
          def runner_function = 'update_preference_learning'

          # Interval between ticks, in seconds.
          def time = 300

          # Do not fire immediately on startup; wait for the first interval.
          def run_now? = false

          # Scheduling flags consumed by the Every actor base class.
          def use_runner? = false

          def check_subtask? = false

          def generate_task? = false
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'legion/extensions/preference_learning/helpers/constants'
require 'legion/extensions/preference_learning/helpers/option'
require 'legion/extensions/preference_learning/helpers/preference_engine'
require 'legion/extensions/preference_learning/runners/preference_learning'

module Legion
  module Extensions
    module PreferenceLearning
      # Thin convenience wrapper that exposes the runner API as instance
      # methods by mixing in Runners::PreferenceLearning.
      #
      # The backing engine is built eagerly so every runner call on one
      # Client instance shares a single Helpers::PreferenceEngine.
      class Client
        include Runners::PreferenceLearning

        # Accepts (and ignores) arbitrary keyword options for forward
        # compatibility with other extension clients.
        def initialize(**)
          @preference_engine = Helpers::PreferenceEngine.new
        end

        # Engine accessor consumed by the mixed-in runner methods; not part
        # of the public API.
        private attr_reader :preference_engine
      end
    end
  end
end
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Legion
  module Extensions
    module PreferenceLearning
      module Helpers
        # Shared tuning constants for the preference-learning engine.
        module Constants
          # Score ranges mapped to human-readable labels. The ranges are
          # non-overlapping and cover the whole clamped score space; each
          # boundary value falls into the bucket whose floor it is (e.g.
          # 0.8 => :strongly_preferred, 0.2 => :disliked). The last range was
          # previously `(..0.2)`, which overlapped `(0.2...0.4)` at exactly
          # 0.2; the exclusive end removes the ambiguity without changing
          # lookup results (insertion order already resolved 0.2 first).
          PREFERENCE_LABELS = {
            (0.8..) => :strongly_preferred,
            (0.6...0.8) => :preferred,
            (0.4...0.6) => :neutral,
            (0.2...0.4) => :disliked,
            (...0.2) => :strongly_disliked
          }.freeze

          # Hard caps guarding against unbounded in-memory growth.
          MAX_OPTIONS = 200
          MAX_COMPARISONS = 1000
          MAX_HISTORY = 500

          # New options start neutral; scores are clamped to [floor, ceiling].
          DEFAULT_PREFERENCE = 0.5
          PREFERENCE_FLOOR = 0.0
          PREFERENCE_CEILING = 1.0

          # Per-comparison score adjustments: a win boosts slightly more than
          # a loss penalizes, and each decay cycle pulls every score 1% of
          # its distance back toward DEFAULT_PREFERENCE.
          WIN_BOOST = 0.08
          LOSS_PENALTY = 0.06
          DECAY_RATE = 0.01
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'securerandom'

module Legion
  module Extensions
    module PreferenceLearning
      module Helpers
        # A single choosable item whose preference score is learned from
        # pairwise win/loss comparisons.
        class Option
          attr_reader :id, :label, :domain, :created_at
          attr_accessor :preference_score, :wins, :losses, :times_seen

          # @param label [String] human-readable name for the option
          # @param domain [Symbol] grouping key used for per-domain reports
          def initialize(label:, domain: :general)
            @id = SecureRandom.uuid
            @label = label
            @domain = domain
            @preference_score = Constants::DEFAULT_PREFERENCE
            @wins = 0
            @losses = 0
            @times_seen = 0
            @created_at = Time.now.utc
          end

          # Record a comparison won by this option and boost its score.
          def win!
            @wins += 1
            @times_seen += 1
            @preference_score = clamp(@preference_score + Constants::WIN_BOOST)
          end

          # Record a comparison lost by this option and penalize its score.
          def lose!
            @losses += 1
            @times_seen += 1
            @preference_score = clamp(@preference_score - Constants::LOSS_PENALTY)
          end

          # Fraction of recorded comparisons this option has won (0.0..1.0);
          # 0.0 before any comparison has been recorded.
          def win_rate
            total = @wins + @losses
            return 0.0 if total.zero?

            # Was `@wins.to_f / (total + 1)`, which understated the rate and
            # could never reach 1.0 even for an undefeated option.
            @wins.to_f / total
          end

          # Human-readable bucket for the current score; :neutral is the
          # fallback if no configured range matches.
          def preference_label
            Constants::PREFERENCE_LABELS.each do |range, label|
              return label if range.cover?(@preference_score)
            end
            :neutral
          end

          # Serializable snapshot of the option's full state.
          def to_h
            {
              id: @id,
              label: @label,
              domain: @domain,
              preference_score: @preference_score,
              wins: @wins,
              losses: @losses,
              times_seen: @times_seen,
              win_rate: win_rate,
              preference_label: preference_label,
              created_at: @created_at
            }
          end

          private

          # Keep scores inside the configured [floor, ceiling] band.
          def clamp(value)
            value.clamp(Constants::PREFERENCE_FLOOR, Constants::PREFERENCE_CEILING)
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Legion
  module Extensions
    module PreferenceLearning
      module Helpers
        # In-memory store of Options plus the pairwise-comparison learning
        # logic that adjusts their preference scores.
        class PreferenceEngine
          def initialize
            @options = {}    # option id (uuid String) => Option
            @comparisons = 0 # lifetime count of recorded comparisons
          end

          # Create and store a new Option.
          # @return [Hash] the option snapshot, or { error: ... } at capacity
          def register_option(label:, domain: :general)
            return { error: 'max options reached' } if @options.size >= Constants::MAX_OPTIONS

            option = Option.new(label: label, domain: domain)
            @options[option.id] = option
            option.to_h
          end

          # Record one pairwise outcome: boosts the winner, penalizes the
          # loser, and bumps the comparison counter.
          # @return [Hash] summary of the updated scores, or { error: ... }
          def record_comparison(winner_id:, loser_id:)
            # Enforce the declared cap (Constants::MAX_COMPARISONS was
            # previously defined but never checked).
            return { error: 'max comparisons reached' } if @comparisons >= Constants::MAX_COMPARISONS

            winner = @options[winner_id]
            loser = @options[loser_id]
            return { error: 'option not found' } unless winner && loser

            @comparisons += 1
            winner.win!
            loser.lose!

            {
              winner_id: winner_id,
              loser_id: loser_id,
              comparisons: @comparisons,
              winner_score: winner.preference_score,
              loser_score: loser.preference_score
            }
          end

          # Predict which of two known options is preferred. Confidence is
          # the absolute score gap; ties favor option A.
          def predict_preference(option_a_id:, option_b_id:)
            a = @options[option_a_id]
            b = @options[option_b_id]
            return { error: 'option not found' } unless a && b

            diff = (a.preference_score - b.preference_score).abs
            confidence = diff.clamp(0.0, 1.0)
            preferred = a.preference_score >= b.preference_score ? a : b

            {
              preferred_id: preferred.id,
              preferred_label: preferred.label,
              confidence: confidence,
              score_a: a.preference_score,
              score_b: b.preference_score
            }
          end

          # Highest-scoring options, optionally filtered by domain.
          def top_preferences(domain: nil, limit: 5)
            filtered(domain).sort_by { |o| -o.preference_score }.first(limit).map(&:to_h)
          end

          # Lowest-scoring options, optionally filtered by domain.
          def bottom_preferences(domain: nil, limit: 5)
            filtered(domain).sort_by(&:preference_score).first(limit).map(&:to_h)
          end

          # All options in one domain, best first.
          def preferences_by_domain(domain:)
            @options.values
                    .select { |o| o.domain == domain }
                    .sort_by { |o| -o.preference_score }
                    .map(&:to_h)
          end

          # Population standard deviation of all scores; 0.0 with fewer than
          # two options. Lower values mean scores cluster near each other.
          def preference_stability
            scores = @options.values.map(&:preference_score)
            return 0.0 if scores.size < 2

            mean = scores.sum / scores.size.to_f
            variance = scores.sum { |s| (s - mean)**2 } / scores.size.to_f
            Math.sqrt(variance)
          end

          # Options that have appeared in the most comparisons.
          def most_compared(limit: 10)
            @options.values
                    .sort_by { |o| -o.times_seen }
                    .first(limit)
                    .map(&:to_h)
          end

          # Pull every score DECAY_RATE of the way back toward the default so
          # unreinforced preferences fade over time.
          # @return [Integer] number of options touched
          def decay_all
            @options.each_value do |option|
              delta = (option.preference_score - Constants::DEFAULT_PREFERENCE) * Constants::DECAY_RATE
              option.preference_score = (option.preference_score - delta)
                                        .clamp(Constants::PREFERENCE_FLOOR, Constants::PREFERENCE_CEILING)
            end
            @options.size
          end

          # Full engine snapshot for reporting.
          def to_h
            {
              total_options: @options.size,
              comparisons: @comparisons,
              stability: preference_stability,
              options: @options.values.map(&:to_h)
            }
          end

          private

          # All options, or just those in +domain+ when one is given.
          def filtered(domain)
            return @options.values if domain.nil?

            @options.values.select { |o| o.domain == domain }
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Legion
  module Extensions
    module PreferenceLearning
      module Runners
        # Legion runner exposing the preference-learning API. Each public
        # method logs its outcome via Legion::Logging and returns a plain
        # Hash payload (with :error set on failure).
        module PreferenceLearning
          include Legion::Extensions::Helpers::Lex if Legion::Extensions.const_defined?(:Helpers) &&
                                                      Legion::Extensions::Helpers.const_defined?(:Lex)

          # Stability scores (standard deviation) below this are :stable.
          # Previously this threshold was duplicated in two methods.
          STABILITY_THRESHOLD = 0.1

          # Register a new option with the engine.
          def register_preference_option(label:, domain: :general, **)
            result = preference_engine.register_option(label: label, domain: domain)
            if result[:error]
              Legion::Logging.warn "[preference_learning] register failed: #{result[:error]}"
            else
              Legion::Logging.debug "[preference_learning] registered option id=#{result[:id]} label=#{label} domain=#{domain}"
            end
            result
          end

          # Record a single pairwise winner/loser outcome.
          def record_preference_comparison(winner_id:, loser_id:, **)
            result = preference_engine.record_comparison(winner_id: winner_id, loser_id: loser_id)
            if result[:error]
              Legion::Logging.warn "[preference_learning] comparison failed: #{result[:error]}"
            else
              Legion::Logging.info "[preference_learning] comparison: winner=#{winner_id} loser=#{loser_id} total=#{result[:comparisons]}"
            end
            result
          end

          # Predict the preferred option of a pair, with a confidence score.
          def predict_preference_outcome(option_a_id:, option_b_id:, **)
            result = preference_engine.predict_preference(option_a_id: option_a_id, option_b_id: option_b_id)
            if result[:error]
              Legion::Logging.warn "[preference_learning] predict failed: #{result[:error]}"
            else
              Legion::Logging.debug "[preference_learning] predict: preferred=#{result[:preferred_label]} confidence=#{result[:confidence].round(2)}"
            end
            result
          end

          # Report the highest-scoring options, optionally per domain.
          def top_preferences_report(domain: nil, limit: 5, **)
            options = preference_engine.top_preferences(domain: domain, limit: limit)
            Legion::Logging.debug "[preference_learning] top #{limit} preferences domain=#{domain.inspect} count=#{options.size}"
            { domain: domain, limit: limit, options: options }
          end

          # Report score stability (standard deviation) with a coarse label.
          def preference_stability_report(**)
            stability = preference_engine.preference_stability
            label = stability_label(stability)
            Legion::Logging.debug "[preference_learning] stability=#{stability.round(4)} label=#{label}"
            { stability: stability, label: label }
          end

          # Decay cycle entry point (invoked by the Decay actor).
          def update_preference_learning(**)
            count = preference_engine.decay_all
            Legion::Logging.debug "[preference_learning] decay cycle: options_updated=#{count}"
            { decayed: count }
          end

          # Full engine snapshot plus a stability label. Reuses the
          # :stability already computed in the snapshot instead of asking the
          # engine to recompute it.
          def preference_learning_stats(**)
            engine_hash = preference_engine.to_h
            Legion::Logging.debug "[preference_learning] stats: total_options=#{engine_hash[:total_options]} comparisons=#{engine_hash[:comparisons]}"
            engine_hash.merge(stability_label: stability_label(engine_hash[:stability]))
          end

          private

          # Lazily built engine; Client pre-assigns @preference_engine in its
          # constructor, which this memoization respects.
          def preference_engine
            @preference_engine ||= Helpers::PreferenceEngine.new
          end

          # Shared threshold logic (previously duplicated inline).
          def stability_label(stability)
            stability < STABILITY_THRESHOLD ? :stable : :variable
          end

          # Retained for backward compatibility with internal callers.
          def preference_engine_stability_label
            stability_label(preference_engine.preference_stability)
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# frozen_string_literal: true

# Entry point for the lex-preference-learning extension: loads every
# component, then registers with Legion's extension machinery when present.
require 'legion/extensions/preference_learning/version'
require 'legion/extensions/preference_learning/helpers/constants'
require 'legion/extensions/preference_learning/helpers/option'
require 'legion/extensions/preference_learning/helpers/preference_engine'
require 'legion/extensions/preference_learning/runners/preference_learning'

module Legion
  module Extensions
    # Namespace for the preference-learning extension.
    module PreferenceLearning
      # Hook into legion-core only when it is loaded, so the gem stays
      # requireable in isolation (e.g. in the spec suite).
      extend Legion::Extensions::Core if Legion::Extensions.const_defined? :Core
    end
  end
end
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'legion/extensions/preference_learning/client'

# Smoke test: Client must expose every runner method via the mixed-in
# Runners::PreferenceLearning module.
RSpec.describe Legion::Extensions::PreferenceLearning::Client do
  let(:client) { described_class.new }

  it 'responds to runner methods' do
    expect(client).to respond_to(:register_preference_option)
    expect(client).to respond_to(:record_preference_comparison)
    expect(client).to respond_to(:predict_preference_outcome)
    expect(client).to respond_to(:top_preferences_report)
    expect(client).to respond_to(:preference_stability_report)
    expect(client).to respond_to(:update_preference_learning)
    expect(client).to respond_to(:preference_learning_stats)
  end
end
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
# frozen_string_literal: true

# Pins the label-bucket boundaries and every numeric tuning constant.
RSpec.describe Legion::Extensions::PreferenceLearning::Helpers::Constants do
  describe 'PREFERENCE_LABELS' do
    subject(:labels) { described_class::PREFERENCE_LABELS }

    # Each example resolves a score the same way Option#preference_label
    # does: first range (in insertion order) that covers the value wins.
    it 'labels 0.9 as strongly_preferred' do
      label = labels.find { |range, _| range.cover?(0.9) }&.last
      expect(label).to eq(:strongly_preferred)
    end

    it 'labels 0.7 as preferred' do
      label = labels.find { |range, _| range.cover?(0.7) }&.last
      expect(label).to eq(:preferred)
    end

    it 'labels 0.5 as neutral' do
      label = labels.find { |range, _| range.cover?(0.5) }&.last
      expect(label).to eq(:neutral)
    end

    it 'labels 0.3 as disliked' do
      label = labels.find { |range, _| range.cover?(0.3) }&.last
      expect(label).to eq(:disliked)
    end

    it 'labels 0.1 as strongly_disliked' do
      label = labels.find { |range, _| range.cover?(0.1) }&.last
      expect(label).to eq(:strongly_disliked)
    end
  end

  describe 'numeric constants' do
    it 'defines MAX_OPTIONS' do
      expect(described_class::MAX_OPTIONS).to eq(200)
    end

    it 'defines MAX_COMPARISONS' do
      expect(described_class::MAX_COMPARISONS).to eq(1000)
    end

    it 'defines MAX_HISTORY' do
      expect(described_class::MAX_HISTORY).to eq(500)
    end

    it 'defines DEFAULT_PREFERENCE as 0.5' do
      expect(described_class::DEFAULT_PREFERENCE).to eq(0.5)
    end

    it 'defines PREFERENCE_FLOOR as 0.0' do
      expect(described_class::PREFERENCE_FLOOR).to eq(0.0)
    end

    it 'defines PREFERENCE_CEILING as 1.0' do
      expect(described_class::PREFERENCE_CEILING).to eq(1.0)
    end

    # NOTE(review): example name is awkward — it actually pins both values
    # and (implicitly) that WIN_BOOST > LOSS_PENALTY.
    it 'WIN_BOOST is greater than LOSS_PENALTY inverted' do
      expect(described_class::WIN_BOOST).to eq(0.08)
      expect(described_class::LOSS_PENALTY).to eq(0.06)
    end

    it 'defines DECAY_RATE' do
      expect(described_class::DECAY_RATE).to eq(0.01)
    end
  end
end
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
# frozen_string_literal: true

# Unit tests for the Option value object: construction defaults, the
# win!/lose! score adjustments with clamping, and serialization.
RSpec.describe Legion::Extensions::PreferenceLearning::Helpers::Option do
  subject(:option) { described_class.new(label: 'Option A', domain: :taste) }

  describe '#initialize' do
    it 'assigns a uuid id' do
      expect(option.id).to match(/\A[0-9a-f-]{36}\z/)
    end

    it 'sets label and domain' do
      expect(option.label).to eq('Option A')
      expect(option.domain).to eq(:taste)
    end

    it 'starts with default preference score' do
      expect(option.preference_score).to eq(0.5)
    end

    it 'starts with zero wins and losses' do
      expect(option.wins).to eq(0)
      expect(option.losses).to eq(0)
      expect(option.times_seen).to eq(0)
    end

    it 'records created_at' do
      expect(option.created_at).to be_a(Time)
    end
  end

  describe '#win!' do
    it 'increments wins and times_seen' do
      option.win!
      expect(option.wins).to eq(1)
      expect(option.times_seen).to eq(1)
    end

    it 'increases preference score' do
      before = option.preference_score
      option.win!
      expect(option.preference_score).to be > before
    end

    # 20 wins would overshoot 1.0 without clamping (0.5 + 20 * 0.08).
    it 'does not exceed ceiling' do
      20.times { option.win! }
      expect(option.preference_score).to be <= 1.0
    end
  end

  describe '#lose!' do
    it 'increments losses and times_seen' do
      option.lose!
      expect(option.losses).to eq(1)
      expect(option.times_seen).to eq(1)
    end

    it 'decreases preference score' do
      before = option.preference_score
      option.lose!
      expect(option.preference_score).to be < before
    end

    it 'does not go below floor' do
      20.times { option.lose! }
      expect(option.preference_score).to be >= 0.0
    end
  end

  describe '#win_rate' do
    it 'returns 0.0 when no comparisons' do
      expect(option.win_rate).to eq(0.0)
    end

    it 'returns a value between 0 and 1 after comparisons' do
      option.win!
      option.lose!
      expect(option.win_rate).to be_between(0.0, 1.0)
    end
  end

  describe '#preference_label' do
    it 'returns :neutral for default score' do
      expect(option.preference_label).to eq(:neutral)
    end

    it 'returns :strongly_preferred after many wins' do
      15.times { option.win! }
      expect(option.preference_label).to eq(:strongly_preferred)
    end

    it 'returns :strongly_disliked after many losses' do
      15.times { option.lose! }
      expect(option.preference_label).to eq(:strongly_disliked)
    end
  end

  describe '#to_h' do
    it 'includes all expected keys' do
      h = option.to_h
      expect(h.keys).to include(:id, :label, :domain, :preference_score, :wins, :losses,
                                :times_seen, :win_rate, :preference_label, :created_at)
    end
  end
end
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
# frozen_string_literal: true

# Behavioral tests for PreferenceEngine: registration, comparisons,
# prediction, ranking reports, stability, and decay.
RSpec.describe Legion::Extensions::PreferenceLearning::Helpers::PreferenceEngine do
  subject(:engine) { described_class.new }

  # Lazy: referencing these ids registers the option on first use.
  let(:opt_a_id) { engine.register_option(label: 'Apple', domain: :food)[:id] }
  let(:opt_b_id) { engine.register_option(label: 'Banana', domain: :food)[:id] }

  describe '#register_option' do
    it 'returns a hash with id, label, domain' do
      result = engine.register_option(label: 'Cherry', domain: :food)
      expect(result[:id]).to match(/\A[0-9a-f-]{36}\z/)
      expect(result[:label]).to eq('Cherry')
      expect(result[:domain]).to eq(:food)
    end

    it 'starts with default preference score' do
      result = engine.register_option(label: 'Date')
      expect(result[:preference_score]).to eq(0.5)
    end
  end

  describe '#record_comparison' do
    it 'increases winner score and decreases loser score' do
      engine.record_comparison(winner_id: opt_a_id, loser_id: opt_b_id)
      top = engine.top_preferences(domain: :food, limit: 1)
      expect(top.first[:id]).to eq(opt_a_id)
    end

    it 'returns comparison summary' do
      result = engine.record_comparison(winner_id: opt_a_id, loser_id: opt_b_id)
      expect(result[:comparisons]).to eq(1)
      expect(result[:winner_score]).to be > 0.5
      expect(result[:loser_score]).to be < 0.5
    end

    it 'returns error for unknown option' do
      result = engine.record_comparison(winner_id: 'bad-id', loser_id: opt_b_id)
      expect(result[:error]).to eq('option not found')
    end
  end

  describe '#predict_preference' do
    it 'predicts winner after comparisons' do
      3.times { engine.record_comparison(winner_id: opt_a_id, loser_id: opt_b_id) }
      result = engine.predict_preference(option_a_id: opt_a_id, option_b_id: opt_b_id)
      expect(result[:preferred_id]).to eq(opt_a_id)
    end

    it 'returns confidence as a 0..1 value' do
      engine.record_comparison(winner_id: opt_a_id, loser_id: opt_b_id)
      result = engine.predict_preference(option_a_id: opt_a_id, option_b_id: opt_b_id)
      expect(result[:confidence]).to be_between(0.0, 1.0)
    end

    it 'returns error for unknown option' do
      result = engine.predict_preference(option_a_id: 'bad-id', option_b_id: opt_b_id)
      expect(result[:error]).to eq('option not found')
    end
  end

  describe '#top_preferences' do
    it 'returns options sorted by score descending' do
      engine.record_comparison(winner_id: opt_a_id, loser_id: opt_b_id)
      top = engine.top_preferences(domain: :food, limit: 2)
      expect(top.first[:id]).to eq(opt_a_id)
    end

    it 'respects limit' do
      # `it` here is Ruby 3.4's implicit block parameter (0, 1, 2),
      # not RSpec's `it` — yields labels "opt0".."opt2".
      3.times { engine.register_option(label: "opt#{it}") }
      top = engine.top_preferences(limit: 2)
      expect(top.size).to be <= 2
    end

    it 'returns all domains when domain is nil' do
      opt_a_id # force lazy registration before querying
      engine.register_option(label: 'X', domain: :color)
      result = engine.top_preferences(domain: nil, limit: 10)
      domains = result.map { |o| o[:domain] }.uniq
      expect(domains).to include(:food, :color)
    end
  end

  describe '#bottom_preferences' do
    it 'returns options sorted by score ascending' do
      engine.record_comparison(winner_id: opt_a_id, loser_id: opt_b_id)
      bottom = engine.bottom_preferences(domain: :food, limit: 1)
      expect(bottom.first[:id]).to eq(opt_b_id)
    end
  end

  describe '#preferences_by_domain' do
    it 'returns only options for given domain' do
      fresh = described_class.new
      fresh.register_option(label: 'Apple', domain: :food)
      fresh.register_option(label: 'Red', domain: :color)
      result = fresh.preferences_by_domain(domain: :food)
      expect(result.map { |o| o[:domain] }.uniq).to eq([:food])
    end
  end

  describe '#preference_stability' do
    it 'returns 0.0 with fewer than 2 options' do
      engine2 = described_class.new
      engine2.register_option(label: 'Solo')
      expect(engine2.preference_stability).to eq(0.0)
    end

    it 'returns a non-negative float with multiple options' do
      opt_a_id
      opt_b_id
      3.times { engine.record_comparison(winner_id: opt_a_id, loser_id: opt_b_id) }
      expect(engine.preference_stability).to be >= 0.0
    end
  end

  describe '#most_compared' do
    it 'returns options sorted by times_seen descending' do
      3.times { engine.record_comparison(winner_id: opt_a_id, loser_id: opt_b_id) }
      result = engine.most_compared(limit: 2)
      expect(result.first[:times_seen]).to be >= result.last[:times_seen]
    end
  end

  describe '#decay_all' do
    it 'returns the count of options' do
      opt_a_id # materialize both lazy lets before counting
      opt_b_id
      expect(engine.decay_all).to eq(2)
    end

    it 'nudges scores toward default (0.5)' do
      3.times { engine.record_comparison(winner_id: opt_a_id, loser_id: opt_b_id) }
      before_a = engine.top_preferences(domain: :food, limit: 1).first[:preference_score]
      engine.decay_all
      after_a = engine.top_preferences(domain: :food, limit: 1).first[:preference_score]
      expect(after_a).to be < before_a
    end
  end

  describe '#to_h' do
    it 'includes total_options, comparisons, stability, options' do
      opt_a_id
      opt_b_id
      h = engine.to_h
      expect(h[:total_options]).to eq(2)
      expect(h[:comparisons]).to eq(0)
      expect(h[:options]).to be_an(Array)
    end
  end
end
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'legion/extensions/preference_learning/client'

# Exercises the runner module through a Client instance (the mixin target).
RSpec.describe Legion::Extensions::PreferenceLearning::Runners::PreferenceLearning do
  let(:client) { Legion::Extensions::PreferenceLearning::Client.new }

  # Lazy: referencing these ids registers the option on first use.
  let(:opt_a_id) { client.register_preference_option(label: 'Alpha', domain: :general)[:id] }
  let(:opt_b_id) { client.register_preference_option(label: 'Beta', domain: :general)[:id] }

  describe '#register_preference_option' do
    it 'registers an option and returns id' do
      result = client.register_preference_option(label: 'Gamma')
      expect(result[:id]).not_to be_nil
      expect(result[:label]).to eq('Gamma')
    end
  end

  describe '#record_preference_comparison' do
    it 'records winner/loser and returns comparison data' do
      result = client.record_preference_comparison(winner_id: opt_a_id, loser_id: opt_b_id)
      expect(result[:comparisons]).to eq(1)
      expect(result[:winner_score]).to be > 0.5
    end

    it 'returns error for invalid ids' do
      result = client.record_preference_comparison(winner_id: 'nope', loser_id: opt_b_id)
      expect(result[:error]).to eq('option not found')
    end
  end

  describe '#predict_preference_outcome' do
    it 'returns preferred_id and confidence' do
      3.times { client.record_preference_comparison(winner_id: opt_a_id, loser_id: opt_b_id) }
      result = client.predict_preference_outcome(option_a_id: opt_a_id, option_b_id: opt_b_id)
      expect(result[:preferred_id]).to eq(opt_a_id)
      expect(result[:confidence]).to be_between(0.0, 1.0)
    end

    it 'returns error for invalid id' do
      result = client.predict_preference_outcome(option_a_id: 'bad', option_b_id: opt_b_id)
      expect(result[:error]).to eq('option not found')
    end
  end

  describe '#top_preferences_report' do
    it 'returns domain, limit, and options array' do
      opt_a_id # materialize lazy lets so the report has data
      opt_b_id
      result = client.top_preferences_report(domain: :general, limit: 5)
      expect(result[:domain]).to eq(:general)
      expect(result[:limit]).to eq(5)
      expect(result[:options]).to be_an(Array)
    end
  end

  describe '#preference_stability_report' do
    it 'returns stability float and label' do
      opt_a_id
      opt_b_id
      result = client.preference_stability_report
      expect(result[:stability]).to be_a(Float)
      expect(%i[stable variable]).to include(result[:label])
    end
  end

  describe '#update_preference_learning' do
    it 'returns decayed count' do
      opt_a_id
      opt_b_id
      result = client.update_preference_learning
      expect(result[:decayed]).to eq(2)
    end
  end

  describe '#preference_learning_stats' do
    it 'returns total_options, comparisons, stability, stability_label' do
      opt_a_id
      opt_b_id
      result = client.preference_learning_stats
      expect(result[:total_options]).to eq(2)
      expect(result[:comparisons]).to eq(0)
      expect(result[:stability_label]).not_to be_nil
    end
  end
end
|
data/spec/spec_helper.rb
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'bundler/setup'
|
|
4
|
+
|
|
5
|
+
# Minimal stand-in for Legion::Logging so the extension under test can emit
# log calls during specs without pulling in the real logging dependency.
module Legion
  module Logging
    # Each log level accepts a single message argument and silently discards it.
    %i[debug info warn error].each do |severity|
      define_singleton_method(severity) { |_msg| }
    end
  end
end
|
|
13
|
+
|
|
14
|
+
require 'legion/extensions/preference_learning'
|
|
15
|
+
|
|
16
|
+
RSpec.configure do |config|
  # Persist example statuses so `rspec --only-failures` works across runs.
  config.example_status_persistence_file_path = '.rspec_status'
  # Disallow the legacy monkey-patched (`should`) API globally.
  config.disable_monkey_patching!
  config.expect_with :rspec do |expectations|
    expectations.syntax = :expect
  end
end
|
metadata
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
|
2
|
+
name: lex-preference-learning
|
|
3
|
+
version: !ruby/object:Gem::Version
|
|
4
|
+
version: 0.1.0
|
|
5
|
+
platform: ruby
|
|
6
|
+
authors:
|
|
7
|
+
- Esity
|
|
8
|
+
bindir: bin
|
|
9
|
+
cert_chain: []
|
|
10
|
+
date: 1980-01-02 00:00:00.000000000 Z
|
|
11
|
+
dependencies:
|
|
12
|
+
- !ruby/object:Gem::Dependency
|
|
13
|
+
name: legion-gaia
|
|
14
|
+
requirement: !ruby/object:Gem::Requirement
|
|
15
|
+
requirements:
|
|
16
|
+
- - ">="
|
|
17
|
+
- !ruby/object:Gem::Version
|
|
18
|
+
version: '0'
|
|
19
|
+
type: :development
|
|
20
|
+
prerelease: false
|
|
21
|
+
version_requirements: !ruby/object:Gem::Requirement
|
|
22
|
+
requirements:
|
|
23
|
+
- - ">="
|
|
24
|
+
- !ruby/object:Gem::Version
|
|
25
|
+
version: '0'
|
|
26
|
+
description: Learns and models preferences from choices and feedback for brain-modeled
|
|
27
|
+
agentic AI
|
|
28
|
+
email:
|
|
29
|
+
- matthewdiverson@gmail.com
|
|
30
|
+
executables: []
|
|
31
|
+
extensions: []
|
|
32
|
+
extra_rdoc_files: []
|
|
33
|
+
files:
|
|
34
|
+
- Gemfile
|
|
35
|
+
- lex-preference-learning.gemspec
|
|
36
|
+
- lib/legion/extensions/preference_learning.rb
|
|
37
|
+
- lib/legion/extensions/preference_learning/actors/decay.rb
|
|
38
|
+
- lib/legion/extensions/preference_learning/client.rb
|
|
39
|
+
- lib/legion/extensions/preference_learning/helpers/constants.rb
|
|
40
|
+
- lib/legion/extensions/preference_learning/helpers/option.rb
|
|
41
|
+
- lib/legion/extensions/preference_learning/helpers/preference_engine.rb
|
|
42
|
+
- lib/legion/extensions/preference_learning/runners/preference_learning.rb
|
|
43
|
+
- lib/legion/extensions/preference_learning/version.rb
|
|
44
|
+
- spec/legion/extensions/preference_learning/client_spec.rb
|
|
45
|
+
- spec/legion/extensions/preference_learning/helpers/constants_spec.rb
|
|
46
|
+
- spec/legion/extensions/preference_learning/helpers/option_spec.rb
|
|
47
|
+
- spec/legion/extensions/preference_learning/helpers/preference_engine_spec.rb
|
|
48
|
+
- spec/legion/extensions/preference_learning/runners/preference_learning_spec.rb
|
|
49
|
+
- spec/spec_helper.rb
|
|
50
|
+
homepage: https://github.com/LegionIO/lex-preference-learning
|
|
51
|
+
licenses:
|
|
52
|
+
- MIT
|
|
53
|
+
metadata:
|
|
54
|
+
homepage_uri: https://github.com/LegionIO/lex-preference-learning
|
|
55
|
+
source_code_uri: https://github.com/LegionIO/lex-preference-learning
|
|
56
|
+
documentation_uri: https://github.com/LegionIO/lex-preference-learning
|
|
57
|
+
changelog_uri: https://github.com/LegionIO/lex-preference-learning
|
|
58
|
+
bug_tracker_uri: https://github.com/LegionIO/lex-preference-learning/issues
|
|
59
|
+
rubygems_mfa_required: 'true'
|
|
60
|
+
rdoc_options: []
|
|
61
|
+
require_paths:
|
|
62
|
+
- lib
|
|
63
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
|
64
|
+
requirements:
|
|
65
|
+
- - ">="
|
|
66
|
+
- !ruby/object:Gem::Version
|
|
67
|
+
version: '3.4'
|
|
68
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
|
69
|
+
requirements:
|
|
70
|
+
- - ">="
|
|
71
|
+
- !ruby/object:Gem::Version
|
|
72
|
+
version: '0'
|
|
73
|
+
requirements: []
|
|
74
|
+
rubygems_version: 3.6.9
|
|
75
|
+
specification_version: 4
|
|
76
|
+
summary: LEX Preference Learning
|
|
77
|
+
test_files: []
|