lex-moral-reasoning 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: c500a79fb70b329c9bf3838fefa3f63b158f9c8b71c63d14284350f8003b4aa3
4
+ data.tar.gz: 5a09f5ff1d011912e60cbf414e3201f10efdaea0bc0103df2ebdf30439a903ef
5
+ SHA512:
6
+ metadata.gz: 7dda3845893fe4eb6d8ad7d34e66040cb6a273072fd9f1ca28408218ebe3dcfc028dc6781f1686b06389abb2cc5f0ce069ccda9bb2c2e29e05f03219c219f14f
7
+ data.tar.gz: 0ba52f923c31347ce52ce8f65c9cd55f49fcf948a625a51eed49a5280aedcf28167fc6ea3947d630f1ef83e0d231ed925064d83fcb48d7f77c1e666470ae86db
@@ -0,0 +1,15 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative 'runners/moral_reasoning'
4
+
5
+ module Legion
6
+ module Extensions
7
+ module MoralReasoning
8
# Thin entry-point object that exposes the runner methods as instance
# methods. Carries no state of its own.
class Client
  include Runners::MoralReasoning

  # Arbitrary keyword options are accepted (and ignored) so callers may
  # pass configuration without raising ArgumentError.
  def initialize(**_options) = nil
end
13
+ end
14
+ end
15
+ end
@@ -0,0 +1,45 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Legion
4
+ module Extensions
5
+ module MoralReasoning
6
+ module Helpers
7
# Tunable limits and fixed taxonomies shared across the moral-reasoning
# engine and its helpers.
module Constants
  # Hard caps on stored state.
  MAX_DILEMMAS = 100
  MAX_PRINCIPLES = 50
  MAX_HISTORY = 300

  # Foundation-weight dynamics.
  DEFAULT_WEIGHT = 0.5
  WEIGHT_FLOOR = 0.1
  WEIGHT_CEILING = 1.0
  REINFORCEMENT_RATE = 0.1
  DECAY_RATE = 0.01

  # Haidt's 6 Moral Foundations.
  MORAL_FOUNDATIONS = %i[care fairness loyalty authority sanctity liberty].freeze

  # Kohlberg's 6 stages grouped into the three classic levels.
  KOHLBERG_LEVELS = {
    preconventional: %i[obedience self_interest],
    conventional: %i[conformity law_and_order],
    postconventional: %i[social_contract universal_ethics]
  }.freeze

  # Flat, ordered stage list, derived from the level grouping above.
  KOHLBERG_STAGES = KOHLBERG_LEVELS.values.flatten.freeze

  # Ethical frameworks available for dilemma resolution.
  ETHICAL_FRAMEWORKS = %i[utilitarian deontological virtue care justice rights].freeze

  # Dilemma severity labels keyed by beginless/endless Float ranges;
  # looked up with Range#cover?, first match wins.
  SEVERITY_LABELS = {
    (0.8..) => :critical,
    (0.6...0.8) => :serious,
    (0.4...0.6) => :moderate,
    (0.2...0.4) => :minor,
    (..0.2) => :trivial
  }.freeze
end
42
+ end
43
+ end
44
+ end
45
+ end
@@ -0,0 +1,64 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Legion
4
+ module Extensions
5
+ module MoralReasoning
6
+ module Helpers
7
# A single moral dilemma: an immutable problem statement with a set of
# candidate options, plus mutable resolution state filled in by #resolve.
class Dilemma
  include Constants

  attr_reader :id, :description, :domain, :severity, :options,
              :chosen_option, :reasoning, :framework_used,
              :created_at, :resolved_at

  def initialize(id:, description:, options:, domain: :general, severity: 0.5)
    @id, @description, @domain, @options = id, description, domain, options
    @severity = severity.clamp(0.0, 1.0) # severity always kept in [0, 1]
    @chosen_option = @reasoning = @framework_used = @resolved_at = nil
    @resolved = false
    @created_at = Time.now.utc
  end

  # Symbolic label (:critical .. :trivial) for the numeric severity,
  # or nil if no band matches.
  def severity_label
    SEVERITY_LABELS.each { |band, label| return label if band.cover?(@severity) }
    nil
  end

  # Record the chosen option, the justification, and the framework used.
  def resolve(option_id:, reasoning:, framework:)
    @resolved_at = Time.now.utc
    @resolved = true
    @framework_used = framework
    @reasoning = reasoning
    @chosen_option = option_id
  end

  def resolved?
    @resolved
  end

  # Full serializable snapshot of the dilemma's state.
  def to_h
    {
      id: id,
      description: description,
      domain: domain,
      severity: severity,
      severity_label: severity_label,
      options: options,
      chosen_option: chosen_option,
      reasoning: reasoning,
      framework_used: framework_used,
      resolved: resolved?,
      created_at: created_at,
      resolved_at: resolved_at
    }
  end
end
61
+ end
62
+ end
63
+ end
64
+ end
@@ -0,0 +1,136 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Legion
4
+ module Extensions
5
+ module MoralReasoning
6
+ module Helpers
7
# Optional LLM pathway: builds structured prompts for action evaluation
# and dilemma resolution, and parses the model's formatted replies.
# All public entry points degrade to nil on any failure.
module LlmEnhancer
  SYSTEM_PROMPT = <<~PROMPT
    You are the moral reasoning engine for an autonomous AI agent built on LegionIO.
    You apply ethical frameworks to evaluate actions and resolve dilemmas.
    Be rigorous, analytical, and fair. Consider multiple perspectives.
    Output structured reasoning, not opinions. Be concise.
  PROMPT

  module_function

  # True only when the Legion LLM subsystem is loaded and started.
  def available?
    return false unless defined?(Legion::LLM)

    !!(Legion::LLM.respond_to?(:started?) && Legion::LLM.started?)
  rescue StandardError
    false
  end

  # Ask the LLM to score an action against the moral foundations.
  # Returns { reasoning:, foundation_impacts: } or nil on failure.
  def evaluate_action(action:, description:, foundations:)
    prompt = build_evaluate_action_prompt(action: action, description: description,
                                          foundations: foundations)
    parse_evaluate_action_response(llm_ask(prompt))
  rescue StandardError => e
    Legion::Logging.warn "[moral_reasoning:llm] evaluate_action failed: #{e.message}"
    nil
  end

  # Ask the LLM to pick an option under a given ethical framework.
  # Returns { chosen_option:, confidence:, reasoning: } or nil on failure.
  def resolve_dilemma(dilemma_description:, options:, framework:)
    prompt = build_resolve_dilemma_prompt(dilemma_description: dilemma_description,
                                          options: options, framework: framework)
    parse_resolve_dilemma_response(llm_ask(prompt))
  rescue StandardError => e
    Legion::Logging.warn "[moral_reasoning:llm] resolve_dilemma failed: #{e.message}"
    nil
  end

  # Send one prompt through a fresh chat primed with the system prompt.
  def llm_ask(prompt)
    Legion::LLM.chat.tap { |chat| chat.with_instructions(SYSTEM_PROMPT) }.ask(prompt)
  end

  def build_evaluate_action_prompt(action:, description:, foundations:)
    strength_lines = foundations.map { |fname, strength| " #{fname}: #{strength.round(3)}" }.join("\n")
    summary = description.to_s.empty? ? 'no description' : description
    <<~PROMPT
      Evaluate this action using moral foundations theory.

      ACTION: #{action}
      DESCRIPTION: #{summary}

      Current foundation strengths:
      #{strength_lines}

      For each foundation, estimate the moral impact of this action (-1.0 to 1.0).
      Negative = harmful to that foundation, Positive = reinforces that foundation.

      Format EXACTLY as:
      REASONING: <1-2 paragraph analysis>
      IMPACT: care=<float> | fairness=<float> | loyalty=<float> | authority=<float> | sanctity=<float> | liberty=<float>
    PROMPT
  end

  # Extract REASONING and IMPACT sections; nil when either is missing
  # or no impact pairs parse.
  def parse_evaluate_action_response(response)
    body = response&.content
    return nil unless body

    reasoning = body.match(/REASONING:\s*(.+?)(?=\nIMPACT:|\z)/im)
    impact = body.match(/IMPACT:\s*(.+)/i)
    return nil unless reasoning && impact

    impacts = parse_impact_string(impact[1].strip)
    impacts.empty? ? nil : { reasoning: reasoning[1].strip, foundation_impacts: impacts }
  end

  # "care=0.5 | fairness=-0.2" -> { care: 0.5, fairness: -0.2 };
  # malformed pairs are skipped, values clamped to [-1, 1].
  def parse_impact_string(impact_str)
    impact_str.split('|').filter_map do |segment|
      name, raw = segment.strip.split('=')
      [name.strip.to_sym, raw.strip.to_f.clamp(-1.0, 1.0)] if name && raw
    end.to_h
  end

  def build_resolve_dilemma_prompt(dilemma_description:, options:, framework:)
    option_lines = options.map do |opt|
      tagged = opt.fetch(:foundations, []).join(', ')
      "- #{opt[:id] || opt[:action]}: #{opt[:description]} (foundations: #{tagged})"
    end.join("\n")
    <<~PROMPT
      Resolve this moral dilemma using the #{framework} ethical framework.

      DILEMMA: #{dilemma_description}

      OPTIONS:
      #{option_lines}

      Apply #{framework} reasoning to select the best option.

      Format EXACTLY as:
      CHOSEN: <option label>
      CONFIDENCE: <0.0-1.0>
      REASONING: <1-2 paragraph justification using the specified framework>
    PROMPT
  end

  # Extract CHOSEN / CONFIDENCE / REASONING; nil unless all three parse.
  # Confidence is clamped to [0, 1].
  def parse_resolve_dilemma_response(response)
    body = response&.content
    return nil unless body

    chosen = body.match(/CHOSEN:\s*(.+)/i)
    confidence = body.match(/CONFIDENCE:\s*([\d.]+)/i)
    reasoning = body.match(/REASONING:\s*(.+)/im)
    return nil unless chosen && confidence && reasoning

    {
      chosen_option: chosen[1].strip,
      confidence: confidence[1].strip.to_f.clamp(0.0, 1.0),
      reasoning: reasoning[1].strip
    }
  end

  private_class_method :llm_ask, :build_evaluate_action_prompt,
                       :parse_evaluate_action_response, :parse_impact_string,
                       :build_resolve_dilemma_prompt, :parse_resolve_dilemma_response
end
133
+ end
134
+ end
135
+ end
136
+ end
@@ -0,0 +1,235 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Legion
4
+ module Extensions
5
+ module MoralReasoning
6
+ module Helpers
7
# Framework-specific scoring strategies mixed into MoralEngine.
# Each evaluator ranks a dilemma's options, highest score first.
# Expects the host to hold @foundations (id => MoralFoundation).
module FrameworkEvaluators
  CARE_FOUNDATIONS = %i[care fairness sanctity].freeze
  DUTY_FOUNDATIONS = %i[authority loyalty].freeze
  JUSTICE_FOUNDATIONS = %i[fairness liberty].freeze
  RIGHTS_FOUNDATIONS = %i[liberty fairness authority].freeze

  private

  # Utilitarian: total current weight of every foundation an option touches.
  def evaluate_by_utility(dilemma)
    rank_options(dilemma) do |opt|
      opt.fetch(:foundations, []).sum { |fid| @foundations[fid]&.weight.to_f }
    end
  end

  # Deontological: count of duty-oriented foundations invoked.
  def evaluate_by_duty(dilemma)
    count_matching(dilemma, DUTY_FOUNDATIONS)
  end

  # Virtue ethics: count of character/care-oriented foundations invoked.
  def evaluate_by_virtue(dilemma)
    count_matching(dilemma, CARE_FOUNDATIONS)
  end

  # Care ethics: occurrences of the :care foundation only.
  def evaluate_by_care(dilemma)
    count_matching(dilemma, [:care])
  end

  # Justice: count of fairness/liberty foundations invoked.
  def evaluate_by_justice(dilemma)
    count_matching(dilemma, JUSTICE_FOUNDATIONS)
  end

  # Rights: count of rights-related foundations invoked.
  def evaluate_by_rights(dilemma)
    count_matching(dilemma, RIGHTS_FOUNDATIONS)
  end

  # Shared strategy: score = how many of an option's foundations appear
  # in +allowed+ (duplicates counted).
  def count_matching(dilemma, allowed)
    rank_options(dilemma) do |opt|
      opt.fetch(:foundations, []).count { |fid| allowed.include?(fid) }.to_f
    end
  end

  # Score every option via the given block, then sort descending by score.
  def rank_options(dilemma)
    dilemma.options
           .map { |opt| { id: opt[:id], description: opt[:description], score: yield(opt) } }
           .sort_by { |row| -row[:score] }
  end
end
59
+
60
# Core stateful engine: tracks per-foundation weights, open/resolved
# dilemmas, learned principles, a bounded action history, and the agent's
# current Kohlberg stage. Not thread-safe; callers must serialize access.
class MoralEngine
  include Constants
  include FrameworkEvaluators

  # Human-readable summary for each Kohlberg stage symbol.
  KOHLBERG_STAGE_DESCRIPTIONS = {
    obedience: 'Avoid punishment; obey authority unconditionally',
    self_interest: 'Act for direct reward; reciprocal exchange',
    conformity: 'Conform to social norms; be a good person',
    law_and_order: 'Follow rules, laws, and authority to maintain social order',
    social_contract: 'Uphold democratic principles; greatest good for greatest number',
    universal_ethics: 'Follow self-chosen universal ethical principles'
  }.freeze

  # Maps each framework symbol to its evaluator method (FrameworkEvaluators).
  FRAMEWORK_STRATEGIES = {
    utilitarian: :evaluate_by_utility,
    deontological: :evaluate_by_duty,
    virtue: :evaluate_by_virtue,
    care: :evaluate_by_care,
    justice: :evaluate_by_justice,
    rights: :evaluate_by_rights
  }.freeze

  attr_reader :stage, :dilemmas, :principles, :history

  def initialize
    # One MoralFoundation per Haidt foundation, all at default weight.
    @foundations = MORAL_FOUNDATIONS.to_h { |f| [f, MoralFoundation.new(id: f)] }
    @principles = []
    @dilemmas = {}
    # Initial stage; may advance one step at a time via #moral_development.
    @stage = :social_contract
    @history = []
  end

  # Score an action by the weight*sensitivity of each foundation it touches,
  # normalized by the number of foundations (0.0 when none are given).
  # Records an :evaluation entry in history.
  def evaluate_action(action:, affected_foundations:, domain: :general)
    score = score_foundations(affected_foundations)
    normalized = affected_foundations.empty? ? 0.0 : score / affected_foundations.size
    add_history(type: :evaluation, action: action, domain: domain, score: normalized)
    { action: action, domain: domain, score: normalized, foundations: affected_foundations }
  end

  # Register a new unresolved dilemma, capped at MAX_DILEMMAS total
  # (resolved dilemmas still count toward the cap).
  def pose_dilemma(description:, options:, domain: :general, severity: 0.5)
    return { success: false, reason: :max_dilemmas_reached } if @dilemmas.size >= MAX_DILEMMAS

    id = generate_id('dilemma')
    dilemma = Dilemma.new(id: id, description: description, options: options,
                          domain: domain, severity: severity)
    @dilemmas[id] = dilemma
    { success: true, dilemma: dilemma.to_h }
  end

  # Resolve a dilemma by option id: marks it resolved, reinforces the
  # chosen option's foundations, weakens the unchosen options' foundations,
  # and records a :resolution history entry.
  def resolve_dilemma(dilemma_id:, option_id:, reasoning:, framework:)
    dilemma = @dilemmas[dilemma_id]
    return { success: false, reason: :not_found } unless dilemma
    return { success: false, reason: :already_resolved } if dilemma.resolved?

    chosen = dilemma.options.find { |o| o[:id] == option_id }
    return { success: false, reason: :invalid_option } unless chosen

    dilemma.resolve(option_id: option_id, reasoning: reasoning, framework: framework)
    reinforce_chosen_foundations(chosen)
    weaken_unchosen_foundations(dilemma.options, option_id)
    add_history(type: :resolution, dilemma_id: dilemma_id, option_id: option_id,
                framework: framework, severity: dilemma.severity)
    { success: true, dilemma: dilemma.to_h }
  end

  # Rank a dilemma's options under one ethical framework without
  # resolving it. Does not mutate any state.
  def apply_framework(dilemma_id:, framework:)
    dilemma = @dilemmas[dilemma_id]
    return { success: false, reason: :not_found } unless dilemma
    return { success: false, reason: :unknown_framework } unless ETHICAL_FRAMEWORKS.include?(framework)

    strategy = FRAMEWORK_STRATEGIES.fetch(framework)
    rankings = send(strategy, dilemma)
    { success: true, dilemma_id: dilemma_id, framework: framework, rankings: rankings }
  end

  # Store a named principle tied to one foundation; weight is clamped to
  # [WEIGHT_FLOOR, WEIGHT_CEILING]. Capped at MAX_PRINCIPLES.
  def add_principle(name:, description:, foundation:, weight: DEFAULT_WEIGHT)
    return { success: false, reason: :max_principles_reached } if @principles.size >= MAX_PRINCIPLES
    return { success: false, reason: :unknown_foundation } unless MORAL_FOUNDATIONS.include?(foundation)

    principle = {
      id: generate_id('principle'),
      name: name,
      description: description,
      foundation: foundation,
      weight: weight.clamp(WEIGHT_FLOOR, WEIGHT_CEILING),
      created_at: Time.now.utc
    }
    @principles << principle
    { success: true, principle: principle }
  end

  # Advance one Kohlberg stage when at least 5 dilemmas are resolved with
  # an average severity >= 0.4. Requires a minimum of 3 resolutions before
  # advancement is even considered.
  # NOTE(review): when already at the top stage with complexity met, the
  # returned reason is :complexity_threshold_not_met — confirm intended.
  def moral_development
    resolved = resolved_dilemmas
    return { advanced: false, stage: @stage, reason: :insufficient_resolutions } if resolved.size < 3

    avg_severity = resolved.sum(&:severity) / resolved.size.to_f
    complexity_met = avg_severity >= 0.4 && resolved.size >= 5
    current_idx = KOHLBERG_STAGES.index(@stage)

    if complexity_met && current_idx < KOHLBERG_STAGES.size - 1
      @stage = KOHLBERG_STAGES[current_idx + 1]
      { advanced: true, stage: @stage, previous_stage: KOHLBERG_STAGES[current_idx] }
    else
      { advanced: false, stage: @stage, reason: :complexity_threshold_not_met }
    end
  end

  # Snapshot of every foundation as { id => { id:, weight:, sensitivity: } }.
  def foundation_profile
    @foundations.transform_values(&:to_h)
  end

  # Current stage with its Kohlberg level and description.
  def stage_info
    level = KOHLBERG_LEVELS.find { |_, stages| stages.include?(@stage) }&.first
    { stage: @stage, level: level, description: KOHLBERG_STAGE_DESCRIPTIONS.fetch(@stage, 'Unknown') }
  end

  def unresolved_dilemmas
    @dilemmas.values.reject(&:resolved?)
  end

  def resolved_dilemmas
    @dilemmas.values.select(&:resolved?)
  end

  # Apply passive weight decay to every foundation (maintenance tick).
  def decay_all
    @foundations.each_value(&:decay)
  end

  # Aggregate stats snapshot for reporting.
  def to_h
    {
      stage: @stage,
      total_dilemmas: @dilemmas.size,
      resolved_dilemmas: resolved_dilemmas.size,
      unresolved_dilemmas: unresolved_dilemmas.size,
      principles: @principles.size,
      history_entries: @history.size,
      foundation_profile: foundation_profile
    }
  end

  private

  # Sum of weight*sensitivity over the given foundation ids; unknown ids
  # contribute 0.0.
  def score_foundations(affected_foundations)
    affected_foundations.sum do |fid|
      foundation = @foundations.fetch(fid, nil)
      next 0.0 unless foundation

      foundation.weight * foundation.sensitivity
    end
  end

  # Reinforce each foundation listed on the winning option.
  # NOTE(review): the amount comes from the option's own :severity key
  # (default 1.0), not the dilemma's severity — confirm intended.
  def reinforce_chosen_foundations(chosen_option)
    chosen_option.fetch(:foundations, []).each do |fid|
      @foundations[fid]&.reinforce(amount: chosen_option.fetch(:severity, 1.0))
    end
  end

  # Mildly weaken the foundations of every option that was not chosen.
  def weaken_unchosen_foundations(options, chosen_id)
    options.reject { |o| o[:id] == chosen_id }.each do |option|
      option.fetch(:foundations, []).each { |fid| @foundations[fid]&.weaken(amount: 0.5) }
    end
  end

  # Append a timestamped history entry, dropping the oldest beyond MAX_HISTORY.
  def add_history(entry)
    @history << entry.merge(timestamp: Time.now.utc)
    @history.shift if @history.size > MAX_HISTORY
  end

  # Timestamp+random suffix id. NOTE(review): rand(1000) allows collisions
  # within the same clock tick — acceptable only if callers tolerate it.
  def generate_id(prefix)
    "#{prefix}_#{Time.now.utc.to_f}_#{rand(1000)}"
  end
end
232
+ end
233
+ end
234
+ end
235
+ end
@@ -0,0 +1,41 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Legion
4
+ module Extensions
5
+ module MoralReasoning
6
+ module Helpers
7
# One of Haidt's moral foundations with a learnable weight and a fixed
# sensitivity. The weight drifts via reinforce/weaken/decay and is always
# clamped to [WEIGHT_FLOOR, WEIGHT_CEILING].
class MoralFoundation
  include Constants

  attr_reader :id, :weight, :sensitivity

  def initialize(id:, weight: DEFAULT_WEIGHT, sensitivity: DEFAULT_WEIGHT)
    @id = id
    @weight = weight.clamp(WEIGHT_FLOOR, WEIGHT_CEILING)
    @sensitivity = sensitivity.clamp(0.0, 1.0)
  end

  # Strengthen the foundation; +amount+ scales the fixed reinforcement rate.
  def reinforce(amount: 1.0)
    adjust(amount * REINFORCEMENT_RATE)
  end

  # Weaken the foundation by the same scaled rate.
  def weaken(amount: 1.0)
    adjust(-(amount * REINFORCEMENT_RATE))
  end

  # Passive drift toward the floor, applied on maintenance ticks.
  def decay
    adjust(-DECAY_RATE)
  end

  def to_h
    {
      id: @id,
      weight: @weight,
      sensitivity: @sensitivity
    }
  end

  private

  # Shift the weight by +delta+, clamped to the allowed band.
  def adjust(delta)
    @weight = (@weight + delta).clamp(WEIGHT_FLOOR, WEIGHT_CEILING)
  end
end
38
+ end
39
+ end
40
+ end
41
+ end
@@ -0,0 +1,117 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Legion
4
+ module Extensions
5
+ module MoralReasoning
6
+ module Runners
7
# Runner surface for the moral-reasoning extension: thin, logged wrappers
# around a lazily-built MoralEngine, with an optional LLM enhancement path.
module MoralReasoning
  include Legion::Extensions::Helpers::Lex if Legion::Extensions.const_defined?(:Helpers) &&
                                              Legion::Extensions::Helpers.const_defined?(:Lex)

  # Score an action against the moral foundations. Prefers the LLM pathway
  # when available; always falls back to the mechanical engine evaluation.
  def evaluate_moral_action(action:, affected_foundations:, domain: :general, description: nil, **)
    Legion::Logging.debug "[moral_reasoning] evaluate_action: action=#{action} domain=#{domain}"

    llm = llm_action_evaluation(action: action, description: description)
    if llm
      Legion::Logging.debug "[moral_reasoning] using LLM evaluation for action=#{action}"
      mechanical = engine.evaluate_action(action: action,
                                          affected_foundations: affected_foundations,
                                          domain: domain)
      return { success: true, source: :llm, reasoning: llm[:reasoning],
               foundation_impacts: llm[:foundation_impacts] }.merge(mechanical)
    end

    { success: true, source: :mechanical }
      .merge(engine.evaluate_action(action: action, affected_foundations: affected_foundations, domain: domain))
  end

  # Register a new dilemma with the engine.
  def pose_moral_dilemma(description:, options:, domain: :general, severity: 0.5, **)
    Legion::Logging.info "[moral_reasoning] pose_dilemma: domain=#{domain} severity=#{severity}"
    engine.pose_dilemma(description: description, options: options, domain: domain, severity: severity)
  end

  # Resolve a dilemma. When the LLM is available and produces a result, its
  # reasoning replaces the caller's; otherwise the mechanical path is used.
  def resolve_moral_dilemma(dilemma_id:, option_id:, reasoning:, framework:, **)
    Legion::Logging.info "[moral_reasoning] resolve_dilemma: id=#{dilemma_id} framework=#{framework}"

    llm = llm_dilemma_resolution(dilemma_id: dilemma_id, framework: framework)
    if llm
      Legion::Logging.debug "[moral_reasoning] using LLM resolution for dilemma=#{dilemma_id}"
      outcome = engine.resolve_dilemma(dilemma_id: dilemma_id, option_id: option_id,
                                       reasoning: llm[:reasoning], framework: framework)
      return outcome.merge(source: :llm, llm_chosen: llm[:chosen_option],
                           llm_confidence: llm[:confidence])
    end

    engine.resolve_dilemma(dilemma_id: dilemma_id, option_id: option_id,
                           reasoning: reasoning, framework: framework)
  end

  # Rank a dilemma's options under one ethical framework (no resolution).
  def apply_ethical_framework(dilemma_id:, framework:, **)
    Legion::Logging.debug "[moral_reasoning] apply_framework: id=#{dilemma_id} framework=#{framework}"
    engine.apply_framework(dilemma_id: dilemma_id, framework: framework)
  end

  # Add a named principle tied to one moral foundation.
  def add_moral_principle(name:, description:, foundation:, weight: Helpers::Constants::DEFAULT_WEIGHT, **)
    Legion::Logging.info "[moral_reasoning] add_principle: name=#{name} foundation=#{foundation}"
    engine.add_principle(name: name, description: description, foundation: foundation, weight: weight)
  end

  # Attempt a Kohlberg stage advancement based on resolved dilemmas.
  def check_moral_development(**)
    Legion::Logging.debug '[moral_reasoning] check_moral_development'
    { success: true }.merge(engine.moral_development)
  end

  # Current weight/sensitivity snapshot of every foundation.
  def moral_foundation_profile(**)
    Legion::Logging.debug '[moral_reasoning] foundation_profile'
    { success: true, foundations: engine.foundation_profile }
  end

  # Current Kohlberg stage, level, and description.
  def moral_stage_info(**)
    Legion::Logging.debug '[moral_reasoning] stage_info'
    { success: true }.merge(engine.stage_info)
  end

  # Maintenance tick: apply passive decay to all foundation weights.
  def update_moral_reasoning(**)
    Legion::Logging.debug '[moral_reasoning] decay_all'
    engine.decay_all
    { success: true, foundations: engine.foundation_profile }
  end

  # Aggregate engine statistics.
  def moral_reasoning_stats(**)
    Legion::Logging.debug '[moral_reasoning] stats'
    { success: true }.merge(engine.to_h)
  end

  private

  # Lazily-built engine; one per including object.
  def engine
    @engine ||= Helpers::MoralEngine.new
  end

  # LLM action evaluation, or nil when the LLM is unavailable/fails.
  def llm_action_evaluation(action:, description:)
    return nil unless Helpers::LlmEnhancer.available?

    weights = engine.foundation_profile.transform_values { |f| f[:weight] }
    Helpers::LlmEnhancer.evaluate_action(action: action, description: description.to_s,
                                         foundations: weights)
  end

  # LLM dilemma resolution for an existing, unresolved dilemma, or nil.
  # NOTE(review): the engine still resolves with the caller-supplied
  # option_id; the LLM's own choice is only reported back, not applied —
  # confirm this is the intended contract.
  def llm_dilemma_resolution(dilemma_id:, framework:)
    return nil unless Helpers::LlmEnhancer.available?

    dilemma = engine.dilemmas[dilemma_id]
    return nil unless dilemma && !dilemma.resolved?

    Helpers::LlmEnhancer.resolve_dilemma(dilemma_description: dilemma.description,
                                         options: dilemma.options, framework: framework)
  end
end
114
+ end
115
+ end
116
+ end
117
+ end
@@ -0,0 +1,9 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Legion
4
+ module Extensions
5
module MoralReasoning
  # Gem version; keep in sync with the gemspec and CHANGELOG.
  VERSION = '0.1.1'
end
8
+ end
9
+ end
@@ -0,0 +1,18 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative 'moral_reasoning/version'
4
+ require_relative 'moral_reasoning/helpers/constants'
5
+ require_relative 'moral_reasoning/helpers/moral_foundation'
6
+ require_relative 'moral_reasoning/helpers/dilemma'
7
+ require_relative 'moral_reasoning/helpers/moral_engine'
8
+ require_relative 'moral_reasoning/helpers/llm_enhancer'
9
+ require_relative 'moral_reasoning/runners/moral_reasoning'
10
+ require_relative 'moral_reasoning/client'
11
+
12
module Legion
  module Extensions
    module MoralReasoning
      # Register this extension with the Legion extension core when running
      # inside a full LegionIO process; no-op when the core is not loaded
      # (e.g. in isolated tests).
      extend Legion::Extensions::Core if defined?(Legion::Extensions::Core)
    end
  end
end
metadata ADDED
@@ -0,0 +1,68 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: lex-moral-reasoning
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.1
5
+ platform: ruby
6
+ authors:
7
+ - Esity
8
+ bindir: bin
9
+ cert_chain: []
10
+ date: 1980-01-02 00:00:00.000000000 Z
11
+ dependencies:
12
+ - !ruby/object:Gem::Dependency
13
+ name: legion-gaia
14
+ requirement: !ruby/object:Gem::Requirement
15
+ requirements:
16
+ - - ">="
17
+ - !ruby/object:Gem::Version
18
+ version: '0'
19
+ type: :development
20
+ prerelease: false
21
+ version_requirements: !ruby/object:Gem::Requirement
22
+ requirements:
23
+ - - ">="
24
+ - !ruby/object:Gem::Version
25
+ version: '0'
26
+ description: Moral reasoning for LegionIO — Kohlberg stages, Haidt moral foundations,
27
+ ethical framework evaluation, and dilemma resolution
28
+ email:
29
+ - matthewdiverson@gmail.com
30
+ executables: []
31
+ extensions: []
32
+ extra_rdoc_files: []
33
+ files:
34
+ - lib/legion/extensions/moral_reasoning.rb
35
+ - lib/legion/extensions/moral_reasoning/client.rb
36
+ - lib/legion/extensions/moral_reasoning/helpers/constants.rb
37
+ - lib/legion/extensions/moral_reasoning/helpers/dilemma.rb
38
+ - lib/legion/extensions/moral_reasoning/helpers/llm_enhancer.rb
39
+ - lib/legion/extensions/moral_reasoning/helpers/moral_engine.rb
40
+ - lib/legion/extensions/moral_reasoning/helpers/moral_foundation.rb
41
+ - lib/legion/extensions/moral_reasoning/runners/moral_reasoning.rb
42
+ - lib/legion/extensions/moral_reasoning/version.rb
43
+ homepage: https://github.com/LegionIO/lex-moral-reasoning
44
+ licenses:
45
+ - MIT
46
+ metadata:
47
+ homepage_uri: https://github.com/LegionIO/lex-moral-reasoning
48
+ source_code_uri: https://github.com/LegionIO/lex-moral-reasoning
49
+ changelog_uri: https://github.com/LegionIO/lex-moral-reasoning/blob/master/CHANGELOG.md
50
+ rubygems_mfa_required: 'true'
51
+ rdoc_options: []
52
+ require_paths:
53
+ - lib
54
+ required_ruby_version: !ruby/object:Gem::Requirement
55
+ requirements:
56
+ - - ">="
57
+ - !ruby/object:Gem::Version
58
+ version: '3.4'
59
+ required_rubygems_version: !ruby/object:Gem::Requirement
60
+ requirements:
61
+ - - ">="
62
+ - !ruby/object:Gem::Version
63
+ version: '0'
64
+ requirements: []
65
+ rubygems_version: 3.6.9
66
+ specification_version: 4
67
+ summary: LegionIO moral reasoning extension
68
+ test_files: []