lex-agentic-affect 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +12 -0
- data/Gemfile +5 -0
- data/LICENSE +21 -0
- data/README.md +13 -0
- data/lex-agentic-affect.gemspec +30 -0
- data/lib/legion/extensions/agentic/affect/appraisal/client.rb +20 -0
- data/lib/legion/extensions/agentic/affect/appraisal/helpers/appraisal.rb +112 -0
- data/lib/legion/extensions/agentic/affect/appraisal/helpers/appraisal_engine.rb +129 -0
- data/lib/legion/extensions/agentic/affect/appraisal/helpers/constants.rb +43 -0
- data/lib/legion/extensions/agentic/affect/appraisal/runners/appraisal.rb +105 -0
- data/lib/legion/extensions/agentic/affect/appraisal/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/appraisal.rb +19 -0
- data/lib/legion/extensions/agentic/affect/cognitive_empathy/client.rb +19 -0
- data/lib/legion/extensions/agentic/affect/cognitive_empathy/helpers/constants.rb +37 -0
- data/lib/legion/extensions/agentic/affect/cognitive_empathy/helpers/empathy_engine.rb +151 -0
- data/lib/legion/extensions/agentic/affect/cognitive_empathy/helpers/perspective.rb +92 -0
- data/lib/legion/extensions/agentic/affect/cognitive_empathy/runners/cognitive_empathy.rb +93 -0
- data/lib/legion/extensions/agentic/affect/cognitive_empathy/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/cognitive_empathy.rb +20 -0
- data/lib/legion/extensions/agentic/affect/contagion/client.rb +28 -0
- data/lib/legion/extensions/agentic/affect/contagion/helpers/constants.rb +34 -0
- data/lib/legion/extensions/agentic/affect/contagion/helpers/contagion_engine.rb +184 -0
- data/lib/legion/extensions/agentic/affect/contagion/helpers/meme.rb +97 -0
- data/lib/legion/extensions/agentic/affect/contagion/runners/cognitive_contagion.rb +125 -0
- data/lib/legion/extensions/agentic/affect/contagion/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/contagion.rb +19 -0
- data/lib/legion/extensions/agentic/affect/defusion/client.rb +28 -0
- data/lib/legion/extensions/agentic/affect/defusion/helpers/constants.rb +64 -0
- data/lib/legion/extensions/agentic/affect/defusion/helpers/defusion_engine.rb +167 -0
- data/lib/legion/extensions/agentic/affect/defusion/helpers/thought.rb +92 -0
- data/lib/legion/extensions/agentic/affect/defusion/runners/cognitive_defusion.rb +127 -0
- data/lib/legion/extensions/agentic/affect/defusion/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/defusion.rb +19 -0
- data/lib/legion/extensions/agentic/affect/emotion/actors/momentum_decay.rb +45 -0
- data/lib/legion/extensions/agentic/affect/emotion/client.rb +36 -0
- data/lib/legion/extensions/agentic/affect/emotion/helpers/baseline.rb +52 -0
- data/lib/legion/extensions/agentic/affect/emotion/helpers/momentum.rb +52 -0
- data/lib/legion/extensions/agentic/affect/emotion/helpers/valence.rb +92 -0
- data/lib/legion/extensions/agentic/affect/emotion/runners/gut.rb +102 -0
- data/lib/legion/extensions/agentic/affect/emotion/runners/valence.rb +120 -0
- data/lib/legion/extensions/agentic/affect/emotion/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/emotion.rb +20 -0
- data/lib/legion/extensions/agentic/affect/empathy/client.rb +21 -0
- data/lib/legion/extensions/agentic/affect/empathy/helpers/constants.rb +54 -0
- data/lib/legion/extensions/agentic/affect/empathy/helpers/mental_model.rb +185 -0
- data/lib/legion/extensions/agentic/affect/empathy/helpers/model_store.rb +88 -0
- data/lib/legion/extensions/agentic/affect/empathy/runners/empathy.rb +173 -0
- data/lib/legion/extensions/agentic/affect/empathy/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/empathy.rb +20 -0
- data/lib/legion/extensions/agentic/affect/fatigue/client.rb +26 -0
- data/lib/legion/extensions/agentic/affect/fatigue/helpers/constants.rb +54 -0
- data/lib/legion/extensions/agentic/affect/fatigue/helpers/energy_model.rb +181 -0
- data/lib/legion/extensions/agentic/affect/fatigue/helpers/fatigue_store.rb +146 -0
- data/lib/legion/extensions/agentic/affect/fatigue/runners/fatigue.rb +89 -0
- data/lib/legion/extensions/agentic/affect/fatigue/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/fatigue.rb +19 -0
- data/lib/legion/extensions/agentic/affect/flow/client.rb +25 -0
- data/lib/legion/extensions/agentic/affect/flow/helpers/constants.rb +84 -0
- data/lib/legion/extensions/agentic/affect/flow/helpers/flow_detector.rb +166 -0
- data/lib/legion/extensions/agentic/affect/flow/runners/flow.rb +129 -0
- data/lib/legion/extensions/agentic/affect/flow/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/flow.rb +18 -0
- data/lib/legion/extensions/agentic/affect/interoception/actors/decay.rb +45 -0
- data/lib/legion/extensions/agentic/affect/interoception/client.rb +28 -0
- data/lib/legion/extensions/agentic/affect/interoception/helpers/body_budget.rb +152 -0
- data/lib/legion/extensions/agentic/affect/interoception/helpers/constants.rb +68 -0
- data/lib/legion/extensions/agentic/affect/interoception/helpers/somatic_marker.rb +75 -0
- data/lib/legion/extensions/agentic/affect/interoception/runners/interoception.rb +101 -0
- data/lib/legion/extensions/agentic/affect/interoception/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/interoception.rb +20 -0
- data/lib/legion/extensions/agentic/affect/mood/client.rb +21 -0
- data/lib/legion/extensions/agentic/affect/mood/helpers/constants.rb +78 -0
- data/lib/legion/extensions/agentic/affect/mood/helpers/mood_state.rb +154 -0
- data/lib/legion/extensions/agentic/affect/mood/runners/mood.rb +122 -0
- data/lib/legion/extensions/agentic/affect/mood/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/mood.rb +18 -0
- data/lib/legion/extensions/agentic/affect/motivation/client.rb +26 -0
- data/lib/legion/extensions/agentic/affect/motivation/helpers/constants.rb +48 -0
- data/lib/legion/extensions/agentic/affect/motivation/helpers/drive_state.rb +98 -0
- data/lib/legion/extensions/agentic/affect/motivation/helpers/motivation_store.rb +106 -0
- data/lib/legion/extensions/agentic/affect/motivation/runners/motivation.rb +165 -0
- data/lib/legion/extensions/agentic/affect/motivation/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/motivation.rb +19 -0
- data/lib/legion/extensions/agentic/affect/reappraisal/actors/auto_regulate.rb +45 -0
- data/lib/legion/extensions/agentic/affect/reappraisal/client.rb +28 -0
- data/lib/legion/extensions/agentic/affect/reappraisal/helpers/constants.rb +82 -0
- data/lib/legion/extensions/agentic/affect/reappraisal/helpers/emotional_event.rb +98 -0
- data/lib/legion/extensions/agentic/affect/reappraisal/helpers/llm_enhancer.rb +88 -0
- data/lib/legion/extensions/agentic/affect/reappraisal/helpers/reappraisal_engine.rb +153 -0
- data/lib/legion/extensions/agentic/affect/reappraisal/runners/cognitive_reappraisal.rb +164 -0
- data/lib/legion/extensions/agentic/affect/reappraisal/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/reappraisal.rb +20 -0
- data/lib/legion/extensions/agentic/affect/regulation/client.rb +25 -0
- data/lib/legion/extensions/agentic/affect/regulation/helpers/constants.rb +71 -0
- data/lib/legion/extensions/agentic/affect/regulation/helpers/regulation_model.rb +175 -0
- data/lib/legion/extensions/agentic/affect/regulation/runners/emotional_regulation.rb +127 -0
- data/lib/legion/extensions/agentic/affect/regulation/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/regulation.rb +18 -0
- data/lib/legion/extensions/agentic/affect/resilience/client.rb +27 -0
- data/lib/legion/extensions/agentic/affect/resilience/helpers/adversity_tracker.rb +130 -0
- data/lib/legion/extensions/agentic/affect/resilience/helpers/constants.rb +79 -0
- data/lib/legion/extensions/agentic/affect/resilience/helpers/resilience_model.rb +165 -0
- data/lib/legion/extensions/agentic/affect/resilience/runners/resilience.rb +150 -0
- data/lib/legion/extensions/agentic/affect/resilience/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/resilience.rb +19 -0
- data/lib/legion/extensions/agentic/affect/resonance/client.rb +24 -0
- data/lib/legion/extensions/agentic/affect/resonance/helpers/category.rb +75 -0
- data/lib/legion/extensions/agentic/affect/resonance/helpers/constants.rb +47 -0
- data/lib/legion/extensions/agentic/affect/resonance/helpers/resonance_engine.rb +115 -0
- data/lib/legion/extensions/agentic/affect/resonance/runners/cognitive_resonance.rb +94 -0
- data/lib/legion/extensions/agentic/affect/resonance/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/resonance.rb +19 -0
- data/lib/legion/extensions/agentic/affect/reward/client.rb +26 -0
- data/lib/legion/extensions/agentic/affect/reward/helpers/constants.rb +67 -0
- data/lib/legion/extensions/agentic/affect/reward/helpers/reward_signal.rb +178 -0
- data/lib/legion/extensions/agentic/affect/reward/helpers/reward_store.rb +142 -0
- data/lib/legion/extensions/agentic/affect/reward/runners/reward.rb +92 -0
- data/lib/legion/extensions/agentic/affect/reward/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/reward.rb +19 -0
- data/lib/legion/extensions/agentic/affect/somatic_marker/actors/decay.rb +45 -0
- data/lib/legion/extensions/agentic/affect/somatic_marker/client.rb +29 -0
- data/lib/legion/extensions/agentic/affect/somatic_marker/helpers/body_state.rb +69 -0
- data/lib/legion/extensions/agentic/affect/somatic_marker/helpers/constants.rb +43 -0
- data/lib/legion/extensions/agentic/affect/somatic_marker/helpers/marker_store.rb +160 -0
- data/lib/legion/extensions/agentic/affect/somatic_marker/helpers/somatic_marker.rb +74 -0
- data/lib/legion/extensions/agentic/affect/somatic_marker/runners/somatic_marker.rb +132 -0
- data/lib/legion/extensions/agentic/affect/somatic_marker/version.rb +13 -0
- data/lib/legion/extensions/agentic/affect/somatic_marker.rb +20 -0
- data/lib/legion/extensions/agentic/affect/version.rb +11 -0
- data/lib/legion/extensions/agentic/affect.rb +34 -0
- data/spec/legion/extensions/agentic/affect/appraisal/client_spec.rb +52 -0
- data/spec/legion/extensions/agentic/affect/appraisal/helpers/appraisal_engine_spec.rb +161 -0
- data/spec/legion/extensions/agentic/affect/appraisal/helpers/appraisal_spec.rb +175 -0
- data/spec/legion/extensions/agentic/affect/appraisal/helpers/constants_spec.rb +49 -0
- data/spec/legion/extensions/agentic/affect/appraisal/runners/appraisal_spec.rb +116 -0
- data/spec/legion/extensions/agentic/affect/cognitive_empathy/client_spec.rb +62 -0
- data/spec/legion/extensions/agentic/affect/cognitive_empathy/helpers/empathy_engine_spec.rb +316 -0
- data/spec/legion/extensions/agentic/affect/cognitive_empathy/helpers/perspective_spec.rb +132 -0
- data/spec/legion/extensions/agentic/affect/cognitive_empathy/runners/cognitive_empathy_spec.rb +200 -0
- data/spec/legion/extensions/agentic/affect/contagion/client_spec.rb +63 -0
- data/spec/legion/extensions/agentic/affect/contagion/helpers/constants_spec.rb +86 -0
- data/spec/legion/extensions/agentic/affect/contagion/helpers/contagion_engine_spec.rb +241 -0
- data/spec/legion/extensions/agentic/affect/contagion/helpers/meme_spec.rb +160 -0
- data/spec/legion/extensions/agentic/affect/contagion/runners/cognitive_contagion_spec.rb +211 -0
- data/spec/legion/extensions/agentic/affect/defusion/client_spec.rb +80 -0
- data/spec/legion/extensions/agentic/affect/defusion/helpers/constants_spec.rb +84 -0
- data/spec/legion/extensions/agentic/affect/defusion/helpers/defusion_engine_spec.rb +250 -0
- data/spec/legion/extensions/agentic/affect/defusion/helpers/thought_spec.rb +178 -0
- data/spec/legion/extensions/agentic/affect/defusion/runners/cognitive_defusion_spec.rb +185 -0
- data/spec/legion/extensions/agentic/affect/emotion/actors/momentum_decay_spec.rb +46 -0
- data/spec/legion/extensions/agentic/affect/emotion/client_spec.rb +46 -0
- data/spec/legion/extensions/agentic/affect/emotion/helpers/baseline_spec.rb +48 -0
- data/spec/legion/extensions/agentic/affect/emotion/helpers/momentum_spec.rb +45 -0
- data/spec/legion/extensions/agentic/affect/emotion/helpers/valence_spec.rb +91 -0
- data/spec/legion/extensions/agentic/affect/emotion/runners/gut_spec.rb +73 -0
- data/spec/legion/extensions/agentic/affect/emotion/runners/valence_spec.rb +67 -0
- data/spec/legion/extensions/agentic/affect/empathy/client_spec.rb +20 -0
- data/spec/legion/extensions/agentic/affect/empathy/helpers/constants_spec.rb +23 -0
- data/spec/legion/extensions/agentic/affect/empathy/helpers/mental_model_spec.rb +150 -0
- data/spec/legion/extensions/agentic/affect/empathy/helpers/model_store_spec.rb +94 -0
- data/spec/legion/extensions/agentic/affect/empathy/runners/empathy_spec.rb +127 -0
- data/spec/legion/extensions/agentic/affect/fatigue/client_spec.rb +66 -0
- data/spec/legion/extensions/agentic/affect/fatigue/helpers/constants_spec.rb +130 -0
- data/spec/legion/extensions/agentic/affect/fatigue/helpers/energy_model_spec.rb +281 -0
- data/spec/legion/extensions/agentic/affect/fatigue/helpers/fatigue_store_spec.rb +157 -0
- data/spec/legion/extensions/agentic/affect/fatigue/runners/fatigue_spec.rb +127 -0
- data/spec/legion/extensions/agentic/affect/flow/client_spec.rb +58 -0
- data/spec/legion/extensions/agentic/affect/flow/helpers/constants_spec.rb +112 -0
- data/spec/legion/extensions/agentic/affect/flow/helpers/flow_detector_spec.rb +268 -0
- data/spec/legion/extensions/agentic/affect/flow/runners/flow_spec.rb +222 -0
- data/spec/legion/extensions/agentic/affect/interoception/client_spec.rb +52 -0
- data/spec/legion/extensions/agentic/affect/interoception/helpers/body_budget_spec.rb +178 -0
- data/spec/legion/extensions/agentic/affect/interoception/helpers/somatic_marker_spec.rb +120 -0
- data/spec/legion/extensions/agentic/affect/interoception/runners/interoception_spec.rb +108 -0
- data/spec/legion/extensions/agentic/affect/mood/client_spec.rb +20 -0
- data/spec/legion/extensions/agentic/affect/mood/helpers/constants_spec.rb +29 -0
- data/spec/legion/extensions/agentic/affect/mood/helpers/mood_state_spec.rb +94 -0
- data/spec/legion/extensions/agentic/affect/mood/runners/mood_spec.rb +71 -0
- data/spec/legion/extensions/agentic/affect/motivation/client_spec.rb +35 -0
- data/spec/legion/extensions/agentic/affect/motivation/helpers/constants_spec.rb +111 -0
- data/spec/legion/extensions/agentic/affect/motivation/helpers/drive_state_spec.rb +183 -0
- data/spec/legion/extensions/agentic/affect/motivation/helpers/motivation_store_spec.rb +185 -0
- data/spec/legion/extensions/agentic/affect/motivation/runners/motivation_spec.rb +248 -0
- data/spec/legion/extensions/agentic/affect/reappraisal/actors/auto_regulate_spec.rb +46 -0
- data/spec/legion/extensions/agentic/affect/reappraisal/client_spec.rb +64 -0
- data/spec/legion/extensions/agentic/affect/reappraisal/helpers/constants_spec.rb +102 -0
- data/spec/legion/extensions/agentic/affect/reappraisal/helpers/emotional_event_spec.rb +177 -0
- data/spec/legion/extensions/agentic/affect/reappraisal/helpers/llm_enhancer_spec.rb +161 -0
- data/spec/legion/extensions/agentic/affect/reappraisal/helpers/reappraisal_engine_spec.rb +211 -0
- data/spec/legion/extensions/agentic/affect/reappraisal/runners/cognitive_reappraisal_spec.rb +312 -0
- data/spec/legion/extensions/agentic/affect/regulation/client_spec.rb +61 -0
- data/spec/legion/extensions/agentic/affect/regulation/helpers/constants_spec.rb +108 -0
- data/spec/legion/extensions/agentic/affect/regulation/helpers/regulation_model_spec.rb +200 -0
- data/spec/legion/extensions/agentic/affect/regulation/runners/emotional_regulation_spec.rb +190 -0
- data/spec/legion/extensions/agentic/affect/resilience/client_spec.rb +36 -0
- data/spec/legion/extensions/agentic/affect/resilience/helpers/adversity_tracker_spec.rb +164 -0
- data/spec/legion/extensions/agentic/affect/resilience/helpers/constants_spec.rb +78 -0
- data/spec/legion/extensions/agentic/affect/resilience/helpers/resilience_model_spec.rb +133 -0
- data/spec/legion/extensions/agentic/affect/resilience/runners/resilience_spec.rb +150 -0
- data/spec/legion/extensions/agentic/affect/resonance/client_spec.rb +66 -0
- data/spec/legion/extensions/agentic/affect/resonance/cognitive_resonance_spec.rb +27 -0
- data/spec/legion/extensions/agentic/affect/resonance/helpers/category_spec.rb +146 -0
- data/spec/legion/extensions/agentic/affect/resonance/helpers/constants_spec.rb +104 -0
- data/spec/legion/extensions/agentic/affect/resonance/helpers/resonance_engine_spec.rb +189 -0
- data/spec/legion/extensions/agentic/affect/resonance/runners/cognitive_resonance_spec.rb +197 -0
- data/spec/legion/extensions/agentic/affect/reward/client_spec.rb +42 -0
- data/spec/legion/extensions/agentic/affect/reward/helpers/constants_spec.rb +91 -0
- data/spec/legion/extensions/agentic/affect/reward/helpers/reward_signal_spec.rb +296 -0
- data/spec/legion/extensions/agentic/affect/reward/helpers/reward_store_spec.rb +167 -0
- data/spec/legion/extensions/agentic/affect/reward/runners/reward_spec.rb +149 -0
- data/spec/legion/extensions/agentic/affect/somatic_marker/client_spec.rb +83 -0
- data/spec/legion/extensions/agentic/affect/somatic_marker/helpers/body_state_spec.rb +155 -0
- data/spec/legion/extensions/agentic/affect/somatic_marker/helpers/marker_store_spec.rb +233 -0
- data/spec/legion/extensions/agentic/affect/somatic_marker/helpers/somatic_marker_spec.rb +172 -0
- data/spec/legion/extensions/agentic/affect/somatic_marker/runners/somatic_marker_spec.rb +181 -0
- data/spec/spec_helper.rb +46 -0
- metadata +302 -0
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Legion
  module Extensions
    module Agentic
      module Affect
        module Resonance
          module Runners
            # Runner methods for the cognitive resonance subsystem.
            #
            # Every public method accepts an optional `engine:` keyword; when it
            # is omitted a memoized Helpers::ResonanceEngine is lazily built.
            # Each method returns a Hash that always contains a :success key.
            module CognitiveResonance
              include Legion::Extensions::Helpers::Lex if Legion::Extensions.const_defined?(:Helpers) &&
                                                          Legion::Extensions::Helpers.const_defined?(:Lex)

              # Presents an input vector to the engine for categorisation.
              #
              # @param input [Array, nil] raw input vector
              # @param engine [Object, nil] optional engine override
              # @return [Hash] { success: true } merged with the engine's result,
              #   or { success: false, error: :empty_input } for nil/empty input
              def present_input(input:, engine: nil, **)
                # Validate before touching the engine so an invalid call cannot
                # instantiate (and memoize) the default engine as a side effect.
                return { success: false, error: :empty_input } if input.nil? || input.empty?

                resonance_engine = engine || default_engine
                result = resonance_engine.present_input(input: input)
                # `.to_s` keeps the debug line safe even if the engine reports no category.
                Legion::Logging.debug "[cognitive_resonance] present_input outcome=#{result[:outcome]} " \
                                      "category=#{result[:category_id].to_s[0..7]}"
                { success: true }.merge(result)
              end

              # Classifies an input vector against the engine's learned categories.
              # Values are coerced to floats and clamped into [0.0, 1.0] first.
              #
              # @return [Hash] match details when a category matches, otherwise
              #   found: false with a zero quality
              def classify(input:, engine: nil, **)
                # Same guard-first ordering as #present_input: no engine work on
                # invalid input.
                return { success: false, error: :empty_input } if input.nil? || input.empty?

                resonance_engine = engine || default_engine
                normalized = input.map { |v| v.to_f.clamp(0.0, 1.0) }
                match = resonance_engine.best_match(normalized)

                if match
                  quality_label = Helpers::Constants.match_label(match[:quality])
                  Legion::Logging.debug "[cognitive_resonance] classify category=#{match[:id].to_s[0..7]} " \
                                        "quality=#{match[:quality].round(3)} label=#{quality_label}"
                  {
                    success: true,
                    found: true,
                    category_id: match[:id],
                    quality: match[:quality],
                    label: quality_label
                  }
                else
                  Legion::Logging.debug '[cognitive_resonance] classify found=false (no categories)'
                  { success: true, found: false, category_id: nil, quality: 0.0, label: :none }
                end
              end

              # Adjusts the engine's vigilance parameter.
              #
              # @param amount [Numeric] requested delta, clamped into [-1.0, 1.0]
              # @return [Hash] new vigilance, its label, and the applied delta
              def adjust_vigilance(amount:, engine: nil, **)
                resonance_engine = engine || default_engine
                clamped_amount = amount.to_f.clamp(-1.0, 1.0)
                new_vigilance = resonance_engine.adjust_vigilance(amount: clamped_amount)
                vigilance_label = Helpers::Constants.vigilance_label(new_vigilance)

                Legion::Logging.debug "[cognitive_resonance] vigilance=#{new_vigilance.round(3)} label=#{vigilance_label}"
                {
                  success: true,
                  vigilance: new_vigilance,
                  vigilance_label: vigilance_label,
                  adjustment: clamped_amount
                }
              end

              # Returns the engine's full resonance report merged with :success.
              def resonance_report(engine: nil, **)
                resonance_engine = engine || default_engine
                report = resonance_engine.resonance_report
                Legion::Logging.debug "[cognitive_resonance] report categories=#{report[:category_count]} " \
                                      "vigilance=#{report[:vigilance].round(3)}"
                { success: true }.merge(report)
              end

              # Returns how many categories the engine currently tracks.
              def category_count(engine: nil, **)
                resonance_engine = engine || default_engine
                count = resonance_engine.category_count
                Legion::Logging.debug "[cognitive_resonance] category_count=#{count}"
                { success: true, count: count }
              end

              # Drops the memoized default engine so the next call builds a fresh one.
              def reset_engine(**)
                @default_engine = nil
                Legion::Logging.debug '[cognitive_resonance] engine reset'
                { success: true, reset: true }
              end

              private

              # Lazily-built engine used when callers do not supply their own.
              def default_engine
                @default_engine ||= Helpers::ResonanceEngine.new
              end
            end
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
# frozen_string_literal: true

# Entry point for the Resonance affect extension: loads the version constant,
# the helpers (constants, category, resonance engine), the runner module, and
# the client, then declares the namespace module itself.
require 'legion/extensions/agentic/affect/resonance/version'
require 'legion/extensions/agentic/affect/resonance/helpers/constants'
require 'legion/extensions/agentic/affect/resonance/helpers/category'
require 'legion/extensions/agentic/affect/resonance/helpers/resonance_engine'
require 'legion/extensions/agentic/affect/resonance/runners/cognitive_resonance'
require 'legion/extensions/agentic/affect/resonance/client'

module Legion
  module Extensions
    module Agentic
      module Affect
        # Namespace for the cognitive resonance subsystem; all behaviour lives
        # in the files required above.
        module Resonance
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'legion/extensions/agentic/affect/reward/helpers/constants'
require 'legion/extensions/agentic/affect/reward/helpers/reward_signal'
require 'legion/extensions/agentic/affect/reward/helpers/reward_store'
require 'legion/extensions/agentic/affect/reward/runners/reward'

module Legion
  module Extensions
    module Agentic
      module Affect
        module Reward
          # Facade for the reward subsystem: mixes in the runner methods and
          # owns the RewardStore those methods operate against.
          class Client
            include Runners::Reward

            # @return [Helpers::RewardStore] the store backing this client
            attr_reader :reward_store

            # @param reward_store [Helpers::RewardStore, nil] injectable store;
            #   when none (or nil) is given a fresh default store is built
            def initialize(reward_store: nil, **)
              @reward_store = reward_store
              @reward_store ||= Helpers::RewardStore.new
            end
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Legion
  module Extensions
    module Agentic
      module Affect
        module Reward
          module Helpers
            # Tunable constants driving the composite reward signal.
            module Constants
              # Independent reward channels and their blend weights. The
              # weights are normalised (they add up to 1.0), so a composite of
              # unit-valued channels stays inside REWARD_RANGE.
              REWARD_SOURCES = {
                prediction_accuracy: { weight: 0.20, description: 'Correct predictions reinforced' },
                curiosity_resolved: { weight: 0.15, description: 'Wonder resolution satisfaction' },
                goal_achieved: { weight: 0.20, description: 'Intention completion reward' },
                social_approval: { weight: 0.10, description: 'Trust increase from peers' },
                flow_state: { weight: 0.10, description: 'Intrinsic flow motivation' },
                error_avoidance: { weight: 0.10, description: 'Low error rate maintenance' },
                novelty_encounter: { weight: 0.10, description: 'Novel experience exploration' },
                homeostatic_balance: { weight: 0.05, description: 'System stability maintenance' }
              }.freeze

              # Smoothing factor for the running (EMA) reward average.
              REWARD_ALPHA = 0.15

              # Smoothing factor for the expected-reward baseline; slower than
              # REWARD_ALPHA so the prediction lags behind observations.
              PREDICTION_ALPHA = 0.1

              # Minimum reward-prediction-error magnitude that counts as a
              # learning signal.
              RPE_THRESHOLD = 0.05

              # Inclusive bounds the composite reward is clamped into.
              REWARD_RANGE = { min: -1.0, max: 1.0 }.freeze

              # Cut points for bucketing a reward prediction error.
              RPE_LEVELS = {
                large_positive: 0.3, # far above expectation — strong reinforcement
                positive: 0.1, # above expectation — moderate reinforcement
                neutral: 0.05, # within expectation — maintenance
                negative: -0.1, # below expectation — mild suppression
                large_negative: -0.3 # far below expectation — strong suppression
              }.freeze

              # Per-tick discount applied when summing historical rewards.
              TEMPORAL_DISCOUNT = 0.95

              # Upper bound on the global reward history length.
              MAX_REWARD_HISTORY = 200

              # Upper bound on each per-domain reward history length.
              MAX_DOMAIN_HISTORY = 50

              # Running average below this suggests anhedonia (chronically
              # under-rewarded).
              ANHEDONIA_THRESHOLD = -0.3

              # Running average above this suggests euphoria (chronically
              # over-rewarded).
              EUPHORIA_THRESHOLD = 0.7

              # Window size (ticks) used for volatility / momentum estimates.
              MOMENTUM_WINDOW = 10
            end
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Legion
  module Extensions
    module Agentic
      module Affect
        module Reward
          module Helpers
            # Computes a composite reward from weighted source channels, tracks
            # the reward prediction error (RPE) against an EMA baseline, and
            # keeps bounded global and per-domain reward histories.
            class RewardSignal
              attr_reader :running_average, :predicted_reward, :last_rpe,
                          :history, :domain_history, :tick_count

              def initialize
                @running_average = 0.0
                @predicted_reward = 0.0
                @last_rpe = 0.0
                @history = []
                @domain_history = {}
                @tick_count = 0
              end

              # Blends the given source signals into one clamped reward value,
              # refreshes the RPE and EMA baselines, appends the outcome to
              # history, and returns a summary hash for this tick.
              #
              # @param source_signals [Hash{Symbol=>Numeric}] per-source values
              def compute(source_signals)
                @tick_count += 1
                composite = weighted_sum(source_signals)
                reward = composite.clamp(Constants::REWARD_RANGE[:min], Constants::REWARD_RANGE[:max])

                # The RPE is measured against the *previous* prediction, so it
                # must be taken before the baselines are refreshed below.
                @last_rpe = reward - @predicted_reward
                @running_average = ema(@running_average, reward, Constants::REWARD_ALPHA)
                @predicted_reward = ema(@predicted_reward, reward, Constants::PREDICTION_ALPHA)

                record(reward, source_signals)

                {
                  reward: reward.round(4),
                  rpe: @last_rpe.round(4),
                  rpe_class: classify_rpe(@last_rpe),
                  running_average: @running_average.round(4),
                  predicted_reward: @predicted_reward.round(4),
                  sources: source_signals,
                  learning_signal: learning_signal?
                }
              end

              # Appends a reward observation to the named domain's bounded history.
              def record_domain_reward(domain, reward)
                bucket = (@domain_history[domain] ||= [])
                bucket << { reward: reward, at: Time.now.utc }
                bucket.shift while bucket.size > Constants::MAX_DOMAIN_HISTORY
              end

              # Mean reward observed for a domain; 0.0 when nothing is recorded.
              def domain_average(domain)
                samples = @domain_history[domain]
                return 0.0 if samples.nil? || samples.empty?

                samples.sum { |sample| sample[:reward] } / samples.size.to_f
              end

              # Direction of a domain's recent rewards: :improving, :declining,
              # :stable, or :no_data when fewer than five samples exist.
              def domain_trend(domain)
                samples = @domain_history[domain]
                return :no_data if samples.nil? || samples.size < 5

                values = samples.last(10).map { |sample| sample[:reward] }
                half = values.size / 2
                delta = mean(values[half..]) - mean(values[0...half])

                case
                when delta > 0.05 then :improving
                when delta < -0.05 then :declining
                else :stable
                end
              end

              # True while the running average sits below the anhedonia floor.
              def anhedonic?
                @running_average < Constants::ANHEDONIA_THRESHOLD
              end

              # True while the running average sits above the euphoria ceiling.
              def euphoric?
                @running_average > Constants::EUPHORIA_THRESHOLD
              end

              # True when the last RPE is large enough to warrant learning.
              def learning_signal?
                @last_rpe.abs >= Constants::RPE_THRESHOLD
              end

              # The most recent reward entries (newest last).
              def recent_rewards(limit = 20)
                @history.last(limit)
              end

              # Discounted sum of historical rewards, newest weighted highest.
              #
              # @param window [Integer, nil] restrict to the last N entries
              def discounted_return(window = nil)
                scope = window ? @history.last(window) : @history
                return 0.0 if scope.empty?

                scope.reverse_each.with_index.inject(0.0) do |acc, (entry, age)|
                  acc + (entry[:reward] * (Constants::TEMPORAL_DISCOUNT**age))
                end
              end

              # Standard deviation of rewards inside the momentum window;
              # 0.0 until at least three rewards have been recorded.
              def reward_volatility
                return 0.0 if @history.size < 3

                window_values = @history.last(Constants::MOMENTUM_WINDOW).map { |entry| entry[:reward] }
                avg = mean(window_values)
                variance = window_values.sum { |value| (value - avg)**2 } / window_values.size.to_f
                Math.sqrt(variance)
              end

              # Snapshot of the signal's current state.
              def to_h
                {
                  running_average: @running_average.round(4),
                  predicted_reward: @predicted_reward.round(4),
                  last_rpe: @last_rpe.round(4),
                  rpe_class: classify_rpe(@last_rpe),
                  tick_count: @tick_count,
                  learning_signal: learning_signal?,
                  anhedonic: anhedonic?,
                  euphoric: euphoric?,
                  volatility: reward_volatility.round(4),
                  domains_tracked: @domain_history.keys.size,
                  history_size: @history.size
                }
              end

              private

              # Weighted blend of the configured reward sources; channels
              # absent from +source_signals+ contribute 0.0.
              def weighted_sum(source_signals)
                Constants::REWARD_SOURCES.inject(0.0) do |acc, (source, config)|
                  acc + ((source_signals[source] || 0.0) * config[:weight])
                end
              end

              # Buckets an RPE value using the configured cut points.
              def classify_rpe(rpe)
                levels = Constants::RPE_LEVELS
                return :large_positive if rpe >= levels[:large_positive]
                return :positive if rpe >= levels[:positive]
                return :neutral if rpe >= -levels[:neutral]
                return :negative if rpe >= levels[:large_negative]

                :large_negative
              end

              # Exponential moving average step.
              def ema(current, observed, alpha)
                (current * (1.0 - alpha)) + (observed * alpha)
              end

              # Arithmetic mean; 0.0 for an empty list.
              def mean(values)
                values.empty? ? 0.0 : values.sum / values.size.to_f
              end

              # Appends this tick's outcome to the bounded global history.
              def record(reward, sources)
                entry = { reward: reward, rpe: @last_rpe, sources: sources, at: Time.now.utc }
                @history << entry
                @history.shift while @history.size > Constants::MAX_REWARD_HISTORY
              end
            end
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Legion
  module Extensions
    module Agentic
      module Affect
        module Reward
          module Helpers
            # Facade over a reward signal object: translates raw per-tick
            # results from the other subsystems (prediction, curiosity,
            # volition, trust, flow, attention, homeostasis) into normalized
            # source signals, and exposes reporting/health views over the
            # signal's accumulated state.
            class RewardStore
              # Maps each reward-source key to the private extractor that
              # derives its value from a tick-results hash.
              SIGNAL_EXTRACTORS = {
                prediction_accuracy: :extract_prediction_reward,
                curiosity_resolved: :extract_curiosity_reward,
                goal_achieved: :extract_goal_reward,
                social_approval: :extract_social_reward,
                flow_state: :extract_flow_reward,
                error_avoidance: :extract_error_reward,
                novelty_encounter: :extract_novelty_reward,
                homeostatic_balance: :extract_homeostatic_reward
              }.freeze

              attr_reader :signal

              # @param signal [#compute, nil] signal object to drive; a fresh
              #   RewardSignal is built when none is injected.
              def initialize(signal: nil)
                @signal = signal || RewardSignal.new
              end

              # Runs one tick through the signal: extracts the source signals,
              # computes the composite reward, and attributes that reward to
              # the currently active domain when one can be determined.
              #
              # @param tick_results [Hash] nested per-subsystem results
              # @return [Hash] the signal's compute result (includes :reward)
              def process_tick(tick_results)
                outcome = @signal.compute(extract_signals(tick_results))

                active_domain = extract_domain(tick_results)
                @signal.record_domain_reward(active_domain, outcome[:reward]) if active_domain

                outcome
              end

              # Summary view for a single domain: average, trend, and up to
              # the last 10 recorded rewards.
              def domain_report(domain)
                recorded = @signal.domain_history[domain]
                {
                  domain: domain,
                  average: @signal.domain_average(domain),
                  trend: @signal.domain_trend(domain),
                  history: recorded&.last(10) || []
                }
              end

              # Rounded running average for every domain the signal has seen.
              def all_domain_averages
                @signal.domain_history.keys.each_with_object({}) do |domain, averages|
                  averages[domain] = @signal.domain_average(domain).round(4)
                end
              end

              # Classifies the overall signal state into one of five health
              # buckets, most severe first.
              def health_assessment
                average = @signal.running_average
                volatility = @signal.reward_volatility

                return { status: :anhedonic, description: 'Persistently low reward — possible disengagement', severity: :high } if @signal.anhedonic?
                return { status: :euphoric, description: 'Persistently high reward — possible overconfidence', severity: :moderate } if @signal.euphoric?
                return { status: :volatile, description: 'Highly variable reward — unstable learning signals', severity: :moderate } if volatility > 0.4
                return { status: :neutral, description: 'Low reward signal — minimal learning happening', severity: :low } if average.between?(-0.1, 0.1)

                { status: :healthy, description: 'Balanced reward signal — healthy learning', severity: :none }
              end

              private

              # Builds the full source-signal hash by dispatching to each
              # registered extractor in SIGNAL_EXTRACTORS.
              def extract_signals(tick_results)
                SIGNAL_EXTRACTORS.transform_values { |extractor| send(extractor, tick_results) }
              end

              # Rescales rolling prediction accuracy (presumably 0..1 — the
              # range is set by the prediction engine) to -1..1; 0.0 when no
              # accuracy was reported.
              def extract_prediction_reward(tick_results)
                accuracy = tick_results.dig(:prediction_engine, :rolling_accuracy)
                accuracy ? (accuracy - 0.5) * 2.0 : 0.0
              end

              # Reward for resolved curiosity items (capped at 1.0) plus a
              # small bonus proportional to current curiosity intensity.
              def extract_curiosity_reward(tick_results)
                resolved = tick_results.dig(:curiosity, :resolved_count) || 0
                intensity = tick_results.dig(:curiosity, :intensity) || 0.0

                capped_resolved = [resolved * 0.3, 1.0].min
                (capped_resolved + (intensity * 0.2)).clamp(-1.0, 1.0)
              end

              # Net goal signal: completions reward more than failures punish.
              def extract_goal_reward(tick_results)
                completions = tick_results.dig(:volition, :completed_count) || 0
                failures = tick_results.dig(:volition, :failed_count) || 0

                ((completions * 0.4) - (failures * 0.3)).clamp(-1.0, 1.0)
              end

              # Amplified change in composite trust, clamped to -1..1.
              def extract_social_reward(tick_results)
                delta = tick_results.dig(:trust, :composite_delta) || 0.0
                (delta * 2.0).clamp(-1.0, 1.0)
              end

              # Scaled flow score while in flow; a small constant penalty
              # otherwise.
              def extract_flow_reward(tick_results)
                return -0.1 unless tick_results.dig(:flow, :in_flow)

                (tick_results.dig(:flow, :score) || 0.0) * 0.8
              end

              # High reward for low prediction error (crosses zero at an error
              # rate of 0.5); 0.0 when no rate is available.
              def extract_error_reward(tick_results)
                error_rate = tick_results.dig(:prediction_engine, :error_rate)
                error_rate ? (1.0 - (error_rate * 2.0)).clamp(-1.0, 1.0) : 0.0
              end

              # Half the novelty score plus a capped bonus per attention
              # spotlight.
              def extract_novelty_reward(tick_results)
                novelty = tick_results.dig(:attention, :novelty_score) || 0.0
                spotlights = tick_results.dig(:attention, :spotlight_count) || 0

                spotlight_bonus = [spotlights * 0.1, 0.5].min
                ((novelty * 0.5) + spotlight_bonus).clamp(-1.0, 1.0)
              end

              # Rewards homeostatic stability: driven by whichever of the worst
              # deviation or allostatic load is larger.
              def extract_homeostatic_reward(tick_results)
                deviation = tick_results.dig(:homeostasis, :worst_deviation) || 0.0
                allostatic = tick_results.dig(:homeostasis, :allostatic_load) || 0.0

                stability = 1.0 - [deviation, allostatic].max
                (stability - 0.5).clamp(-1.0, 1.0)
              end

              # First domain reported by volition, curiosity, or attention
              # (in that priority order); nil when none is present.
              def extract_domain(tick_results)
                tick_results.dig(:volition, :current_domain) ||
                  tick_results.dig(:curiosity, :active_domain) ||
                  tick_results.dig(:attention, :focus_domain)
              end
            end
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Legion
  module Extensions
    module Agentic
      module Affect
        module Reward
          module Runners
            # Runner entry points for the reward subsystem. Each call is a
            # thin, debug-logged wrapper over the lazily created
            # Helpers::RewardStore instance.
            module Reward
              include Legion::Extensions::Helpers::Lex if Legion::Extensions.const_defined?(:Helpers) &&
                                                          Legion::Extensions::Helpers.const_defined?(:Lex)

              # Feeds one tick's subsystem results through the reward store
              # and returns the computed reward hash.
              def compute_reward(tick_results: {}, **)
                outcome = reward_store.process_tick(tick_results)

                Legion::Logging.debug "[reward] reward=#{outcome[:reward]} rpe=#{outcome[:rpe]} " \
                                      "class=#{outcome[:rpe_class]} learning=#{outcome[:learning_signal]}"

                outcome
              end

              # Current signal snapshot merged with the store's health view.
              def reward_status(**)
                signal = reward_store.signal
                assessment = reward_store.health_assessment

                Legion::Logging.debug "[reward] status: avg=#{signal.running_average.round(3)} " \
                                      "predicted=#{signal.predicted_reward.round(3)} health=#{assessment[:status]}"

                signal.to_h.merge(health: assessment)
              end

              # Per-domain reward report (average, trend, recent history).
              def reward_for(domain:, **)
                summary = reward_store.domain_report(domain)
                Legion::Logging.debug "[reward] domain=#{domain} avg=#{summary[:average].round(3)} trend=#{summary[:trend]}"
                summary
              end

              # The most recent reward entries plus the discounted return
              # computed over that same window.
              def reward_history(limit: 20, **)
                entries = reward_store.signal.recent_rewards(limit)
                Legion::Logging.debug "[reward] history: #{entries.size} entries"

                {
                  history: entries,
                  total: reward_store.signal.history.size,
                  discounted_return: reward_store.signal.discounted_return(limit).round(4)
                }
              end

              # Rounded averages per tracked domain, with the best and worst
              # domains called out (nil when nothing is tracked yet).
              def domain_rewards(**)
                per_domain = reward_store.all_domain_averages
                Legion::Logging.debug "[reward] domains: #{per_domain.size} tracked"

                {
                  domains: per_domain,
                  domain_count: per_domain.size,
                  best_domain: per_domain.max_by { |_, average| average }&.first,
                  worst_domain: per_domain.min_by { |_, average| average }&.first
                }
              end

              # One-shot statistics bundle across the whole reward signal.
              def reward_stats(**)
                signal = reward_store.signal
                assessment = reward_store.health_assessment

                Legion::Logging.debug '[reward] stats'

                {
                  running_average: signal.running_average.round(4),
                  predicted_reward: signal.predicted_reward.round(4),
                  volatility: signal.reward_volatility.round(4),
                  tick_count: signal.tick_count,
                  health: assessment,
                  domains_tracked: signal.domain_history.keys.size,
                  history_size: signal.history.size,
                  discounted_return: signal.discounted_return.round(4),
                  anhedonic: signal.anhedonic?,
                  euphoric: signal.euphoric?
                }
              end

              private

              # Memoized store backing every runner call on this receiver.
              def reward_store
                @reward_store ||= Helpers::RewardStore.new
              end
            end
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'legion/extensions/agentic/affect/reward/version'
|
|
4
|
+
require 'legion/extensions/agentic/affect/reward/helpers/constants'
|
|
5
|
+
require 'legion/extensions/agentic/affect/reward/helpers/reward_signal'
|
|
6
|
+
require 'legion/extensions/agentic/affect/reward/helpers/reward_store'
|
|
7
|
+
require 'legion/extensions/agentic/affect/reward/runners/reward'
|
|
8
|
+
require 'legion/extensions/agentic/affect/reward/client'
|
|
9
|
+
|
|
10
|
+
module Legion
  module Extensions
    module Agentic
      module Affect
        # Namespace marker for the reward extension; all behaviour lives in
        # the helper/runner/client files required at the top of this file.
        module Reward
        end
      end
    end
  end
end
|