lex-reflection 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: f00b5c12fad1944aecab293210605cb153c7b687c9e83f6edf6dd7e0ff7c1ef4
4
+ data.tar.gz: 00b17affa2c5c7f41e2d77db913194752f060284ade1310bc3ec82e96842aad5
5
+ SHA512:
6
+ metadata.gz: 8e8427810654fd6319cfcad7ed17a96b3dbf91fbc0be92615a594f0fecfef2a6c9c4c3afbeac6d4340a1401bf378d3acdc985972c55fb8d1be09df9160f8fb94
7
+ data.tar.gz: 1f3c636b1d80e6c20065ca081d66d46f9b8280f7dba424b1fbf7d9629bf85ce830fb7ab79d7a011911a0de86cdebb290fde8d423947d10cdb01a9a3fb1b04c20
data/Gemfile ADDED
@@ -0,0 +1,15 @@
1
+ # frozen_string_literal: true
2
+
3
+ source 'https://rubygems.org'
4
+ gemspec
5
+
6
+ group :test do
7
+ gem 'rake'
8
+ gem 'rspec'
9
+ gem 'rspec_junit_formatter'
10
+ gem 'rubocop', require: false
11
+ gem 'rubocop-rspec', require: false
12
+ gem 'simplecov'
13
+ end
14
+
15
+ gem 'legion-gaia', path: '../../legion-gaia'
data/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Esity
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,86 @@
1
+ # lex-reflection
2
+
3
+ Meta-cognitive monitoring and adaptation engine for the LegionIO cognitive architecture. Generates structured self-assessments from tick data.
4
+
5
+ ## What It Does
6
+
7
+ Each tick, analyzes cognitive performance across seven categories: prediction calibration, curiosity effectiveness, emotional stability, trust drift, memory health, cognitive load, and mode patterns. Detects notable patterns and generates reflection entries with severity labels and actionable recommendations. Maintains per-category health scores that combine into a weighted `cognitive_health` composite. Reflections can be marked as acted on to track which adaptations were followed.
8
+
9
+ ## Usage
10
+
11
+ ```ruby
12
+ client = Legion::Extensions::Reflection::Client.new
13
+
14
+ # Reflect on each tick's results
15
+ result = client.reflect(
16
+ tick_results: {
17
+ prediction_engine: { confidence: 0.35 },
18
+ working_memory_integration: { curiosity_intensity: 0.9 },
19
+ emotional_evaluation: { stability: 0.4, valence: -0.2 },
20
+ memory_consolidation: { pruned: 8, total: 10 },
21
+ elapsed: 4.8,
22
+ budget: 5.0
23
+ }
24
+ )
25
+ # => { reflections_generated: 2, cognitive_health: 0.62,
26
+ # new_reflections: [
27
+ # { category: :prediction_calibration, severity: :significant,
28
+ # observation: 'Prediction accuracy is below threshold',
29
+ # recommendation: :investigate, acted_on: false }
30
+ # ], total_reflections: 2 }
31
+
32
+ # Check cognitive health
33
+ client.cognitive_health
34
+ # => { health: 0.62, category_scores: { prediction_calibration: 0.35, ... },
35
+ # unacted_count: 2, critical_count: 0, significant_count: 1 }
36
+
37
+ # Mark a reflection as acted on
38
+ client.adapt(reflection_id: reflection_id)
39
+
40
+ # Query reflections
41
+ client.recent_reflections(limit: 5)
42
+ client.reflections_by_category(category: :emotional_stability)
43
+ client.reflection_stats
44
+ ```
45
+
46
+ ## Monitored Categories
47
+
48
+ | Category | Signal |
49
+ |----------|--------|
50
+ | `prediction_calibration` | `tick_results[:prediction_engine][:confidence]` |
51
+ | `curiosity_effectiveness` | `tick_results[:working_memory_integration][:resolution_rate]` |
52
+ | `emotional_stability` | `tick_results[:emotional_evaluation][:stability]` |
53
+ | `memory_health` | pruned/total ratio from `memory_consolidation` |
54
+ | `cognitive_load` | elapsed/budget ratio |
55
+ | `trust_drift` | rolling drop in `tick_results[:action_selection][:trust_score]` |
56
+ | `mode_patterns` | mode oscillation frequency (reserved — no monitor emits this category yet) |
57
+
58
+ ## Severity Levels
59
+
60
+ `:trivial` < `:notable` < `:significant` < `:critical`
61
+
62
+ ## LLM Enhancement
63
+
64
+ When `legion-llm` is loaded and started, `Helpers::LlmEnhancer` enriches reflection output with analytically generated prose.
65
+
66
+ **Methods**:
67
+
68
+ `LlmEnhancer.enhance_reflection(monitors_data:, health_scores:)` — takes the raw monitors data array and per-category health score hash produced by `Helpers::Monitors`, and returns `{ observations: { category_sym => "text", ... } }`. The runner replaces the `observation` string on each reflection entry with the LLM-generated text. All category, severity, and recommendation symbols are left untouched — only the human-readable observation string changes.
69
+
70
+ `LlmEnhancer.reflect_on_dream(dream_results:)` — takes the phase result hash from a completed dream cycle and returns `{ reflection: "..." }` containing a 2-4 sentence first-person, present-tense reflection on what emerged. Used by the `reflect_on_dream` runner method, which is called from `lex-dream`'s `dream_reflection` phase (phase 7). Returns `source: :llm` or `source: :mechanical` in the runner result.
71
+
72
+ **Availability gate**: `LlmEnhancer.available?` checks `Legion::LLM.started?`. Returns `false` if `legion-llm` is not loaded, not configured, or raises any error.
73
+
74
+ **Fallback**: When LLM is unavailable or either method returns `nil`, `reflect` uses the template observation strings from `Helpers::Monitors`, and `reflect_on_dream` uses `build_mechanical_dream_reflection` (assembles a sentence from memory audit, contradiction, and agenda counts).
75
+
76
+ ## Development
77
+
78
+ ```bash
79
+ bundle install
80
+ bundle exec rspec
81
+ bundle exec rubocop
82
+ ```
83
+
84
+ ## License
85
+
86
+ MIT
@@ -0,0 +1,29 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative 'lib/legion/extensions/reflection/version'
4
+
5
# Gem specification for lex-reflection: package metadata, file manifest,
# and development dependency declarations.
Gem::Specification.new do |spec|
  spec.name = 'lex-reflection'
  spec.version = Legion::Extensions::Reflection::VERSION
  spec.authors = ['Esity']
  spec.email = ['matthewdiverson@gmail.com']

  spec.summary = 'LEX Reflection'
  spec.description = 'Metacognitive self-monitoring for brain-modeled agentic AI'
  spec.homepage = 'https://github.com/LegionIO/lex-reflection'
  spec.license = 'MIT'
  spec.required_ruby_version = '>= 3.4'

  spec.metadata['homepage_uri'] = spec.homepage
  spec.metadata['source_code_uri'] = 'https://github.com/LegionIO/lex-reflection'
  spec.metadata['documentation_uri'] = 'https://github.com/LegionIO/lex-reflection'
  # NOTE(review): changelog_uri points at the repository root rather than a
  # CHANGELOG file — confirm this is intentional.
  spec.metadata['changelog_uri'] = 'https://github.com/LegionIO/lex-reflection'
  spec.metadata['bug_tracker_uri'] = 'https://github.com/LegionIO/lex-reflection/issues'
  spec.metadata['rubygems_mfa_required'] = 'true'

  # Files shipped in the gem: everything under lib/ and spec/ plus the
  # top-level packaging and documentation files.
  # NOTE(review): including spec/ in the packaged gem is unusual — confirm
  # the test suite is meant to ship.
  spec.files = Dir.chdir(File.expand_path(__dir__)) do
    Dir.glob('{lib,spec}/**/*') + %w[lex-reflection.gemspec Gemfile LICENSE README.md]
  end
  spec.require_paths = ['lib']
  spec.add_development_dependency 'legion-gaia'
end
@@ -0,0 +1,23 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'legion/extensions/reflection/helpers/constants'
4
+ require 'legion/extensions/reflection/helpers/reflection'
5
+ require 'legion/extensions/reflection/helpers/reflection_store'
6
+ require 'legion/extensions/reflection/helpers/monitors'
7
+ require 'legion/extensions/reflection/runners/reflection'
8
+
9
module Legion
  module Extensions
    module Reflection
      # Public entry point for the reflection extension. All runner methods
      # (reflect, adapt, queries) are mixed in from Runners::Reflection and
      # operate against the client's reflection store.
      class Client
        include Runners::Reflection

        # Backing store that holds generated reflection entries.
        attr_reader :reflection_store

        # @param store [Object, nil] optional pre-built reflection store; a
        #   fresh Helpers::ReflectionStore is created when none is supplied.
        #   Additional keyword arguments are accepted and ignored.
        def initialize(store: nil, **)
          @reflection_store = store
          @reflection_store ||= Helpers::ReflectionStore.new
        end
      end
    end
  end
end
@@ -0,0 +1,62 @@
1
+ # frozen_string_literal: true
2
+
3
module Legion
  module Extensions
    module Reflection
      module Helpers
        # Shared tuning values for the reflection engine: the category and
        # severity vocabularies, monitor trigger thresholds, and the weights
        # used to blend per-category health scores into cognitive_health.
        module Constants
          # Every category a reflection entry may carry.
          CATEGORIES = %i[
            prediction_calibration curiosity_effectiveness emotional_stability
            trust_drift memory_health cognitive_load mode_patterns
          ].freeze

          # Ordered least to most severe.
          SEVERITIES = %i[trivial notable significant critical].freeze

          # Actions a reflection may recommend to the wider system.
          RECOMMENDATIONS = %i[
            increase_curiosity decrease_curiosity stabilize_emotion
            rebuild_trust consolidate_memory reduce_load
            celebrate_success investigate no_action
          ].freeze

          # --- Monitor trigger thresholds ---
          PREDICTION_ACCURACY_LOW = 0.4
          PREDICTION_ACCURACY_DROP = 0.2
          CURIOSITY_RESOLUTION_LOW = 0.2
          CURIOSITY_RESOLUTION_HIGH = 0.8
          EMOTION_INSTABILITY_THRESHOLD = 0.3
          EMOTION_FLATNESS_THRESHOLD = 0.05
          TRUST_DROP_THRESHOLD = 0.15
          MEMORY_DECAY_RATIO_HIGH = 0.8
          BUDGET_OVER_THRESHOLD = 0.9
          MODE_OSCILLATION_THRESHOLD = 5

          # Number of recent ticks retained for trend detection.
          METRIC_WINDOW_SIZE = 20

          # Per-category weights for the composite health score.
          # NOTE(review): :mode_patterns has no weight here — confirm it is
          # deliberately excluded from the composite.
          HEALTH_WEIGHTS = {
            prediction_calibration: 0.25,
            curiosity_effectiveness: 0.15,
            emotional_stability: 0.15,
            trust_drift: 0.15,
            memory_health: 0.15,
            cognitive_load: 0.15
          }.freeze

          # Hard cap on stored reflection entries.
          MAX_REFLECTIONS = 100
        end
      end
    end
  end
end
@@ -0,0 +1,162 @@
1
+ # frozen_string_literal: true
2
+
3
module Legion
  module Extensions
    module Reflection
      module Helpers
        # Optional LLM layer. When legion-llm is started, rewrites template
        # monitor observations into analytical prose and produces dream-cycle
        # reflections. Every public method returns nil on any failure so the
        # mechanical fallbacks can take over.
        module LlmEnhancer
          SYSTEM_PROMPT = <<~PROMPT
            You are the metacognitive reflection engine for an autonomous AI agent built on LegionIO.
            You analyze post-tick cognitive metrics and produce insightful observations.
            Be analytical and specific. Reference the actual numbers. Identify correlations between metrics.
            Write as internal reflection, not a report. Present tense, first person.
          PROMPT

          # Maps prompt label -> actual category symbol from Constants::CATEGORIES
          CATEGORY_LABEL_MAP = {
            'EMOTION' => :emotional_stability,
            'PREDICTION' => :prediction_calibration,
            'MEMORY' => :memory_health,
            'TRUST' => :trust_drift,
            'CURIOSITY' => :curiosity_effectiveness,
            'IDENTITY' => :mode_patterns
          }.freeze

          REFLECTION_CATEGORIES = CATEGORY_LABEL_MAP.keys.freeze

          module_function

          # Truthy only when legion-llm is loaded, responds to started?, and
          # reports itself started; any raised error counts as unavailable.
          def available?
            defined?(Legion::LLM) && Legion::LLM.respond_to?(:started?) && Legion::LLM.started?
          rescue StandardError
            false
          end

          # Asks the LLM to rewrite monitor observations. Returns
          # { observations: { category_sym => text } } or nil on any failure.
          def enhance_reflection(monitors_data:, health_scores:)
            request = build_enhance_reflection_prompt(monitors_data: monitors_data, health_scores: health_scores)
            parse_enhance_reflection_response(llm_ask(request))
          rescue StandardError => e
            Legion::Logging.warn "[reflection:llm] enhance_reflection failed: #{e.message}"
            nil
          end

          # Asks the LLM for a short first-person reflection on a finished
          # dream cycle. Returns { reflection: text } or nil on any failure.
          def reflect_on_dream(dream_results:)
            request = build_reflect_on_dream_prompt(dream_results: dream_results)
            parse_reflect_on_dream_response(llm_ask(request))
          rescue StandardError => e
            Legion::Logging.warn "[reflection:llm] reflect_on_dream failed: #{e.message}"
            nil
          end

          # --- Private helpers ---

          # Opens a chat seeded with the shared system prompt and sends one message.
          def llm_ask(prompt)
            conversation = Legion::LLM.chat
            conversation.with_instructions(SYSTEM_PROMPT)
            conversation.ask(prompt)
          end
          private_class_method :llm_ask

          def build_enhance_reflection_prompt(monitors_data:, health_scores:)
            metric_block = format_monitors_data(monitors_data)
            health_block = health_scores.map { |category, value| "#{category}: #{value.round(3)}" }.join("\n")

            <<~PROMPT
              Analyze these post-tick cognitive metrics and generate insightful observations.

              METRICS:
              #{metric_block}

              HEALTH SCORES:
              #{health_block}

              For each category, write 1-2 sentences of genuine analytical observation.
              Look for correlations between categories. Note concerning or interesting patterns.

              Format EXACTLY as (one line per category):
              EMOTION: <observation>
              PREDICTION: <observation>
              MEMORY: <observation>
              TRUST: <observation>
              CURIOSITY: <observation>
              IDENTITY: <observation>
            PROMPT
          end
          private_class_method :build_enhance_reflection_prompt

          # One "LABEL: k=v, ..." line per monitor entry; a bare label when
          # the entry carried no metrics. Non-array input yields ''.
          def format_monitors_data(monitors_data)
            return '' unless monitors_data.is_a?(Array)

            monitors_data.filter_map do |entry|
              next unless entry.is_a?(Hash) && entry[:category]

              label = entry[:category].to_s.upcase
              metrics = entry[:metrics]
              next label unless metrics.is_a?(Hash) && metrics.any?

              pairs = metrics.map { |key, value| "#{key}=#{value.is_a?(Float) ? value.round(3) : value}" }
              "#{label}: #{pairs.join(', ')}"
            end.join("\n")
          end
          private_class_method :format_monitors_data

          # Pulls each "LABEL: text" line out of the reply; nil when the
          # reply is missing or no known label matched.
          def parse_enhance_reflection_response(response)
            content = response&.content
            return nil unless content

            observations = CATEGORY_LABEL_MAP.each_with_object({}) do |(label, category), acc|
              text = content[/^#{label}:\s*(.+)$/i, 1]
              acc[category] = text.strip if text
            end
            observations.any? ? { observations: observations } : nil
          end
          private_class_method :parse_enhance_reflection_response

          def build_reflect_on_dream_prompt(dream_results:)
            summary = format_dream_results(dream_results)

            <<~PROMPT
              Reflect on the completed dream cycle and its cognitive significance.

              DREAM CYCLE RESULTS:
              #{summary}

              Generate a first-person, present-tense reflection on what emerged from this dream cycle.
              Be specific about patterns, consolidations, and what needs attention.

              Format EXACTLY as:
              REFLECTION: <2-4 sentences of internal reflection>
            PROMPT
          end
          private_class_method :build_reflect_on_dream_prompt

          # One "phase: k=v, ..." line per phase (first four pairs, :error
          # key omitted); 'no results' when nothing usable was given.
          def format_dream_results(dream_results)
            return 'no results' unless dream_results.is_a?(Hash) && dream_results.any?

            dream_results.filter_map do |phase, result|
              next unless result.is_a?(Hash)

              pairs = result.except(:error).map { |key, value| "#{key}=#{value}" }
              "#{phase}: #{pairs.first(4).join(', ')}"
            end.join("\n")
          end
          private_class_method :format_dream_results

          # Extracts everything after "REFLECTION:" (multiline); nil when absent.
          def parse_reflect_on_dream_response(response)
            text = response&.content
            return nil unless text

            captured = text[/REFLECTION:\s*(.+)/im, 1]
            captured && { reflection: captured.strip }
          end
          private_class_method :parse_reflect_on_dream_response
        end
      end
    end
  end
end
@@ -0,0 +1,182 @@
1
+ # frozen_string_literal: true
2
+
3
module Legion
  module Extensions
    module Reflection
      module Helpers
        # Post-tick monitors. Each monitor reads one section of the tick
        # result hash (plus the rolling metric history where trends matter)
        # and emits zero or more reflection entries via ReflectionFactory.
        module Monitors
          module_function

          # Runs every monitor and returns the combined reflection list.
          def run_all(tick_results, metric_history)
            [
              monitor_predictions(tick_results, metric_history),
              monitor_curiosity(tick_results),
              monitor_emotions(tick_results),
              monitor_trust(tick_results, metric_history),
              monitor_memory(tick_results),
              monitor_cognitive_load(tick_results)
            ].flatten(1)
          end

          # Flags low prediction confidence and falling accuracy trends.
          def monitor_predictions(tick_results, history)
            engine = tick_results[:prediction_engine]
            return [] unless engine.is_a?(Hash)

            found = []
            conf = engine[:confidence]
            if conf.is_a?(Numeric) && conf < Constants::PREDICTION_ACCURACY_LOW
              found << ReflectionFactory.new_reflection(
                category: :prediction_calibration,
                observation: "Prediction confidence is low at #{(conf * 100).round}%",
                severity: ReflectionFactory.severity_for_drop(1.0 - conf),
                metrics: { confidence: conf },
                recommendation: :increase_curiosity
              )
            end

            found + detect_accuracy_trend(history)
          end

          # Flags curiosity resolution rates that are unusually low or high.
          def monitor_curiosity(tick_results)
            integration = tick_results[:working_memory_integration]
            return [] unless integration.is_a?(Hash)

            rate = integration[:resolution_rate]
            return [] unless rate.is_a?(Numeric)

            if rate < Constants::CURIOSITY_RESOLUTION_LOW
              [ReflectionFactory.new_reflection(
                category: :curiosity_effectiveness,
                observation: "Curiosity resolution rate is low at #{(rate * 100).round}%",
                severity: :notable,
                metrics: { resolution_rate: rate },
                recommendation: :decrease_curiosity
              )]
            elsif rate > Constants::CURIOSITY_RESOLUTION_HIGH
              [ReflectionFactory.new_reflection(
                category: :curiosity_effectiveness,
                observation: "Curiosity resolution rate is excellent at #{(rate * 100).round}%",
                severity: :trivial,
                metrics: { resolution_rate: rate },
                recommendation: :celebrate_success
              )]
            else
              []
            end
          end

          # Flags unstable emotion and suspiciously flat emotion.
          def monitor_emotions(tick_results)
            emotion = tick_results[:emotional_evaluation]
            return [] unless emotion.is_a?(Hash)

            # Stability may live at the top level or under :momentum.
            stability = emotion[:stability] || emotion.dig(:momentum, :stability)
            return [] unless stability.is_a?(Numeric)

            if stability < Constants::EMOTION_INSTABILITY_THRESHOLD
              [ReflectionFactory.new_reflection(
                category: :emotional_stability,
                observation: "Emotional state is unstable (stability: #{stability.round(2)})",
                severity: :significant,
                metrics: { stability: stability },
                recommendation: :stabilize_emotion
              )]
            elsif stability > (1.0 - Constants::EMOTION_FLATNESS_THRESHOLD)
              [ReflectionFactory.new_reflection(
                category: :emotional_stability,
                observation: 'Emotional state is unusually flat — possible disengagement',
                severity: :notable,
                metrics: { stability: stability },
                recommendation: :investigate
              )]
            else
              []
            end
          end

          # Flags a sustained drop in trust scores across the history window.
          # NOTE(review): for histories shorter than 10 entries the first(5)
          # and last(5) windows overlap and dilute the measured drop —
          # confirm this is acceptable.
          def monitor_trust(tick_results, history)
            selection = tick_results[:action_selection]
            return [] unless selection.is_a?(Hash) && selection[:trust_score].is_a?(Numeric)

            scores = history.filter_map { |entry| entry.dig(:action_selection, :trust_score) }
            return [] if scores.size < 3

            newest = scores.last(5)
            oldest = scores.first(5)
            recent_avg = newest.sum / newest.size.to_f
            older_avg = oldest.sum / oldest.size.to_f
            drop = older_avg - recent_avg
            return [] unless drop > Constants::TRUST_DROP_THRESHOLD

            [ReflectionFactory.new_reflection(
              category: :trust_drift,
              observation: "Trust scores have dropped by #{(drop * 100).round}% recently",
              severity: ReflectionFactory.severity_for_drop(drop),
              metrics: { drop: drop, recent_avg: recent_avg, older_avg: older_avg },
              recommendation: :rebuild_trust
            )]
          end

          # Flags ticks where most memory traces were pruned.
          def monitor_memory(tick_results)
            consolidation = tick_results[:memory_consolidation]
            return [] unless consolidation.is_a?(Hash)

            pruned = consolidation[:pruned] || 0
            total = consolidation[:total] || 1
            ratio = pruned.to_f / [total, 1].max
            return [] unless ratio > Constants::MEMORY_DECAY_RATIO_HIGH

            [ReflectionFactory.new_reflection(
              category: :memory_health,
              observation: "High memory decay ratio: #{(ratio * 100).round}% of traces pruned",
              severity: :significant,
              metrics: { pruned: pruned, total: total, ratio: ratio },
              recommendation: :consolidate_memory
            )]
          end

          # Flags ticks that consumed most or all of their time budget.
          def monitor_cognitive_load(tick_results)
            elapsed = tick_results[:elapsed]
            budget = tick_results[:budget]
            return [] unless elapsed.is_a?(Numeric) && budget.is_a?(Numeric) && budget.positive?

            usage = elapsed / budget
            return [] unless usage > Constants::BUDGET_OVER_THRESHOLD

            [ReflectionFactory.new_reflection(
              category: :cognitive_load,
              observation: "Tick budget utilization at #{(usage * 100).round}%",
              severity: usage > 1.0 ? :significant : :notable,
              metrics: { utilization: usage, elapsed: elapsed, budget: budget },
              recommendation: :reduce_load
            )]
          end

          # Compares the oldest and newest five confidence readings and
          # reports a downward trend when the gap exceeds the threshold.
          def detect_accuracy_trend(history)
            readings = history.filter_map { |entry| entry.dig(:prediction_engine, :confidence) }
            return [] if readings.size < 5

            recent_avg = readings.last(5).sum / 5.0
            older_avg = readings.first(5).sum / 5.0
            drop = older_avg - recent_avg
            return [] unless drop > Constants::PREDICTION_ACCURACY_DROP

            [ReflectionFactory.new_reflection(
              category: :prediction_calibration,
              observation: "Prediction accuracy trending down: #{(older_avg * 100).round}% -> #{(recent_avg * 100).round}%",
              severity: ReflectionFactory.severity_for_drop(drop),
              metrics: { trend_drop: drop, recent_avg: recent_avg, older_avg: older_avg },
              recommendation: :increase_curiosity
            )]
          end
        end
      end
    end
  end
end
+ end
@@ -0,0 +1,50 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'securerandom'
4
+
5
module Legion
  module Extensions
    module Reflection
      module Helpers
        # Builds reflection entry hashes and maps severities to numeric
        # weights used in health-score calculations.
        module ReflectionFactory
          # Numeric weight per severity; unknown severities weigh 0.0.
          SEVERITY_WEIGHTS = {
            critical: 1.0,
            significant: 0.7,
            notable: 0.4,
            trivial: 0.1
          }.freeze

          module_function

          # Builds a new reflection entry hash.
          #
          # @param category [Symbol] one of Constants::CATEGORIES
          # @param observation [String] human-readable description
          # @param severity [Symbol] one of Constants::SEVERITIES
          # @param metrics [Hash] supporting numbers for the observation
          # @param recommendation [Symbol] one of Constants::RECOMMENDATIONS
          # @return [Hash] entry with a fresh reflection_id, UTC timestamp,
          #   and acted_on: false
          # @raise [ArgumentError] on an unknown category, severity, or
          #   recommendation
          def new_reflection(category:, observation:, severity: :notable,
                             metrics: {}, recommendation: :no_action)
            raise ArgumentError, "invalid category: #{category}" unless Constants::CATEGORIES.include?(category)
            raise ArgumentError, "invalid severity: #{severity}" unless Constants::SEVERITIES.include?(severity)
            # Consistency fix: category and severity were validated but
            # recommendation was not, even though Constants::RECOMMENDATIONS
            # defines the allowed vocabulary.
            unless Constants::RECOMMENDATIONS.include?(recommendation)
              raise ArgumentError, "invalid recommendation: #{recommendation}"
            end

            {
              reflection_id: SecureRandom.uuid,
              category: category,
              observation: observation,
              severity: severity,
              metrics: metrics,
              recommendation: recommendation,
              created_at: Time.now.utc,
              acted_on: false
            }
          end

          # Returns the numeric weight for a severity symbol (0.0 when unknown).
          def severity_weight(severity)
            SEVERITY_WEIGHTS.fetch(severity, 0.0)
          end

          # Maps the magnitude of a metric drop to a severity label.
          def severity_for_drop(drop)
            return :critical if drop >= 0.4
            return :significant if drop >= 0.25
            return :notable if drop >= 0.1

            :trivial
          end
        end
      end
    end
  end
end