lex-agentic-language 0.1.9 → 0.1.10
This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/lib/legion/extensions/agentic/language/narrator/helpers/llm_enhancer.rb +24 -2
- data/lib/legion/extensions/agentic/language/narrator/runners/narrator.rb +20 -1
- data/lib/legion/extensions/agentic/language/version.rb +1 -1
- data/spec/legion/extensions/agentic/language/narrator/helpers/llm_enhancer_spec.rb +15 -3
- data/spec/legion/extensions/agentic/language/narrator/runners/narrator_llm_spec.rb +15 -5
- metadata +1 -1
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: 2e44972f87bca2dccf617db64c48e8e3b99d2174bb45f1d21cd71a72a2489a59
|
|
4
|
+
data.tar.gz: 1e9fb62235879f874ef6c2ca07b6f58dc104fcc5fcb3d64591dcdf5d6477484f
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: aed85b23f9a856e2a26ac20bfdacd601c0ad09c8394f38bedb8195d28852fab393ee170b29d176109c83d6ab7cc426883270621c0bc629a7cab6244f94d38cc5
|
|
7
|
+
data.tar.gz: e57dbce2e2254462c9940b57b7dcac4b349e8edb3dad723e4c4d3c06bcc0dc6882cf6a4e52dfdb75bed12fdde5bd755f18c6e447c242fbd5d3fe4dcc42b5917b
|
data/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,9 @@
|
|
|
1
1
|
# Changelog
|
|
2
2
|
|
|
3
|
+
## [0.1.10] - 2026-04-27
|
|
4
|
+
### Fixed
|
|
5
|
+
- Narrator LLM enhancement now skips empty idle narration and rate-limits provider failure warnings without warning-level backtrace floods. Fixes #7
|
|
6
|
+
|
|
3
7
|
## [0.1.9] - 2026-04-22
|
|
4
8
|
### Added
|
|
5
9
|
- InnerSpeech::Actor::DecayInnerSpeech (60s) — first actor in the extension, enables autonomous salience decay
|
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
# frozen_string_literal: true
|
|
2
2
|
|
|
3
|
+
require 'concurrent/atomic/atomic_reference'
|
|
4
|
+
|
|
3
5
|
module Legion
|
|
4
6
|
module Extensions
|
|
5
7
|
module Agentic
|
|
@@ -13,6 +15,8 @@ module Legion
|
|
|
13
15
|
Write 3-5 sentences that feel like genuine introspection, not a report.
|
|
14
16
|
Vary your sentence structure. Use present tense. Be concise and vivid.
|
|
15
17
|
PROMPT
|
|
18
|
+
FAILURE_LOG_INTERVAL = 60
|
|
19
|
+
FAILURE_LOGGED_AT = Concurrent::AtomicReference.new
|
|
16
20
|
|
|
17
21
|
module_function
|
|
18
22
|
|
|
@@ -33,8 +37,7 @@ module Legion
|
|
|
33
37
|
response = llm_ask(prompt)
|
|
34
38
|
parse_narrate_response(response)
|
|
35
39
|
rescue StandardError => e
|
|
36
|
-
|
|
37
|
-
log.warn(e.backtrace)
|
|
40
|
+
log_failure(e)
|
|
38
41
|
nil
|
|
39
42
|
end
|
|
40
43
|
|
|
@@ -69,6 +72,25 @@ module Legion
|
|
|
69
72
|
|
|
70
73
|
private_class_method :pipeline_available?
|
|
71
74
|
|
|
75
|
+
def log_failure(error)
|
|
76
|
+
if log_failure_now?
|
|
77
|
+
log.warn("[narrator:llm] narrate failed: #{error.class}: #{error.message}")
|
|
78
|
+
FAILURE_LOGGED_AT.set(Time.now.utc)
|
|
79
|
+
end
|
|
80
|
+
log.debug(error.backtrace)
|
|
81
|
+
end
|
|
82
|
+
|
|
83
|
+
private_class_method :log_failure
|
|
84
|
+
|
|
85
|
+
def log_failure_now?
|
|
86
|
+
last_logged_at = FAILURE_LOGGED_AT.get
|
|
87
|
+
return true unless last_logged_at
|
|
88
|
+
|
|
89
|
+
(Time.now.utc - last_logged_at) >= FAILURE_LOG_INTERVAL
|
|
90
|
+
end
|
|
91
|
+
|
|
92
|
+
private_class_method :log_failure_now?
|
|
93
|
+
|
|
72
94
|
def build_narrate_prompt(sections_data)
|
|
73
95
|
parts = [
|
|
74
96
|
'Generate a first-person internal monologue based on the following cognitive state:',
|
|
@@ -13,7 +13,7 @@ module Legion
|
|
|
13
13
|
def narrate(tick_results: {}, cognitive_state: {}, **)
|
|
14
14
|
entry = Helpers::Synthesizer.narrate(tick_results: tick_results, cognitive_state: cognitive_state)
|
|
15
15
|
|
|
16
|
-
if Helpers::LlmEnhancer.available?
|
|
16
|
+
if Helpers::LlmEnhancer.available? && meaningful_for_llm?(tick_results, cognitive_state)
|
|
17
17
|
sections_data = build_llm_sections_data(tick_results, cognitive_state, entry)
|
|
18
18
|
llm_result = Helpers::LlmEnhancer.narrate(sections_data: sections_data)
|
|
19
19
|
if llm_result
|
|
@@ -103,6 +103,25 @@ module Legion
|
|
|
103
103
|
@journal ||= Helpers::Journal.new
|
|
104
104
|
end
|
|
105
105
|
|
|
106
|
+
def meaningful_for_llm?(tick_results, cognitive_state)
|
|
107
|
+
meaningful_hash?(tick_results) || meaningful_hash?(cognitive_state)
|
|
108
|
+
end
|
|
109
|
+
|
|
110
|
+
def meaningful_hash?(value)
|
|
111
|
+
return false unless value.is_a?(Hash)
|
|
112
|
+
|
|
113
|
+
value.any? do |_, nested|
|
|
114
|
+
case nested
|
|
115
|
+
when Hash
|
|
116
|
+
meaningful_hash?(nested)
|
|
117
|
+
when Array
|
|
118
|
+
nested.any?
|
|
119
|
+
else
|
|
120
|
+
!nested.nil?
|
|
121
|
+
end
|
|
122
|
+
end
|
|
123
|
+
end
|
|
124
|
+
|
|
106
125
|
def build_llm_sections_data(tick_results, cognitive_state, entry)
|
|
107
126
|
{
|
|
108
127
|
emotion: llm_emotion_data(tick_results, cognitive_state),
|
|
@@ -78,16 +78,28 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Helpers::LlmEnha
|
|
|
78
78
|
end
|
|
79
79
|
|
|
80
80
|
context 'when an error occurs' do
|
|
81
|
-
|
|
81
|
+
before { described_class::FAILURE_LOGGED_AT.set(nil) }
|
|
82
|
+
|
|
83
|
+
it 'returns nil and logs a concise warning with debug backtrace' do
|
|
82
84
|
llm_double = double('Legion::LLM', started?: true)
|
|
83
85
|
allow(llm_double).to receive(:chat).and_raise(StandardError, 'connection failed')
|
|
84
86
|
stub_const('Legion::LLM', llm_double)
|
|
85
87
|
|
|
86
|
-
expect(Legion::Logging).to receive(:warn).with(/narrator:llm.*
|
|
87
|
-
expect(Legion::Logging).to receive(:
|
|
88
|
+
expect(Legion::Logging).to receive(:warn).with(/narrator:llm.*StandardError: connection failed/)
|
|
89
|
+
expect(Legion::Logging).to receive(:debug).with(instance_of(Array))
|
|
88
90
|
result = described_class.narrate(sections_data: sections_data)
|
|
89
91
|
expect(result).to be_nil
|
|
90
92
|
end
|
|
93
|
+
|
|
94
|
+
it 'throttles repeated failure warnings' do
|
|
95
|
+
llm_double = double('Legion::LLM', started?: true)
|
|
96
|
+
allow(llm_double).to receive(:chat).and_raise(StandardError, 'connection failed')
|
|
97
|
+
stub_const('Legion::LLM', llm_double)
|
|
98
|
+
allow(Legion::Logging).to receive(:debug)
|
|
99
|
+
|
|
100
|
+
expect(Legion::Logging).to receive(:warn).once
|
|
101
|
+
2.times { described_class.narrate(sections_data: sections_data) }
|
|
102
|
+
end
|
|
91
103
|
end
|
|
92
104
|
|
|
93
105
|
context 'with empty sections_data' do
|
|
@@ -4,9 +4,10 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Runners::Narrato
|
|
|
4
4
|
let(:client) { Legion::Extensions::Agentic::Language::Narrator::Client.new }
|
|
5
5
|
|
|
6
6
|
describe '#narrate with LLM available' do
|
|
7
|
+
let(:chat_double) { double('chat') }
|
|
8
|
+
|
|
7
9
|
before do
|
|
8
10
|
response_double = double('response', content: 'I feel a deep sense of focus and possibility.')
|
|
9
|
-
chat_double = double('chat')
|
|
10
11
|
allow(chat_double).to receive(:with_instructions)
|
|
11
12
|
allow(chat_double).to receive(:ask).and_return(response_double)
|
|
12
13
|
llm_double = double('Legion::LLM', started?: true)
|
|
@@ -22,8 +23,8 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Runners::Narrato
|
|
|
22
23
|
expect(result[:source]).to eq(:llm)
|
|
23
24
|
end
|
|
24
25
|
|
|
25
|
-
it 'returns the LLM narrative string' do
|
|
26
|
-
result = client.narrate(tick_results: {}, cognitive_state: {})
|
|
26
|
+
it 'returns the LLM narrative string when there is meaningful cognitive data' do
|
|
27
|
+
result = client.narrate(tick_results: { emotional_evaluation: { valence: 0.6 } }, cognitive_state: {})
|
|
27
28
|
expect(result[:narrative]).to eq('I feel a deep sense of focus and possibility.')
|
|
28
29
|
end
|
|
29
30
|
|
|
@@ -34,9 +35,18 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Runners::Narrato
|
|
|
34
35
|
end
|
|
35
36
|
|
|
36
37
|
it 'appends to journal' do
|
|
37
|
-
client.narrate(tick_results: {}, cognitive_state: {})
|
|
38
|
+
client.narrate(tick_results: { emotional_evaluation: { valence: 0.6 } }, cognitive_state: {})
|
|
38
39
|
expect(client.journal.size).to eq(1)
|
|
39
40
|
end
|
|
41
|
+
|
|
42
|
+
it 'uses the mechanical pipeline for empty idle narration' do
|
|
43
|
+
expect(chat_double).not_to receive(:ask)
|
|
44
|
+
|
|
45
|
+
result = client.narrate(tick_results: {}, cognitive_state: {})
|
|
46
|
+
|
|
47
|
+
expect(result).not_to have_key(:source)
|
|
48
|
+
expect(result[:narrative]).to be_a(String)
|
|
49
|
+
end
|
|
40
50
|
end
|
|
41
51
|
|
|
42
52
|
describe '#narrate with LLM available but narrate returns nil' do
|
|
@@ -50,7 +60,7 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Runners::Narrato
|
|
|
50
60
|
end
|
|
51
61
|
|
|
52
62
|
it 'falls back to mechanical pipeline' do
|
|
53
|
-
result = client.narrate(tick_results: {}, cognitive_state: {})
|
|
63
|
+
result = client.narrate(tick_results: { emotional_evaluation: { valence: 0.6 } }, cognitive_state: {})
|
|
54
64
|
expect(result).not_to have_key(:source)
|
|
55
65
|
expect(result[:narrative]).to be_a(String)
|
|
56
66
|
end
|