lex-agentic-language 0.1.9 → 0.1.11

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 2388849ad378d8489580d4638dc181111798e6ce0a153e99fe40f2c441152b96
-  data.tar.gz: 05c2295ad8bf6a17e2592cce376b6c15408c4bf5f0a35786cdfe650ecd50ffa1
+  metadata.gz: 79cfb7e1534a2010a54b836636c9a5406fefda03bb97cb700f131f1a92a6e77c
+  data.tar.gz: 026cd8c1a5831eed7555d231471595bb2d3ab5ba832d3a98c90646c01baa54f4
 SHA512:
-  metadata.gz: 4b68a01ec6173f4ad3bcb486ed33c9ffe87c269da569c6615e49d96366f79a6f997a1265827f511343a735f15cfdc3464f032a0a9db825556b6b38d2519af96d
-  data.tar.gz: 9baccb232491d12120647c84aa5aaae42a2936f2aec7f83b681fab2226af871f13433277c964fc612afbef07c17275e4c2a5231168f2f158545658d07469e5b1
+  metadata.gz: 347a89a7300d788e8b76b1882c52ab280ad99c8ed1c38981cc7de53071dbf905678b086847d9923b7fa9242a08189e78adc658a7f553aace3b0669712d466119
+  data.tar.gz: 3b15a51fe618f5f478c48182688803a626558eca1d20a66093b5cdf384e3dbc53e41a96f61a3c4360711273cda056154cc997ac62b84a6a3de5e986519a9a0b4
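The checksum changes can be reproduced locally. A minimal verification sketch (standard RubyGems tooling, not part of this diff): a .gem file is a tar archive whose metadata.gz and data.tar.gz members are exactly what checksums.yaml hashes.

    # Hedged sketch: recompute the member digests of the fetched gem.
    # Assumes "gem fetch lex-agentic-language -v 0.1.11" was run first.
    require 'digest'
    require 'rubygems/package'

    File.open('lex-agentic-language-0.1.11.gem', 'rb') do |io|
      Gem::Package::TarReader.new(io) do |tar|
        tar.each do |entry|
          next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)

          # Should print the two "+" SHA256 values shown above.
          puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
        end
      end
    end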
data/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
 # Changelog
 
+## [0.1.11] - 2026-05-07
+### Fixed
+- Narrator LLM enhancement now sends native chat message payloads with system/user roles while preserving legacy session fallback for older test doubles.
+- LLM narration state maps reflection `unacted_count` into pending adaptations.
+
+## [0.1.10] - 2026-04-27
+### Fixed
+- Narrator LLM enhancement now skips empty idle narration and rate-limits provider failure warnings without warning-level backtrace floods. Fixes #7
+
 ## [0.1.9] - 2026-04-22
 ### Added
 - InnerSpeech::Actor::DecayInnerSpeech (60s) — first actor in the extension, enables autonomous salience decay
@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require 'concurrent/atomic/atomic_reference'
+
 module Legion
   module Extensions
     module Agentic
@@ -13,6 +15,8 @@ module Legion
         Write 3-5 sentences that feel like genuine introspection, not a report.
         Vary your sentence structure. Use present tense. Be concise and vivid.
       PROMPT
+      FAILURE_LOG_INTERVAL = 60
+      FAILURE_LOGGED_AT = Concurrent::AtomicReference.new
 
      module_function
 
@@ -33,8 +37,7 @@ module Legion
        response = llm_ask(prompt)
        parse_narrate_response(response)
      rescue StandardError => e
-       log.warn("[narrator:llm] narrate failed: #{e.message}")
-       log.warn(e.backtrace)
+       log_failure(e)
        nil
      end
 
@@ -48,16 +51,39 @@ module Legion
            caller: { extension: 'lex-agentic-language', mode: :narrator }
          )
          content = response&.message&.dig(:content)
-         ::Struct.new(:content).new(content) if content
        else
-         chat = Legion::LLM.chat # rubocop:disable Legion/HelperMigration/DirectLlm
-         chat.with_instructions(SYSTEM_PROMPT)
-         chat.ask(prompt)
+         response = Legion::LLM.chat( # rubocop:disable Legion/HelperMigration/DirectLlm
+           message: [
+             { role: 'system', content: SYSTEM_PROMPT },
+             { role: 'user', content: prompt }
+           ],
+           caller: { extension: 'lex-agentic-language', mode: :narrator }
+         )
+         content = extract_response_content(response, prompt)
        end
+       ::Struct.new(:content).new(content) if content
      end
 
      private_class_method :llm_ask
 
+     def extract_response_content(response, prompt = nil)
+       return response.strip if response.is_a?(String)
+       return response.content if response.respond_to?(:content)
+
+       if response.respond_to?(:ask)
+         response.with_instructions(SYSTEM_PROMPT) if response.respond_to?(:with_instructions)
+         asked = response.ask(prompt)
+         return extract_response_content(asked)
+       end
+       return nil unless response.is_a?(Hash)
+
+       response[:content] || response['content'] ||
+         response.dig(:message, :content) || response.dig('message', 'content') ||
+         response[:response] || response['response']
+     end
+
+     private_class_method :extract_response_content
+
      def pipeline_available?
        !!(defined?(Legion::LLM::Pipeline::GaiaCaller) &&
           Legion::LLM.respond_to?(:pipeline_enabled?) &&
@@ -69,6 +95,25 @@ module Legion
 
      private_class_method :pipeline_available?
 
+     def log_failure(error)
+       if log_failure_now?
+         log.warn("[narrator:llm] narrate failed: #{error.class}: #{error.message}")
+         FAILURE_LOGGED_AT.set(Time.now.utc)
+       end
+       log.debug(error.backtrace)
+     end
+
+     private_class_method :log_failure
+
+     def log_failure_now?
+       last_logged_at = FAILURE_LOGGED_AT.get
+       return true unless last_logged_at
+
+       (Time.now.utc - last_logged_at) >= FAILURE_LOG_INTERVAL
+     end
+
+     private_class_method :log_failure_now?
+
      def build_narrate_prompt(sections_data)
        parts = [
          'Generate a first-person internal monologue based on the following cognitive state:',
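That is the last hunk for the enhancer helper. Since extract_response_content is duck-typed over several provider response shapes, a short behavior sketch may help (illustrative values only; the method is a private module function, hence send):

    # Hedged sketch of the shapes extract_response_content accepts;
    # assumes the gem is loaded. Results follow the code above.
    enhancer = Legion::Extensions::Agentic::Language::Narrator::Helpers::LlmEnhancer

    enhancer.send(:extract_response_content, "plain text\n")                 # => "plain text" (String branch)
    enhancer.send(:extract_response_content, Struct.new(:content).new('x'))  # => "x" (responds to #content)
    enhancer.send(:extract_response_content, { message: { content: 'x' } })  # => "x" (nested Hash)
    enhancer.send(:extract_response_content, { 'response' => 'x' })          # => "x" (flat Hash fallback)
    enhancer.send(:extract_response_content, 42)                             # => nil (unrecognized shape)

Objects that respond to #ask (the legacy session doubles exercised in the specs below) are replayed with the system prompt plus the user prompt, and the result is extracted recursively.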
@@ -13,7 +13,7 @@ module Legion
      def narrate(tick_results: {}, cognitive_state: {}, **)
        entry = Helpers::Synthesizer.narrate(tick_results: tick_results, cognitive_state: cognitive_state)
 
-       if Helpers::LlmEnhancer.available?
+       if Helpers::LlmEnhancer.available? && meaningful_for_llm?(tick_results, cognitive_state)
          sections_data = build_llm_sections_data(tick_results, cognitive_state, entry)
          llm_result = Helpers::LlmEnhancer.narrate(sections_data: sections_data)
          if llm_result
@@ -103,6 +103,25 @@ module Legion
        @journal ||= Helpers::Journal.new
      end
 
+     def meaningful_for_llm?(tick_results, cognitive_state)
+       meaningful_hash?(tick_results) || meaningful_hash?(cognitive_state)
+     end
+
+     def meaningful_hash?(value)
+       return false unless value.is_a?(Hash)
+
+       value.any? do |_, nested|
+         case nested
+         when Hash
+           meaningful_hash?(nested)
+         when Array
+           nested.any?
+         else
+           !nested.nil?
+         end
+       end
+     end
+
      def build_llm_sections_data(tick_results, cognitive_state, entry)
        {
          emotion: llm_emotion_data(tick_results, cognitive_state),
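Because the new predicate gates the whole LLM path, here is a quick behavior sketch (meaningful_hash? is private, hence send; keys and values are illustrative):

    # Hedged sketch of the idle-skip predicate defined above.
    client = Legion::Extensions::Agentic::Language::Narrator::Client.new

    client.send(:meaningful_hash?, {})                                         # => false (nothing to narrate)
    client.send(:meaningful_hash?, { reflection: {} })                         # => false (empty nested hash)
    client.send(:meaningful_hash?, { goals: [] })                              # => false (empty array)
    client.send(:meaningful_hash?, { note: nil })                              # => false (nil leaf)
    client.send(:meaningful_hash?, { emotional_evaluation: { valence: 0.6 } }) # => true (non-nil scalar leaf)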
@@ -168,7 +187,7 @@ module Legion
        r = cognitive_state[:reflection] || {}
        {
          health: r[:health] || 1.0,
-         pending_adaptations: r[:pending_adaptations] || 0
+         pending_adaptations: r[:pending_adaptations] || r[:unacted_count] || 0
        }
      end
 
@@ -4,7 +4,7 @@ module Legion
   module Extensions
     module Agentic
       module Language
-        VERSION = '0.1.9'
+        VERSION = '0.1.11'
       end
     end
   end
@@ -49,27 +49,32 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Helpers::LlmEnha
 
   context 'when LLM returns a response' do
     it 'returns the response content as a string' do
-      response_double = double('response', content: 'I feel alert and curious about what lies ahead.')
-      chat_double = double('chat')
-      allow(chat_double).to receive(:with_instructions)
-      allow(chat_double).to receive(:ask).and_return(response_double)
       llm_double = double('Legion::LLM', started?: true)
-      allow(llm_double).to receive(:chat).and_return(chat_double)
+      allow(llm_double).to receive(:chat).and_return(content: 'I feel alert and curious about what lies ahead.')
       stub_const('Legion::LLM', llm_double)
 
       result = described_class.narrate(sections_data: sections_data)
       expect(result).to be_a(String)
       expect(result).to eq('I feel alert and curious about what lies ahead.')
     end
+
+    it 'sends system instructions and user prompt in the native chat message payload' do
+      llm_double = double('Legion::LLM', started?: true)
+      allow(llm_double).to receive(:chat).and_return(content: 'native narrative')
+      stub_const('Legion::LLM', llm_double)
+
+      described_class.narrate(sections_data: sections_data)
+
+      expect(llm_double).to have_received(:chat)
+        .with(hash_including(message: array_including(hash_including(role: 'system'),
+                                                      hash_including(role: 'user'))))
+    end
   end
 
   context 'when LLM returns nil response' do
     it 'returns nil' do
-      chat_double = double('chat')
-      allow(chat_double).to receive(:with_instructions)
-      allow(chat_double).to receive(:ask).and_return(nil)
       llm_double = double('Legion::LLM', started?: true)
-      allow(llm_double).to receive(:chat).and_return(chat_double)
+      allow(llm_double).to receive(:chat).and_return(nil)
       stub_const('Legion::LLM', llm_double)
 
       result = described_class.narrate(sections_data: sections_data)
@@ -78,26 +83,34 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Helpers::LlmEnha
   end
 
   context 'when an error occurs' do
-    it 'returns nil and logs a warning' do
+    before { described_class::FAILURE_LOGGED_AT.set(nil) }
+
+    it 'returns nil and logs a concise warning with debug backtrace' do
       llm_double = double('Legion::LLM', started?: true)
       allow(llm_double).to receive(:chat).and_raise(StandardError, 'connection failed')
       stub_const('Legion::LLM', llm_double)
 
-      expect(Legion::Logging).to receive(:warn).with(/narrator:llm.*narrate failed/)
-      expect(Legion::Logging).to receive(:warn).with(instance_of(Array))
+      expect(Legion::Logging).to receive(:warn).with(/narrator:llm.*StandardError: connection failed/)
+      expect(Legion::Logging).to receive(:debug).with(instance_of(Array))
       result = described_class.narrate(sections_data: sections_data)
       expect(result).to be_nil
     end
+
+    it 'throttles repeated failure warnings' do
+      llm_double = double('Legion::LLM', started?: true)
+      allow(llm_double).to receive(:chat).and_raise(StandardError, 'connection failed')
+      stub_const('Legion::LLM', llm_double)
+      allow(Legion::Logging).to receive(:debug)
+
+      expect(Legion::Logging).to receive(:warn).once
+      2.times { described_class.narrate(sections_data: sections_data) }
+    end
   end
 
   context 'with empty sections_data' do
     it 'does not raise and returns a string when LLM responds' do
-      response_double = double('response', content: 'Everything seems quiet.')
-      chat_double = double('chat')
-      allow(chat_double).to receive(:with_instructions)
-      allow(chat_double).to receive(:ask).and_return(response_double)
       llm_double = double('Legion::LLM', started?: true)
-      allow(llm_double).to receive(:chat).and_return(chat_double)
+      allow(llm_double).to receive(:chat).and_return(content: 'Everything seems quiet.')
       stub_const('Legion::LLM', llm_double)
 
       result = described_class.narrate(sections_data: {})
@@ -4,9 +4,10 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Runners::Narrato
   let(:client) { Legion::Extensions::Agentic::Language::Narrator::Client.new }
 
   describe '#narrate with LLM available' do
+    let(:chat_double) { double('chat') }
+
     before do
       response_double = double('response', content: 'I feel a deep sense of focus and possibility.')
-      chat_double = double('chat')
       allow(chat_double).to receive(:with_instructions)
       allow(chat_double).to receive(:ask).and_return(response_double)
       llm_double = double('Legion::LLM', started?: true)
@@ -22,8 +23,8 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Runners::Narrato
       expect(result[:source]).to eq(:llm)
     end
 
-    it 'returns the LLM narrative string' do
-      result = client.narrate(tick_results: {}, cognitive_state: {})
+    it 'returns the LLM narrative string when there is meaningful cognitive data' do
+      result = client.narrate(tick_results: { emotional_evaluation: { valence: 0.6 } }, cognitive_state: {})
       expect(result[:narrative]).to eq('I feel a deep sense of focus and possibility.')
     end
 
@@ -34,9 +35,18 @@
     end
 
     it 'appends to journal' do
-      client.narrate(tick_results: {}, cognitive_state: {})
+      client.narrate(tick_results: { emotional_evaluation: { valence: 0.6 } }, cognitive_state: {})
       expect(client.journal.size).to eq(1)
     end
+
+    it 'uses the mechanical pipeline for empty idle narration' do
+      expect(chat_double).not_to receive(:ask)
+
+      result = client.narrate(tick_results: {}, cognitive_state: {})
+
+      expect(result).not_to have_key(:source)
+      expect(result[:narrative]).to be_a(String)
+    end
   end
 
   describe '#narrate with LLM available but narrate returns nil' do
@@ -50,7 +60,7 @@
     end
 
     it 'falls back to mechanical pipeline' do
-      result = client.narrate(tick_results: {}, cognitive_state: {})
+      result = client.narrate(tick_results: { emotional_evaluation: { valence: 0.6 } }, cognitive_state: {})
      expect(result).not_to have_key(:source)
      expect(result[:narrative]).to be_a(String)
    end
@@ -33,6 +33,15 @@ RSpec.describe Legion::Extensions::Agentic::Language::Narrator::Runners::Narrato
     end
   end
 
+  describe 'reflection data for LLM narration' do
+    it 'maps reflection unacted_count to pending_adaptations' do
+      data = client.send(:llm_reflection_data, reflection: { health: 0.7, unacted_count: 3 })
+
+      expect(data[:health]).to eq(0.7)
+      expect(data[:pending_adaptations]).to eq(3)
+    end
+  end
+
   describe '#recent_entries' do
     before do
       5.times { |i| client.narrate(tick_results: { emotional_evaluation: { valence: i * 0.2 } }, cognitive_state: {}) }
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: lex-agentic-language
 version: !ruby/object:Gem::Version
-  version: 0.1.9
+  version: 0.1.11
 platform: ruby
 authors:
 - Esity