lex-agentic-memory 0.1.9 → 0.1.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 9e7d4306cac20ea6106de6c5961c23651a3a891d1891ae986869904141e8374c
4
- data.tar.gz: 77a5a10d16f09dd6c9f3057219435c676909529740e2d6d429932f542d601e46
3
+ metadata.gz: 6f9d21093f342adfd656a2775f74aa1e53a397c16c484b4826bfa076d06799d7
4
+ data.tar.gz: 917b0430e8fe409bbe1e857d9e3989fe722405fc7a0c746de463663552aea51c
5
5
  SHA512:
6
- metadata.gz: d1d4ad0cb84988d8d76965f45017429caa2d3e77d6bf184673d4353fc249547d61da10853c297891c496c56862d93fa010130f53c28fbd054b61cff91bfb6c0f
7
- data.tar.gz: 86ff6e678ebc1d9f3690966cbef2a3e6f90fb1635eaa78b0911734cf2a3dd230b63672365c9c1e87cf4a3081513cd05d4e2cf74258b969978f9d8512a4347bc2
6
+ metadata.gz: baa0bf1ac1b2e8b49af95680e021643df0c85fd1e9eed423fc41327d7aae4e52c93b951eed9662754469e9d2ed3df63be17d22793b91d7678e2cbf61a69aa841
7
+ data.tar.gz: 6463bbc07b183c771179ce3447b0846fb5b6b48065c16931c439ef5124e2e81091e13079a5c50ab6189acf5646c7fba010ae869949ef053de3612485aa21ec4c
data/CHANGELOG.md CHANGED
@@ -1,5 +1,28 @@
1
1
  # Changelog
2
2
 
3
+ ## [0.1.12] - 2026-03-26
4
+
5
+ ### Fixed
6
+ - `PostgresStore#store` used SQLite-specific `insert_conflict(:replace)` which fails on PostgreSQL with `TypeError: no implicit conversion of Symbol into Integer`. Replaced with proper `insert_conflict(target: :trace_id, update: ...)` syntax that generates correct `ON CONFLICT` SQL
7
+
8
+ ## [0.1.11] - 2026-03-25
9
+
10
+ ### Added
11
+ - `Helpers::HotTier` module: Redis hot-tier cache in front of PostgresStore using `Legion::Cache::RedisHash`. Stores traces as Redis hashes with 24-hour TTL, maintains a sorted-set index per tenant, and provides `cache_trace`, `fetch_trace`, and `evict_trace` operations
12
+ - `PostgresStore#retrieve` checks hot tier first; falls through to DB on miss and populates hot tier on DB hit
13
+ - `PostgresStore#store` writes through to hot tier after successful DB write
14
+ - `PostgresStore#delete` evicts from hot tier before DB delete
15
+ - `PostgresStore#update` evicts stale hot-tier entry after DB update
16
+ - 32 new specs covering HotTier interface, serialize/deserialize round-trip, availability guard, and all four PostgresStore integration points
17
+
18
+ ## [0.1.10] - 2026-03-25
19
+
20
+ ### Added
21
+ - `Helpers::PostgresStore`: write-through durable store backed by Legion::Data (PostgreSQL or MySQL), scoped by tenant_id. Implements full store interface: store, retrieve, retrieve_by_type, retrieve_by_domain, all_traces, delete, update, record_coactivation, associations_for, walk_associations, delete_lowest_confidence, delete_least_recently_used, firmware_traces, flush (no-op), db_ready?
22
+ - `create_store` in `Trace` module now selects PostgresStore when a PostgreSQL or MySQL connection is available with both required tables; falls back to CacheStore or in-memory Store
23
+ - `postgres_available?` and `resolve_tenant_id` private helpers on `Trace` module
24
+ - 46 new specs covering all PostgresStore methods using an in-memory SQLite DB
25
+
3
26
  ## [0.1.9] - 2026-03-25
4
27
 
5
28
  ### Added
@@ -0,0 +1,98 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Legion
4
+ module Extensions
5
+ module Agentic
6
+ module Memory
7
+ module Trace
8
+ module Helpers
9
module HotTier
  # Seconds a trace survives in the Redis hot tier before Redis expires it.
  HOT_TTL = 86_400 # 24 hours

  module_function

  # Cache a trace in the Redis hot tier as a flat string hash, (re)arm its
  # TTL, and record it in the per-tenant sorted-set index scored by wall time.
  #
  # @param trace [Hash] trace hash; must include :trace_id
  # @param tenant_id [String, nil] tenant scope; falls back to the trace's
  #   :partition_id when nil.
  #   NOTE(review): fetch_trace/evict_trace do NOT apply this fallback — a
  #   trace cached with tenant_id: nil is keyed under its partition_id and
  #   cannot be fetched back with tenant_id: nil. Confirm callers always pass
  #   an explicit tenant_id.
  # @return [nil]
  def cache_trace(trace, tenant_id: nil)
    return unless available?

    tid = tenant_id || trace[:partition_id]
    key = trace_key(tid, trace[:trace_id])
    Legion::Cache::RedisHash.hset(key, serialize_trace(trace))
    Legion::Cache::RedisHash.expire(key, HOT_TTL)

    # Sorted-set index lets callers enumerate a tenant's hot traces by recency.
    index_key = "legion:tier:hot:#{tid}"
    Legion::Cache::RedisHash.zadd(index_key, Time.now.to_f, trace[:trace_id])
  end

  # Fetch a trace from the hot tier.
  #
  # @return [Hash, nil] deserialized trace hash, or nil on a miss or when the
  #   hot tier is unavailable
  def fetch_trace(trace_id, tenant_id: nil)
    return nil unless available?

    data = Legion::Cache::RedisHash.hgetall(trace_key(tenant_id, trace_id))
    return nil if data.nil? || data.empty?

    deserialize_trace(data)
  end

  # Evict a trace from the hot tier and drop it from the sorted-set index.
  # NOTE(review): this uses Legion::Cache.delete while every other call in
  # this module goes through Legion::Cache::RedisHash — confirm both address
  # the same Redis keyspace, otherwise eviction silently no-ops.
  def evict_trace(trace_id, tenant_id: nil)
    return unless available?

    Legion::Cache.delete(trace_key(tenant_id, trace_id))

    index_key = "legion:tier:hot:#{tenant_id}"
    Legion::Cache::RedisHash.zrem(index_key, trace_id)
  end

  # Returns true when the RedisHash module is loaded and Redis is reachable.
  # Normalized with !! so the documented true/false contract holds: the
  # previous version returned nil (not false) when the constant was undefined.
  def available?
    !!(defined?(Legion::Cache::RedisHash) &&
       Legion::Cache::RedisHash.redis_available?)
  rescue StandardError
    false
  end

  # Build the namespaced Redis key for a trace.
  def trace_key(tenant_id, trace_id)
    "legion:trace:#{tenant_id}:#{trace_id}"
  end

  # Serialize a trace hash to a string-only flat hash suitable for Redis HSET.
  # Only this subset of trace fields survives the hot-tier round trip, and
  # storage_tier is forced to 'hot'.
  def serialize_trace(trace)
    {
      'trace_id' => trace[:trace_id].to_s,
      'trace_type' => trace[:trace_type].to_s,
      'content_payload' => trace[:content_payload].to_s,
      'strength' => trace[:strength].to_s,
      'peak_strength' => trace[:peak_strength].to_s,
      'confidence' => trace[:confidence].to_s,
      'storage_tier' => 'hot',
      'partition_id' => trace[:partition_id].to_s,
      'last_reinforced' => (trace[:last_reinforced] || Time.now).to_s
    }
  end

  # Deserialize a Redis string-hash back to a typed trace hash.
  # NOTE(review): :last_reinforced stays a String here, whereas the DB read
  # path yields Time objects — callers comparing timestamps must normalize.
  def deserialize_trace(data)
    {
      trace_id: data['trace_id'],
      trace_type: data['trace_type']&.to_sym,
      content_payload: data['content_payload'],
      strength: data['strength']&.to_f,
      peak_strength: data['peak_strength']&.to_f,
      confidence: data['confidence']&.to_f,
      storage_tier: :hot,
      partition_id: data['partition_id'],
      last_reinforced: data['last_reinforced']
    }
  end
end
93
+ end
94
+ end
95
+ end
96
+ end
97
+ end
98
+ end
@@ -0,0 +1,394 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative 'hot_tier'
4
+
5
+ module Legion
6
+ module Extensions
7
+ module Agentic
8
+ module Memory
9
+ module Trace
10
+ module Helpers
11
+ # Write-through durable store backed by Legion::Data (PostgreSQL or MySQL).
12
+ # All writes go directly to the database — no in-memory dirty tracking, no flush.
13
+ # Scoped by tenant_id so multiple agents can share the same DB tables safely.
14
# Write-through durable store backed by Legion::Data (PostgreSQL or MySQL).
# All writes go directly to the database — no in-memory dirty tracking, no
# flush. Reads are scoped by tenant_id so multiple agents can share the same
# DB tables safely. The Redis hot tier (Helpers::HotTier) is consulted and
# refreshed opportunistically whenever it is available.
class PostgresStore
  TRACES_TABLE = :memory_traces
  ASSOCIATIONS_TABLE = :memory_associations

  # @param tenant_id [String, nil] scopes reads and tags writes; nil means
  #   unscoped (single-tenant deployment).
  def initialize(tenant_id: nil)
    @tenant_id = tenant_id
  end

  # Store (upsert) a trace by trace_id, then write through to the hot tier.
  # PostgreSQL uses ON CONFLICT (trace_id) DO UPDATE; MySQL has no conflict
  # target, so the mysql2 branch relies on ON DUPLICATE KEY UPDATE semantics.
  #
  # @param trace [Hash] trace hash; must include :trace_id
  # @return [String, nil] the trace_id on success, nil if the DB is not ready
  #   or the insert failed
  def store(trace)
    return nil unless db_ready?

    row = serialize_trace(trace)
    ds = db[TRACES_TABLE]
    if db.adapter_scheme == :mysql2
      ds.insert_conflict(update: row.except(:trace_id)).insert(row)
    else
      ds.insert_conflict(target: :trace_id, update: row.except(:trace_id)).insert(row)
    end
    HotTier.cache_trace(trace, tenant_id: @tenant_id) if HotTier.available?
    trace[:trace_id]
  rescue StandardError => e
    log_warn("store failed: #{e.message}")
    nil
  end

  # Retrieve a single trace by trace_id (tenant-scoped).
  # Checks the Redis hot tier first; falls through to the DB on a miss and
  # populates the hot tier on a DB hit.
  # NOTE(review): HotTier.serialize_trace keeps only a subset of fields, so a
  # later hot-tier hit returns a reduced hash (no :content, :domain_tags,
  # :associated_traces, …) compared to a DB hit — confirm callers tolerate
  # both shapes.
  #
  # @return [Hash, nil] trace hash or nil
  def retrieve(trace_id)
    if HotTier.available?
      cached = HotTier.fetch_trace(trace_id, tenant_id: @tenant_id)
      return cached if cached
    end

    return nil unless db_ready?

    row = traces_ds.where(trace_id: trace_id).first
    trace = row ? deserialize_trace(row) : nil
    HotTier.cache_trace(trace, tenant_id: @tenant_id) if HotTier.available? && trace
    trace
  rescue StandardError => e
    log_warn("retrieve failed: #{e.message}")
    nil
  end

  # Retrieve traces of a given type at or above min_strength, strongest first.
  #
  # @return [Array<Hash>] deserialized traces ([] on error or DB not ready)
  def retrieve_by_type(type, limit: 100, min_strength: 0.0)
    return [] unless db_ready?

    rows = traces_ds
           .where(trace_type: type.to_s)
           .where { strength >= min_strength }
           .order(Sequel.desc(:strength))
           .limit(limit)
           .all
    rows.map { |r| deserialize_trace(r) }
  rescue StandardError => e
    log_warn("retrieve_by_type failed: #{e.message}")
    []
  end

  # Retrieve traces whose domain_tags column contains the given tag string
  # (substring LIKE match against the serialized JSON array), strongest first.
  def retrieve_by_domain(tag, limit: 50)
    return [] unless db_ready?

    rows = traces_ds
           .where(Sequel.like(:domain_tags, "%#{tag}%"))
           .order(Sequel.desc(:strength))
           .limit(limit)
           .all
    rows.map { |r| deserialize_trace(r) }
  rescue StandardError => e
    log_warn("retrieve_by_domain failed: #{e.message}")
    []
  end

  # Return all traces for this tenant.
  def all_traces
    return [] unless db_ready?

    traces_ds.all.map { |r| deserialize_trace(r) }
  rescue StandardError => e
    log_warn("all_traces failed: #{e.message}")
    []
  end

  # Delete a trace and its association rows, evicting the hot-tier entry
  # first so readers cannot resurrect a deleted trace from cache.
  # NOTE(review): the deletes go through db[TABLE] unscoped rather than the
  # tenant-scoped traces_ds — safe only if trace_ids are globally unique
  # across tenants; confirm.
  def delete(trace_id)
    HotTier.evict_trace(trace_id, tenant_id: @tenant_id) if HotTier.available?
    return unless db_ready?

    db[ASSOCIATIONS_TABLE].where(trace_id_a: trace_id).delete
    db[ASSOCIATIONS_TABLE].where(trace_id_b: trace_id).delete
    db[TRACES_TABLE].where(trace_id: trace_id).delete
  rescue StandardError => e
    log_warn("delete failed: #{e.message}")
  end

  # Partial update of a trace by trace_id. Evicts the hot-tier entry after the
  # DB write so a stale cached version cannot be served.
  # NOTE(review): unscoped like #delete — see note there.
  def update(trace_id, **fields)
    return unless db_ready?

    db[TRACES_TABLE].where(trace_id: trace_id).update(map_update_fields(fields))
    HotTier.evict_trace(trace_id, tenant_id: @tenant_id) if HotTier.available?
  rescue StandardError => e
    log_warn("update failed: #{e.message}")
  end

  # Create or increment a coactivation association between two traces.
  # Self-associations are ignored.
  # NOTE(review): only the (id_a, id_b) ordering is looked up, so recording
  # (b, a) after (a, b) inserts a second row instead of incrementing the
  # first — confirm callers normalize pair order.
  def record_coactivation(id_a, id_b)
    return unless db_ready?
    return if id_a == id_b

    now = Time.now.utc
    existing = db[ASSOCIATIONS_TABLE]
               .where(trace_id_a: id_a, trace_id_b: id_b)
               .first

    if existing
      db[ASSOCIATIONS_TABLE]
        .where(id: existing[:id])
        .update(
          coactivation_count: existing[:coactivation_count] + 1,
          updated_at: now
        )
    else
      db[ASSOCIATIONS_TABLE].insert(
        trace_id_a: id_a,
        trace_id_b: id_b,
        coactivation_count: 1,
        linked: false,
        tenant_id: @tenant_id,
        created_at: now,
        updated_at: now
      )
    end
  rescue StandardError => e
    log_warn("record_coactivation failed: #{e.message}")
  end

  # Return the de-duplicated set of trace IDs associated with a given trace,
  # looking at both sides of the association edge.
  def associations_for(trace_id)
    return [] unless db_ready?

    a_side = db[ASSOCIATIONS_TABLE]
             .where(trace_id_a: trace_id)
             .select_map(:trace_id_b)
    b_side = db[ASSOCIATIONS_TABLE]
             .where(trace_id_b: trace_id)
             .select_map(:trace_id_a)
    (a_side + b_side).uniq
  rescue StandardError => e
    log_warn("associations_for failed: #{e.message}")
    []
  end

  # Breadth-first traversal of the association graph starting from start_id.
  # Neighbors below min_strength are pruned; each node is visited once.
  # Issues one associations query plus one trace query per visited node
  # (acceptable for small hop counts).
  #
  # @return [Array<Hash>] { trace_id:, depth:, path: } entries, excluding the
  #   start node itself; [] if the start trace does not exist
  def walk_associations(start_id:, max_hops: 12, min_strength: 0.1)
    return [] unless db_ready?

    start_row = traces_ds.where(trace_id: start_id).first
    return [] unless start_row

    results = []
    visited = Set.new([start_id])
    queue = [[start_id, 0, [start_id]]]

    until queue.empty?
      current_id, depth, path = queue.shift
      neighbor_ids = associations_for(current_id)

      neighbor_ids.each do |nid|
        next if visited.include?(nid)

        neighbor_row = traces_ds
                       .where(trace_id: nid)
                       .where { strength >= min_strength }
                       .first
        next unless neighbor_row

        visited << nid
        neighbor_path = path + [nid]
        results << { trace_id: nid, depth: depth + 1, path: neighbor_path }
        queue << [nid, depth + 1, neighbor_path] if depth + 1 < max_hops
      end
    end

    results
  rescue StandardError => e
    log_warn("walk_associations failed: #{e.message}")
    []
  end

  # Delete the N traces with the lowest confidence for a given type
  # (quota enforcement). Delegates to #delete so hot-tier eviction applies.
  def delete_lowest_confidence(trace_type:, count:)
    return unless db_ready?

    ids = traces_ds
          .where(trace_type: trace_type.to_s)
          .order(:confidence)
          .limit(count)
          .select_map(:trace_id)

    ids.each { |tid| delete(tid) }
  rescue StandardError => e
    log_warn("delete_lowest_confidence failed: #{e.message}")
  end

  # Delete the N least-recently-used traces for a given type (quota
  # enforcement), ordered by last_reinforced ascending.
  def delete_least_recently_used(trace_type:, count:)
    return unless db_ready?

    ids = traces_ds
          .where(trace_type: trace_type.to_s)
          .order(:last_reinforced)
          .limit(count)
          .select_map(:trace_id)

    ids.each { |tid| delete(tid) }
  rescue StandardError => e
    log_warn("delete_least_recently_used failed: #{e.message}")
  end

  # Convenience: retrieve firmware-type traces.
  def firmware_traces
    retrieve_by_type(:firmware)
  end

  # No-op — this store is write-through; nothing to flush.
  def flush; end

  # Returns true when Legion::Data is connected and both required tables
  # exist. The connection is captured once so the two table checks cannot
  # race a reconnect (the previous version dereferenced Legion::Data.connection
  # twice), and the result is normalized to a strict boolean.
  def db_ready?
    return false unless defined?(Legion::Data) && Legion::Data.respond_to?(:connection)

    conn = Legion::Data.connection
    !!(conn&.table_exists?(TRACES_TABLE) && conn.table_exists?(ASSOCIATIONS_TABLE))
  rescue StandardError
    false
  end

  private

  def db
    Legion::Data.connection
  end

  # Dataset for memory_traces scoped by tenant_id (if set).
  def traces_ds
    ds = db[TRACES_TABLE]
    @tenant_id ? ds.where(tenant_id: @tenant_id) : ds
  end

  # Map a trace hash to a DB row. Hash payloads/arrays are JSON-encoded;
  # symbols become strings; timestamps default to now.
  # NOTE(review): :significance is written as a copy of :confidence — confirm
  # that duplication is intentional.
  def serialize_trace(trace)
    payload = trace[:content_payload] || trace[:content]
    tags = trace[:domain_tags]
    assocs = trace[:associated_traces]
    conf = trace[:confidence]
    ev = trace[:emotional_valence]

    {
      trace_id: trace[:trace_id],
      tenant_id: @tenant_id,
      trace_type: trace[:trace_type].to_s,
      content: payload.is_a?(Hash) ? Legion::JSON.dump(payload) : payload.to_s,
      significance: conf,
      confidence: conf,
      associations: assocs.is_a?(Array) ? Legion::JSON.dump(assocs) : '[]',
      domain_tags: tags.is_a?(Array) ? Legion::JSON.dump(tags) : nil,
      strength: trace[:strength],
      peak_strength: trace[:peak_strength],
      base_decay_rate: trace[:base_decay_rate],
      emotional_valence: ev.is_a?(Numeric) ? ev.to_f : 0.0,
      emotional_intensity: trace[:emotional_intensity],
      origin: trace[:origin].to_s,
      source_agent_id: trace[:source_agent_id],
      storage_tier: trace[:storage_tier].to_s,
      last_reinforced: trace[:last_reinforced],
      last_decayed: trace[:last_decayed],
      reinforcement_count: trace[:reinforcement_count],
      unresolved: trace[:unresolved] || false,
      consolidation_candidate: trace[:consolidation_candidate] || false,
      parent_trace_id: trace[:parent_trace_id],
      encryption_key_id: trace[:encryption_key_id],
      partition_id: trace[:partition_id],
      created_at: trace[:created_at] || Time.now.utc,
      accessed_at: Time.now.utc
    }
  end

  # Map a DB row back to a trace hash: JSON columns are parsed, enum-ish
  # strings become symbols, and :content/:content_payload carry the same
  # value. child_trace_ids is not persisted and always comes back empty.
  def deserialize_trace(row)
    content = parse_json_or_raw(row[:content])
    {
      trace_id: row[:trace_id],
      trace_type: row[:trace_type]&.to_sym,
      content_payload: content,
      content: content,
      strength: row[:strength],
      peak_strength: row[:peak_strength],
      base_decay_rate: row[:base_decay_rate],
      emotional_valence: row[:emotional_valence].to_f,
      emotional_intensity: row[:emotional_intensity],
      domain_tags: parse_json_array(row[:domain_tags]),
      origin: row[:origin]&.to_sym,
      source_agent_id: row[:source_agent_id],
      created_at: row[:created_at],
      last_reinforced: row[:last_reinforced],
      last_decayed: row[:last_decayed],
      reinforcement_count: row[:reinforcement_count],
      confidence: row[:confidence],
      storage_tier: row[:storage_tier]&.to_sym,
      partition_id: row[:partition_id],
      encryption_key_id: row[:encryption_key_id],
      associated_traces: parse_json_array(row[:associations]),
      parent_trace_id: row[:parent_trace_id],
      child_trace_ids: [],
      unresolved: row[:unresolved] || false,
      consolidation_candidate: row[:consolidation_candidate] || false
    }
  end

  # Map keyword fields for partial updates, translating trace-hash keys to DB
  # column names and applying the same encoding rules as serialize_trace.
  # child_trace_ids has no column and is dropped.
  def map_update_fields(fields)
    mapping = {
      content_payload: :content,
      associated_traces: :associations,
      parent_trace_id: :parent_trace_id,
      child_trace_ids: nil # not stored as a column
    }

    fields.each_with_object({}) do |(k, v), row|
      col = mapping.key?(k) ? mapping[k] : k
      next if col.nil?

      row[col] = case col
                 when :content
                   v.is_a?(Hash) ? Legion::JSON.dump(v) : v.to_s
                 when :associations
                   v.is_a?(Array) ? Legion::JSON.dump(v) : '[]'
                 when :domain_tags
                   v.is_a?(Array) ? Legion::JSON.dump(v) : nil
                 when :trace_type, :origin, :storage_tier
                   v.to_s
                 else
                   v
                 end
    end
  end

  # Parse a JSON string into a Hash; any non-Hash or unparseable input is
  # returned as-is (plain-text payloads are legal).
  def parse_json_or_raw(raw)
    return raw unless raw.is_a?(String)

    parsed = Legion::JSON.load(raw)
    parsed.is_a?(Hash) ? parsed : raw
  rescue StandardError
    raw
  end

  # Parse a JSON string into an Array; anything else yields [].
  def parse_json_array(raw)
    return [] unless raw.is_a?(String)

    result = Legion::JSON.load(raw)
    result.is_a?(Array) ? result : []
  rescue StandardError
    []
  end

  def log_warn(message)
    Legion::Logging.warn "[memory:postgres_store] #{message}" if defined?(Legion::Logging)
  end
end
389
+ end
390
+ end
391
+ end
392
+ end
393
+ end
394
+ end
@@ -5,6 +5,7 @@ require 'legion/extensions/agentic/memory/trace/helpers/trace'
5
5
  require 'legion/extensions/agentic/memory/trace/helpers/decay'
6
6
  require 'legion/extensions/agentic/memory/trace/helpers/store'
7
7
  require 'legion/extensions/agentic/memory/trace/helpers/cache_store'
8
+ require 'legion/extensions/agentic/memory/trace/helpers/postgres_store'
8
9
  require 'legion/extensions/agentic/memory/trace/helpers/error_tracer'
9
10
  require 'legion/extensions/agentic/memory/trace/runners/traces'
10
11
  require 'legion/extensions/agentic/memory/trace/runners/consolidation'
@@ -31,7 +32,10 @@ module Legion
31
32
  private
32
33
 
33
34
  def create_store
34
- if defined?(Legion::Cache) && Legion::Cache.respond_to?(:connected?) && Legion::Cache.connected?
35
+ if postgres_available?
36
+ Legion::Logging.debug '[memory] Using shared PostgresStore (write-through)'
37
+ Helpers::PostgresStore.new(tenant_id: resolve_tenant_id)
38
+ elsif defined?(Legion::Cache) && Legion::Cache.respond_to?(:connected?) && Legion::Cache.connected?
35
39
  Legion::Logging.debug '[memory] Using shared CacheStore (memcached)'
36
40
  Helpers::CacheStore.new
37
41
  else
@@ -39,6 +43,23 @@ module Legion
39
43
  Helpers::Store.new
40
44
  end
41
45
  end
46
+
47
+ def postgres_available?
48
+ defined?(Legion::Data) &&
49
+ Legion::Data.respond_to?(:connection) &&
50
+ Legion::Data.connection &&
51
+ %i[postgres mysql2].include?(Legion::Data.connection.adapter_scheme) &&
52
+ Legion::Data.connection.table_exists?(:memory_traces) &&
53
+ Legion::Data.connection.table_exists?(:memory_associations)
54
+ rescue StandardError
55
+ false
56
+ end
57
+
58
+ def resolve_tenant_id
59
+ Legion::Settings[:data]&.dig(:tenant_id)
60
+ rescue StandardError
61
+ nil
62
+ end
42
63
  end
43
64
  end
44
65
  end
@@ -4,7 +4,7 @@ module Legion
4
4
  module Extensions
5
5
  module Agentic
6
6
  module Memory
7
- VERSION = '0.1.9'
7
+ VERSION = '0.1.12'
8
8
  end
9
9
  end
10
10
  end
@@ -99,7 +99,9 @@ RSpec.describe Legion::Extensions::Agentic::Memory::Hologram::Helpers::Hologram
99
99
 
100
100
  describe '#reconstruct' do
101
101
  context 'with sufficient fragments' do
102
- let(:fragments) { hologram.fragment!(4) }
102
+ # Force completeness above RECONSTRUCTION_THRESHOLD (0.3) so the context
103
+ # is deterministic — fragment!(4) uses rand and can produce all-insufficient sets.
104
+ let(:fragments) { hologram.fragment!(4).each { |f| f.completeness = 1.0 } }
103
105
 
104
106
  it 'returns success: true' do
105
107
  expect(hologram.reconstruct(fragments)[:success]).to be true