recursive-llm-ts 4.8.0 → 5.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -3
- package/bin/rlm-go +0 -0
- package/dist/bridge-interface.d.ts +152 -0
- package/dist/rlm.js +10 -0
- package/go/README.md +2 -2
- package/go/cmd/rlm/main.go +40 -7
- package/go/go.mod +14 -4
- package/go/go.sum +53 -2
- package/go/rlm/compression.go +59 -0
- package/go/rlm/context_overflow.go +49 -43
- package/go/rlm/context_savings_test.go +387 -0
- package/go/rlm/doc.go +2 -2
- package/go/rlm/json_extraction.go +140 -0
- package/go/rlm/lcm_agentic_map.go +317 -0
- package/go/rlm/lcm_context_loop.go +309 -0
- package/go/rlm/lcm_delegation.go +257 -0
- package/go/rlm/lcm_episodes.go +313 -0
- package/go/rlm/lcm_episodes_test.go +384 -0
- package/go/rlm/lcm_files.go +424 -0
- package/go/rlm/lcm_map.go +348 -0
- package/go/rlm/lcm_store.go +615 -0
- package/go/rlm/lcm_summarizer.go +239 -0
- package/go/rlm/lcm_test.go +1407 -0
- package/go/rlm/meta_agent.go +18 -2
- package/go/rlm/observability.go +6 -0
- package/go/rlm/openai.go +27 -10
- package/go/rlm/rlm.go +135 -4
- package/go/rlm/store_backend.go +121 -0
- package/go/rlm/store_backend_test.go +428 -0
- package/go/rlm/store_sqlite.go +575 -0
- package/go/rlm/structured.go +15 -83
- package/go/rlm/token_tracking_test.go +859 -0
- package/go/rlm/tokenizer.go +216 -0
- package/go/rlm/tokenizer_test.go +305 -0
- package/go/rlm/types.go +30 -5
- package/go/rlm.test +0 -0
- package/package.json +4 -4
package/go/rlm/meta_agent.go
CHANGED
```diff
@@ -74,7 +74,15 @@ func (ma *MetaAgent) OptimizeQuery(query string, context string) (string, error)
 		return query, nil
 	}
 
-	optimized := strings.TrimSpace(result)
+	// Track meta-agent token usage in the parent RLM's stats
+	if result.Usage != nil {
+		ma.rlm.stats.PromptTokens += result.Usage.PromptTokens
+		ma.rlm.stats.CompletionTokens += result.Usage.CompletionTokens
+		ma.rlm.stats.TotalTokens += result.Usage.TotalTokens
+	}
+	ma.rlm.stats.LlmCalls++
+
+	optimized := strings.TrimSpace(result.Content)
 	ma.obs.Debug("meta_agent", "Optimized query: %s", truncateStr(optimized, 200))
 	ma.obs.Event("meta_agent.query_optimized", map[string]string{
 		"original_length": fmt.Sprintf("%d", len(query)),
@@ -136,7 +144,15 @@ func (ma *MetaAgent) OptimizeForStructured(query string, context string, schema
 		return query, nil
 	}
 
-	optimized := strings.TrimSpace(result)
+	// Track meta-agent token usage in the parent RLM's stats
+	if result.Usage != nil {
+		ma.rlm.stats.PromptTokens += result.Usage.PromptTokens
+		ma.rlm.stats.CompletionTokens += result.Usage.CompletionTokens
+		ma.rlm.stats.TotalTokens += result.Usage.TotalTokens
+	}
+	ma.rlm.stats.LlmCalls++
+
+	optimized := strings.TrimSpace(result.Content)
 	ma.obs.Debug("meta_agent", "Optimized structured query: %s", truncateStr(optimized, 200))
 	ma.obs.Event("meta_agent.structured_query_optimized", map[string]string{
 		"original_length": fmt.Sprintf("%d", len(query)),
```
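With this change, the optimization calls made by the meta-agent are folded into the parent RLM's token counters instead of being invisible. Below is a minimal sketch of reading the aggregated counters after a run; the import path, model name, and the `MetaAgentConfig` type name are assumptions, while the `RLMStats` fields come from this diff.

```go
package main

import (
	"fmt"

	"github.com/example/recursive-llm-ts/go/rlm" // assumed import path
)

func main() {
	// MetaAgentConfig is an assumed type name; Config.MetaAgent and its Enabled
	// flag are the only parts of it visible in this diff.
	r := rlm.New("gpt-4o-mini", rlm.Config{
		MetaAgent: &rlm.MetaAgentConfig{Enabled: true},
	})
	defer r.Shutdown()

	answer, stats, err := r.Completion("Summarize the findings", "...long context...")
	if err != nil {
		panic(err)
	}
	fmt.Println(answer)
	// Meta-agent optimization calls now contribute to the same counters as regular calls.
	fmt.Printf("llm_calls=%d prompt=%d completion=%d total=%d\n",
		stats.LlmCalls, stats.PromptTokens, stats.CompletionTokens, stats.TotalTokens)
}
```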
package/go/rlm/observability.go
CHANGED
```diff
@@ -445,6 +445,12 @@ func FormatStatsWithObservability(stats RLMStats, obs *Observer) map[string]inte
 		result["parsing_retries"] = stats.ParsingRetries
 	}
 
+	if stats.TotalTokens > 0 {
+		result["total_tokens"] = stats.TotalTokens
+		result["prompt_tokens"] = stats.PromptTokens
+		result["completion_tokens"] = stats.CompletionTokens
+	}
+
 	if obs != nil && obs.config.Debug {
 		events := obs.GetEvents()
 		if len(events) > 0 {
```
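`FormatStatsWithObservability` now surfaces the aggregated token counters, but only when at least one call reported usage, so the keys may be absent. A hedged sketch of a caller consuming the new map keys; the function signature is taken from the hunk header above, and the fragment assumes the `rlm` package is imported.

```go
// logTokenStats prints the token fields added in this diff. "total_tokens",
// "prompt_tokens" and "completion_tokens" are only present when a backend
// reported usage for at least one LLM call.
func logTokenStats(stats rlm.RLMStats, obs *rlm.Observer) {
	m := rlm.FormatStatsWithObservability(stats, obs)
	if total, ok := m["total_tokens"]; ok {
		log.Printf("tokens: total=%v prompt=%v completion=%v",
			total, m["prompt_tokens"], m["completion_tokens"])
		return
	}
	log.Print("backend reported no token usage")
}
```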
package/go/rlm/openai.go
CHANGED
```diff
@@ -34,6 +34,20 @@ type chatResponse struct {
 	Error *struct {
 		Message string `json:"message"`
 	} `json:"error"`
+	Usage *TokenUsage `json:"usage,omitempty"`
+}
+
+// TokenUsage represents token consumption from an LLM API response.
+type TokenUsage struct {
+	PromptTokens     int `json:"prompt_tokens"`
+	CompletionTokens int `json:"completion_tokens"`
+	TotalTokens      int `json:"total_tokens"`
+}
+
+// ChatCompletionResult holds the content and token usage from an LLM call.
+type ChatCompletionResult struct {
+	Content string
+	Usage   *TokenUsage
 }
 
 var (
@@ -48,7 +62,7 @@ var (
 	}
 )
 
-func CallChatCompletion(request ChatRequest) (string, error) {
+func CallChatCompletion(request ChatRequest) (ChatCompletionResult, error) {
 	endpoint := buildEndpoint(request.APIBase)
 	payload := map[string]interface{}{
 		"model": request.Model,
@@ -61,7 +75,7 @@ func CallChatCompletion(request ChatRequest) (string, error) {
 
 	body, err := json.Marshal(payload)
 	if err != nil {
-		return
+		return ChatCompletionResult{}, err
 	}
 
 	// Use shared client with connection pooling
@@ -76,7 +90,7 @@ func CallChatCompletion(request ChatRequest) (string, error) {
 
 	req, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewReader(body))
 	if err != nil {
-		return
+		return ChatCompletionResult{}, err
 	}
 	req.Header.Set("Content-Type", "application/json")
 	if request.APIKey != "" {
@@ -85,7 +99,7 @@ func CallChatCompletion(request ChatRequest) (string, error) {
 
 	resp, err := client.Do(req)
 	if err != nil {
-		return
+		return ChatCompletionResult{}, err
 	}
 	defer func() {
 		_ = resp.Body.Close()
@@ -93,27 +107,30 @@ func CallChatCompletion(request ChatRequest) (string, error) {
 
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
-		return
+		return ChatCompletionResult{}, err
 	}
 
 	if resp.StatusCode >= http.StatusBadRequest {
-		return
+		return ChatCompletionResult{}, NewAPIError(resp.StatusCode, strings.TrimSpace(string(responseBody)))
 	}
 
 	var parsed chatResponse
 	if err := json.Unmarshal(responseBody, &parsed); err != nil {
-		return
+		return ChatCompletionResult{}, err
 	}
 
 	if parsed.Error != nil && parsed.Error.Message != "" {
-		return
+		return ChatCompletionResult{}, errors.New(parsed.Error.Message)
	}
 
 	if len(parsed.Choices) == 0 {
-		return
+		return ChatCompletionResult{}, errors.New("no choices returned by LLM")
 	}
 
-	return
+	return ChatCompletionResult{
+		Content: parsed.Choices[0].Message.Content,
+		Usage:   parsed.Usage,
+	}, nil
 }
 
 func buildEndpoint(apiBase string) string {
```
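For code that calls `CallChatCompletion` directly, this is a breaking change: the bare string return becomes a `ChatCompletionResult`, with usage attached only when the backend reports it. A migration sketch follows; the `Messages` field on `ChatRequest` and the model value are assumptions (only `Model`, `APIBase`, and `APIKey` are visible in this diff), and the fragment assumes `os` and the `rlm` package are imported.

```go
// Before (4.x): content, err := rlm.CallChatCompletion(req) returned a string.
// After (5.0): the content moves to res.Content and usage rides along in res.Usage.
func askOnce(prompt string) (string, int, error) {
	res, err := rlm.CallChatCompletion(rlm.ChatRequest{
		Model:    "gpt-4o-mini",                                  // assumed model
		APIKey:   os.Getenv("OPENAI_API_KEY"),
		Messages: []rlm.Message{{Role: "user", Content: prompt}}, // Messages field assumed
	})
	if err != nil {
		return "", 0, err
	}
	tokens := 0
	if res.Usage != nil { // Usage is nil when the backend omits the usage block
		tokens = res.Usage.TotalTokens
	}
	return res.Content, tokens, nil
}
```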
package/go/rlm/rlm.go
CHANGED
```diff
@@ -21,6 +21,7 @@ type RLM struct {
 	observer        *Observer
 	metaAgent       *MetaAgent
 	contextOverflow *ContextOverflowConfig
+	lcmEngine       *LCMEngine // Lossless Context Management engine (optional)
 }
 
 func New(model string, config Config) *RLM {
@@ -37,6 +38,9 @@ func New(model string, config Config) *RLM {
 		obs = NewNoopObserver()
 	}
 
+	// Configure tokenizer for accurate token counting with this model
+	SetDefaultTokenizer(model)
+
 	r := &RLM{
 		model:          model,
 		recursiveModel: recursiveModel,
@@ -52,7 +56,6 @@ func New(model string, config Config) *RLM {
 		stats:    RLMStats{},
 		observer: obs,
 	}
-
 	// Setup meta-agent if enabled
 	if config.MetaAgent != nil && config.MetaAgent.Enabled {
 		r.metaAgent = NewMetaAgent(r, *config.MetaAgent, obs)
@@ -67,6 +70,19 @@ func New(model string, config Config) *RLM {
 		r.contextOverflow = &defaultConfig
 	}
 
+	// Setup LCM engine if enabled
+	if config.LCM != nil && config.LCM.Enabled {
+		store := NewLCMStore(fmt.Sprintf("session_%d", time.Now().UnixNano()))
+		summarizer := NewLCMSummarizer(model, config.APIBase, config.APIKey, config.TimeoutSeconds, config.ExtraParams, obs)
+		modelLimit := 0
+		if config.ContextOverflow != nil && config.ContextOverflow.MaxModelTokens > 0 {
+			modelLimit = config.ContextOverflow.MaxModelTokens
+		} else {
+			modelLimit = LookupModelTokenLimit(model)
+		}
+		r.lcmEngine = NewLCMEngine(*config.LCM, store, summarizer, obs, modelLimit)
+	}
+
 	return r
 }
 
@@ -100,6 +116,13 @@ func (r *RLM) Completion(query string, context string) (string, RLMStats, error)
 	r.stats.Depth = r.currentDepth
 	replEnv := r.buildREPLEnv(query, context)
 	systemPrompt := BuildSystemPrompt(len(context), r.currentDepth, query, r.useMetacognitive)
+
+	// ─── LCM-managed completion flow ────────────────────────────────────
+	if r.lcmEngine != nil && r.lcmEngine.IsEnabled() {
+		return r.completionWithLCM(query, systemPrompt, replEnv)
+	}
+
+	// ─── Legacy completion flow (no LCM) ────────────────────────────────
 	messages := []Message{
 		{Role: "system", Content: systemPrompt},
 		{Role: "user", Content: query},
@@ -175,6 +198,89 @@ func (r *RLM) Completion(query string, context string) (string, RLMStats, error)
 	return "", r.stats, NewMaxIterationsError(r.maxIterations)
 }
 
+// completionWithLCM runs the completion loop using the LCM engine for context management.
+// Messages flow through the LCM store: persisted verbatim in the immutable store,
+// active context assembled from recent messages + summary nodes, and compaction
+// triggered via the context control loop after each turn.
+func (r *RLM) completionWithLCM(query string, systemPrompt string, replEnv map[string]interface{}) (string, RLMStats, error) {
+	store := r.lcmEngine.GetStore()
+
+	// Persist system prompt and initial query in the immutable store
+	store.PersistMessage(RoleSystem, systemPrompt, nil)
+	store.PersistMessage(RoleUser, query, nil)
+
+	r.observer.Debug("rlm.lcm", "Starting LCM-managed completion, initial tokens: %d",
+		store.ActiveContextTokens())
+
+	for iteration := 0; iteration < r.maxIterations; iteration++ {
+		r.stats.Iterations = iteration + 1
+		r.observer.Debug("rlm.lcm", "Iteration %d/%d at depth %d (active tokens: %d)",
+			iteration+1, r.maxIterations, r.currentDepth, store.ActiveContextTokens())
+
+		// Run the LCM context control loop (may trigger async or blocking compaction)
+		if err := r.lcmEngine.OnNewItem(); err != nil {
+			r.observer.Error("rlm.lcm", "Context control loop error: %v", err)
+			// Non-fatal: continue with current context
+		}
+
+		// Build messages from the active context (includes summaries with IDs)
+		messages := store.BuildMessages()
+
+		response, err := r.callLLM(messages)
+		if err != nil {
+			// Check for context overflow — LCM should handle this via compaction,
+			// but fall back to blocking compaction if the API still rejects
+			if r.contextOverflow != nil && r.contextOverflow.Enabled {
+				if _, isOverflow := IsContextOverflow(err); isOverflow {
+					r.observer.Debug("rlm.lcm", "Context overflow despite LCM, forcing blocking compaction")
+					if compactErr := r.lcmEngine.blockingCompaction(); compactErr != nil {
+						r.observer.Error("rlm.lcm", "Emergency compaction failed: %v", compactErr)
+						return "", r.stats, err
+					}
+					// Also try condensing old summaries to free more space
+					_ = r.lcmEngine.CondenseOldSummaries()
+					iteration-- // Retry
+					continue
+				}
+			}
+			r.observer.Error("rlm.lcm", "LLM call failed on iteration %d: %v", iteration+1, err)
+			return "", r.stats, err
+		}
+
+		// Persist assistant response in the immutable store
+		store.PersistMessage(RoleAssistant, response, nil)
+
+		if IsFinal(response) {
+			answer, ok := ParseResponse(response, replEnv)
+			if ok {
+				r.observer.Debug("rlm.lcm", "FINAL answer on iteration %d (store: %d msgs, %d summaries)",
+					iteration+1, store.MessageCount(), store.Stats().TotalSummaries)
+				r.observer.Event("rlm.lcm.completion_success", map[string]string{
+					"iterations":        fmt.Sprintf("%d", iteration+1),
+					"llm_calls":         fmt.Sprintf("%d", r.stats.LlmCalls),
+					"total_messages":    fmt.Sprintf("%d", store.MessageCount()),
+					"total_summaries":   fmt.Sprintf("%d", store.Stats().TotalSummaries),
+					"compression_ratio": fmt.Sprintf("%.2f", store.Stats().CompressionRatio),
+				})
+				return answer, r.stats, nil
+			}
+		}
+
+		execResult, err := r.repl.Execute(response, replEnv)
+		if err != nil {
+			r.observer.Debug("rlm.lcm", "REPL execution error: %v", err)
+			execResult = fmt.Sprintf("Error: %s", err.Error())
+		} else {
+			r.observer.Debug("rlm.lcm", "REPL output: %s", truncateStr(execResult, 200))
+		}
+
+		// Persist REPL result as user message in the immutable store
+		store.PersistMessage(RoleUser, execResult, nil)
+	}
+
+	return "", r.stats, NewMaxIterationsError(r.maxIterations)
+}
+
 func (r *RLM) callLLM(messages []Message) (string, error) {
 	r.stats.LlmCalls++
 	defaultModel := r.model
@@ -198,14 +304,22 @@ func (r *RLM) callLLM(messages []Message) (string, error) {
 	result, err := CallChatCompletion(request)
 	duration := time.Since(start)
 
-
+	tokensUsed := 0
+	if result.Usage != nil {
+		r.stats.PromptTokens += result.Usage.PromptTokens
+		r.stats.CompletionTokens += result.Usage.CompletionTokens
+		r.stats.TotalTokens += result.Usage.TotalTokens
+		tokensUsed = result.Usage.TotalTokens
+	}
+
+	r.observer.LLMCall(defaultModel, len(messages), tokensUsed, duration, err)
 
 	if err != nil {
 		return "", err
 	}
 
-	r.observer.Debug("llm", "Response received (%d chars) in %s", len(result), duration)
-	return result, nil
+	r.observer.Debug("llm", "Response received (%d chars, %d tokens) in %s", len(result.Content), tokensUsed, duration)
+	return result.Content, nil
 }
 
 func (r *RLM) buildREPLEnv(query string, context string) map[string]interface{} {
@@ -294,6 +408,23 @@ func (r *RLM) GetObserver() *Observer {
 	return r.observer
 }
 
+// GetLCMEngine returns the LCM engine if enabled, nil otherwise.
+func (r *RLM) GetLCMEngine() *LCMEngine {
+	return r.lcmEngine
+}
+
+// LLMMap executes an LLM-Map operation for parallel batch processing.
+func (r *RLM) LLMMap(config LLMMapConfig) (*LLMMapResult, error) {
+	mapper := NewLLMMapper(r.model, r.apiBase, r.apiKey, r.timeoutSeconds, r.extraParams, r.observer)
+	return mapper.Execute(config)
+}
+
+// AgenticMap executes an Agentic-Map operation with full sub-agent sessions per item.
+func (r *RLM) AgenticMap(config AgenticMapConfig) (*AgenticMapResult, error) {
+	mapper := NewAgenticMapper(r.model, r.apiBase, r.apiKey, r.timeoutSeconds, r.extraParams, r.observer)
+	return mapper.Execute(config)
+}
+
 // Shutdown gracefully shuts down the RLM engine and its observer.
 func (r *RLM) Shutdown() {
 	if r.observer != nil {
```
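The completion path now forks: when `config.LCM` is enabled, every message is persisted in the LCM store and the context control loop decides when to compact; otherwise the pre-5.0 loop runs unchanged. A configuration sketch follows; only the `LCM.Enabled` flag and the `ContextOverflow.MaxModelTokens` fallback are visible in this diff, so the `LCMConfig` type name, the import path, and the model value are assumptions.

```go
package main

import (
	"fmt"

	"github.com/example/recursive-llm-ts/go/rlm" // assumed import path
)

func main() {
	r := rlm.New("gpt-4o-mini", rlm.Config{
		// LCMConfig is an assumed type name; Enabled routes Completion through the LCM flow.
		LCM: &rlm.LCMConfig{Enabled: true},
		// Optional: MaxModelTokens overrides LookupModelTokenLimit(model) as the compaction budget.
		ContextOverflow: &rlm.ContextOverflowConfig{Enabled: true, MaxModelTokens: 128000},
	})
	defer r.Shutdown()

	answer, stats, err := r.Completion("Walk through the incident timeline", "...very long transcript...")
	if err != nil {
		panic(err)
	}
	fmt.Println(answer)
	fmt.Printf("iterations=%d total_tokens=%d\n", stats.Iterations, stats.TotalTokens)
}
```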
package/go/rlm/store_backend.go
ADDED
```diff
@@ -0,0 +1,121 @@
+package rlm
+
+import "io"
+
+// ─── Store Backend Interface ─────────────────────────────────────────────────
+// Abstracts the persistence layer for LCM's dual-state memory.
+// The in-memory implementation (LCMStore) remains the default.
+// A SQLite implementation provides crash recovery, indexed full-text search,
+// and transactional writes as described in the LCM paper (Section 2.1).
+
+// StoreBackend defines the persistence operations for the LCM store.
+// Implementations must be safe for concurrent use.
+type StoreBackend interface {
+	// ─── Message Operations ──────────────────────────────────────────
+	// PersistMessage stores a message in the immutable store.
+	PersistMessage(msg *StoreMessage) error
+	// GetMessage retrieves a message by ID.
+	GetMessage(id string) (*StoreMessage, error)
+	// GetAllMessages returns all messages in chronological order.
+	GetAllMessages() ([]*StoreMessage, error)
+	// MessageCount returns the total number of persisted messages.
+	MessageCount() (int, error)
+
+	// ─── Summary Operations ─────────────────────────────────────────
+	// PersistSummary stores a summary node in the DAG.
+	PersistSummary(node *SummaryNode) error
+	// GetSummary retrieves a summary by ID.
+	GetSummary(id string) (*SummaryNode, error)
+	// GetAllSummaries returns all summary nodes.
+	GetAllSummaries() ([]*SummaryNode, error)
+	// UpdateSummaryParent sets the parent ID on a summary (for condensation).
+	UpdateSummaryParent(summaryID, parentID string) error
+
+	// ─── Search ─────────────────────────────────────────────────────
+	// GrepMessages searches message content with a regex pattern.
+	// Returns matching messages with optional summary scope filtering.
+	GrepMessages(pattern string, summaryScope *string, maxResults int) ([]*StoreMessage, error)
+
+	// ─── Lifecycle ──────────────────────────────────────────────────
+	io.Closer
+}
+
+// ─── In-Memory Backend ───────────────────────────────────────────────────────
+// MemoryBackend wraps the existing in-memory maps as a StoreBackend.
+// This is the default backend and requires no external dependencies.
+
+type MemoryBackend struct {
+	messages   map[string]*StoreMessage
+	messageSeq []*StoreMessage
+	summaries  map[string]*SummaryNode
+}
+
+// NewMemoryBackend creates a new in-memory backend.
+func NewMemoryBackend() *MemoryBackend {
+	return &MemoryBackend{
+		messages:   make(map[string]*StoreMessage),
+		messageSeq: make([]*StoreMessage, 0),
+		summaries:  make(map[string]*SummaryNode),
+	}
+}
+
+func (m *MemoryBackend) PersistMessage(msg *StoreMessage) error {
+	m.messages[msg.ID] = msg
+	m.messageSeq = append(m.messageSeq, msg)
+	return nil
+}
+
+func (m *MemoryBackend) GetMessage(id string) (*StoreMessage, error) {
+	msg, ok := m.messages[id]
+	if !ok {
+		return nil, nil
+	}
+	return msg, nil
+}
+
+func (m *MemoryBackend) GetAllMessages() ([]*StoreMessage, error) {
+	result := make([]*StoreMessage, len(m.messageSeq))
+	copy(result, m.messageSeq)
+	return result, nil
+}
+
+func (m *MemoryBackend) MessageCount() (int, error) {
+	return len(m.messageSeq), nil
+}
+
+func (m *MemoryBackend) PersistSummary(node *SummaryNode) error {
+	m.summaries[node.ID] = node
+	return nil
+}
+
+func (m *MemoryBackend) GetSummary(id string) (*SummaryNode, error) {
+	sum, ok := m.summaries[id]
+	if !ok {
+		return nil, nil
+	}
+	return sum, nil
+}
+
+func (m *MemoryBackend) GetAllSummaries() ([]*SummaryNode, error) {
+	result := make([]*SummaryNode, 0, len(m.summaries))
+	for _, s := range m.summaries {
+		result = append(result, s)
+	}
+	return result, nil
+}
+
+func (m *MemoryBackend) UpdateSummaryParent(summaryID, parentID string) error {
+	if sum, ok := m.summaries[summaryID]; ok {
+		sum.ParentID = parentID
+	}
+	return nil
+}
+
+func (m *MemoryBackend) GrepMessages(pattern string, summaryScope *string, maxResults int) ([]*StoreMessage, error) {
+	// For in-memory, just return all messages (filtering happens in LCMStore.Grep)
+	return m.messageSeq, nil
+}
+
+func (m *MemoryBackend) Close() error {
+	return nil
+}
```
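StoreBackend is deliberately small, so custom persistence layers can be built by embedding an existing backend and overriding only the methods they care about; how a non-default backend is wired into LCMStore is not shown in this diff. Below is a hypothetical decorator that counts writes while delegating to the in-memory backend, using only the methods and constructor defined in this file, and assuming the `rlm` package is imported from outside.

```go
// countingBackend is illustrative only: embedding rlm.StoreBackend satisfies
// the full interface, and the two overrides count writes before delegating.
type countingBackend struct {
	rlm.StoreBackend
	writes int
}

func (c *countingBackend) PersistMessage(msg *rlm.StoreMessage) error {
	c.writes++
	return c.StoreBackend.PersistMessage(msg)
}

func (c *countingBackend) PersistSummary(node *rlm.SummaryNode) error {
	c.writes++
	return c.StoreBackend.PersistSummary(node)
}

func newCountingBackend() *countingBackend {
	return &countingBackend{StoreBackend: rlm.NewMemoryBackend()}
}

// Compile-time checks that both backends satisfy the interface.
var (
	_ rlm.StoreBackend = (*rlm.MemoryBackend)(nil)
	_ rlm.StoreBackend = (*countingBackend)(nil)
)
```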