@elizaos/plugin-memory 1.0.5 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +339 -207
- package/dist/browser/index.browser.js +348 -151
- package/dist/browser/index.browser.js.map +23 -13
- package/dist/cjs/index.node.cjs +2200 -1047
- package/dist/cjs/index.node.js.map +23 -13
- package/dist/evaluators/consolidation.d.ts +19 -0
- package/dist/evaluators/summarization.d.ts +5 -24
- package/dist/index.d.ts +152 -30
- package/dist/node/index.node.js +2242 -1084
- package/dist/node/index.node.js.map +23 -13
- package/dist/prompts/consolidation.d.ts +35 -0
- package/dist/prompts/summarization.d.ts +25 -0
- package/dist/providers/action-results.d.ts +2 -0
- package/dist/providers/long-term-memory.d.ts +18 -11
- package/dist/providers/recent-conversation-summary.d.ts +2 -0
- package/dist/repositories/conversation-summary.d.ts +33 -0
- package/dist/repositories/index.d.ts +17 -0
- package/dist/repositories/long-term-memory.d.ts +53 -0
- package/dist/schemas/conversation-summaries.d.ts +494 -0
- package/dist/schemas/index.d.ts +16 -6
- package/dist/schemas/long-term-memories.d.ts +308 -70
- package/dist/services/memory-service.d.ts +95 -51
- package/dist/types/index.d.ts +299 -55
- package/dist/utils/db-mapping.d.ts +20 -0
- package/dist/utils/decay-scoring.d.ts +41 -0
- package/dist/utils/embedding.d.ts +21 -0
- package/dist/utils/formatting.d.ts +17 -0
- package/dist/utils/index.d.ts +17 -0
- package/dist/utils/search-merging.d.ts +18 -0
- package/dist/utils/token-counter.d.ts +53 -0
- package/package.json +83 -1
- package/dist/actions/remember.d.ts +0 -11
- package/dist/evaluators/long-term-extraction.d.ts +0 -8
- package/dist/providers/short-term-memory.d.ts +0 -19
- package/dist/schemas/memory-access-logs.d.ts +0 -154
- package/dist/schemas/session-summaries.d.ts +0 -283
package/dist/node/index.node.js
CHANGED
|
@@ -12,16 +12,46 @@ var __export = (target, all) => {
|
|
|
12
12
|
// src/services/memory-service.ts
|
|
13
13
|
import {
|
|
14
14
|
Service,
|
|
15
|
-
logger
|
|
15
|
+
logger as logger4,
|
|
16
|
+
ModelType as ModelType2,
|
|
17
|
+
parseKeyValueXml,
|
|
18
|
+
parseBooleanFromText
|
|
16
19
|
} from "@elizaos/core";
|
|
17
|
-
import {
|
|
20
|
+
import { BM25 } from "@elizaos/core";
|
|
21
|
+
|
|
22
|
+
// src/types/index.ts
|
|
23
|
+
var MEMORY_DIMENSION_MAP = {
|
|
24
|
+
384: "dim384",
|
|
25
|
+
512: "dim512",
|
|
26
|
+
768: "dim768",
|
|
27
|
+
1024: "dim1024",
|
|
28
|
+
1536: "dim1536",
|
|
29
|
+
3072: "dim3072"
|
|
30
|
+
};
|
|
31
|
+
var MemoryType;
|
|
32
|
+
((MemoryType2) => {
|
|
33
|
+
MemoryType2["EPISODIC"] = "EPISODIC";
|
|
34
|
+
MemoryType2["SEMANTIC"] = "SEMANTIC";
|
|
35
|
+
MemoryType2["PROCEDURAL"] = "PROCEDURAL";
|
|
36
|
+
})(MemoryType ||= {});
|
|
37
|
+
var DecayFunction;
|
|
38
|
+
((DecayFunction2) => {
|
|
39
|
+
DecayFunction2["EXPONENTIAL"] = "EXPONENTIAL";
|
|
40
|
+
DecayFunction2["LINEAR"] = "LINEAR";
|
|
41
|
+
DecayFunction2["NONE"] = "NONE";
|
|
42
|
+
})(DecayFunction ||= {});
|
|
43
|
+
|
|
44
|
+
// src/repositories/long-term-memory.ts
|
|
45
|
+
import { logger } from "@elizaos/core";
|
|
46
|
+
import { eq, and, desc, sql as sql3, cosineDistance, gte, or, isNull } from "drizzle-orm";
|
|
18
47
|
|
|
19
48
|
// src/schemas/index.ts
|
|
20
49
|
var exports_schemas = {};
|
|
21
50
|
__export(exports_schemas, {
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
51
|
+
longTermMemoryEmbeddings: () => longTermMemoryEmbeddings,
|
|
52
|
+
longTermMemories: () => longTermMemories,
|
|
53
|
+
conversationSummaryEmbeddings: () => conversationSummaryEmbeddings,
|
|
54
|
+
conversationSummaries: () => conversationSummaries
|
|
25
55
|
});
|
|
26
56
|
|
|
27
57
|
// src/schemas/long-term-memories.ts
|
|
@@ -34,1257 +64,2385 @@ import {
|
|
|
34
64
|
real,
|
|
35
65
|
index,
|
|
36
66
|
varchar,
|
|
37
|
-
timestamp
|
|
67
|
+
timestamp,
|
|
68
|
+
boolean,
|
|
69
|
+
vector,
|
|
70
|
+
foreignKey
|
|
38
71
|
} from "drizzle-orm/pg-core";
|
|
72
|
+
import { VECTOR_DIMS } from "@elizaos/core";
|
|
39
73
|
var longTermMemories = pgTable("long_term_memories", {
|
|
40
74
|
id: varchar("id", { length: 36 }).primaryKey(),
|
|
41
75
|
agentId: varchar("agent_id", { length: 36 }).notNull(),
|
|
42
76
|
entityId: varchar("entity_id", { length: 36 }).notNull(),
|
|
43
|
-
|
|
77
|
+
roomId: varchar("room_id", { length: 36 }),
|
|
78
|
+
type: text("type").notNull(),
|
|
44
79
|
content: text("content").notNull(),
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
80
|
+
embeddingContext: text("embedding_context").notNull(),
|
|
81
|
+
confidence: real("confidence").notNull().default(1),
|
|
82
|
+
decayRate: real("decay_rate").notNull().default(0.01),
|
|
83
|
+
decayFunction: text("decay_function").notNull().default("EXPONENTIAL"),
|
|
49
84
|
createdAt: timestamp("created_at").default(sql`now()`).notNull(),
|
|
50
|
-
updatedAt: timestamp("updated_at").default(sql`now()`).notNull(),
|
|
51
85
|
lastAccessedAt: timestamp("last_accessed_at"),
|
|
52
|
-
accessCount: integer("access_count").default(0)
|
|
86
|
+
accessCount: integer("access_count").default(0).notNull(),
|
|
87
|
+
isActive: boolean("is_active").default(true).notNull(),
|
|
88
|
+
source: jsonb("source").notNull().default({}),
|
|
89
|
+
metadata: jsonb("metadata").notNull().default({}),
|
|
90
|
+
supersedesId: varchar("supersedes_id", { length: 36 })
|
|
53
91
|
}, (table) => ({
|
|
54
|
-
agentEntityIdx: index("
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
92
|
+
agentEntityIdx: index("ltm_agent_entity_idx").on(table.agentId, table.entityId),
|
|
93
|
+
typeIdx: index("ltm_type_idx").on(table.type),
|
|
94
|
+
roomIdx: index("ltm_room_idx").on(table.roomId),
|
|
95
|
+
activeIdx: index("ltm_active_idx").on(table.isActive),
|
|
96
|
+
confidenceIdx: index("ltm_confidence_idx").on(table.confidence),
|
|
97
|
+
createdAtIdx: index("ltm_created_at_idx").on(table.createdAt),
|
|
98
|
+
lastAccessedIdx: index("ltm_last_accessed_idx").on(table.lastAccessedAt),
|
|
99
|
+
agentEntityActiveConfidenceIdx: index("ltm_agent_entity_active_conf_idx").on(table.agentId, table.entityId, table.isActive, table.confidence)
|
|
58
100
|
}));
|
|
59
|
-
|
|
101
|
+
var longTermMemoryEmbeddings = pgTable("long_term_memory_embeddings", {
|
|
102
|
+
id: varchar("id", { length: 36 }).primaryKey(),
|
|
103
|
+
memoryId: varchar("memory_id", { length: 36 }).notNull().references(() => longTermMemories.id, { onDelete: "cascade" }),
|
|
104
|
+
dim384: vector("dim_384", { dimensions: VECTOR_DIMS.SMALL }),
|
|
105
|
+
dim512: vector("dim_512", { dimensions: VECTOR_DIMS.MEDIUM }),
|
|
106
|
+
dim768: vector("dim_768", { dimensions: VECTOR_DIMS.LARGE }),
|
|
107
|
+
dim1024: vector("dim_1024", { dimensions: VECTOR_DIMS.XL }),
|
|
108
|
+
dim1536: vector("dim_1536", { dimensions: VECTOR_DIMS.XXL }),
|
|
109
|
+
dim3072: vector("dim_3072", { dimensions: VECTOR_DIMS.XXXL }),
|
|
110
|
+
createdAt: timestamp("created_at").default(sql`now()`).notNull()
|
|
111
|
+
}, (table) => [
|
|
112
|
+
index("idx_ltm_embedding_memory_id").on(table.memoryId),
|
|
113
|
+
foreignKey({
|
|
114
|
+
name: "fk_ltm_embedding_memory",
|
|
115
|
+
columns: [table.memoryId],
|
|
116
|
+
foreignColumns: [longTermMemories.id]
|
|
117
|
+
}).onDelete("cascade")
|
|
118
|
+
]);
|
|
119
|
+
// src/schemas/conversation-summaries.ts
|
|
60
120
|
import { sql as sql2 } from "drizzle-orm";
|
|
61
121
|
import {
|
|
62
122
|
pgTable as pgTable2,
|
|
63
123
|
text as text2,
|
|
64
124
|
integer as integer2,
|
|
65
125
|
jsonb as jsonb2,
|
|
66
|
-
real as real2,
|
|
67
126
|
index as index2,
|
|
68
127
|
varchar as varchar2,
|
|
69
|
-
timestamp as timestamp2
|
|
128
|
+
timestamp as timestamp2,
|
|
129
|
+
vector as vector2,
|
|
130
|
+
foreignKey as foreignKey2
|
|
70
131
|
} from "drizzle-orm/pg-core";
|
|
71
|
-
|
|
132
|
+
import { VECTOR_DIMS as VECTOR_DIMS2 } from "@elizaos/core";
|
|
133
|
+
var conversationSummaries = pgTable2("conversation_summaries", {
|
|
72
134
|
id: varchar2("id", { length: 36 }).primaryKey(),
|
|
73
135
|
agentId: varchar2("agent_id", { length: 36 }).notNull(),
|
|
136
|
+
entityId: varchar2("entity_id", { length: 36 }).notNull(),
|
|
74
137
|
roomId: varchar2("room_id", { length: 36 }).notNull(),
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
138
|
+
level: integer2("level").notNull().default(1),
|
|
139
|
+
parentSummaryId: varchar2("parent_summary_id", { length: 36 }),
|
|
140
|
+
content: text2("content").notNull(),
|
|
141
|
+
tokenCount: integer2("token_count").notNull(),
|
|
79
142
|
startTime: timestamp2("start_time").notNull(),
|
|
80
143
|
endTime: timestamp2("end_time").notNull(),
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
embedding: real2("embedding").array(),
|
|
144
|
+
sourceCount: integer2("source_count").notNull(),
|
|
145
|
+
sourceIds: jsonb2("source_ids").notNull().default([]),
|
|
84
146
|
createdAt: timestamp2("created_at").default(sql2`now()`).notNull(),
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
entityIdx: index2("session_summaries_entity_idx").on(table.entityId),
|
|
89
|
-
startTimeIdx: index2("session_summaries_start_time_idx").on(table.startTime)
|
|
90
|
-
}));
|
|
91
|
-
// src/schemas/memory-access-logs.ts
|
|
92
|
-
import { sql as sql3 } from "drizzle-orm";
|
|
93
|
-
import { pgTable as pgTable3, text as text3, integer as integer3, real as real3, index as index3, varchar as varchar3, timestamp as timestamp3 } from "drizzle-orm/pg-core";
|
|
94
|
-
var memoryAccessLogs = pgTable3("memory_access_logs", {
|
|
95
|
-
id: varchar3("id", { length: 36 }).primaryKey(),
|
|
96
|
-
agentId: varchar3("agent_id", { length: 36 }).notNull(),
|
|
97
|
-
memoryId: varchar3("memory_id", { length: 36 }).notNull(),
|
|
98
|
-
memoryType: text3("memory_type").notNull(),
|
|
99
|
-
accessedAt: timestamp3("accessed_at").default(sql3`now()`).notNull(),
|
|
100
|
-
roomId: varchar3("room_id", { length: 36 }),
|
|
101
|
-
relevanceScore: real3("relevance_score"),
|
|
102
|
-
wasUseful: integer3("was_useful")
|
|
147
|
+
lastAccessedAt: timestamp2("last_accessed_at"),
|
|
148
|
+
accessCount: integer2("access_count").default(0).notNull(),
|
|
149
|
+
metadata: jsonb2("metadata").notNull().default({})
|
|
103
150
|
}, (table) => ({
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
151
|
+
agentEntityRoomIdx: index2("cs_agent_entity_room_idx").on(table.agentId, table.entityId, table.roomId),
|
|
152
|
+
levelIdx: index2("cs_level_idx").on(table.level),
|
|
153
|
+
parentIdx: index2("cs_parent_idx").on(table.parentSummaryId),
|
|
154
|
+
timeRangeIdx: index2("cs_time_range_idx").on(table.startTime, table.endTime),
|
|
155
|
+
createdAtIdx: index2("cs_created_at_idx").on(table.createdAt),
|
|
156
|
+
lastAccessedIdx: index2("cs_last_accessed_idx").on(table.lastAccessedAt),
|
|
157
|
+
entityRoomLevelTimeIdx: index2("cs_entity_room_level_time_idx").on(table.entityId, table.roomId, table.level, table.createdAt)
|
|
107
158
|
}));
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
159
|
+
var conversationSummaryEmbeddings = pgTable2("conversation_summary_embeddings", {
|
|
160
|
+
id: varchar2("id", { length: 36 }).primaryKey(),
|
|
161
|
+
summaryId: varchar2("summary_id", { length: 36 }).notNull().references(() => conversationSummaries.id, { onDelete: "cascade" }),
|
|
162
|
+
dim384: vector2("dim_384", { dimensions: VECTOR_DIMS2.SMALL }),
|
|
163
|
+
dim512: vector2("dim_512", { dimensions: VECTOR_DIMS2.MEDIUM }),
|
|
164
|
+
dim768: vector2("dim_768", { dimensions: VECTOR_DIMS2.LARGE }),
|
|
165
|
+
dim1024: vector2("dim_1024", { dimensions: VECTOR_DIMS2.XL }),
|
|
166
|
+
dim1536: vector2("dim_1536", { dimensions: VECTOR_DIMS2.XXL }),
|
|
167
|
+
dim3072: vector2("dim_3072", { dimensions: VECTOR_DIMS2.XXXL }),
|
|
168
|
+
createdAt: timestamp2("created_at").default(sql2`now()`).notNull()
|
|
169
|
+
}, (table) => [
|
|
170
|
+
index2("idx_cs_embedding_summary_id").on(table.summaryId),
|
|
171
|
+
foreignKey2({
|
|
172
|
+
name: "fk_cs_embedding_summary",
|
|
173
|
+
columns: [table.summaryId],
|
|
174
|
+
foreignColumns: [conversationSummaries.id]
|
|
175
|
+
}).onDelete("cascade")
|
|
176
|
+
]);
|
|
177
|
+
// src/utils/db-mapping.ts
|
|
178
|
+
function mapDbRowToLongTermMemory(row) {
|
|
179
|
+
return {
|
|
180
|
+
id: row.id,
|
|
181
|
+
agentId: row.agentId,
|
|
182
|
+
entityId: row.entityId,
|
|
183
|
+
roomId: row.roomId,
|
|
184
|
+
type: row.type,
|
|
185
|
+
content: row.content,
|
|
186
|
+
embeddingContext: row.embeddingContext,
|
|
187
|
+
embedding: row.embedding,
|
|
188
|
+
confidence: row.confidence,
|
|
189
|
+
decayRate: row.decayRate,
|
|
190
|
+
decayFunction: row.decayFunction,
|
|
191
|
+
createdAt: row.createdAt,
|
|
192
|
+
lastAccessedAt: row.lastAccessedAt,
|
|
193
|
+
accessCount: row.accessCount,
|
|
194
|
+
isActive: row.isActive,
|
|
195
|
+
source: row.source,
|
|
196
|
+
metadata: row.metadata,
|
|
197
|
+
supersedesId: row.supersedesId
|
|
198
|
+
};
|
|
199
|
+
}
|
|
200
|
+
function mapDbRowToConversationSummary(row) {
|
|
201
|
+
return {
|
|
202
|
+
id: row.id,
|
|
203
|
+
agentId: row.agentId,
|
|
204
|
+
entityId: row.entityId,
|
|
205
|
+
roomId: row.roomId,
|
|
206
|
+
level: row.level,
|
|
207
|
+
parentSummaryId: row.parentSummaryId,
|
|
208
|
+
content: row.content,
|
|
209
|
+
embedding: row.embedding,
|
|
210
|
+
tokenCount: row.tokenCount,
|
|
211
|
+
startTime: row.startTime,
|
|
212
|
+
endTime: row.endTime,
|
|
213
|
+
sourceCount: row.sourceCount,
|
|
214
|
+
sourceIds: row.sourceIds,
|
|
215
|
+
createdAt: row.createdAt,
|
|
216
|
+
lastAccessedAt: row.lastAccessedAt,
|
|
217
|
+
accessCount: row.accessCount,
|
|
218
|
+
metadata: row.metadata || {}
|
|
219
|
+
};
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
// src/repositories/long-term-memory.ts
|
|
223
|
+
class LongTermMemoryRepository {
|
|
224
|
+
runtime;
|
|
225
|
+
embeddingDimension;
|
|
226
|
+
constructor(runtime, embeddingDimension) {
|
|
142
227
|
this.runtime = runtime;
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
const
|
|
148
|
-
if (retainRecent) {
|
|
149
|
-
this.memoryConfig.shortTermRetainRecent = parseInt(retainRecent, 10);
|
|
150
|
-
}
|
|
151
|
-
const summarizationInterval = runtime.getSetting("MEMORY_SUMMARIZATION_INTERVAL");
|
|
152
|
-
if (summarizationInterval) {
|
|
153
|
-
this.memoryConfig.shortTermSummarizationInterval = parseInt(summarizationInterval, 10);
|
|
154
|
-
}
|
|
155
|
-
const maxNewMessages = runtime.getSetting("MEMORY_MAX_NEW_MESSAGES");
|
|
156
|
-
if (maxNewMessages) {
|
|
157
|
-
this.memoryConfig.summaryMaxNewMessages = parseInt(maxNewMessages, 10);
|
|
158
|
-
}
|
|
159
|
-
const longTermEnabled = runtime.getSetting("MEMORY_LONG_TERM_ENABLED");
|
|
160
|
-
if (longTermEnabled === "false") {
|
|
161
|
-
this.memoryConfig.longTermExtractionEnabled = false;
|
|
162
|
-
} else if (longTermEnabled === "true") {
|
|
163
|
-
this.memoryConfig.longTermExtractionEnabled = true;
|
|
164
|
-
}
|
|
165
|
-
const confidenceThreshold = runtime.getSetting("MEMORY_CONFIDENCE_THRESHOLD");
|
|
166
|
-
if (confidenceThreshold) {
|
|
167
|
-
this.memoryConfig.longTermConfidenceThreshold = parseFloat(confidenceThreshold);
|
|
168
|
-
}
|
|
169
|
-
const extractionThreshold = runtime.getSetting("MEMORY_EXTRACTION_THRESHOLD");
|
|
170
|
-
if (extractionThreshold) {
|
|
171
|
-
this.memoryConfig.longTermExtractionThreshold = parseInt(extractionThreshold, 10);
|
|
172
|
-
}
|
|
173
|
-
const extractionInterval = runtime.getSetting("MEMORY_EXTRACTION_INTERVAL");
|
|
174
|
-
if (extractionInterval) {
|
|
175
|
-
this.memoryConfig.longTermExtractionInterval = parseInt(extractionInterval, 10);
|
|
176
|
-
}
|
|
177
|
-
logger.info({
|
|
178
|
-
summarizationThreshold: this.memoryConfig.shortTermSummarizationThreshold,
|
|
179
|
-
summarizationInterval: this.memoryConfig.shortTermSummarizationInterval,
|
|
180
|
-
maxNewMessages: this.memoryConfig.summaryMaxNewMessages,
|
|
181
|
-
retainRecent: this.memoryConfig.shortTermRetainRecent,
|
|
182
|
-
longTermEnabled: this.memoryConfig.longTermExtractionEnabled,
|
|
183
|
-
extractionThreshold: this.memoryConfig.longTermExtractionThreshold,
|
|
184
|
-
extractionInterval: this.memoryConfig.longTermExtractionInterval,
|
|
185
|
-
confidenceThreshold: this.memoryConfig.longTermConfidenceThreshold
|
|
186
|
-
}, "MemoryService initialized");
|
|
187
|
-
}
|
|
188
|
-
getDb() {
|
|
189
|
-
const db = this.runtime.db;
|
|
228
|
+
this.embeddingDimension = embeddingDimension;
|
|
229
|
+
}
|
|
230
|
+
async getDb() {
|
|
231
|
+
const adapter = this.runtime.adapter || this.runtime;
|
|
232
|
+
const db = adapter.db;
|
|
190
233
|
if (!db) {
|
|
191
234
|
throw new Error("Database not available");
|
|
192
235
|
}
|
|
193
|
-
return db;
|
|
194
|
-
}
|
|
195
|
-
getConfig() {
|
|
196
|
-
return { ...this.memoryConfig };
|
|
197
|
-
}
|
|
198
|
-
updateConfig(updates) {
|
|
199
|
-
this.memoryConfig = { ...this.memoryConfig, ...updates };
|
|
200
|
-
}
|
|
201
|
-
incrementMessageCount(roomId) {
|
|
202
|
-
const current = this.sessionMessageCounts.get(roomId) || 0;
|
|
203
|
-
const newCount = current + 1;
|
|
204
|
-
this.sessionMessageCounts.set(roomId, newCount);
|
|
205
|
-
return newCount;
|
|
206
|
-
}
|
|
207
|
-
resetMessageCount(roomId) {
|
|
208
|
-
this.sessionMessageCounts.set(roomId, 0);
|
|
209
|
-
}
|
|
210
|
-
async shouldSummarize(roomId) {
|
|
211
|
-
const count = await this.runtime.countMemories(roomId, false, "messages");
|
|
212
|
-
return count >= this.memoryConfig.shortTermSummarizationThreshold;
|
|
213
|
-
}
|
|
214
|
-
getExtractionKey(entityId, roomId) {
|
|
215
|
-
return `memory:extraction:${entityId}:${roomId}`;
|
|
216
|
-
}
|
|
217
|
-
async getLastExtractionCheckpoint(entityId, roomId) {
|
|
218
|
-
const key = this.getExtractionKey(entityId, roomId);
|
|
219
|
-
const cached = this.lastExtractionCheckpoints.get(key);
|
|
220
|
-
if (cached !== undefined) {
|
|
221
|
-
return cached;
|
|
222
|
-
}
|
|
223
|
-
try {
|
|
224
|
-
const checkpoint = await this.runtime.getCache(key);
|
|
225
|
-
const messageCount = checkpoint ?? 0;
|
|
226
|
-
this.lastExtractionCheckpoints.set(key, messageCount);
|
|
227
|
-
return messageCount;
|
|
228
|
-
} catch (error) {
|
|
229
|
-
logger.warn({ error }, "Failed to get extraction checkpoint from cache");
|
|
230
|
-
return 0;
|
|
231
|
-
}
|
|
232
|
-
}
|
|
233
|
-
async setLastExtractionCheckpoint(entityId, roomId, messageCount) {
|
|
234
|
-
const key = this.getExtractionKey(entityId, roomId);
|
|
235
|
-
this.lastExtractionCheckpoints.set(key, messageCount);
|
|
236
236
|
try {
|
|
237
|
-
await
|
|
238
|
-
|
|
237
|
+
const isReady = await adapter.isReady();
|
|
238
|
+
if (!isReady) {
|
|
239
|
+
logger.warn("[LongTermMemoryRepository] Database not ready, attempting reconnect...");
|
|
240
|
+
await new Promise((resolve) => setTimeout(resolve, 1000));
|
|
241
|
+
const stillNotReady = await adapter.isReady();
|
|
242
|
+
if (stillNotReady === false) {
|
|
243
|
+
throw new Error("Database connection lost and could not reconnect");
|
|
244
|
+
}
|
|
245
|
+
}
|
|
239
246
|
} catch (error) {
|
|
240
|
-
logger.error(
|
|
241
|
-
|
|
242
|
-
}
|
|
243
|
-
async shouldRunExtraction(entityId, roomId, currentMessageCount) {
|
|
244
|
-
const threshold = this.memoryConfig.longTermExtractionThreshold;
|
|
245
|
-
const interval = this.memoryConfig.longTermExtractionInterval;
|
|
246
|
-
if (currentMessageCount < threshold) {
|
|
247
|
-
logger.debug({
|
|
248
|
-
entityId,
|
|
249
|
-
roomId,
|
|
250
|
-
currentMessageCount,
|
|
251
|
-
threshold,
|
|
252
|
-
shouldRun: false
|
|
253
|
-
}, "Extraction check: below threshold");
|
|
254
|
-
return false;
|
|
247
|
+
logger.error("[LongTermMemoryRepository] Database health check failed:", error);
|
|
248
|
+
throw new Error("Database connection health check failed");
|
|
255
249
|
}
|
|
256
|
-
|
|
257
|
-
const currentCheckpoint = Math.floor(currentMessageCount / interval) * interval;
|
|
258
|
-
const shouldRun = currentMessageCount >= threshold && currentCheckpoint > lastCheckpoint;
|
|
259
|
-
logger.debug({
|
|
260
|
-
entityId,
|
|
261
|
-
roomId,
|
|
262
|
-
currentMessageCount,
|
|
263
|
-
threshold,
|
|
264
|
-
interval,
|
|
265
|
-
lastCheckpoint,
|
|
266
|
-
currentCheckpoint,
|
|
267
|
-
shouldRun
|
|
268
|
-
}, "Extraction check");
|
|
269
|
-
return shouldRun;
|
|
250
|
+
return db;
|
|
270
251
|
}
|
|
271
|
-
async
|
|
272
|
-
const db = this.getDb();
|
|
252
|
+
async insert(memory, embedding) {
|
|
253
|
+
const db = await this.getDb();
|
|
273
254
|
const id = crypto.randomUUID();
|
|
274
255
|
const now = new Date;
|
|
275
256
|
const newMemory = {
|
|
276
257
|
id,
|
|
277
258
|
createdAt: now,
|
|
278
|
-
|
|
259
|
+
lastAccessedAt: null,
|
|
279
260
|
accessCount: 0,
|
|
261
|
+
isActive: true,
|
|
262
|
+
embedding: embedding || [],
|
|
280
263
|
...memory
|
|
281
264
|
};
|
|
282
|
-
|
|
283
|
-
await
|
|
265
|
+
await db.transaction(async (tx) => {
|
|
266
|
+
await tx.insert(longTermMemories).values({
|
|
284
267
|
id: newMemory.id,
|
|
285
268
|
agentId: newMemory.agentId,
|
|
286
269
|
entityId: newMemory.entityId,
|
|
287
|
-
|
|
270
|
+
roomId: newMemory.roomId || null,
|
|
271
|
+
type: newMemory.type,
|
|
288
272
|
content: newMemory.content,
|
|
289
|
-
|
|
290
|
-
embedding: newMemory.embedding,
|
|
273
|
+
embeddingContext: newMemory.embeddingContext,
|
|
291
274
|
confidence: newMemory.confidence,
|
|
292
|
-
|
|
293
|
-
|
|
275
|
+
decayRate: newMemory.decayRate,
|
|
276
|
+
decayFunction: newMemory.decayFunction,
|
|
294
277
|
createdAt: now,
|
|
295
|
-
|
|
296
|
-
|
|
278
|
+
lastAccessedAt: null,
|
|
279
|
+
accessCount: 0,
|
|
280
|
+
isActive: true,
|
|
281
|
+
source: newMemory.source,
|
|
282
|
+
metadata: newMemory.metadata,
|
|
283
|
+
supersedesId: newMemory.supersedesId || null
|
|
297
284
|
});
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
285
|
+
if (embedding && this.embeddingDimension) {
|
|
286
|
+
const embeddingValues = {
|
|
287
|
+
id: crypto.randomUUID(),
|
|
288
|
+
memoryId: id,
|
|
289
|
+
createdAt: now
|
|
290
|
+
};
|
|
291
|
+
embeddingValues[this.embeddingDimension] = embedding;
|
|
292
|
+
await tx.insert(longTermMemoryEmbeddings).values(embeddingValues);
|
|
293
|
+
}
|
|
294
|
+
});
|
|
295
|
+
logger.info({
|
|
296
|
+
id: newMemory.id,
|
|
297
|
+
type: newMemory.type,
|
|
298
|
+
entityId: newMemory.entityId,
|
|
299
|
+
confidence: newMemory.confidence
|
|
300
|
+
}, "Stored new long-term memory");
|
|
303
301
|
return newMemory;
|
|
304
302
|
}
|
|
305
|
-
async
|
|
306
|
-
const db = this.getDb();
|
|
303
|
+
async findById(id) {
|
|
304
|
+
const db = await this.getDb();
|
|
305
|
+
const results = await db.select().from(longTermMemories).where(eq(longTermMemories.id, id)).limit(1);
|
|
306
|
+
if (results.length === 0) {
|
|
307
|
+
return null;
|
|
308
|
+
}
|
|
309
|
+
return mapDbRowToLongTermMemory(results[0]);
|
|
310
|
+
}
|
|
311
|
+
async update(id, updates, newEmbedding) {
|
|
312
|
+
const db = await this.getDb();
|
|
313
|
+
const updateData = {};
|
|
314
|
+
if (updates.content !== undefined)
|
|
315
|
+
updateData.content = updates.content;
|
|
316
|
+
if (updates.embeddingContext !== undefined)
|
|
317
|
+
updateData.embeddingContext = updates.embeddingContext;
|
|
318
|
+
if (updates.confidence !== undefined)
|
|
319
|
+
updateData.confidence = updates.confidence;
|
|
320
|
+
if (updates.decayRate !== undefined)
|
|
321
|
+
updateData.decayRate = updates.decayRate;
|
|
322
|
+
if (updates.decayFunction !== undefined)
|
|
323
|
+
updateData.decayFunction = updates.decayFunction;
|
|
324
|
+
if (updates.lastAccessedAt !== undefined)
|
|
325
|
+
updateData.lastAccessedAt = updates.lastAccessedAt;
|
|
326
|
+
if (updates.accessCount !== undefined)
|
|
327
|
+
updateData.accessCount = updates.accessCount;
|
|
328
|
+
if (updates.isActive !== undefined)
|
|
329
|
+
updateData.isActive = updates.isActive;
|
|
330
|
+
if (updates.source !== undefined)
|
|
331
|
+
updateData.source = updates.source;
|
|
332
|
+
if (updates.metadata !== undefined)
|
|
333
|
+
updateData.metadata = updates.metadata;
|
|
334
|
+
if (updates.supersedesId !== undefined)
|
|
335
|
+
updateData.supersedesId = updates.supersedesId;
|
|
336
|
+
await db.transaction(async (tx) => {
|
|
337
|
+
await tx.update(longTermMemories).set(updateData).where(eq(longTermMemories.id, id));
|
|
338
|
+
if (newEmbedding && this.embeddingDimension) {
|
|
339
|
+
const embeddingUpdate = {};
|
|
340
|
+
embeddingUpdate[this.embeddingDimension] = newEmbedding;
|
|
341
|
+
await tx.update(longTermMemoryEmbeddings).set(embeddingUpdate).where(eq(longTermMemoryEmbeddings.memoryId, id));
|
|
342
|
+
}
|
|
343
|
+
});
|
|
344
|
+
logger.info({ id }, "Updated long-term memory");
|
|
345
|
+
}
|
|
346
|
+
async delete(id) {
|
|
347
|
+
const db = await this.getDb();
|
|
348
|
+
await db.delete(longTermMemories).where(eq(longTermMemories.id, id));
|
|
349
|
+
logger.info({ id }, "Deleted long-term memory");
|
|
350
|
+
}
|
|
351
|
+
async findByEntity(entityId, type, limit = 20, includeInactive = false) {
|
|
352
|
+
const db = await this.getDb();
|
|
307
353
|
const conditions = [
|
|
308
354
|
eq(longTermMemories.agentId, this.runtime.agentId),
|
|
309
355
|
eq(longTermMemories.entityId, entityId)
|
|
310
356
|
];
|
|
311
|
-
if (
|
|
312
|
-
conditions.push(eq(longTermMemories.
|
|
357
|
+
if (!includeInactive) {
|
|
358
|
+
conditions.push(eq(longTermMemories.isActive, true));
|
|
359
|
+
}
|
|
360
|
+
if (type) {
|
|
361
|
+
conditions.push(eq(longTermMemories.type, type));
|
|
362
|
+
}
|
|
363
|
+
const results = await db.select().from(longTermMemories).where(and(...conditions)).orderBy(desc(longTermMemories.confidence), desc(longTermMemories.createdAt)).limit(limit);
|
|
364
|
+
return results.map((row) => mapDbRowToLongTermMemory(row));
|
|
365
|
+
}
|
|
366
|
+
async vectorSearch(params, queryEmbedding, similarityThreshold = 0.3) {
|
|
367
|
+
if (!this.embeddingDimension) {
|
|
368
|
+
logger.warn("Embedding dimension not set, skipping vector search");
|
|
369
|
+
return [];
|
|
370
|
+
}
|
|
371
|
+
const db = await this.getDb();
|
|
372
|
+
try {
|
|
373
|
+
const similarity = sql3`1 - (${cosineDistance(longTermMemoryEmbeddings[this.embeddingDimension], queryEmbedding)})`;
|
|
374
|
+
const conditions = [
|
|
375
|
+
eq(longTermMemories.agentId, this.runtime.agentId),
|
|
376
|
+
eq(longTermMemories.entityId, params.entityId),
|
|
377
|
+
sql3`${longTermMemoryEmbeddings[this.embeddingDimension]} IS NOT NULL`,
|
|
378
|
+
gte(similarity, similarityThreshold)
|
|
379
|
+
];
|
|
380
|
+
if (params.minConfidence) {
|
|
381
|
+
conditions.push(gte(longTermMemories.confidence, params.minConfidence));
|
|
382
|
+
}
|
|
383
|
+
if (!params.includeInactive) {
|
|
384
|
+
conditions.push(eq(longTermMemories.isActive, true));
|
|
385
|
+
}
|
|
386
|
+
if (params.type) {
|
|
387
|
+
conditions.push(eq(longTermMemories.type, params.type));
|
|
388
|
+
}
|
|
389
|
+
if (params.roomId) {
|
|
390
|
+
conditions.push(or(eq(longTermMemories.roomId, params.roomId), isNull(longTermMemories.roomId)));
|
|
391
|
+
}
|
|
392
|
+
const results = await db.select({
|
|
393
|
+
memory: longTermMemories,
|
|
394
|
+
embedding: longTermMemoryEmbeddings[this.embeddingDimension],
|
|
395
|
+
similarity
|
|
396
|
+
}).from(longTermMemories).innerJoin(longTermMemoryEmbeddings, eq(longTermMemoryEmbeddings.memoryId, longTermMemories.id)).where(and(...conditions)).orderBy(desc(similarity)).limit(params.limit || 20);
|
|
397
|
+
return results.map((row) => ({
|
|
398
|
+
...mapDbRowToLongTermMemory(row.memory),
|
|
399
|
+
embedding: row.embedding,
|
|
400
|
+
relevanceScore: row.similarity,
|
|
401
|
+
activationScore: 0,
|
|
402
|
+
finalScore: 0
|
|
403
|
+
}));
|
|
404
|
+
} catch (error) {
|
|
405
|
+
logger.error("Failed to execute vector search:", JSON.stringify(error));
|
|
406
|
+
return [];
|
|
313
407
|
}
|
|
314
|
-
|
|
315
|
-
|
|
408
|
+
}
|
|
409
|
+
async fetchAllActive() {
|
|
410
|
+
const db = await this.getDb();
|
|
411
|
+
const memories = await db.select().from(longTermMemories).where(and(eq(longTermMemories.agentId, this.runtime.agentId), eq(longTermMemories.isActive, true)));
|
|
412
|
+
return memories.map((row) => ({
|
|
316
413
|
id: row.id,
|
|
317
|
-
agentId: row.agentId,
|
|
318
|
-
entityId: row.entityId,
|
|
319
|
-
category: row.category,
|
|
320
414
|
content: row.content,
|
|
321
|
-
|
|
322
|
-
embedding: row.embedding,
|
|
323
|
-
confidence: row.confidence,
|
|
324
|
-
source: row.source,
|
|
325
|
-
createdAt: row.createdAt,
|
|
326
|
-
updatedAt: row.updatedAt,
|
|
327
|
-
lastAccessedAt: row.lastAccessedAt,
|
|
328
|
-
accessCount: row.accessCount
|
|
415
|
+
embeddingContext: row.embeddingContext
|
|
329
416
|
}));
|
|
330
417
|
}
|
|
331
|
-
async
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
}
|
|
345
|
-
|
|
346
|
-
updateData.embedding = updates.embedding;
|
|
347
|
-
}
|
|
348
|
-
if (updates.lastAccessedAt !== undefined) {
|
|
349
|
-
updateData.lastAccessedAt = updates.lastAccessedAt;
|
|
350
|
-
}
|
|
351
|
-
if (updates.accessCount !== undefined) {
|
|
352
|
-
updateData.accessCount = updates.accessCount;
|
|
418
|
+
async updateAccessMetadata(memoryIds) {
|
|
419
|
+
if (memoryIds.length === 0)
|
|
420
|
+
return;
|
|
421
|
+
const db = await this.getDb();
|
|
422
|
+
const now = new Date;
|
|
423
|
+
try {
|
|
424
|
+
for (const id of memoryIds) {
|
|
425
|
+
await db.update(longTermMemories).set({
|
|
426
|
+
lastAccessedAt: now,
|
|
427
|
+
accessCount: sql3`${longTermMemories.accessCount} + 1`
|
|
428
|
+
}).where(eq(longTermMemories.id, id));
|
|
429
|
+
}
|
|
430
|
+
logger.debug({ count: memoryIds.length }, "Updated access metadata");
|
|
431
|
+
} catch (error) {
|
|
432
|
+
logger.error({ error }, "Failed to update access metadata");
|
|
353
433
|
}
|
|
354
|
-
await db.update(longTermMemories).set(updateData).where(and(eq(longTermMemories.id, id), eq(longTermMemories.agentId, this.runtime.agentId), eq(longTermMemories.entityId, entityId)));
|
|
355
|
-
logger.info(`Updated long-term memory: ${id} for entity ${entityId}`);
|
|
356
434
|
}
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
435
|
+
}
|
|
436
|
+
|
|
437
|
+
// src/repositories/conversation-summary.ts
|
|
438
|
+
import { logger as logger2 } from "@elizaos/core";
|
|
439
|
+
import { eq as eq2, and as and2, desc as desc2, sql as sql4, cosineDistance as cosineDistance2 } from "drizzle-orm";
|
|
440
|
+
/**
 * Repository for hierarchical conversation summaries.
 *
 * Persists summaries in the `conversationSummaries` table and their vector
 * embeddings in the separate `conversationSummaryEmbeddings` table (joined by
 * `summaryId`). All queries are scoped to `this.runtime.agentId`.
 *
 * NOTE(review): `embeddingDimension` is used as a property/column key on the
 * embeddings table (see insert/vectorSearch) — presumably a dimension-specific
 * column name such as "dim1536"; confirm against the schema module.
 */
class ConversationSummaryRepository {
  // Agent runtime (or runtime-with-adapter) providing DB access and agentId.
  runtime;
  // Key of the embedding column to read/write; falsy disables vector features.
  embeddingDimension;
  /**
   * @param runtime - agent runtime; `runtime.adapter` (or runtime itself) must expose `db` and `isReady()`.
   * @param embeddingDimension - embeddings-table column key for stored vectors.
   */
  constructor(runtime, embeddingDimension) {
    this.runtime = runtime;
    this.embeddingDimension = embeddingDimension;
  }
  /**
   * Returns the drizzle database handle after a health check.
   *
   * If the adapter reports not-ready, waits 1s and re-checks once before
   * giving up. Throws if no db handle exists or the health check fails.
   * NOTE: the inner "connection lost" error is caught by the surrounding
   * try/catch and re-thrown as the generic health-check error.
   */
  async getDb() {
    const adapter = this.runtime.adapter || this.runtime;
    const db = adapter.db;
    if (!db) {
      throw new Error("Database not available");
    }
    try {
      const isReady = await adapter.isReady();
      if (!isReady) {
        logger2.warn("[ConversationSummaryRepository] Database not ready, attempting reconnect...");
        // Brief backoff before the single retry.
        await new Promise((resolve) => setTimeout(resolve, 1000));
        const stillNotReady = await adapter.isReady();
        // Variable holds the *ready* flag despite its name; false means still down.
        if (stillNotReady === false) {
          throw new Error("Database connection lost and could not reconnect");
        }
      }
    } catch (error) {
      logger2.error("[ConversationSummaryRepository] Database health check failed:", error);
      throw new Error("Database connection health check failed");
    }
    return db;
  }
  /**
   * Inserts a summary row and (optionally) its embedding in one transaction.
   *
   * Generates a fresh UUID and timestamps; fields in `summary` override the
   * generated defaults in the returned object via the spread below.
   * The embedding row is only written when both an embedding vector and an
   * embedding column key are available.
   *
   * @param summary - summary fields (agentId, roomId, level, content, ...).
   * @param embedding - optional numeric vector for similarity search.
   * @returns the in-memory summary object that was persisted.
   */
  async insert(summary, embedding) {
    const db = await this.getDb();
    const id = crypto.randomUUID();
    const now = new Date;
    const newSummary = {
      id,
      createdAt: now,
      lastAccessedAt: null,
      accessCount: 0,
      embedding: embedding || [],
      // Spread last: caller-supplied fields win over the defaults above.
      ...summary
    };
    await db.transaction(async (tx) => {
      await tx.insert(conversationSummaries).values({
        id: newSummary.id,
        agentId: newSummary.agentId,
        entityId: newSummary.entityId,
        roomId: newSummary.roomId,
        level: newSummary.level,
        parentSummaryId: newSummary.parentSummaryId || null,
        content: newSummary.content,
        tokenCount: newSummary.tokenCount,
        startTime: newSummary.startTime,
        endTime: newSummary.endTime,
        sourceCount: newSummary.sourceCount,
        sourceIds: newSummary.sourceIds,
        createdAt: now,
        lastAccessedAt: null,
        accessCount: 0,
        metadata: newSummary.metadata
      });
      if (embedding && this.embeddingDimension) {
        const embeddingValues = {
          id: crypto.randomUUID(),
          summaryId: id,
          createdAt: now
        };
        // Dynamic key: the dimension-specific embedding column.
        embeddingValues[this.embeddingDimension] = embedding;
        await tx.insert(conversationSummaryEmbeddings).values(embeddingValues);
      }
    });
    logger2.info({
      id: newSummary.id,
      level: newSummary.level,
      tokenCount: newSummary.tokenCount,
      sourceCount: newSummary.sourceCount
    }, "Stored conversation summary");
    return newSummary;
  }
  /**
   * Fetches all summaries of a given hierarchy level for a room,
   * newest first, mapped to domain objects.
   */
  async findByLevel(roomId, level) {
    const db = await this.getDb();
    const results = await db.select().from(conversationSummaries).where(and2(eq2(conversationSummaries.agentId, this.runtime.agentId), eq2(conversationSummaries.roomId, roomId), eq2(conversationSummaries.level, level))).orderBy(desc2(conversationSummaries.createdAt));
    return results.map((r) => mapDbRowToConversationSummary(r));
  }
  /**
   * Cosine-similarity search over summary embeddings for one entity/room.
   *
   * Returns [] when no embedding column is configured or on any query error
   * (best-effort: errors are logged, not thrown). Fetches `limit * 2` rows —
   * presumably to leave headroom for downstream filtering; TODO confirm.
   *
   * @param queryEmbedding - vector to compare against stored embeddings.
   * @param limit - nominal result count (doubled in the SQL LIMIT).
   */
  async vectorSearch(entityId, roomId, queryEmbedding, limit = 5) {
    if (!this.embeddingDimension) {
      logger2.warn("Embedding dimension not set, skipping summary search");
      return [];
    }
    const db = await this.getDb();
    try {
      // Similarity = 1 - cosine distance, so larger is closer.
      const similarity = sql4`1 - (${cosineDistance2(conversationSummaryEmbeddings[this.embeddingDimension], queryEmbedding)})`;
      const results = await db.select({
        summary: conversationSummaries,
        embedding: conversationSummaryEmbeddings[this.embeddingDimension],
        similarity
      }).from(conversationSummaries).innerJoin(conversationSummaryEmbeddings, eq2(conversationSummaryEmbeddings.summaryId, conversationSummaries.id)).where(and2(eq2(conversationSummaries.agentId, this.runtime.agentId), eq2(conversationSummaries.entityId, entityId), eq2(conversationSummaries.roomId, roomId), sql4`${conversationSummaryEmbeddings[this.embeddingDimension]} IS NOT NULL`)).orderBy(desc2(similarity)).limit(limit * 2);
      return results.map((r) => ({
        ...mapDbRowToConversationSummary(r.summary),
        embedding: r.embedding
      }));
    } catch (error) {
      logger2.error("Failed to search summaries:", JSON.stringify(error));
      return [];
    }
  }
  /**
   * Marks the given summaries as accessed: stamps lastAccessedAt and
   * increments accessCount by 1 (SQL-side increment, avoids read-modify-write).
   * Best-effort — failures are logged and swallowed so retrieval paths
   * are never broken by metadata bookkeeping. Issues one UPDATE per id.
   */
  async updateAccessMetadata(summaryIds) {
    if (summaryIds.length === 0)
      return;
    const db = await this.getDb();
    const now = new Date;
    try {
      for (const id of summaryIds) {
        await db.update(conversationSummaries).set({
          lastAccessedAt: now,
          accessCount: sql4`${conversationSummaries.accessCount} + 1`
        }).where(eq2(conversationSummaries.id, id));
      }
      logger2.debug({ count: summaryIds.length }, "Updated summary access metadata");
    } catch (error) {
      logger2.error({ error }, "Failed to update summary access metadata");
    }
  }
}
|
|
527
563
|
|
|
528
|
-
|
|
564
|
+
// src/utils/embedding.ts
|
|
565
|
+
import { ModelType, logger as logger3 } from "@elizaos/core";
|
|
566
|
+
/**
 * Produces a text embedding via the runtime's TEXT_EMBEDDING model.
 *
 * Best-effort: on any model failure the error is logged and a 1536-wide
 * zero vector is returned instead of throwing, so callers always get an
 * array. NOTE(review): the 1536 fallback assumes an OpenAI-sized model —
 * confirm it matches the configured embedding dimension.
 *
 * @param runtime - agent runtime exposing `useModel`.
 * @param text3 - text to embed.
 * @returns the embedding vector, or a zero vector on failure.
 */
async function generateEmbedding(runtime, text3) {
  try {
    return await runtime.useModel(ModelType.TEXT_EMBEDDING, text3);
  } catch (error) {
    logger3.error({ error }, "Failed to generate embedding");
    return new Array(1536).fill(0);
  }
}
|
|
575
|
+
/**
 * Sanitizes an embedding vector for storage: non-finite entries (NaN,
 * ±Infinity) become 0, finite entries are rounded to 6 decimal places.
 *
 * @param embedding - numeric vector to clean.
 * @returns a new array; the input is not modified.
 */
function cleanEmbedding(embedding) {
  return embedding.map((value) => {
    if (!Number.isFinite(value)) {
      return 0;
    }
    // toFixed(6) then back to Number trims float noise to 6 dp.
    return Number(value.toFixed(6));
  });
}
|
|
531
578
|
|
|
532
|
-
// src/
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
# Instructions
|
|
546
|
-
Generate a summary that:
|
|
547
|
-
1. Captures the main topics discussed
|
|
548
|
-
2. Highlights key information shared
|
|
549
|
-
3. Notes any decisions made or questions asked
|
|
550
|
-
4. Maintains context for future reference
|
|
551
|
-
5. Is concise but comprehensive
|
|
552
|
-
|
|
553
|
-
**IMPORTANT**: Keep the summary under 2500 tokens. Be comprehensive but concise.
|
|
554
|
-
|
|
555
|
-
Also extract:
|
|
556
|
-
- **Topics**: List of main topics discussed (comma-separated)
|
|
557
|
-
- **Key Points**: Important facts or decisions (bullet points)
|
|
558
|
-
|
|
559
|
-
Respond in this XML format:
|
|
560
|
-
<summary>
|
|
561
|
-
<text>Your comprehensive summary here</text>
|
|
562
|
-
<topics>topic1, topic2, topic3</topics>
|
|
563
|
-
<keyPoints>
|
|
564
|
-
<point>First key point</point>
|
|
565
|
-
<point>Second key point</point>
|
|
566
|
-
</keyPoints>
|
|
567
|
-
</summary>`;
|
|
568
|
-
var updateSummarizationTemplate = `# Task: Update and Condense Conversation Summary
|
|
569
|
-
|
|
570
|
-
You are updating an existing conversation summary with new messages, while keeping the total summary concise.
|
|
571
|
-
|
|
572
|
-
# Existing Summary
|
|
573
|
-
{{existingSummary}}
|
|
574
|
-
|
|
575
|
-
# Existing Topics
|
|
576
|
-
{{existingTopics}}
|
|
577
|
-
|
|
578
|
-
# New Messages Since Last Summary
|
|
579
|
-
{{newMessages}}
|
|
580
|
-
|
|
581
|
-
# Instructions
|
|
582
|
-
Update the summary by:
|
|
583
|
-
1. Merging the existing summary with insights from the new messages
|
|
584
|
-
2. Removing redundant or less important details to stay under the token limit
|
|
585
|
-
3. Keeping the most important context and decisions
|
|
586
|
-
4. Adding new topics if they emerge
|
|
587
|
-
5. **CRITICAL**: Keep the ENTIRE updated summary under 2500 tokens
|
|
588
|
-
|
|
589
|
-
The goal is a rolling summary that captures the essence of the conversation without growing indefinitely.
|
|
590
|
-
|
|
591
|
-
Respond in this XML format:
|
|
592
|
-
<summary>
|
|
593
|
-
<text>Your updated and condensed summary here</text>
|
|
594
|
-
<topics>topic1, topic2, topic3</topics>
|
|
595
|
-
<keyPoints>
|
|
596
|
-
<point>First key point</point>
|
|
597
|
-
<point>Second key point</point>
|
|
598
|
-
</keyPoints>
|
|
599
|
-
</summary>`;
|
|
600
|
-
function parseSummaryXML(xml) {
|
|
601
|
-
const summaryMatch = xml.match(/<text>([\s\S]*?)<\/text>/);
|
|
602
|
-
const topicsMatch = xml.match(/<topics>([\s\S]*?)<\/topics>/);
|
|
603
|
-
const keyPointsMatches = xml.matchAll(/<point>([\s\S]*?)<\/point>/g);
|
|
604
|
-
const summary = summaryMatch ? summaryMatch[1].trim() : "Summary not available";
|
|
605
|
-
const topics = topicsMatch ? topicsMatch[1].split(",").map((t) => t.trim()).filter(Boolean) : [];
|
|
606
|
-
const keyPoints = Array.from(keyPointsMatches).map((match) => match[1].trim());
|
|
607
|
-
return { summary, topics, keyPoints };
|
|
579
|
+
// src/utils/decay-scoring.ts
|
|
580
|
+
/**
 * Computes the time-decay multiplier for a memory's activation score.
 *
 * - "EXPONENTIAL": e^(-rate * days)
 * - "LINEAR": max(0, 1 - rate * days), clamped so it never goes negative
 * - "NONE" or any unrecognized function: 1 (no decay)
 *
 * @param decayFunction - decay mode name (enum value inlined as a string).
 * @param decayRate - per-day decay rate.
 * @param timeDeltaDays - days since the memory was last touched.
 * @returns factor in [0, 1] (1 when decay is disabled/unknown).
 */
function calculateDecayFactor(decayFunction, decayRate, timeDeltaDays) {
  if (decayFunction === "EXPONENTIAL" /* EXPONENTIAL */) {
    return Math.exp(-decayRate * timeDeltaDays);
  }
  if (decayFunction === "LINEAR" /* LINEAR */) {
    return Math.max(0, 1 - decayRate * timeDeltaDays);
  }
  // "NONE" and unknown modes both fall through to no decay.
  return 1;
}
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
const
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
592
|
+
/**
 * Frequency boost for retrieval scoring: 1 + 0.1 * ln(1 + accessCount).
 * Grows logarithmically, so heavily accessed memories are favored without
 * dominating; an untouched memory (count 0) gets exactly 1.
 *
 * @param accessCount - number of times the memory has been retrieved.
 * @returns multiplier >= 1.
 */
function calculateAccessBoost(accessCount) {
  const logTerm = Math.log(1 + accessCount);
  return 1 + logTerm * 0.1;
}
|
|
595
|
+
/**
 * Annotates retrieved memories with decay-adjusted scores.
 *
 * For each memory, age is measured in days since `lastAccessedAt` (falling
 * back to `createdAt` when never accessed), then:
 *   activationScore = confidence * decayFactor * accessBoost
 *   finalScore      = relevanceScore * activationScore
 *
 * @param memories - items carrying confidence, relevanceScore, accessCount,
 *   decayFunction/decayRate, and Date fields createdAt / lastAccessedAt.
 * @returns new objects with activationScore and finalScore added; inputs
 *   are not mutated.
 */
function applyDecayScoring(memories) {
  const now = Date.now();
  const scoreOne = (memory) => {
    // `||` (not `??`) matches the original: an epoch-zero timestamp falls back too.
    const lastTouched = memory.lastAccessedAt?.getTime() || memory.createdAt.getTime();
    const ageInDays = (now - lastTouched) / (1000 * 60 * 60 * 24);
    const decay = calculateDecayFactor(memory.decayFunction, memory.decayRate, ageInDays);
    const boost = calculateAccessBoost(memory.accessCount);
    const activationScore = memory.confidence * decay * boost;
    return {
      ...memory,
      activationScore,
      finalScore: memory.relevanceScore * activationScore
    };
  };
  return memories.map(scoreOne);
}
|
|
611
|
+
|
|
612
|
+
// src/utils/search-merging.ts
|
|
613
|
+
/**
 * Merges vector-search and BM25 result lists, deduplicating by `id`.
 *
 * Results appearing in both lists get the arithmetic mean of the two
 * relevance scores; results unique to either list pass through unchanged.
 * Insertion order is preserved: all vector results first (in order), then
 * BM25-only results.
 *
 * Fix: the original mutated `relevanceScore` on elements of the
 * caller-owned `vectorResults` array; duplicates are now merged into a
 * fresh object so neither input list is modified.
 *
 * @param vectorResults - results from vector similarity search.
 * @param bm25Results - results from BM25 keyword search.
 * @returns a new array of deduplicated results.
 */
function mergeSearchResults(vectorResults, bm25Results) {
  const merged = new Map();
  for (const result of vectorResults) {
    merged.set(result.id, result);
  }
  for (const result of bm25Results) {
    if (merged.has(result.id)) {
      const existing = merged.get(result.id);
      // Copy instead of mutating the vector-search result in place.
      merged.set(result.id, {
        ...existing,
        relevanceScore: (existing.relevanceScore + result.relevanceScore) / 2
      });
    } else {
      merged.set(result.id, result);
    }
  }
  return Array.from(merged.values());
}
|
|
628
|
+
|
|
629
|
+
// src/utils/formatting.ts
|
|
630
|
+
/**
 * Renders long-term memories as markdown sections for prompt context.
 *
 * Memories are grouped by `type`; SEMANTIC, EPISODIC, and PROCEDURAL
 * groups each become a headed bullet-list section (other types are
 * silently ignored). Sections are joined with blank lines. Returns ""
 * when no memories are supplied.
 *
 * @param memories - items with type, content, and (per type) confidence
 *   or createdAt (Date) fields.
 * @returns markdown string, or "" for an empty input.
 */
function formatMemoriesForContext(memories) {
  if (memories.length === 0) {
    return "";
  }
  const byType = new Map();
  for (const memory of memories) {
    const bucket = byType.get(memory.type);
    if (bucket) {
      bucket.push(memory);
    } else {
      byType.set(memory.type, [memory]);
    }
  }
  const sections = [];
  // Emits one "**Heading**:\n- line\n- line" section if the type is present.
  const appendSection = (type, heading, renderLine) => {
    const entries = byType.get(type);
    if (!entries) {
      return;
    }
    const body = entries.map(renderLine).join("\n");
    sections.push(`${heading}:\n${body}`);
  };
  appendSection("SEMANTIC" /* SEMANTIC */, "**Semantic Knowledge (Facts)**", (m) => `- ${m.content} (confidence: ${m.confidence.toFixed(2)})`);
  appendSection("EPISODIC" /* EPISODIC */, "**Episodic Memory (Events)**", (m) => `- ${m.content} (${m.createdAt.toLocaleDateString()})`);
  appendSection("PROCEDURAL" /* PROCEDURAL */, "**Procedural Knowledge (Skills)**", (m) => `- ${m.content}`);
  return sections.join("\n\n");
}
|
|
769
667
|
|
|
770
|
-
// src/
|
|
771
|
-
|
|
772
|
-
(
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
LongTermMemoryCategory2["GOALS"] = "goals";
|
|
779
|
-
LongTermMemoryCategory2["CONSTRAINTS"] = "constraints";
|
|
780
|
-
LongTermMemoryCategory2["DEFINITIONS"] = "definitions";
|
|
781
|
-
LongTermMemoryCategory2["BEHAVIORAL_PATTERNS"] = "behavioral_patterns";
|
|
782
|
-
})(LongTermMemoryCategory ||= {});
|
|
783
|
-
|
|
784
|
-
// src/evaluators/long-term-extraction.ts
|
|
785
|
-
var extractionTemplate = `# Task: Extract Long-Term Memory
|
|
786
|
-
|
|
787
|
-
You are analyzing a conversation to extract ONLY the most important, persistent facts about the user that should be remembered long-term.
|
|
788
|
-
|
|
789
|
-
# Recent Messages
|
|
790
|
-
{{recentMessages}}
|
|
791
|
-
|
|
792
|
-
# Current Long-Term Memories
|
|
793
|
-
{{existingMemories}}
|
|
794
|
-
|
|
795
|
-
# Memory Categories
|
|
796
|
-
1. **identity**: User's name, role, identity (e.g., "I'm a data scientist")
|
|
797
|
-
2. **expertise**: User's skills, knowledge domains, or unfamiliarity with topics
|
|
798
|
-
3. **projects**: Ongoing projects, past interactions, recurring topics
|
|
799
|
-
4. **preferences**: Communication style, format preferences, verbosity, etc.
|
|
800
|
-
5. **data_sources**: Frequently used files, databases, APIs
|
|
801
|
-
6. **goals**: Broader intentions (e.g., "preparing for interview")
|
|
802
|
-
7. **constraints**: User-defined rules or limitations
|
|
803
|
-
8. **definitions**: Custom terms, acronyms, glossaries
|
|
804
|
-
9. **behavioral_patterns**: How the user tends to interact
|
|
805
|
-
|
|
806
|
-
# STRICT EXTRACTION CRITERIA
|
|
807
|
-
|
|
808
|
-
**DO EXTRACT** - Facts that are:
|
|
809
|
-
- Explicitly stated personal information (name, role, profession)
|
|
810
|
-
- Repeated patterns across multiple conversations (3+ occurrences)
|
|
811
|
-
- Core preferences stated with clear emphasis ("I always prefer...", "I never want...")
|
|
812
|
-
- Important ongoing projects or goals mentioned multiple times
|
|
813
|
-
- Persistent technical constraints or requirements
|
|
814
|
-
- Domain expertise demonstrated consistently
|
|
815
|
-
- Custom terminology the user defines and uses repeatedly
|
|
816
|
-
|
|
817
|
-
**DO NOT EXTRACT** - Facts that are:
|
|
818
|
-
- One-time requests or isolated tasks (e.g., "generate an image or similar")
|
|
819
|
-
- Single interactions without pattern repetition
|
|
820
|
-
- Casual preferences from single occurrences
|
|
821
|
-
- Testing or exploratory questions
|
|
822
|
-
- Temporary context or transient information
|
|
823
|
-
- General courtesy or social patterns (greetings, thank yous)
|
|
824
|
-
- Random or playful requests that don't indicate lasting preference
|
|
825
|
-
- Information that could change or is situational
|
|
826
|
-
|
|
827
|
-
# Quality Standards
|
|
828
|
-
- **Confidence threshold**: Only extract if confidence >= 0.8
|
|
829
|
-
- **Persistence required**: Must see evidence across multiple messages or strong explicit statement
|
|
830
|
-
- **Meaningful value**: Would this fact improve future interactions weeks from now?
|
|
831
|
-
- **Not redundant**: Avoid if existing memories already cover this information
|
|
832
|
-
|
|
833
|
-
# Instructions
|
|
834
|
-
Extract ONLY truly important NEW information that meets the strict criteria above. For each item:
|
|
835
|
-
- Determine which category it belongs to
|
|
836
|
-
- Write a clear, factual statement
|
|
837
|
-
- Assess confidence (0.0 to 1.0) - BE CONSERVATIVE
|
|
838
|
-
- Require strong evidence before extraction
|
|
839
|
-
|
|
840
|
-
**When in doubt, DO NOT extract.** It's better to miss temporary information than to clutter long-term memory.
|
|
841
|
-
|
|
842
|
-
If there are no new long-term facts to extract, respond with <memories></memories>
|
|
843
|
-
|
|
844
|
-
Respond in this XML format:
|
|
845
|
-
<memories>
|
|
846
|
-
<memory>
|
|
847
|
-
<category>identity</category>
|
|
848
|
-
<content>User is a software engineer specializing in backend development</content>
|
|
849
|
-
<confidence>0.95</confidence>
|
|
850
|
-
</memory>
|
|
851
|
-
<memory>
|
|
852
|
-
<category>preferences</category>
|
|
853
|
-
<content>Prefers code examples over lengthy explanations</content>
|
|
854
|
-
<confidence>0.85</confidence>
|
|
855
|
-
</memory>
|
|
856
|
-
</memories>`;
|
|
857
|
-
function parseMemoryExtractionXML(xml) {
|
|
858
|
-
const memoryMatches = xml.matchAll(/<memory>[\s\S]*?<category>(.*?)<\/category>[\s\S]*?<content>(.*?)<\/content>[\s\S]*?<confidence>(.*?)<\/confidence>[\s\S]*?<\/memory>/g);
|
|
859
|
-
const extractions = [];
|
|
860
|
-
for (const match of memoryMatches) {
|
|
861
|
-
const category = match[1].trim();
|
|
862
|
-
const content = match[2].trim();
|
|
863
|
-
const confidence = parseFloat(match[3].trim());
|
|
864
|
-
if (!Object.values(LongTermMemoryCategory).includes(category)) {
|
|
865
|
-
logger3.warn(`Invalid memory category: ${category}`);
|
|
866
|
-
continue;
|
|
867
|
-
}
|
|
868
|
-
if (content && !isNaN(confidence)) {
|
|
869
|
-
extractions.push({ category, content, confidence });
|
|
870
|
-
}
|
|
871
|
-
}
|
|
872
|
-
return extractions;
|
|
668
|
+
// src/utils/token-counter.ts
|
|
669
|
+
/**
 * Rough token estimate for a text: ceil(length / 4), the common
 * ~4-chars-per-token heuristic. Null/undefined/empty input yields 0.
 *
 * @param text3 - text to measure (may be null/undefined).
 * @returns estimated token count (integer >= 0).
 */
function estimateTokenCount(text3) {
  // Falsy covers null, undefined, and "" — all count as zero tokens.
  if (!text3) {
    return 0;
  }
  return Math.ceil(text3.length / 4);
}
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
if (!message.content?.text) {
|
|
886
|
-
logger3.debug("Skipping long-term memory extraction for message without text");
|
|
887
|
-
return false;
|
|
677
|
+
/**
 * Sums the estimated token counts of several texts.
 *
 * @param texts - array of strings (entries may be null/empty; they add 0).
 * @returns total estimated tokens across all entries.
 */
function estimateTokenCountForArray(texts) {
  let total = 0;
  for (const entry of texts) {
    total += estimateTokenCount(entry);
  }
  return total;
}
|
|
680
|
+
/**
 * Keeps a leading prefix of `items` that fits within a token budget.
 *
 * Items are costed as estimateTokenCount(getText(item)) + includeOverhead
 * (the per-item formatting overhead, default 10). Iteration stops at the
 * first item that would exceed the budget — later, cheaper items are NOT
 * considered, preserving input order/priority.
 *
 * @param items - candidate items, highest priority first.
 * @param budget - maximum total tokens.
 * @param getText - extracts the text to measure from an item.
 * @param includeOverhead - fixed per-item token overhead (default 10).
 * @returns new array containing the kept prefix.
 */
function trimToTokenBudget(items, budget, getText, includeOverhead = 10) {
  const kept = [];
  let spent = 0;
  for (const item of items) {
    const cost = estimateTokenCount(getText(item)) + includeOverhead;
    if (spent + cost > budget) {
      break;
    }
    kept.push(item);
    spent += cost;
  }
  return kept;
}
|
|
694
|
+
/**
 * Human-readable token count: "<n> tokens" below 1K, "<n.n>K tokens"
 * below 1M, otherwise "<n.n>M tokens" (one decimal place).
 *
 * @param count - token count.
 * @returns formatted string.
 */
function formatTokenCount(count) {
  if (count < 1000) {
    return `${count} tokens`;
  }
  if (count < 1e6) {
    return `${(count / 1000).toFixed(1)}K tokens`;
  }
  return `${(count / 1e6).toFixed(1)}M tokens`;
}
|
|
703
|
+
|
|
704
|
+
// src/services/memory-service.ts
|
|
705
|
+
class MemoryService extends Service {
|
|
706
|
+
static serviceType = "memory";
|
|
707
|
+
bm25Index = null;
|
|
708
|
+
embeddingDimension;
|
|
709
|
+
isInitialized = false;
|
|
710
|
+
longTermMemoryRepo;
|
|
711
|
+
conversationSummaryRepo;
|
|
712
|
+
capabilityDescription = "State-of-the-art cognitive memory system with episodic, semantic, and procedural memory";
|
|
713
|
+
static defaultConfig = {
|
|
714
|
+
consolidationThreshold: 12,
|
|
715
|
+
minConfidence: 0.7,
|
|
716
|
+
enableVectorSearch: true,
|
|
717
|
+
enableBM25: true,
|
|
718
|
+
retrievalLimit: 5,
|
|
719
|
+
tokenBudget: 1000,
|
|
720
|
+
defaultDecayRates: {
|
|
721
|
+
["EPISODIC" /* EPISODIC */]: 0.05,
|
|
722
|
+
["SEMANTIC" /* SEMANTIC */]: 0.01,
|
|
723
|
+
["PROCEDURAL" /* PROCEDURAL */]: 0.02
|
|
724
|
+
},
|
|
725
|
+
enableContradictionDetection: true,
|
|
726
|
+
summarization: {
|
|
727
|
+
enabled: true,
|
|
728
|
+
messagesPerSummary: 7,
|
|
729
|
+
summariesPerLevel: 5,
|
|
730
|
+
maxDepth: 3,
|
|
731
|
+
summaryTokenBudget: 500
|
|
893
732
|
}
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
733
|
+
};
|
|
734
|
+
constructor(runtime) {
|
|
735
|
+
super(runtime);
|
|
736
|
+
this.config = { ...MemoryService.defaultConfig };
|
|
737
|
+
}
|
|
738
|
+
static async start(runtime) {
|
|
739
|
+
const service = new MemoryService(runtime);
|
|
740
|
+
await service.initialize(runtime);
|
|
741
|
+
return service;
|
|
742
|
+
}
|
|
743
|
+
async stop() {
|
|
744
|
+
logger4.info("MemoryService stopped");
|
|
745
|
+
}
|
|
746
|
+
async initialize(runtime) {
|
|
747
|
+
this.runtime = runtime;
|
|
748
|
+
this.loadConfiguration();
|
|
749
|
+
await this.ensureEmbeddingDimension();
|
|
750
|
+
this.longTermMemoryRepo = new LongTermMemoryRepository(runtime, this.embeddingDimension);
|
|
751
|
+
this.conversationSummaryRepo = new ConversationSummaryRepository(runtime, this.embeddingDimension);
|
|
752
|
+
logger4.info({ config: this.config }, "MemoryService initialized");
|
|
753
|
+
if (this.config.enableBM25) {
|
|
754
|
+
await this.rebuildBM25Index();
|
|
898
755
|
}
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
const memoryService = runtime.getService("memory");
|
|
906
|
-
if (!memoryService) {
|
|
907
|
-
logger3.error("MemoryService not found");
|
|
908
|
-
return;
|
|
756
|
+
this.isInitialized = true;
|
|
757
|
+
}
|
|
758
|
+
loadConfiguration() {
|
|
759
|
+
const threshold = this.runtime.getSetting("MEMORY_CONSOLIDATION_THRESHOLD");
|
|
760
|
+
if (threshold) {
|
|
761
|
+
this.config.consolidationThreshold = parseInt(threshold, 10);
|
|
909
762
|
}
|
|
910
|
-
const
|
|
911
|
-
|
|
763
|
+
const minConfidence = this.runtime.getSetting("MEMORY_MIN_CONFIDENCE");
|
|
764
|
+
if (minConfidence) {
|
|
765
|
+
this.config.minConfidence = parseFloat(minConfidence);
|
|
766
|
+
}
|
|
767
|
+
const enableVector = this.runtime.getSetting("MEMORY_ENABLE_VECTOR_SEARCH");
|
|
768
|
+
logger4.debug({
|
|
769
|
+
enableVector,
|
|
770
|
+
type: typeof enableVector,
|
|
771
|
+
defaultValue: this.config.enableVectorSearch
|
|
772
|
+
}, "Loading MEMORY_ENABLE_VECTOR_SEARCH setting");
|
|
773
|
+
if (enableVector !== undefined && enableVector !== null && enableVector !== "") {
|
|
774
|
+
this.config.enableVectorSearch = enableVector === "true" || enableVector === true;
|
|
775
|
+
logger4.info({ enabled: this.config.enableVectorSearch }, "Vector search explicitly configured via environment variable");
|
|
776
|
+
} else {
|
|
777
|
+
logger4.info({ enabled: this.config.enableVectorSearch }, "Vector search using default configuration");
|
|
778
|
+
}
|
|
779
|
+
const enableBM25 = this.runtime.getSetting("MEMORY_ENABLE_BM25");
|
|
780
|
+
logger4.debug({
|
|
781
|
+
enableBM25,
|
|
782
|
+
type: typeof enableBM25,
|
|
783
|
+
defaultValue: this.config.enableBM25
|
|
784
|
+
}, "Loading MEMORY_ENABLE_BM25 setting");
|
|
785
|
+
if (enableBM25 !== undefined && enableBM25 !== null && enableBM25 !== "") {
|
|
786
|
+
this.config.enableBM25 = enableBM25 === "true" || enableBM25 === true;
|
|
787
|
+
logger4.info({ enabled: this.config.enableBM25 }, "BM25 search explicitly configured via environment variable");
|
|
788
|
+
} else {
|
|
789
|
+
logger4.info({ enabled: this.config.enableBM25 }, "BM25 search using default configuration");
|
|
790
|
+
}
|
|
791
|
+
const retrievalLimit = this.runtime.getSetting("MEMORY_RETRIEVAL_LIMIT");
|
|
792
|
+
if (retrievalLimit) {
|
|
793
|
+
this.config.retrievalLimit = parseInt(retrievalLimit, 10);
|
|
794
|
+
}
|
|
795
|
+
const tokenBudget = this.runtime.getSetting("MEMORY_TOKEN_BUDGET");
|
|
796
|
+
if (tokenBudget) {
|
|
797
|
+
this.config.tokenBudget = parseInt(tokenBudget, 10);
|
|
798
|
+
}
|
|
799
|
+
const summaryEnabled = this.runtime.getSetting("MEMORY_SUMMARY_ENABLED");
|
|
800
|
+
logger4.debug({
|
|
801
|
+
summaryEnabled,
|
|
802
|
+
type: typeof summaryEnabled,
|
|
803
|
+
defaultValue: this.config.summarization?.enabled
|
|
804
|
+
}, "Loading MEMORY_SUMMARY_ENABLED setting");
|
|
805
|
+
if (summaryEnabled !== undefined && summaryEnabled !== null && summaryEnabled !== "") {
|
|
806
|
+
this.config.summarization.enabled = summaryEnabled === "true" || summaryEnabled === true;
|
|
807
|
+
logger4.info({ enabled: this.config.summarization.enabled }, "Summarization explicitly configured via environment variable");
|
|
808
|
+
} else {
|
|
809
|
+
logger4.info({ enabled: this.config.summarization.enabled }, "Summarization using default configuration");
|
|
810
|
+
}
|
|
811
|
+
const messagesPerSummary = this.runtime.getSetting("MEMORY_MESSAGES_PER_SUMMARY");
|
|
812
|
+
if (messagesPerSummary) {
|
|
813
|
+
this.config.summarization.messagesPerSummary = parseInt(messagesPerSummary, 10);
|
|
814
|
+
}
|
|
815
|
+
const summariesPerLevel = this.runtime.getSetting("MEMORY_SUMMARIES_PER_LEVEL");
|
|
816
|
+
if (summariesPerLevel) {
|
|
817
|
+
this.config.summarization.summariesPerLevel = parseInt(summariesPerLevel, 10);
|
|
818
|
+
}
|
|
819
|
+
const maxDepth = this.runtime.getSetting("MEMORY_SUMMARY_MAX_DEPTH");
|
|
820
|
+
if (maxDepth) {
|
|
821
|
+
this.config.summarization.maxDepth = parseInt(maxDepth, 10);
|
|
822
|
+
}
|
|
823
|
+
const summaryTokenBudget = this.runtime.getSetting("MEMORY_SUMMARY_TOKEN_BUDGET");
|
|
824
|
+
if (summaryTokenBudget) {
|
|
825
|
+
this.config.summarization.summaryTokenBudget = parseInt(summaryTokenBudget, 10);
|
|
826
|
+
}
|
|
827
|
+
}
|
|
828
|
+
async ensureEmbeddingDimension() {
|
|
912
829
|
try {
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
830
|
+
const embeddingModel = this.runtime.getModel(ModelType2.TEXT_EMBEDDING);
|
|
831
|
+
if (!embeddingModel) {
|
|
832
|
+
logger4.warn("No TEXT_EMBEDDING model registered. Embeddings will not be generated.");
|
|
833
|
+
return;
|
|
834
|
+
}
|
|
835
|
+
const testEmbedding = await this.runtime.useModel(ModelType2.TEXT_EMBEDDING, {
|
|
836
|
+
text: "test"
|
|
919
837
|
});
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
838
|
+
if (!testEmbedding || !Array.isArray(testEmbedding)) {
|
|
839
|
+
throw new Error("Invalid embedding received from model");
|
|
840
|
+
}
|
|
841
|
+
const dimension = testEmbedding.length;
|
|
842
|
+
const dimensionColumn = MEMORY_DIMENSION_MAP[dimension];
|
|
843
|
+
if (!dimensionColumn) {
|
|
844
|
+
throw new Error(`Unsupported embedding dimension: ${dimension}. Supported dimensions: ${Object.keys(MEMORY_DIMENSION_MAP).join(", ")}`);
|
|
845
|
+
}
|
|
846
|
+
this.embeddingDimension = dimensionColumn;
|
|
847
|
+
logger4.info(`Memory embedding dimension set to ${dimension} (${dimensionColumn})`);
|
|
848
|
+
} catch (error) {
|
|
849
|
+
logger4.error("Failed to ensure embedding dimension:", JSON.stringify(error));
|
|
850
|
+
throw error;
|
|
851
|
+
}
|
|
852
|
+
}
|
|
853
|
+
getConfig() {
|
|
854
|
+
return { ...this.config };
|
|
855
|
+
}
|
|
856
|
+
updateConfig(updates) {
|
|
857
|
+
this.config = { ...this.config, ...updates };
|
|
858
|
+
if (updates.enableBM25 !== undefined) {
|
|
859
|
+
if (updates.enableBM25 && !this.bm25Index) {
|
|
860
|
+
this.rebuildBM25Index();
|
|
861
|
+
} else if (!updates.enableBM25) {
|
|
862
|
+
this.bm25Index = null;
|
|
863
|
+
}
|
|
864
|
+
}
|
|
865
|
+
}
|
|
866
|
+
async storeLongTermMemory(memory) {
|
|
867
|
+
let embedding;
|
|
868
|
+
if (this.embeddingDimension) {
|
|
869
|
+
try {
|
|
870
|
+
const rawEmbedding = await generateEmbedding(this.runtime, memory.embeddingContext);
|
|
871
|
+
embedding = cleanEmbedding(rawEmbedding);
|
|
872
|
+
} catch (error) {
|
|
873
|
+
logger4.warn("Failed to generate embedding for long-term memory:", JSON.stringify(error));
|
|
874
|
+
}
|
|
875
|
+
}
|
|
876
|
+
const stored = await this.longTermMemoryRepo.insert(memory, embedding);
|
|
877
|
+
if (this.config.enableBM25 && this.bm25Index) {
|
|
878
|
+
await this.rebuildBM25Index();
|
|
879
|
+
}
|
|
880
|
+
return stored;
|
|
881
|
+
}
|
|
882
|
+
async getLongTermMemory(id) {
|
|
883
|
+
return this.longTermMemoryRepo.findById(id);
|
|
884
|
+
}
|
|
885
|
+
async updateLongTermMemory(id, updates) {
|
|
886
|
+
let newEmbedding;
|
|
887
|
+
if (updates.embeddingContext !== undefined && this.embeddingDimension) {
|
|
888
|
+
try {
|
|
889
|
+
const rawEmbedding = await generateEmbedding(this.runtime, updates.embeddingContext);
|
|
890
|
+
newEmbedding = cleanEmbedding(rawEmbedding);
|
|
891
|
+
} catch (error) {
|
|
892
|
+
logger4.warn("Failed to regenerate embedding:", JSON.stringify(error));
|
|
893
|
+
}
|
|
894
|
+
}
|
|
895
|
+
await this.longTermMemoryRepo.update(id, updates, newEmbedding);
|
|
896
|
+
}
|
|
897
|
+
async deleteLongTermMemory(id) {
|
|
898
|
+
await this.longTermMemoryRepo.delete(id);
|
|
899
|
+
}
|
|
900
|
+
async getLongTermMemories(entityId, type, limit = 20, includeInactive = false) {
|
|
901
|
+
return this.longTermMemoryRepo.findByEntity(entityId, type, limit, includeInactive);
|
|
902
|
+
}
|
|
903
|
+
async handleContradiction(entityId, newMemory) {
|
|
904
|
+
const searchResults = await this.searchLongTermMemories({
|
|
905
|
+
entityId,
|
|
906
|
+
query: newMemory.content,
|
|
907
|
+
type: newMemory.type,
|
|
908
|
+
limit: 5,
|
|
909
|
+
includeInactive: false
|
|
910
|
+
});
|
|
911
|
+
if (searchResults.length === 0) {
|
|
912
|
+
await this.storeLongTermMemory(newMemory);
|
|
913
|
+
return;
|
|
914
|
+
}
|
|
915
|
+
const contradictingMemory = await this.detectContradiction(newMemory, searchResults);
|
|
916
|
+
if (contradictingMemory) {
|
|
917
|
+
logger4.info({
|
|
918
|
+
oldMemoryId: contradictingMemory.id,
|
|
919
|
+
newContent: newMemory.content
|
|
920
|
+
}, "Contradiction detected, superseding old memory");
|
|
921
|
+
await this.updateLongTermMemory(contradictingMemory.id, {
|
|
922
|
+
isActive: false
|
|
936
923
|
});
|
|
937
|
-
const
|
|
938
|
-
|
|
924
|
+
const stored = await this.storeLongTermMemory({
|
|
925
|
+
...newMemory,
|
|
926
|
+
supersedesId: contradictingMemory.id
|
|
939
927
|
});
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
928
|
+
logger4.info({ newMemoryId: stored.id }, "Stored superseding memory");
|
|
929
|
+
} else {
|
|
930
|
+
await this.storeLongTermMemory(newMemory);
|
|
931
|
+
}
|
|
932
|
+
}
|
|
933
|
+
async detectContradiction(newMemory, existingMemories) {
|
|
934
|
+
const prompt = `# TASK: Contradiction Detection
|
|
935
|
+
|
|
936
|
+
You are analyzing whether a new long-term memory contradicts any existing long-term memories.
|
|
937
|
+
|
|
938
|
+
## New Long-Term Memory:
|
|
939
|
+
"${newMemory.content}"
|
|
940
|
+
|
|
941
|
+
## Existing Long-Term Memories:
|
|
942
|
+
${existingMemories.map((m, idx) => `${idx + 1}. "${m.content}" (confidence: ${m.confidence}, created: ${m.createdAt.toISOString()})`).join(`
|
|
943
|
+
`)}
|
|
944
|
+
|
|
945
|
+
## Instructions:
|
|
946
|
+
Determine if the new long-term memory directly contradicts any of the existing long-term memories. A contradiction means the statements cannot both be true.
|
|
947
|
+
|
|
948
|
+
Examples of contradictions:
|
|
949
|
+
- "User likes blue" vs "User hates blue"
|
|
950
|
+
- "User lives in Paris" vs "User lives in London"
|
|
951
|
+
|
|
952
|
+
Examples of non-contradictions (these are compatible):
|
|
953
|
+
- "User likes blue" vs "User likes blue only for clothes" (nuance, not contradiction)
|
|
954
|
+
- "User was in Paris" vs "User moved to London" (state change over time)
|
|
955
|
+
|
|
956
|
+
## Output Format:
|
|
957
|
+
Return an XML response:
|
|
958
|
+
|
|
959
|
+
<response>
|
|
960
|
+
<hasContradiction>true or false</hasContradiction>
|
|
961
|
+
<contradictingMemoryIndex>number or null</contradictingMemoryIndex>
|
|
962
|
+
<reasoning>Explanation</reasoning>
|
|
963
|
+
</response>
|
|
964
|
+
|
|
965
|
+
If no contradiction is found, set hasContradiction to false and contradictingMemoryIndex to null.`;
|
|
966
|
+
try {
|
|
967
|
+
const response = await this.runtime.useModel(ModelType2.TEXT_LARGE, {
|
|
968
|
+
prompt,
|
|
969
|
+
temperature: 0.2
|
|
970
|
+
});
|
|
971
|
+
const responseText = typeof response === "string" ? response : JSON.stringify(response);
|
|
972
|
+
const result = parseKeyValueXml(responseText);
|
|
973
|
+
if (!result) {
|
|
974
|
+
logger4.warn("Failed to parse contradiction detection XML response");
|
|
975
|
+
return null;
|
|
976
|
+
}
|
|
977
|
+
const hasContradiction = parseBooleanFromText(typeof result.hasContradiction === "boolean" ? result.hasContradiction ? "true" : "false" : result.hasContradiction);
|
|
978
|
+
let contradictingMemoryIndex = null;
|
|
979
|
+
if (result.contradictingMemoryIndex !== null && result.contradictingMemoryIndex !== "null") {
|
|
980
|
+
const parsed = parseInt(result.contradictingMemoryIndex, 10);
|
|
981
|
+
if (!isNaN(parsed)) {
|
|
982
|
+
contradictingMemoryIndex = parsed;
|
|
959
983
|
}
|
|
960
984
|
}
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
985
|
+
if (hasContradiction && contradictingMemoryIndex !== null) {
|
|
986
|
+
const contradictingMemory = existingMemories[contradictingMemoryIndex];
|
|
987
|
+
logger4.info({ reasoning: result.reasoning }, "Contradiction detected");
|
|
988
|
+
return contradictingMemory;
|
|
989
|
+
}
|
|
990
|
+
return null;
|
|
964
991
|
} catch (error) {
|
|
965
|
-
|
|
992
|
+
logger4.error({ error }, "Failed to detect contradiction");
|
|
993
|
+
return null;
|
|
966
994
|
}
|
|
967
|
-
}
|
|
968
|
-
|
|
969
|
-
|
|
995
|
+
}
|
|
996
|
+
async searchLongTermMemories(params) {
|
|
997
|
+
const limit = params.limit || this.config.retrievalLimit;
|
|
998
|
+
const tokenBudget = params.tokenBudget || this.config.tokenBudget;
|
|
999
|
+
const minConfidence = params.minConfidence !== undefined ? params.minConfidence : this.config.minConfidence;
|
|
1000
|
+
const similarityThreshold = params.similarityThreshold !== undefined ? params.similarityThreshold : 0.3;
|
|
1001
|
+
logger4.debug({
|
|
1002
|
+
limit,
|
|
1003
|
+
tokenBudget,
|
|
1004
|
+
minConfidence,
|
|
1005
|
+
similarityThreshold,
|
|
1006
|
+
vectorSearchEnabled: this.config.enableVectorSearch,
|
|
1007
|
+
bm25Enabled: this.config.enableBM25
|
|
1008
|
+
}, "Searching long-term memories");
|
|
1009
|
+
let vectorResults = [];
|
|
1010
|
+
if (this.config.enableVectorSearch) {
|
|
1011
|
+
logger4.debug("Vector search enabled, searching...");
|
|
1012
|
+
vectorResults = await this.vectorSearch(params, similarityThreshold);
|
|
1013
|
+
}
|
|
1014
|
+
logger4.debug({ vectorResults: vectorResults.length }, "Vector search results");
|
|
1015
|
+
let bm25Results = [];
|
|
1016
|
+
if (this.config.enableBM25) {
|
|
1017
|
+
bm25Results = await this.bm25Search(params);
|
|
1018
|
+
}
|
|
1019
|
+
const mergedResults = mergeSearchResults(vectorResults, bm25Results);
|
|
1020
|
+
const confidenceFiltered = mergedResults.filter((m) => m.confidence >= minConfidence);
|
|
1021
|
+
const decayedResults = applyDecayScoring(confidenceFiltered);
|
|
1022
|
+
const sorted = decayedResults.sort((a, b) => b.finalScore - a.finalScore);
|
|
1023
|
+
const topResults = sorted.slice(0, limit);
|
|
1024
|
+
const budgetedResults = trimToTokenBudget(topResults, tokenBudget, (memory) => memory.content, 15);
|
|
1025
|
+
logger4.debug({
|
|
1026
|
+
totalResults: sorted.length,
|
|
1027
|
+
afterCountLimit: topResults.length,
|
|
1028
|
+
afterTokenBudget: budgetedResults.length,
|
|
1029
|
+
tokenBudget,
|
|
1030
|
+
estimatedTokens: budgetedResults.reduce((sum, m) => sum + estimateTokenCount(m.content) + 15, 0)
|
|
1031
|
+
}, "Applied token budget to memory retrieval");
|
|
1032
|
+
await this.longTermMemoryRepo.updateAccessMetadata(budgetedResults.map((r) => r.id));
|
|
1033
|
+
return budgetedResults;
|
|
1034
|
+
}
|
|
1035
|
+
async vectorSearch(params, similarityThreshold = 0.3) {
|
|
1036
|
+
if (!this.embeddingDimension) {
|
|
1037
|
+
logger4.warn("Embedding dimension not set, skipping vector search");
|
|
1038
|
+
return [];
|
|
1039
|
+
}
|
|
1040
|
+
try {
|
|
1041
|
+
const rawEmbedding = await generateEmbedding(this.runtime, params.query);
|
|
1042
|
+
const queryEmbedding = cleanEmbedding(rawEmbedding);
|
|
1043
|
+
if (!queryEmbedding || !Array.isArray(queryEmbedding)) {
|
|
1044
|
+
logger4.warn("Failed to generate query embedding");
|
|
1045
|
+
return [];
|
|
1046
|
+
}
|
|
1047
|
+
return await this.longTermMemoryRepo.vectorSearch(params, queryEmbedding, similarityThreshold);
|
|
1048
|
+
} catch (error) {
|
|
1049
|
+
logger4.error("Failed to execute vector search:", JSON.stringify(error));
|
|
1050
|
+
return [];
|
|
1051
|
+
}
|
|
1052
|
+
}
|
|
1053
|
+
async bm25Search(params) {
|
|
1054
|
+
if (!this.bm25Index) {
|
|
1055
|
+
return [];
|
|
1056
|
+
}
|
|
1057
|
+
try {
|
|
1058
|
+
const bm25Results = this.bm25Index.search(params.query, params.limit || 20);
|
|
1059
|
+
const memoryPromises = bm25Results.map(async (result) => {
|
|
1060
|
+
const doc = this.bm25Index.documents[result.index];
|
|
1061
|
+
if (!doc || !doc.id) {
|
|
1062
|
+
logger4.warn({ resultIndex: result.index }, "BM25 result has no document ID");
|
|
1063
|
+
return null;
|
|
1064
|
+
}
|
|
1065
|
+
const memory = await this.getLongTermMemory(doc.id);
|
|
1066
|
+
if (!memory)
|
|
1067
|
+
return null;
|
|
1068
|
+
if (memory.entityId !== params.entityId)
|
|
1069
|
+
return null;
|
|
1070
|
+
if (!params.includeInactive && !memory.isActive)
|
|
1071
|
+
return null;
|
|
1072
|
+
if (params.type && memory.type !== params.type)
|
|
1073
|
+
return null;
|
|
1074
|
+
if (params.minConfidence && memory.confidence < params.minConfidence)
|
|
1075
|
+
return null;
|
|
1076
|
+
return {
|
|
1077
|
+
...memory,
|
|
1078
|
+
relevanceScore: result.score,
|
|
1079
|
+
activationScore: 0,
|
|
1080
|
+
finalScore: 0
|
|
1081
|
+
};
|
|
1082
|
+
});
|
|
1083
|
+
const results = await Promise.all(memoryPromises);
|
|
1084
|
+
return results.filter((r) => r !== null);
|
|
1085
|
+
} catch (error) {
|
|
1086
|
+
logger4.error({ error }, "BM25 search failed");
|
|
1087
|
+
return [];
|
|
1088
|
+
}
|
|
1089
|
+
}
|
|
1090
|
+
async rebuildBM25Index() {
|
|
1091
|
+
try {
|
|
1092
|
+
const memories = await this.longTermMemoryRepo.fetchAllActive();
|
|
1093
|
+
const documents = memories.map((row) => ({
|
|
1094
|
+
id: row.id,
|
|
1095
|
+
content: row.content,
|
|
1096
|
+
embeddingContext: row.embeddingContext
|
|
1097
|
+
}));
|
|
1098
|
+
this.bm25Index = new BM25(documents, {
|
|
1099
|
+
k1: 1.2,
|
|
1100
|
+
b: 0.75,
|
|
1101
|
+
stemming: true,
|
|
1102
|
+
minLength: 2
|
|
1103
|
+
});
|
|
1104
|
+
logger4.info({ documentCount: documents.length }, "Rebuilt BM25 index");
|
|
1105
|
+
} catch (error) {
|
|
1106
|
+
logger4.error({ error }, "Failed to rebuild BM25 index");
|
|
1107
|
+
this.bm25Index = null;
|
|
1108
|
+
}
|
|
1109
|
+
}
|
|
1110
|
+
async getFormattedLongTermMemoriesForContext(entityId, query, roomId) {
|
|
1111
|
+
const memories = await this.searchLongTermMemories({
|
|
1112
|
+
entityId,
|
|
1113
|
+
query,
|
|
1114
|
+
roomId,
|
|
1115
|
+
limit: this.config.retrievalLimit
|
|
1116
|
+
});
|
|
1117
|
+
return formatMemoriesForContext(memories);
|
|
1118
|
+
}
|
|
1119
|
+
async storeSummary(summary) {
|
|
1120
|
+
let embedding;
|
|
1121
|
+
if (this.embeddingDimension) {
|
|
1122
|
+
try {
|
|
1123
|
+
const rawEmbedding = await generateEmbedding(this.runtime, summary.content);
|
|
1124
|
+
embedding = cleanEmbedding(rawEmbedding);
|
|
1125
|
+
} catch (error) {
|
|
1126
|
+
logger4.warn("Failed to generate embedding for summary:", JSON.stringify(error));
|
|
1127
|
+
}
|
|
1128
|
+
}
|
|
1129
|
+
return this.conversationSummaryRepo.insert(summary, embedding);
|
|
1130
|
+
}
|
|
1131
|
+
async getSummariesByLevel(roomId, level) {
|
|
1132
|
+
return this.conversationSummaryRepo.findByLevel(roomId, level);
|
|
1133
|
+
}
|
|
1134
|
+
async searchSummaries(params) {
|
|
1135
|
+
if (!this.embeddingDimension) {
|
|
1136
|
+
logger4.warn("Embedding dimension not set, skipping summary search");
|
|
1137
|
+
return [];
|
|
1138
|
+
}
|
|
1139
|
+
const limit = params.limit || 5;
|
|
1140
|
+
const tokenBudget = params.tokenBudget || this.config.summarization?.summaryTokenBudget || 500;
|
|
1141
|
+
try {
|
|
1142
|
+
const rawEmbedding = await generateEmbedding(this.runtime, params.query);
|
|
1143
|
+
const queryEmbedding = cleanEmbedding(rawEmbedding);
|
|
1144
|
+
if (!queryEmbedding || !Array.isArray(queryEmbedding)) {
|
|
1145
|
+
logger4.warn("Failed to generate query embedding");
|
|
1146
|
+
return [];
|
|
1147
|
+
}
|
|
1148
|
+
const summaries = await this.conversationSummaryRepo.vectorSearch(params.entityId, params.roomId, queryEmbedding, limit);
|
|
1149
|
+
const budgetedSummaries = trimToTokenBudget(summaries, tokenBudget, (s) => s.content, 10);
|
|
1150
|
+
await this.conversationSummaryRepo.updateAccessMetadata(budgetedSummaries.map((s) => s.id));
|
|
1151
|
+
return budgetedSummaries;
|
|
1152
|
+
} catch (error) {
|
|
1153
|
+
logger4.error("Failed to search summaries:", JSON.stringify(error));
|
|
1154
|
+
return [];
|
|
1155
|
+
}
|
|
1156
|
+
}
|
|
1157
|
+
async getMostRecentLevel1Summary(roomId, entityId) {
|
|
1158
|
+
try {
|
|
1159
|
+
const summaries = await this.conversationSummaryRepo.findByLevel(roomId, 1);
|
|
1160
|
+
if (summaries.length === 0) {
|
|
1161
|
+
return null;
|
|
1162
|
+
}
|
|
1163
|
+
const entitySummaries = summaries.filter((s) => s.entityId === entityId).sort((a, b) => b.endTime.getTime() - a.endTime.getTime());
|
|
1164
|
+
return entitySummaries.length > 0 ? entitySummaries[0] : null;
|
|
1165
|
+
} catch (error) {
|
|
1166
|
+
logger4.error("Failed to get most recent Level 1 summary:", JSON.stringify(error));
|
|
1167
|
+
return null;
|
|
1168
|
+
}
|
|
1169
|
+
}
|
|
1170
|
+
}
|
|
970
1171
|
|
|
971
|
-
// src/
|
|
1172
|
+
// src/evaluators/consolidation.ts
|
|
972
1173
|
import {
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
formatMessages,
|
|
976
|
-
formatPosts,
|
|
977
|
-
getEntityDetails,
|
|
978
|
-
logger as logger4
|
|
1174
|
+
logger as logger5,
|
|
1175
|
+
ModelType as ModelType3
|
|
979
1176
|
} from "@elizaos/core";
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
1177
|
+
|
|
1178
|
+
// src/prompts/consolidation.ts
|
|
1179
|
+
var CONSOLIDATION_SYSTEM_PROMPT = `You are the "Cortex" — an advanced Memory Extraction Engine.
|
|
1180
|
+
Your function is to parse conversation logs and extract persistent facts into a structured database format.
|
|
1181
|
+
|
|
1182
|
+
# CORE DIRECTIVE: "Subject-First" Extraction
|
|
1183
|
+
You must rephrase memories to focus on the *topic*, not the user. This optimizes vector retrieval.
|
|
1184
|
+
- BAD: "User likes to trade Bitcoin." (Too generic)
|
|
1185
|
+
- GOOD: "Bitcoin (BTC) is a preferred trading asset." (Topic-focused)
|
|
1186
|
+
|
|
1187
|
+
# COMPRESSION RULES (CRITICAL)
|
|
1188
|
+
1. **Aggressive Filtering**: Most user chatter is noise. If it won't be relevant in 30 days, DO NOT extract it.
|
|
1189
|
+
2. **Merge & Dedupe**: Do not create three separate memories for one topic. Combine them.
|
|
1190
|
+
- *Input:* "I like Red. I also like Blue. And Green."
|
|
1191
|
+
- *Output:* "Red, Blue, and Green are the preferred colors."
|
|
1192
|
+
3. **Conflict Resolution**: If a new fact contradicts an old one, mark 'isContradiction' as true.
|
|
1193
|
+
|
|
1194
|
+
# OUTPUT FORMAT
|
|
1195
|
+
Phase 1: [ANALYSIS]
|
|
1196
|
+
- List extracted points.
|
|
1197
|
+
- MARK items as [TRANSIENT] (Ignore) or [MERGE] (Combine).
|
|
1198
|
+
- Refine the final wording.
|
|
1199
|
+
|
|
1200
|
+
Phase 2: [MEMORIES]
|
|
1201
|
+
Format: \`MEM|TYPE|CATEGORY|CONFIDENCE|IS_CONTRADICTION|CONTENT\`
|
|
1202
|
+
|
|
1203
|
+
Types: EPISODIC, SEMANTIC, PROCEDURAL
|
|
1204
|
+
Categories: bio, health, finance, preferences, relationships, skills, work
|
|
1205
|
+
`;
|
|
1206
|
+
function buildExtractionPrompt(conversationLog) {
|
|
1207
|
+
const refDate = new Date().toISOString();
|
|
1208
|
+
return `# INPUT DATA
|
|
1209
|
+
**Reference Date:** ${refDate} (Use this to resolve relative dates like "yesterday" or "next Friday")
|
|
1210
|
+
|
|
1211
|
+
<conversation_log>
|
|
1212
|
+
${conversationLog}
|
|
1213
|
+
</conversation_log>
|
|
1214
|
+
|
|
1215
|
+
# FEW-SHOT EXAMPLES (DENSE INPUTS)
|
|
1216
|
+
|
|
1217
|
+
<example_1_finance_consolidation>
|
|
1218
|
+
Input: "Okay, market is looking bad. I'm closing my ETH long. Too risky. Also, can you check the weather in Tokyo? I might fly there. Actually, cancel all my limit orders on Solana too, I want to go all cash for the weekend."
|
|
1219
|
+
Output:
|
|
1220
|
+
[ANALYSIS]
|
|
1221
|
+
- "Market looking bad" -> Context, not memory.
|
|
1222
|
+
- "Closing ETH long" -> Actionable preference change.
|
|
1223
|
+
- "Check weather" -> [TRANSIENT] Ignore.
|
|
1224
|
+
- "Fly to Tokyo" -> [TRANSIENT] "Might" implies uncertainty. Ignore until confirmed.
|
|
1225
|
+
- "Cancel SOL orders" -> Actionable strategy.
|
|
1226
|
+
- "Go all cash" -> High-level strategy.
|
|
1227
|
+
- MERGE: Combine ETH close, SOL cancel, and Cash strategy into one record.
|
|
1228
|
+
[MEMORIES]
|
|
1229
|
+
MEM|PROCEDURAL|finance|0.95|true|Portfolio Strategy: All positions (ETH, SOL) liquidated; Cash-only stance adopted for weekend.
|
|
1230
|
+
</example_1_finance_consolidation>
|
|
1231
|
+
|
|
1232
|
+
<example_2_companion_emotional>
|
|
1233
|
+
Input: "I had a huge fight with my sister, Jenny. She's so controlling. I don't want to talk about her anymore. Let's play a game. Maybe chess? Actually no, I hate chess, it's boring. Let's do a quiz."
|
|
1234
|
+
Output:
|
|
1235
|
+
[ANALYSIS]
|
|
1236
|
+
- "Fight with sister Jenny" -> Relationship dynamic.
|
|
1237
|
+
- "She's controlling" -> Character attribute.
|
|
1238
|
+
- "Don't want to talk about her" -> Boundary/Preference.
|
|
1239
|
+
- "Play a game" -> [TRANSIENT] Immediate desire.
|
|
1240
|
+
- "Hate chess" -> Negative Preference.
|
|
1241
|
+
- "Do a quiz" -> [TRANSIENT] Immediate desire.
|
|
1242
|
+
- MERGE: Combine Jenny details. Separate Chess preference.
|
|
1243
|
+
[MEMORIES]
|
|
1244
|
+
MEM|EPISODIC|relationships|0.9|false|Jenny (sister) is characterized as controlling; currently a sensitive topic to be avoided.
|
|
1245
|
+
MEM|SEMANTIC|preferences|0.95|false|Chess is a disliked activity (described as boring).
|
|
1246
|
+
</example_2_companion_emotional>
|
|
1247
|
+
|
|
1248
|
+
<example_3_coding_stack>
|
|
1249
|
+
Input: "This node_modules folder is huge. I'm done with NPM. From now on we only use Bun for all projects. It's faster. Also, help me debug this loop. It's printing 'undefined'."
|
|
1250
|
+
Output:
|
|
1251
|
+
[ANALYSIS]
|
|
1252
|
+
- "node_modules huge" -> Rationale.
|
|
1253
|
+
- "Done with NPM" -> Deprecation.
|
|
1254
|
+
- "Use Bun" -> New Standard.
|
|
1255
|
+
- "Debug this loop" -> [TRANSIENT] Immediate task.
|
|
1256
|
+
- MERGE: Bun adoption and NPM rejection.
|
|
1257
|
+
[MEMORIES]
|
|
1258
|
+
MEM|PROCEDURAL|skills|0.95|true|Bun is the mandated package manager; NPM usage is deprecated/forbidden.
|
|
1259
|
+
</example_3_coding_stack>
|
|
1260
|
+
|
|
1261
|
+
<example_4_health_routine>
|
|
1262
|
+
Input: "I ate a burger today, felt kinda heavy. I think I'm going to start intermittent fasting. 16/8 window. Start eating at 12pm, stop at 8pm. Remind me to drink water."
|
|
1263
|
+
Output:
|
|
1264
|
+
[ANALYSIS]
|
|
1265
|
+
- "Ate a burger" -> [TRANSIENT] One-off meal.
|
|
1266
|
+
- "Felt heavy" -> [TRANSIENT] Temporary sensation.
|
|
1267
|
+
- "Start intermittent fasting" -> New Health Protocol.
|
|
1268
|
+
- "16/8 window, 12-8" -> Specific details of protocol.
|
|
1269
|
+
- "Remind me to drink water" -> [TRANSIENT] Command.
|
|
1270
|
+
- MERGE: All fasting details into one concise protocol.
|
|
1271
|
+
[MEMORIES]
|
|
1272
|
+
MEM|PROCEDURAL|health|0.9|false|Intermittent Fasting (16/8 protocol) adopted: Eating window restricted to 12pm-8pm.
|
|
1273
|
+
</example_4_health_routine>
|
|
1274
|
+
|
|
1275
|
+
<example_5_work_milestones>
|
|
1276
|
+
Input: "Meeting went well. The client, Apex Corp, agreed to the $50k budget. But they want the deadline moved to March 1st. Can you write a thank you note? Oh, and I need to update my resume."
|
|
1277
|
+
Output:
|
|
1278
|
+
[ANALYSIS]
|
|
1279
|
+
- "Meeting went well" -> [TRANSIENT] Sentiment.
|
|
1280
|
+
- "Apex Corp" -> Client Entity.
|
|
1281
|
+
- "$50k budget" -> Financial Fact.
|
|
1282
|
+
- "Deadline March 1st" -> Project Constraint.
|
|
1283
|
+
- "Write note" -> [TRANSIENT] Task.
|
|
1284
|
+
- "Update resume" -> [TRANSIENT] Generic task unless specific details given.
|
|
1285
|
+
- MERGE: Client details, budget, and deadline.
|
|
1286
|
+
[MEMORIES]
|
|
1287
|
+
MEM|SEMANTIC|work|1.0|false|Apex Corp project secured: $50k budget with March 1st deadline.
|
|
1288
|
+
</example_5_work_milestones>
|
|
1289
|
+
|
|
1290
|
+
# EXTRACTION CHECKLIST
|
|
1291
|
+
1. **Search for STATE CHANGES**: Did the user move, change jobs, break up, or alter a portfolio? These are high-value.
|
|
1292
|
+
2. **Search for HARD CONSTRAINTS**: Look for phrases like "Never do X", "Always use Y", "I hate Z".
|
|
1293
|
+
3. **COMPRESSION**:
|
|
1294
|
+
- You have received a long conversation.
|
|
1295
|
+
- **MERGE** related details into single, dense records.
|
|
1296
|
+
- **IGNORE** all small talk, greetings, and transient requests.
|
|
1297
|
+
4. **QUALITY CONTROL**: If a fact feels temporary or weak, **DO NOT** extract it. Silence is better than noise.
|
|
1298
|
+
|
|
1299
|
+
Begin the [ANALYSIS] phase now.`;
|
|
1300
|
+
}
|
|
1301
|
+
function buildContradictionPrompt(newMemoryContent, existingMemories) {
|
|
1302
|
+
return `Does this new memory contradict any existing memories?
|
|
1303
|
+
|
|
1304
|
+
New: "${newMemoryContent}"
|
|
1305
|
+
|
|
1306
|
+
Existing:
|
|
1307
|
+
${existingMemories.map((m, idx) => `${idx + 1}. "${m.content}" (confidence: ${m.confidence})`).join(`
|
|
1308
|
+
`)}
|
|
1309
|
+
|
|
1310
|
+
A contradiction means both statements cannot be true simultaneously.
|
|
1311
|
+
|
|
1312
|
+
TRUE contradictions:
|
|
1313
|
+
- "User likes blue" vs "User hates blue"
|
|
1314
|
+
- "User lives in Paris" vs "User lives in London"
|
|
1315
|
+
- "User is vegetarian" vs "User eats meat"
|
|
1316
|
+
|
|
1317
|
+
NOT contradictions:
|
|
1318
|
+
- "User likes blue" vs "User likes blue for clothes" (nuance)
|
|
1319
|
+
- "User was in Paris" vs "User moved to London" (time change)
|
|
1320
|
+
- "User likes Python" vs "User likes JavaScript" (not exclusive)
|
|
1321
|
+
|
|
1322
|
+
<response>
|
|
1323
|
+
<hasContradiction>true or false</hasContradiction>
|
|
1324
|
+
<contradictingIndex>number or null</contradictingIndex>
|
|
1325
|
+
<reasoning>Brief explanation</reasoning>
|
|
1326
|
+
</response>`;
|
|
1327
|
+
}
|
|
1328
|
+
|
|
1329
|
+
// src/evaluators/consolidation.ts
|
|
1330
|
+
var consolidationBuffers = new Map;
|
|
1331
|
+
var consolidationEvaluator = {
|
|
1332
|
+
name: "CONSOLIDATION",
|
|
1333
|
+
similes: ["MEMORY_CONSOLIDATION", "EXTRACT_FACTS", "MEMORY_BUFFER"],
|
|
1334
|
+
description: "Buffers conversation messages and performs periodic consolidation to extract persistent facts " + "using LLM analysis. Distinguishes transient intents from long-term knowledge.",
|
|
1335
|
+
validate: async (runtime, message) => {
|
|
1336
|
+
if (!consolidationBuffers.has(message.roomId)) {
|
|
1337
|
+
consolidationBuffers.set(message.roomId, []);
|
|
1338
|
+
}
|
|
1339
|
+
consolidationBuffers.get(message.roomId).push(message);
|
|
1340
|
+
const memoryService = runtime.getService("memory");
|
|
1341
|
+
if (!memoryService) {
|
|
1342
|
+
return false;
|
|
1343
|
+
}
|
|
1344
|
+
const config = memoryService.getConfig();
|
|
1345
|
+
const bufferSize = consolidationBuffers.get(message.roomId).length;
|
|
1346
|
+
const shouldConsolidate = bufferSize >= config.consolidationThreshold;
|
|
1347
|
+
if (shouldConsolidate) {
|
|
1348
|
+
logger5.info({
|
|
1349
|
+
roomId: message.roomId,
|
|
1350
|
+
bufferSize,
|
|
1351
|
+
threshold: config.consolidationThreshold
|
|
1352
|
+
}, "Consolidation threshold reached");
|
|
1353
|
+
}
|
|
1354
|
+
return shouldConsolidate;
|
|
1355
|
+
},
|
|
1356
|
+
handler: async (runtime, message) => {
|
|
1357
|
+
const roomId = message.roomId;
|
|
985
1358
|
try {
|
|
986
1359
|
const memoryService = runtime.getService("memory");
|
|
987
1360
|
if (!memoryService) {
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
values: {},
|
|
991
|
-
text: ""
|
|
992
|
-
};
|
|
1361
|
+
logger5.warn("Memory service not available for consolidation");
|
|
1362
|
+
return;
|
|
993
1363
|
}
|
|
994
|
-
const
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
tableName: "messages",
|
|
1004
|
-
roomId,
|
|
1005
|
-
count: conversationLength,
|
|
1006
|
-
unique: false
|
|
1007
|
-
})
|
|
1008
|
-
]);
|
|
1009
|
-
const actionResultMessages = recentMessagesData.filter((msg) => msg.content?.type === "action_result" && msg.metadata?.type === "action_result");
|
|
1010
|
-
const dialogueMessages = recentMessagesData.filter((msg) => !(msg.content?.type === "action_result" && msg.metadata?.type === "action_result"));
|
|
1011
|
-
const isPostFormat = room?.type ? room.type === ChannelType.FEED || room.type === ChannelType.THREAD : false;
|
|
1012
|
-
const [formattedRecentMessages, formattedRecentPosts] = await Promise.all([
|
|
1013
|
-
formatMessages({
|
|
1014
|
-
messages: dialogueMessages,
|
|
1015
|
-
entities: entitiesData
|
|
1016
|
-
}),
|
|
1017
|
-
formatPosts({
|
|
1018
|
-
messages: dialogueMessages,
|
|
1019
|
-
entities: entitiesData,
|
|
1020
|
-
conversationHeader: false
|
|
1021
|
-
})
|
|
1022
|
-
]);
|
|
1023
|
-
let actionResultsText = "";
|
|
1024
|
-
if (actionResultMessages.length > 0) {
|
|
1025
|
-
const groupedByRun = new Map;
|
|
1026
|
-
for (const mem of actionResultMessages) {
|
|
1027
|
-
const runId = String(mem.content?.runId || "unknown");
|
|
1028
|
-
if (!groupedByRun.has(runId)) {
|
|
1029
|
-
groupedByRun.set(runId, []);
|
|
1030
|
-
}
|
|
1031
|
-
groupedByRun.get(runId)?.push(mem);
|
|
1032
|
-
}
|
|
1033
|
-
const formattedActionResults = Array.from(groupedByRun.entries()).slice(-3).map(([runId, memories]) => {
|
|
1034
|
-
const sortedMemories = memories.sort((a, b) => (a.createdAt || 0) - (b.createdAt || 0));
|
|
1035
|
-
const thought = sortedMemories[0]?.content?.planThought || "";
|
|
1036
|
-
const runText = sortedMemories.map((mem) => {
|
|
1037
|
-
const actionName = mem.content?.actionName || "Unknown";
|
|
1038
|
-
const status = mem.content?.actionStatus || "unknown";
|
|
1039
|
-
const planStep = mem.content?.planStep || "";
|
|
1040
|
-
const text5 = mem.content?.text || "";
|
|
1041
|
-
const error = mem.content?.error || "";
|
|
1042
|
-
let memText = ` - ${actionName} (${status})`;
|
|
1043
|
-
if (planStep)
|
|
1044
|
-
memText += ` [${planStep}]`;
|
|
1045
|
-
if (error) {
|
|
1046
|
-
memText += `: Error - ${error}`;
|
|
1047
|
-
} else if (text5 && text5 !== `Executed action: ${actionName}`) {
|
|
1048
|
-
memText += `: ${text5}`;
|
|
1049
|
-
}
|
|
1050
|
-
return memText;
|
|
1051
|
-
}).join(`
|
|
1364
|
+
const buffer = consolidationBuffers.get(roomId) || [];
|
|
1365
|
+
if (buffer.length === 0) {
|
|
1366
|
+
return;
|
|
1367
|
+
}
|
|
1368
|
+
logger5.info({ roomId, messageCount: buffer.length }, "Starting memory consolidation");
|
|
1369
|
+
const conversationLog = buffer.map((m, idx) => {
|
|
1370
|
+
const content = typeof m.content === "string" ? m.content : m.content.text || JSON.stringify(m.content);
|
|
1371
|
+
return `[${idx + 1}] ${m.entityId}: ${content}`;
|
|
1372
|
+
}).join(`
|
|
1052
1373
|
`);
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1374
|
+
const extractionPrompt = buildExtractionPrompt(conversationLog);
|
|
1375
|
+
const originalSystemPrompt = runtime.character.system;
|
|
1376
|
+
try {
|
|
1377
|
+
runtime.character.system = CONSOLIDATION_SYSTEM_PROMPT;
|
|
1378
|
+
logger5.debug("Calling LLM for memory extraction");
|
|
1379
|
+
const response = await runtime.useModel(ModelType3.TEXT_LARGE, {
|
|
1380
|
+
prompt: extractionPrompt,
|
|
1381
|
+
temperature: 0.3
|
|
1382
|
+
});
|
|
1383
|
+
const consolidationResult = parseConsolidationResult(response);
|
|
1384
|
+
logger5.debug({
|
|
1385
|
+
reasoning: consolidationResult.reasoningTrace,
|
|
1386
|
+
extracted: consolidationResult.extractedMemories.length
|
|
1387
|
+
}, "Extraction complete");
|
|
1388
|
+
const config = memoryService.getConfig();
|
|
1389
|
+
const validMemories = consolidationResult.extractedMemories.filter((m) => m.confidence >= config.minConfidence);
|
|
1390
|
+
logger5.info({
|
|
1391
|
+
total: consolidationResult.extractedMemories.length,
|
|
1392
|
+
valid: validMemories.length,
|
|
1393
|
+
minConfidence: config.minConfidence
|
|
1394
|
+
}, "Filtered extracted memories");
|
|
1395
|
+
for (const extracted of validMemories) {
|
|
1396
|
+
await storeExtractedMemory(runtime, memoryService, roomId, buffer, extracted);
|
|
1397
|
+
}
|
|
1398
|
+
consolidationBuffers.set(roomId, []);
|
|
1399
|
+
logger5.info({ roomId, stored: validMemories.length }, "Memory consolidation complete");
|
|
1400
|
+
} finally {
|
|
1401
|
+
runtime.character.system = originalSystemPrompt;
|
|
1402
|
+
}
|
|
1403
|
+
} catch (error) {
|
|
1404
|
+
logger5.error({ error, roomId }, "Consolidation evaluator failed");
|
|
1405
|
+
}
|
|
1406
|
+
},
|
|
1407
|
+
examples: []
|
|
1408
|
+
};
|
|
1409
|
+
function parseConsolidationResult(response) {
|
|
1410
|
+
try {
|
|
1411
|
+
const responseText = typeof response === "string" ? response : JSON.stringify(response);
|
|
1412
|
+
const analysisMatch = responseText.match(/\[ANALYSIS\](.*?)(?:\[MEMORIES\]|$)/s);
|
|
1413
|
+
const reasoningTrace = analysisMatch ? analysisMatch[1].trim() : "";
|
|
1414
|
+
const memoriesMatch = responseText.match(/\[MEMORIES\](.*?)$/s);
|
|
1415
|
+
const memoriesText = memoriesMatch ? memoriesMatch[1].trim() : "";
|
|
1416
|
+
const extractedMemories = [];
|
|
1417
|
+
if (memoriesText) {
|
|
1418
|
+
const lines = memoriesText.split(`
|
|
1057
1419
|
`);
|
|
1058
|
-
|
|
1420
|
+
for (const line of lines) {
|
|
1421
|
+
const trimmedLine = line.trim();
|
|
1422
|
+
if (!trimmedLine || !trimmedLine.startsWith("MEM|")) {
|
|
1423
|
+
continue;
|
|
1059
1424
|
}
|
|
1060
|
-
const
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
data: {
|
|
1065
|
-
summaries: [],
|
|
1066
|
-
recentMessages: [],
|
|
1067
|
-
actionResults: [],
|
|
1068
|
-
mode: "full_conversation"
|
|
1069
|
-
},
|
|
1070
|
-
values: {
|
|
1071
|
-
recentMessage: "No recent message available."
|
|
1072
|
-
},
|
|
1073
|
-
text: "No recent messages available"
|
|
1074
|
-
};
|
|
1425
|
+
const parts = trimmedLine.split("|");
|
|
1426
|
+
if (parts.length < 6) {
|
|
1427
|
+
logger5.warn({ line: trimmedLine }, "Skipping malformed memory line (insufficient fields)");
|
|
1428
|
+
continue;
|
|
1075
1429
|
}
|
|
1076
|
-
|
|
1077
|
-
|
|
1078
|
-
|
|
1079
|
-
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
});
|
|
1083
|
-
|
|
1084
|
-
recentMessage = formattedSingleMessage;
|
|
1085
|
-
}
|
|
1430
|
+
const type = parts[1].trim();
|
|
1431
|
+
const category = parts[2].trim();
|
|
1432
|
+
const confidenceStr = parts[3].trim();
|
|
1433
|
+
const isContradictionStr = parts[4].trim();
|
|
1434
|
+
const content = parts.slice(5).join("|").trim();
|
|
1435
|
+
if (!["EPISODIC", "SEMANTIC", "PROCEDURAL"].includes(type)) {
|
|
1436
|
+
logger5.warn({ type, line: trimmedLine }, "Invalid memory type");
|
|
1437
|
+
continue;
|
|
1086
1438
|
}
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
|
|
1090
|
-
|
|
1091
|
-
const
|
|
1092
|
-
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
summaries: [],
|
|
1104
|
-
recentMessages: dialogueMessages,
|
|
1105
|
-
actionResults: actionResultMessages,
|
|
1106
|
-
mode: "full_conversation"
|
|
1107
|
-
},
|
|
1108
|
-
values: {
|
|
1109
|
-
...(isPostFormat ? recentPosts : recentMessages) && {
|
|
1110
|
-
recentMessages: isPostFormat ? recentPosts : recentMessages
|
|
1111
|
-
},
|
|
1112
|
-
...recentPosts && { recentPosts },
|
|
1113
|
-
...actionResultsText && { recentActionResults: actionResultsText },
|
|
1114
|
-
...recentMessage && { recentMessage },
|
|
1115
|
-
...receivedMessageHeader && { receivedMessageHeader },
|
|
1116
|
-
...focusHeader && { focusHeader }
|
|
1117
|
-
},
|
|
1118
|
-
text: text4
|
|
1439
|
+
if (!category) {
|
|
1440
|
+
logger5.warn({ line: trimmedLine }, "Missing category");
|
|
1441
|
+
continue;
|
|
1442
|
+
}
|
|
1443
|
+
const confidence = parseFloat(confidenceStr);
|
|
1444
|
+
if (isNaN(confidence) || confidence < 0 || confidence > 1) {
|
|
1445
|
+
logger5.warn({ confidenceStr, line: trimmedLine }, "Invalid confidence value");
|
|
1446
|
+
continue;
|
|
1447
|
+
}
|
|
1448
|
+
const isContradiction = isContradictionStr.toLowerCase() === "true";
|
|
1449
|
+
if (!content) {
|
|
1450
|
+
logger5.warn({ line: trimmedLine }, "Missing content");
|
|
1451
|
+
continue;
|
|
1452
|
+
}
|
|
1453
|
+
const metadata = {
|
|
1454
|
+
category
|
|
1119
1455
|
};
|
|
1120
|
-
|
|
1121
|
-
|
|
1122
|
-
|
|
1123
|
-
|
|
1124
|
-
|
|
1125
|
-
|
|
1126
|
-
count: config.shortTermRetainRecent,
|
|
1127
|
-
unique: false,
|
|
1128
|
-
start: lastOffset
|
|
1456
|
+
extractedMemories.push({
|
|
1457
|
+
type,
|
|
1458
|
+
content,
|
|
1459
|
+
confidence,
|
|
1460
|
+
isContradiction,
|
|
1461
|
+
metadata
|
|
1129
1462
|
});
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1463
|
+
}
|
|
1464
|
+
}
|
|
1465
|
+
return {
|
|
1466
|
+
reasoningTrace,
|
|
1467
|
+
transientSummary: "",
|
|
1468
|
+
extractedMemories
|
|
1469
|
+
};
|
|
1470
|
+
} catch (error) {
|
|
1471
|
+
logger5.error({ error }, "Failed to parse consolidation response");
|
|
1472
|
+
return {
|
|
1473
|
+
reasoningTrace: "Parse error",
|
|
1474
|
+
transientSummary: "",
|
|
1475
|
+
extractedMemories: []
|
|
1476
|
+
};
|
|
1477
|
+
}
|
|
1478
|
+
}
|
|
1479
|
+
async function storeExtractedMemory(runtime, memoryService, roomId, buffer, extracted) {
|
|
1480
|
+
const entityId = buffer[0]?.entityId || runtime.agentId;
|
|
1481
|
+
const embeddingContext = generateContextualString(extracted);
|
|
1482
|
+
const config = memoryService.getConfig();
|
|
1483
|
+
const decayRate = config.defaultDecayRates[extracted.type];
|
|
1484
|
+
const memoryData = {
|
|
1485
|
+
agentId: runtime.agentId,
|
|
1486
|
+
entityId,
|
|
1487
|
+
roomId,
|
|
1488
|
+
type: extracted.type,
|
|
1489
|
+
content: extracted.content,
|
|
1490
|
+
embeddingContext,
|
|
1491
|
+
confidence: extracted.confidence,
|
|
1492
|
+
decayRate,
|
|
1493
|
+
decayFunction: "EXPONENTIAL" /* EXPONENTIAL */,
|
|
1494
|
+
source: {
|
|
1495
|
+
sessionId: roomId,
|
|
1496
|
+
messageId: extracted.sourceMessageId,
|
|
1497
|
+
textSnippet: extracted.content.substring(0, 200)
|
|
1498
|
+
},
|
|
1499
|
+
metadata: extracted.metadata || {}
|
|
1500
|
+
};
|
|
1501
|
+
if (config.enableContradictionDetection && extracted.isContradiction) {
|
|
1502
|
+
await memoryService.handleContradiction(entityId, memoryData);
|
|
1503
|
+
} else {
|
|
1504
|
+
await memoryService.storeLongTermMemory(memoryData);
|
|
1505
|
+
}
|
|
1506
|
+
}
|
|
1507
|
+
function generateContextualString(extracted) {
|
|
1508
|
+
const typeLabel = {
|
|
1509
|
+
["EPISODIC" /* EPISODIC */]: "Event",
|
|
1510
|
+
["SEMANTIC" /* SEMANTIC */]: "Fact",
|
|
1511
|
+
["PROCEDURAL" /* PROCEDURAL */]: "Skill"
|
|
1512
|
+
}[extracted.type];
|
|
1513
|
+
const category = extracted.metadata?.category || "general";
|
|
1514
|
+
return `[${typeLabel} about ${category}]: ${extracted.content}`;
|
|
1515
|
+
}
|
|
1516
|
+
|
|
1517
|
+
// src/evaluators/summarization.ts
|
|
1518
|
+
import {
|
|
1519
|
+
logger as logger6,
|
|
1520
|
+
ModelType as ModelType4
|
|
1521
|
+
} from "@elizaos/core";
|
|
1522
|
+
|
|
1523
|
+
// src/prompts/summarization.ts
|
|
1524
|
+
var SUMMARIZATION_SYSTEM_PROMPT = `You are "Chronos", a master summarizer.
|
|
1525
|
+
Your function is to condense conversation logs into concise, subject-first narrative summaries.
|
|
1526
|
+
|
|
1527
|
+
# CORE DIRECTIVE: "Subject-First" Summarization
|
|
1528
|
+
You must rephrase the narrative to focus on the *topic*, not the user. This optimizes vector retrieval.
|
|
1529
|
+
- BAD: "User asked about Python." (Too generic)
|
|
1530
|
+
- GOOD: "Python programming inquiries were addressed." (Topic-focused)
|
|
1531
|
+
|
|
1532
|
+
# COMPRESSION RULES
|
|
1533
|
+
1. **Be Concise**: Target 2-4 sentences. Maximum 100 words.
|
|
1534
|
+
2. **Be Factual**: No interpretation, no speculation. Only what actually happened.
|
|
1535
|
+
3. **Be Narrative**: Write as a story, not a bullet list.
|
|
1536
|
+
4. **Preserve Key Facts**: If the user revealed important information (preferences, identity, needs), include it.
|
|
1537
|
+
5. **Exclude Trivia**: Skip greetings, acknowledgments, and filler conversation.
|
|
1538
|
+
|
|
1539
|
+
# OUTPUT FORMAT
|
|
1540
|
+
Phase 1: [ANALYSIS]
|
|
1541
|
+
- Identify key topics.
|
|
1542
|
+
- Draft the summary.
|
|
1543
|
+
- Refine wording to be subject-first.
|
|
1544
|
+
|
|
1545
|
+
Phase 2: [RESULT]
|
|
1546
|
+
Format: \`SUMM|TAGS|CONTENT\`
|
|
1547
|
+
- TAGS: Comma-separated list of key topics (lowercase)
|
|
1548
|
+
- CONTENT: The narrative summary text (must be a single line, no newlines)
|
|
1157
1549
|
`;
|
|
1158
|
-
|
|
1159
|
-
|
|
1160
|
-
|
|
1161
|
-
|
|
1550
|
+
function buildLevel1SummaryPrompt(formattedMessages, previousSummary) {
|
|
1551
|
+
const previousContext = previousSummary ? `
|
|
1552
|
+
# PREVIOUS SUMMARY
|
|
1553
|
+
<previous_summary>
|
|
1554
|
+
${previousSummary}
|
|
1555
|
+
</previous_summary>
|
|
1556
|
+
|
|
1557
|
+
**Note**: The above is what was discussed before. Ensure your new summary:
|
|
1558
|
+
- Does not duplicate information already captured in the previous summary
|
|
1559
|
+
- Focuses only on the NEW conversation below
|
|
1560
|
+
- Maintains continuity (e.g., "Conversation continued with...")
|
|
1561
|
+
` : "";
|
|
1562
|
+
return `${previousContext}
|
|
1563
|
+
# INPUT DATA
|
|
1564
|
+
<messages>
|
|
1565
|
+
${formattedMessages}
|
|
1566
|
+
</messages>
|
|
1567
|
+
|
|
1568
|
+
# FEW-SHOT EXAMPLES
|
|
1569
|
+
|
|
1570
|
+
<example_1_task_oriented>
|
|
1571
|
+
Input:
|
|
1572
|
+
[Message 1] User: I need help deploying my React app to Vercel
|
|
1573
|
+
[Message 2] Agent: Sure! First, make sure you have the Vercel CLI installed...
|
|
1574
|
+
[Message 3] User: Done. What's next?
|
|
1575
|
+
[Message 4] Agent: Run 'vercel' in your project directory...
|
|
1576
|
+
[Message 5] User: It worked! Thanks!
|
|
1577
|
+
|
|
1578
|
+
Output:
|
|
1579
|
+
[ANALYSIS]
|
|
1580
|
+
- Topic: React app deployment to Vercel.
|
|
1581
|
+
- Action: CLI installation and deployment.
|
|
1582
|
+
- Outcome: Success.
|
|
1583
|
+
- Draft: User requested help... -> React app deployment...
|
|
1584
|
+
[RESULT]
|
|
1585
|
+
SUMM|deployment,react,vercel,cli|React app deployment to Vercel was successfully completed following CLI installation and configuration guidance.
|
|
1586
|
+
</example_1_task_oriented>
|
|
1587
|
+
|
|
1588
|
+
<example_2_identity_revelation>
|
|
1589
|
+
Input:
|
|
1590
|
+
[Message 1] User: I'm working on a side project in my spare time
|
|
1591
|
+
[Message 2] Agent: That's great! What kind of project?
|
|
1592
|
+
[Message 3] User: A machine learning app for analyzing stock data. I'm a data scientist by profession.
|
|
1593
|
+
[Message 4] Agent: Interesting! Are you using Python?
|
|
1594
|
+
[Message 5] User: Yeah, mostly PyTorch and pandas.
|
|
1595
|
+
|
|
1596
|
+
Output:
|
|
1597
|
+
[ANALYSIS]
|
|
1598
|
+
- Identity: Data scientist.
|
|
1599
|
+
- Project: Stock data analysis (ML).
|
|
1600
|
+
- Tech Stack: Python, PyTorch, pandas.
|
|
1601
|
+
- Draft: User discussed... -> Stock data analysis project...
|
|
1602
|
+
[RESULT]
|
|
1603
|
+
SUMM|machine learning,data science,python,stocks|Stock data analysis project (Machine Learning) utilizing Python (PyTorch, pandas) is in development by a data scientist.
|
|
1604
|
+
</example_2_identity_revelation>
|
|
1605
|
+
|
|
1606
|
+
<example_3_chitchat>
|
|
1607
|
+
Input:
|
|
1608
|
+
[Message 1] User: Hey, how's it going?
|
|
1609
|
+
[Message 2] Agent: I'm doing well, thanks for asking! How can I help you today?
|
|
1610
|
+
[Message 3] User: Just saying hi
|
|
1611
|
+
[Message 4] Agent: Nice to hear from you!
|
|
1612
|
+
|
|
1613
|
+
Output:
|
|
1614
|
+
[ANALYSIS]
|
|
1615
|
+
- Content: Greetings only.
|
|
1616
|
+
- Substance: None.
|
|
1617
|
+
[RESULT]
|
|
1618
|
+
SUMM|greeting,casual|Casual greeting exchanged; no substantive topics discussed.
|
|
1619
|
+
</example_3_chitchat>
|
|
1620
|
+
|
|
1621
|
+
Begin the [ANALYSIS] phase now.`;
|
|
1622
|
+
}
|
|
1623
|
+
var HIGHER_LEVEL_SUMMARIZATION_SYSTEM_PROMPT = `You are "Chronos", a Meta-Summarization Agent.
|
|
1624
|
+
Your task is to compress multiple conversation summaries into a single, higher-level summary.
|
|
1625
|
+
|
|
1626
|
+
# MISSION
|
|
1627
|
+
Transform a list of conversation summaries into one concise meta-summary that captures:
|
|
1628
|
+
1. **Overarching themes** across the summaries
|
|
1629
|
+
2. **Key events or milestones** (e.g., "User onboarded", "Project completed")
|
|
1630
|
+
3. **Evolving context** (e.g., "User's preferences shifted from X to Y")
|
|
1631
|
+
|
|
1632
|
+
# RULES
|
|
1633
|
+
- **Subject-First**: Focus on the topic, not the user.
|
|
1634
|
+
- **Abstract Higher**: Don't repeat specifics from each summary. Find the pattern.
|
|
1635
|
+
- **Chronological Flow**: Maintain temporal order if it matters.
|
|
1636
|
+
- **Preserve Critical Facts**: If summaries mention important identity or preferences, keep them.
|
|
1637
|
+
|
|
1638
|
+
# OUTPUT FORMAT
|
|
1639
|
+
Phase 1: [ANALYSIS]
|
|
1640
|
+
- Identify themes and milestones.
|
|
1641
|
+
- Combine related points.
|
|
1642
|
+
- Refine to subject-first.
|
|
1643
|
+
|
|
1644
|
+
Phase 2: [RESULT]
|
|
1645
|
+
Format: \`SUMM|TAGS|CONTENT\`
|
|
1646
|
+
- TAGS: Comma-separated list of key topics (lowercase)
|
|
1647
|
+
- CONTENT: The meta-summary text (must be a single line, no newlines)
|
|
1648
|
+
`;
|
|
1649
|
+
function buildHigherLevelSummaryPrompt(formattedSummaries) {
|
|
1650
|
+
return `# INPUT DATA
|
|
1651
|
+
<summaries>
|
|
1652
|
+
${formattedSummaries}
|
|
1653
|
+
</summaries>
|
|
1654
|
+
|
|
1655
|
+
# FEW-SHOT EXAMPLES
|
|
1656
|
+
|
|
1657
|
+
<example_1_meta_summary>
|
|
1658
|
+
Input:
|
|
1659
|
+
<summary1>Stock data analysis project (Machine Learning) utilizing Python (PyTorch, pandas) is in development by a data scientist.</summary1>
|
|
1660
|
+
<summary2>Flask and Vercel were suggested for ML model deployment to a web app.</summary2>
|
|
1661
|
+
<summary3>CORS configuration issues were resolved, leading to successful deployment.</summary3>
|
|
1662
|
+
|
|
1663
|
+
Output:
|
|
1664
|
+
[ANALYSIS]
|
|
1665
|
+
- Theme: ML App Development & Deployment.
|
|
1666
|
+
- Flow: Development -> Stack Choice -> Deployment -> Troubleshooting -> Success.
|
|
1667
|
+
- Draft: User, a data scientist... -> Machine learning stock analysis app...
|
|
1668
|
+
[RESULT]
|
|
1669
|
+
SUMM|data science,machine learning,python,deployment,flask,vercel|Machine learning stock analysis app (Python/PyTorch) was developed and successfully deployed to Vercel using Flask after resolving CORS configuration issues.
|
|
1670
|
+
</example_1_meta_summary>
|
|
1671
|
+
|
|
1672
|
+
Begin the [ANALYSIS] phase now.`;
|
|
1673
|
+
}
|
|
1674
|
+
function estimateTokensInSummary(text3) {
|
|
1675
|
+
return Math.ceil(text3.length / 4);
|
|
1676
|
+
}
|
|
1677
|
+
|
|
1678
|
+
// src/evaluators/summarization.ts
|
|
1679
|
+
var summarizationCounters = new Map;
|
|
1680
|
+
var summarizationEvaluator = {
|
|
1681
|
+
name: "SUMMARIZATION",
|
|
1682
|
+
similes: ["HIERARCHICAL_SUMMARIZATION", "EPISODIC_COMPRESSION", "CONVERSATION_SUMMARY"],
|
|
1683
|
+
description: "Hierarchical conversation summarization that compresses message history into multi-level narrative summaries " + "for token-efficient long-term episodic memory.",
|
|
1684
|
+
validate: async (runtime, message) => {
|
|
1685
|
+
const memoryService = runtime.getService("memory");
|
|
1686
|
+
if (!memoryService) {
|
|
1687
|
+
return false;
|
|
1688
|
+
}
|
|
1689
|
+
const config = memoryService.getConfig();
|
|
1690
|
+
if (!config.summarization?.enabled) {
|
|
1691
|
+
logger6.debug("Summarization is not enabled");
|
|
1692
|
+
return false;
|
|
1693
|
+
}
|
|
1694
|
+
if (!summarizationCounters.has(message.roomId)) {
|
|
1695
|
+
summarizationCounters.set(message.roomId, 0);
|
|
1696
|
+
}
|
|
1697
|
+
const currentCount = summarizationCounters.get(message.roomId) + 1;
|
|
1698
|
+
summarizationCounters.set(message.roomId, currentCount);
|
|
1699
|
+
const threshold = config.summarization.messagesPerSummary;
|
|
1700
|
+
logger6.debug({
|
|
1701
|
+
currentCount,
|
|
1702
|
+
threshold,
|
|
1703
|
+
messageId: message.id,
|
|
1704
|
+
entityId: message.entityId,
|
|
1705
|
+
roomId: message.roomId
|
|
1706
|
+
}, "Message counted for summarization (user + agent messages)");
|
|
1707
|
+
const shouldSummarize = currentCount >= threshold;
|
|
1708
|
+
if (shouldSummarize) {
|
|
1709
|
+
logger6.info({
|
|
1710
|
+
roomId: message.roomId,
|
|
1711
|
+
messageCount: currentCount,
|
|
1712
|
+
threshold
|
|
1713
|
+
}, "Summarization threshold reached (Level 1) - triggering summarization");
|
|
1714
|
+
}
|
|
1715
|
+
return shouldSummarize;
|
|
1716
|
+
},
|
|
1717
|
+
handler: async (runtime, message) => {
|
|
1718
|
+
const memoryService = runtime.getService("memory");
|
|
1719
|
+
if (!memoryService) {
|
|
1720
|
+
logger6.warn("MemoryService not available for summarization");
|
|
1721
|
+
return;
|
|
1722
|
+
}
|
|
1723
|
+
const config = memoryService.getConfig();
|
|
1724
|
+
if (!config.summarization?.enabled) {
|
|
1725
|
+
return;
|
|
1726
|
+
}
|
|
1727
|
+
const messageCount = summarizationCounters.get(message.roomId) || 0;
|
|
1728
|
+
if (messageCount === 0) {
|
|
1729
|
+
return;
|
|
1730
|
+
}
|
|
1731
|
+
logger6.info({
|
|
1732
|
+
roomId: message.roomId,
|
|
1733
|
+
messageCount
|
|
1734
|
+
}, "Starting Level 1 summarization - pulling messages from database");
|
|
1735
|
+
try {
|
|
1736
|
+
const previousSummary = await memoryService.getMostRecentLevel1Summary(message.roomId, message.entityId);
|
|
1737
|
+
const messages = await fetchMessagesSinceLastSummary(runtime, message.roomId, previousSummary?.endTime);
|
|
1738
|
+
if (messages.length === 0) {
|
|
1739
|
+
logger6.warn({ roomId: message.roomId }, "No messages found for summarization");
|
|
1740
|
+
return;
|
|
1741
|
+
}
|
|
1742
|
+
const summary = await createLevel1Summary(runtime, memoryService, messages, message.roomId, previousSummary);
|
|
1743
|
+
if (summary) {
|
|
1744
|
+
logger6.info({
|
|
1745
|
+
summaryId: summary.id,
|
|
1746
|
+
tokenCount: summary.tokenCount,
|
|
1747
|
+
messageCount: messages.length,
|
|
1748
|
+
startTime: summary.startTime,
|
|
1749
|
+
endTime: summary.endTime
|
|
1750
|
+
}, "Created Level 1 summary from database messages");
|
|
1751
|
+
summarizationCounters.set(message.roomId, 0);
|
|
1752
|
+
await checkAndTriggerHigherLevelSummarization(runtime, memoryService, summary.roomId, summary.level);
|
|
1753
|
+
}
|
|
1754
|
+
} catch (error) {
|
|
1755
|
+
logger6.error({
|
|
1756
|
+
error,
|
|
1757
|
+
roomId: message.roomId
|
|
1758
|
+
}, "Failed to create summary");
|
|
1759
|
+
}
|
|
1760
|
+
},
|
|
1761
|
+
examples: []
|
|
1762
|
+
};
|
|
1763
|
+
async function fetchMessagesSinceLastSummary(runtime, roomId, lastSummaryEndTime) {
|
|
1764
|
+
const allMessages = await runtime.getMemories({
|
|
1765
|
+
tableName: "messages",
|
|
1766
|
+
roomId,
|
|
1767
|
+
count: 1000,
|
|
1768
|
+
unique: false
|
|
1769
|
+
});
|
|
1770
|
+
const dialogueMessages = allMessages.filter((msg) => !(msg.content?.type === "action_result" && msg.metadata?.type === "action_result"));
|
|
1771
|
+
let messages = dialogueMessages;
|
|
1772
|
+
if (lastSummaryEndTime) {
|
|
1773
|
+
messages = dialogueMessages.filter((msg) => {
|
|
1774
|
+
if (!msg.createdAt)
|
|
1775
|
+
return false;
|
|
1776
|
+
const msgTime = new Date(msg.createdAt);
|
|
1777
|
+
return msgTime > lastSummaryEndTime;
|
|
1778
|
+
});
|
|
1779
|
+
}
|
|
1780
|
+
messages.sort((a, b) => (a.createdAt || 0) - (b.createdAt || 0));
|
|
1781
|
+
logger6.debug({
|
|
1782
|
+
roomId,
|
|
1783
|
+
totalMessages: allMessages.length,
|
|
1784
|
+
dialogueMessages: dialogueMessages.length,
|
|
1785
|
+
filteredMessages: messages.length,
|
|
1786
|
+
lastSummaryEndTime: lastSummaryEndTime?.toISOString()
|
|
1787
|
+
}, "Fetched messages for summarization");
|
|
1788
|
+
return messages;
|
|
1789
|
+
}
|
|
1790
|
+
function parseSummarizationResult(response) {
|
|
1791
|
+
try {
|
|
1792
|
+
const responseText = typeof response === "string" ? response : JSON.stringify(response);
|
|
1793
|
+
const analysisMatch = responseText.match(/\[ANALYSIS\](.*?)(?:\[RESULT\]|$)/s);
|
|
1794
|
+
const reasoningTrace = analysisMatch ? analysisMatch[1].trim() : "";
|
|
1795
|
+
const resultMatch = responseText.match(/\[RESULT\](.*?)$/s);
|
|
1796
|
+
const resultText = resultMatch ? resultMatch[1].trim() : "";
|
|
1797
|
+
let summary = "";
|
|
1798
|
+
let keyTopics = "";
|
|
1799
|
+
if (resultText) {
|
|
1800
|
+
const lines = resultText.split(`
|
|
1801
|
+
`);
|
|
1802
|
+
for (const line of lines) {
|
|
1803
|
+
const trimmed = line.trim();
|
|
1804
|
+
if (trimmed.startsWith("SUMM|")) {
|
|
1805
|
+
const parts = trimmed.split("|");
|
|
1806
|
+
if (parts.length >= 3) {
|
|
1807
|
+
keyTopics = parts[1].trim();
|
|
1808
|
+
summary = parts.slice(2).join("|").trim();
|
|
1809
|
+
break;
|
|
1162
1810
|
}
|
|
1163
|
-
summaryText = addHeader("# Conversation Summary", summaryText);
|
|
1164
1811
|
}
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
|
|
1175
|
-
|
|
1176
|
-
|
|
1177
|
-
|
|
1812
|
+
}
|
|
1813
|
+
}
|
|
1814
|
+
return { summary, keyTopics, reasoningTrace };
|
|
1815
|
+
} catch (error) {
|
|
1816
|
+
logger6.error({ error }, "Failed to parse summarization response");
|
|
1817
|
+
return { summary: "", keyTopics: "", reasoningTrace: "" };
|
|
1818
|
+
}
|
|
1819
|
+
}
|
|
1820
|
+
async function createLevel1Summary(runtime, memoryService, messages, roomId, previousSummary) {
|
|
1821
|
+
const formattedMessages = messages.map((m, i) => {
|
|
1822
|
+
const text3 = typeof m.content === "string" ? m.content : m.content.text || "";
|
|
1823
|
+
const author = m.entityId === runtime.agentId ? "Agent" : "User";
|
|
1824
|
+
const timestamp3 = m.createdAt ? new Date(m.createdAt).toISOString() : "Unknown time";
|
|
1825
|
+
return `[${timestamp3}] ${author}: ${text3}`;
|
|
1826
|
+
}).join(`
|
|
1178
1827
|
`);
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1828
|
+
let prompt;
|
|
1829
|
+
if (previousSummary) {
|
|
1830
|
+
prompt = buildLevel1SummaryPrompt(formattedMessages, previousSummary.content);
|
|
1831
|
+
} else {
|
|
1832
|
+
prompt = buildLevel1SummaryPrompt(formattedMessages);
|
|
1833
|
+
}
|
|
1834
|
+
const originalSystemPrompt = runtime.character.system;
|
|
1835
|
+
try {
|
|
1836
|
+
runtime.character.system = SUMMARIZATION_SYSTEM_PROMPT;
|
|
1837
|
+
logger6.debug("Calling LLM for Level 1 summarization with timestamped conversation log");
|
|
1838
|
+
const response = await runtime.useModel(ModelType4.TEXT_LARGE, {
|
|
1839
|
+
prompt,
|
|
1840
|
+
temperature: 0.3
|
|
1841
|
+
});
|
|
1842
|
+
runtime.character.system = originalSystemPrompt;
|
|
1843
|
+
const { summary, keyTopics, reasoningTrace } = parseSummarizationResult(response);
|
|
1844
|
+
if (!summary) {
|
|
1845
|
+
logger6.warn({ reasoningTrace }, "No summary extracted from LLM response");
|
|
1846
|
+
return null;
|
|
1847
|
+
}
|
|
1848
|
+
const tokenCount = estimateTokensInSummary(summary);
|
|
1849
|
+
const startTime = messages[0].createdAt ? new Date(messages[0].createdAt) : new Date;
|
|
1850
|
+
const endTime = messages[messages.length - 1].createdAt ? new Date(messages[messages.length - 1].createdAt) : new Date;
|
|
1851
|
+
const summaryData = {
|
|
1852
|
+
agentId: runtime.agentId,
|
|
1853
|
+
entityId: messages[0].entityId,
|
|
1854
|
+
roomId,
|
|
1855
|
+
level: 1,
|
|
1856
|
+
parentSummaryId: undefined,
|
|
1857
|
+
content: summary,
|
|
1858
|
+
tokenCount,
|
|
1859
|
+
startTime,
|
|
1860
|
+
endTime,
|
|
1861
|
+
sourceCount: messages.length,
|
|
1862
|
+
sourceIds: messages.map((m) => m.id),
|
|
1863
|
+
metadata: {
|
|
1864
|
+
keyTopics,
|
|
1865
|
+
hasPreviousSummary: !!previousSummary
|
|
1193
1866
|
}
|
|
1194
|
-
}
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1867
|
+
};
|
|
1868
|
+
return await memoryService.storeSummary(summaryData);
|
|
1869
|
+
} catch (error) {
|
|
1870
|
+
logger6.error({ error }, "Failed to call LLM for Level 1 summarization");
|
|
1871
|
+
runtime.character.system = originalSystemPrompt;
|
|
1872
|
+
return null;
|
|
1873
|
+
}
|
|
1874
|
+
}
|
|
1875
|
+
async function checkAndTriggerHigherLevelSummarization(runtime, memoryService, roomId, currentLevel) {
|
|
1876
|
+
const config = memoryService.getConfig();
|
|
1877
|
+
if (!config.summarization) {
|
|
1878
|
+
return;
|
|
1879
|
+
}
|
|
1880
|
+
if (currentLevel >= config.summarization.maxDepth) {
|
|
1881
|
+
logger6.debug({ currentLevel, maxDepth: config.summarization.maxDepth }, "Max depth reached");
|
|
1882
|
+
return;
|
|
1883
|
+
}
|
|
1884
|
+
const summaries = await memoryService.getSummariesByLevel(roomId, currentLevel);
|
|
1885
|
+
const threshold = config.summarization.summariesPerLevel;
|
|
1886
|
+
if (summaries.length < threshold) {
|
|
1887
|
+
return;
|
|
1888
|
+
}
|
|
1889
|
+
logger6.info({
|
|
1890
|
+
level: currentLevel,
|
|
1891
|
+
count: summaries.length,
|
|
1892
|
+
nextLevel: currentLevel + 1
|
|
1893
|
+
}, `Triggering Level ${currentLevel + 1} summarization`);
|
|
1894
|
+
const higherSummary = await createHigherLevelSummary(runtime, memoryService, summaries, roomId, currentLevel + 1);
|
|
1895
|
+
if (higherSummary) {
|
|
1896
|
+
await checkAndTriggerHigherLevelSummarization(runtime, memoryService, roomId, currentLevel + 1);
|
|
1897
|
+
}
|
|
1898
|
+
}
|
|
1899
|
+
async function createHigherLevelSummary(runtime, memoryService, lowerSummaries, roomId, level) {
|
|
1900
|
+
const formattedSummaries = lowerSummaries.map((s, i) => `<summary${i + 1}>${s.content}</summary${i + 1}>`).join(`
|
|
1901
|
+
`);
|
|
1902
|
+
const prompt = buildHigherLevelSummaryPrompt(formattedSummaries);
|
|
1903
|
+
const originalSystemPrompt = runtime.character.system;
|
|
1904
|
+
try {
|
|
1905
|
+
runtime.character.system = HIGHER_LEVEL_SUMMARIZATION_SYSTEM_PROMPT;
|
|
1906
|
+
const response = await runtime.useModel(ModelType4.TEXT_LARGE, {
|
|
1907
|
+
prompt,
|
|
1908
|
+
temperature: 0.3
|
|
1909
|
+
});
|
|
1910
|
+
runtime.character.system = originalSystemPrompt;
|
|
1911
|
+
const { summary, keyTopics, reasoningTrace } = parseSummarizationResult(response);
|
|
1912
|
+
if (!summary) {
|
|
1913
|
+
logger6.warn({ reasoningTrace }, "No higher-level summary extracted from LLM response");
|
|
1914
|
+
return null;
|
|
1201
1915
|
}
|
|
1916
|
+
const tokenCount = estimateTokensInSummary(summary);
|
|
1917
|
+
const startTime = new Date(Math.min(...lowerSummaries.map((s) => s.startTime.getTime())));
|
|
1918
|
+
const endTime = new Date(Math.max(...lowerSummaries.map((s) => s.endTime.getTime())));
|
|
1919
|
+
const summaryData = {
|
|
1920
|
+
agentId: runtime.agentId,
|
|
1921
|
+
entityId: lowerSummaries[0].entityId,
|
|
1922
|
+
roomId,
|
|
1923
|
+
level,
|
|
1924
|
+
parentSummaryId: undefined,
|
|
1925
|
+
content: summary,
|
|
1926
|
+
tokenCount,
|
|
1927
|
+
startTime,
|
|
1928
|
+
endTime,
|
|
1929
|
+
sourceCount: lowerSummaries.length,
|
|
1930
|
+
sourceIds: lowerSummaries.map((s) => s.id),
|
|
1931
|
+
metadata: {
|
|
1932
|
+
keyTopics,
|
|
1933
|
+
compressedSummaries: lowerSummaries.length
|
|
1934
|
+
}
|
|
1935
|
+
};
|
|
1936
|
+
return await memoryService.storeSummary(summaryData);
|
|
1937
|
+
} catch (error) {
|
|
1938
|
+
logger6.error({ error, level }, "Failed to create higher-level summary");
|
|
1939
|
+
runtime.character.system = originalSystemPrompt;
|
|
1940
|
+
return null;
|
|
1202
1941
|
}
|
|
1203
|
-
}
|
|
1942
|
+
}
|
|
1204
1943
|
|
|
1205
1944
|
// src/providers/long-term-memory.ts
|
|
1206
1945
|
import {
|
|
1207
|
-
logger as
|
|
1208
|
-
addHeader
|
|
1946
|
+
logger as logger7,
|
|
1947
|
+
addHeader
|
|
1209
1948
|
} from "@elizaos/core";
|
|
1210
1949
|
// Provider that injects the user's long-term knowledge (semantic facts,
// procedural skills, and significant episodic events) into the prompt context.
// Position 80 orders it relative to the other providers in this plugin
// (recent context is 100, action results 101).
var longTermMemoryProvider = {
  name: "LONG_TERM_MEMORY",
  description: "User knowledge and facts (semantic + procedural memory)",
  position: 80,
  get: async (runtime, message, state) => {
    try {
      const memoryService = runtime.getService("memory");
      // Without the memory service there is nothing to retrieve; return an
      // empty provider result rather than failing prompt assembly.
      if (!memoryService) {
        logger7.warn("Memory service not available");
        return {
          data: {},
          values: {},
          text: ""
        };
      }
      const entityId = message.entityId;
      const roomId = message.roomId;
      // Derive a search query from the incoming message; fall back to the raw
      // serialized content when there is no plain text field.
      const query = typeof message.content === "string" ? message.content : message.content.text || JSON.stringify(message.content);
      logger7.debug({
        entityId,
        roomId,
        queryLength: query.length
      }, "Retrieving unified memories");
      const config = memoryService.getConfig();
      // Run the three memory-type searches in parallel. Procedural memories get
      // half the retrieval limit of the other two types.
      const [semanticMemories, proceduralMemories, episodicMemories] = await Promise.all([
        memoryService.searchLongTermMemories({
          entityId,
          query,
          roomId,
          type: "SEMANTIC" /* SEMANTIC */,
          limit: config.retrievalLimit,
          minConfidence: config.minConfidence,
          similarityThreshold: 0.15
        }),
        memoryService.searchLongTermMemories({
          entityId,
          query,
          roomId,
          type: "PROCEDURAL" /* PROCEDURAL */,
          limit: Math.floor(config.retrievalLimit / 2),
          minConfidence: config.minConfidence,
          similarityThreshold: 0.15
        }),
        memoryService.searchLongTermMemories({
          entityId,
          query,
          roomId,
          type: "EPISODIC" /* EPISODIC */,
          limit: config.retrievalLimit,
          minConfidence: config.minConfidence,
          similarityThreshold: 0.15
        })
      ]);
      // Build one markdown section per non-empty memory type.
      const longTermSections = [];
      if (semanticMemories.length > 0) {
        // NOTE(review): assumes every semantic memory carries numeric
        // `confidence` and `activationScore` — a missing value would throw here.
        const items = semanticMemories.map((m) => `- ${m.content} (confidence: ${m.confidence.toFixed(2)}, strength: ${m.activationScore.toFixed(2)})`).join("\n");
        longTermSections.push(addHeader("## Semantic Knowledge (Facts)", items));
      }
      if (proceduralMemories.length > 0) {
        const items = proceduralMemories.map((m) => `- ${m.content}`).join("\n");
        longTermSections.push(addHeader("## Procedural Knowledge (Skills & Patterns)", items));
      }
      if (episodicMemories.length > 0) {
        const items = episodicMemories.map((m) => `- ${m.content} (occurred: ${m.createdAt ? new Date(m.createdAt).toLocaleDateString() : "Unknown"})`).join("\n");
        longTermSections.push(addHeader("## Significant Past Events", items));
      }
      const longTermMemoriesText = longTermSections.join("\n\n");
      // Raw results plus the config slice are exposed via `data` for downstream
      // consumers; `values`/`text` carry the rendered markdown.
      const data = {
        semanticMemories,
        proceduralMemories,
        episodicMemories,
        config: {
          retrievalLimit: config.retrievalLimit,
          tokenBudget: config.tokenBudget
        }
      };
      const values = {
        longTermMemories: longTermMemoriesText
      };
      const text3 = longTermMemoriesText;
      logger7.info({
        semanticCount: semanticMemories.length,
        proceduralCount: proceduralMemories.length,
        episodicCount: episodicMemories.length
      }, "Retrieved long-term memory facts");
      return {
        data,
        values,
        text: text3
      };
    } catch (error) {
      // Retrieval failures degrade gracefully to an empty context section.
      logger7.error({ error }, "Failed to retrieve long-term memories");
      return {
        data: {},
        values: {
          longTermMemories: ""
        },
        text: ""
      };
    }
  }
};
|
|
2056
|
+
|
|
2057
|
+
// src/providers/recent-conversation-summary.ts
|
|
2058
|
+
import {
|
|
2059
|
+
addHeader as addHeader2,
|
|
2060
|
+
ChannelType,
|
|
2061
|
+
formatMessages,
|
|
2062
|
+
formatPosts,
|
|
2063
|
+
getEntityDetails,
|
|
2064
|
+
logger as logger8
|
|
2065
|
+
} from "@elizaos/core";
|
|
2066
|
+
// Read the recent-context provider's configuration from runtime settings.
// Defaults to carrying the last 2 user messages as overlap with the summary.
var getProviderConfig = (runtime) => {
  const rawOverlap = runtime.getSetting("CONTEXT_OVERLAP_USER_MESSAGES") || "2";
  return { overlapUserMessageCount: Number.parseInt(rawOverlap, 10) };
};
|
|
2069
|
+
// Fetch everything the recent-context provider needs for a room in parallel:
// participant entity details, the room record, and the last 100 raw messages.
async function fetchConversationData(runtime, roomId) {
  const entitiesPromise = getEntityDetails({ runtime, roomId });
  const roomPromise = runtime.getRoom(roomId);
  const messagesPromise = runtime.getMemories({
    tableName: "messages",
    roomId,
    count: 100,
    unique: false
  });
  const [entities, room, allMessages] = await Promise.all([
    entitiesPromise,
    roomPromise,
    messagesPromise
  ]);
  return { entities, room, allMessages };
}
|
|
2082
|
+
// Strip action-result bookkeeping records from the message list and return the
// remaining dialogue messages in chronological order (missing timestamps sort
// first, treated as 0). The input array itself is not mutated.
function filterAndSortDialogueMessages(allMessages) {
  const isActionResult = (msg) => msg.content?.type === "action_result" && msg.metadata?.type === "action_result";
  const dialogue = allMessages.filter((msg) => !isActionResult(msg));
  dialogue.sort((first, second) => (first.createdAt || 0) - (second.createdAt || 0));
  return dialogue;
}
|
|
2087
|
+
// Build the "compressed history" section of the context from hierarchical
// conversation summaries, and determine how far into `dialogueMessages` the
// existing summaries already reach.
// Returns { compressedHistoryText, lastSummarizedIndex } — text is "" and the
// index -1 when no summaries are available or retrieval fails.
async function buildCompressedHistory(runtime, message, dialogueMessages, sessionStartTime) {
  let compressedHistoryText = "";
  let lastSummarizedIndex = -1;
  const memoryService = runtime.getService("memory");
  // Older/absent memory services may not support summary search; degrade to
  // "no compressed history" rather than throwing.
  if (!memoryService?.searchSummaries) {
    return { compressedHistoryText, lastSummarizedIndex };
  }
  try {
    // At most 3 summaries within a 500-token budget, ranked against the
    // current message text as the query.
    const summaries = await memoryService.searchSummaries({
      entityId: message.entityId,
      roomId: message.roomId,
      query: typeof message.content === "string" ? message.content : message.content.text || "",
      limit: 3,
      tokenBudget: 500
    });
    if (summaries.length === 0) {
      return { compressedHistoryText, lastSummarizedIndex };
    }
    lastSummarizedIndex = findLastSummarizedMessageIndex(summaries, dialogueMessages);
    // Label level-1 summaries as the recent session; higher levels are
    // coarser overviews.
    const summaryItems = summaries.map((s) => {
      const levelLabel = s.level === 1 ? "Recent Session" : `Overview (L${s.level})`;
      return `**[${levelLabel}]** ${s.content}`;
    }).join("\n\n");
    const headerText = `# Conversation History (Compressed)\n**Session Started:** ${formatDateTime(sessionStartTime)}`;
    compressedHistoryText = addHeader2(headerText, summaryItems);
    logger8.debug({
      summaryCount: summaries.length,
      totalTokens: summaries.reduce((sum, s) => sum + s.tokenCount, 0),
      lastSummarizedIndex
    }, "Using hierarchical summaries for compressed history");
  } catch (error) {
    // Best-effort: summary retrieval failure leaves the caller with the full
    // uncompressed message buffer.
    logger8.warn({ error }, "Failed to retrieve summaries");
  }
  return { compressedHistoryText, lastSummarizedIndex };
}
|
|
2125
|
+
// Find the index of the most recent dialogue message that is already covered
// by a level-1 summary (higher-level summaries reference other summaries, not
// messages, so only level 1 counts). Returns -1 when nothing is covered.
function findLastSummarizedMessageIndex(summaries, dialogueMessages) {
  const level1Summaries = summaries.filter((s) => s.level === 1);
  if (level1Summaries.length === 0) return -1;
  const allSummarizedMessageIds = new Set(level1Summaries.flatMap((s) => s.sourceIds));
  // Scan backwards so the first hit is the latest summarized message.
  for (let i = dialogueMessages.length - 1; i >= 0; i--) {
    if (!allSummarizedMessageIds.has(dialogueMessages[i].id)) continue;
    logger8.debug({
      lastSummarizedIndex: i,
      totalMessages: dialogueMessages.length,
      summarizedCount: allSummarizedMessageIds.size
    }, "Determined last summarized message index");
    return i;
  }
  return -1;
}
|
|
2146
|
+
// Decide which raw messages accompany the compressed history in the prompt.
// When summaries cover part of the conversation, the buffer is the last
// `overlapUserMessageCount` user messages from the summarized region (for
// continuity) plus every message after the summarized region. Otherwise the
// full dialogue is used.
function calculateBufferMessages(runtime, dialogueMessages, lastSummarizedIndex, hasSummaries, overlapUserMessageCount) {
  let bufferMessages;
  if (lastSummarizedIndex >= 0 && hasSummaries) {
    const summarizedMessages = dialogueMessages.slice(0, lastSummarizedIndex + 1);
    // "User" messages = anything not authored by this agent.
    const summarizedUserMessages = summarizedMessages.filter((m) => m.entityId !== runtime.agentId);
    const overlapUserMessages = summarizedUserMessages.slice(-overlapUserMessageCount);
    const newUnsummarizedMessages = dialogueMessages.slice(lastSummarizedIndex + 1);
    // Default: no overlap, start right after the summarized region.
    let overlapStartIndex = lastSummarizedIndex + 1;
    if (overlapUserMessages.length > 0) {
      // Pull the start back to the earliest overlap user message so the
      // agent replies in between are included too.
      const firstOverlapUserMessageId = overlapUserMessages[0].id;
      const foundIndex = dialogueMessages.findIndex((m) => m.id === firstOverlapUserMessageId);
      if (foundIndex >= 0) {
        overlapStartIndex = foundIndex;
      }
    }
    bufferMessages = dialogueMessages.slice(overlapStartIndex);
    logger8.debug({
      lastSummarizedIndex,
      summarizedMessageCount: summarizedMessages.length,
      overlapStartIndex,
      overlapSize: lastSummarizedIndex + 1 - overlapStartIndex,
      newUnsummarizedCount: newUnsummarizedMessages.length,
      bufferSize: bufferMessages.length,
      totalDialogueMessages: dialogueMessages.length,
      overlapUserMessageCount
    }, `Dynamic buffer: [overlap: last ${overlapUserMessageCount} user msgs from summary] + [all new unsummarized messages]`);
  } else {
    // No summaries yet: the whole conversation is the buffer.
    bufferMessages = dialogueMessages;
    logger8.debug({
      bufferSize: bufferMessages.length,
      totalMessages: dialogueMessages.length
    }, "Using full conversation: no summaries yet");
  }
  return { bufferMessages, lastSummarizedIndex };
}
|
|
2181
|
+
// Render a timestamp as a short US-English date-time string in 24-hour format,
// e.g. "Jan 5, 2024, 14:30" (exact output is locale-data and timezone dependent).
function formatDateTime(timestamp3) {
  const DATE_TIME_OPTIONS = {
    year: "numeric",
    month: "short",
    day: "numeric",
    hour: "2-digit",
    minute: "2-digit",
    hour12: false
  };
  return new Date(timestamp3).toLocaleString("en-US", DATE_TIME_OPTIONS);
}
|
|
2192
|
+
// Clean formatted conversation text for prompt use:
//  1. strip bracketed entity-id tags like " [abc-123]",
//  2. strip parenthesized "internal thought" asides,
//  3. trim every line, and
//  4. collapse runs of 3+ newlines down to a single blank line.
function removeEntityIds(text3) {
  const withoutIds = text3.replace(/\s*\[[\w\-]+\]/g, "");
  const withoutThoughts = withoutIds.replace(/\s*\([^)]*'s internal thought:[^)]*\)/gi, "");
  const trimmedLines = withoutThoughts.split("\n").map((line) => line.trim()).join("\n");
  return trimmedLines.replace(/\n{3,}/g, "\n\n");
}
|
|
2199
|
+
// Format the raw message buffer into a "# Recent Messages" markdown section.
// Feed/thread rooms use post formatting; everything else uses chat-message
// formatting. Returns "" when there is nothing to show.
// NOTE(review): `sessionStartTime` is accepted but never read here — the
// header uses the buffer's own first/last timestamps instead; confirm whether
// the parameter is vestigial.
async function formatBufferMessages(bufferMessages, entities, room, sessionStartTime) {
  const isPostFormat = room?.type ? room.type === ChannelType.FEED || room.type === ChannelType.THREAD : false;
  // Both renderings are produced up front; only one is used below.
  const [formattedRecentMessages, formattedRecentPosts] = await Promise.all([
    formatMessages({
      messages: bufferMessages,
      entities
    }),
    formatPosts({
      messages: bufferMessages,
      entities,
      conversationHeader: false
    })
  ]);
  let recentBufferText = (isPostFormat ? formattedRecentPosts : formattedRecentMessages) || "";
  // Strip entity-id tags / internal-thought asides before showing to the model.
  recentBufferText = removeEntityIds(recentBufferText);
  const firstBufferTime = bufferMessages.length > 0 ? bufferMessages[0].createdAt : null;
  const lastBufferTime = bufferMessages.length > 0 ? bufferMessages[bufferMessages.length - 1].createdAt : null;
  let headerText = `# Recent Messages (Last ${bufferMessages.length})`;
  if (firstBufferTime && lastBufferTime) {
    headerText += `\n**Time Range:** ${formatDateTime(firstBufferTime)} - ${formatDateTime(lastBufferTime)}`;
  }
  return recentBufferText ? addHeader2(headerText, recentBufferText) : "";
}
|
|
2223
|
+
// Build the "# Current Message" and "# Response Focus" sections for the
// message being replied to. Both come back as "" when the message has no
// non-whitespace text. Sender name falls back to message metadata, then to
// "Unknown User".
function buildReceivedMessageSection(message, entities) {
  const sender = entities.find((entity) => entity.id === message.entityId);
  const senderName = sender?.names[0] || message.metadata?.entityName || "Unknown User";
  const receivedMessageContent = message.content.text;
  if (!receivedMessageContent?.trim()) {
    return { receivedMessageHeader: "", focusHeader: "" };
  }
  const receivedMessageHeader = addHeader2("# Current Message", `**From ${senderName}:** ${receivedMessageContent}`);
  const focusHeader = addHeader2("# Response Focus", `Reply to **${senderName}**'s current message above. Stay relevant to their question. Don't repeat previous responses unless asked again.`);
  return { receivedMessageHeader, focusHeader };
}
|
|
2232
|
+
// Join the four context sections (compressed history, recent buffer, current
// message, response focus) with blank lines, skipping any that are empty.
function assembleContextSections(compressedHistoryText, recentBufferHeader, receivedMessageHeader, focusHeader) {
  const candidates = [
    compressedHistoryText,
    recentBufferHeader,
    receivedMessageHeader,
    focusHeader
  ];
  const populated = [];
  for (const section of candidates) {
    if (section) populated.push(section);
  }
  return populated.join("\n\n");
}
|
|
2243
|
+
// Provider that assembles the conversation context: hierarchical summaries for
// older history plus a raw buffer of recent messages, followed by the current
// message and a response-focus instruction. Position 100 places it after the
// long-term memory provider (80) and before action results (101).
var recentContextProvider = {
  name: "RECENT_CONVERSATION_SUMMARY",
  description: "Intelligent context management combining recent messages with hierarchical summaries for optimal token efficiency",
  position: 100,
  get: async (runtime, message) => {
    try {
      const { roomId } = message;
      const config = getProviderConfig(runtime);
      // Fetch entities, room, and raw messages; keep only dialogue, sorted.
      const { entities, room, allMessages } = await fetchConversationData(runtime, roomId);
      const dialogueMessages = filterAndSortDialogueMessages(allMessages);
      const sessionStartTime = dialogueMessages.length > 0 ? dialogueMessages[0].createdAt || Date.now() : Date.now();
      // Compressed history from summaries + how far those summaries reach.
      const { compressedHistoryText, lastSummarizedIndex } = await buildCompressedHistory(runtime, message, dialogueMessages, sessionStartTime);
      const hasSummaries = !!compressedHistoryText;
      // Raw buffer = overlap with summarized region + all unsummarized messages.
      const { bufferMessages } = calculateBufferMessages(runtime, dialogueMessages, lastSummarizedIndex, hasSummaries, config.overlapUserMessageCount);
      const recentBufferHeader = await formatBufferMessages(bufferMessages, entities, room, sessionStartTime);
      const { receivedMessageHeader, focusHeader } = buildReceivedMessageSection(message, entities);
      const text3 = assembleContextSections(compressedHistoryText, recentBufferHeader, receivedMessageHeader, focusHeader);
      const data = {
        dialogueMessages: bufferMessages,
        messageCount: dialogueMessages.length,
        lastSummarizedIndex,
        config
      };
      const isPostFormat = room?.type ? room.type === ChannelType.FEED || room.type === ChannelType.THREAD : false;
      const values = {
        compressedHistory: compressedHistoryText,
        recentMessages: recentBufferHeader,
        receivedMessage: receivedMessageHeader,
        focusInstruction: focusHeader,
        recentPosts: isPostFormat ? recentBufferHeader : ""
      };
      logger8.info({
        messageCount: dialogueMessages.length,
        bufferSize: bufferMessages.length,
        hasSummaries,
        lastSummarizedIndex,
        // Rough heuristic: ~4 characters per token.
        estimatedTokens: Math.ceil(text3.length / 4),
        overlapUserMessageCount: config.overlapUserMessageCount
      }, "Recent context assembled");
      return {
        data,
        values,
        text: text3
      };
    } catch (error) {
      logger8.error({ error }, "Error in recentContextProvider");
      // Shape-compatible empty result so prompt assembly can continue.
      return {
        data: {
          dialogueMessages: [],
          messageCount: 0,
          lastSummarizedIndex: -1,
          config: getProviderConfig(runtime)
        },
        values: {
          compressedHistory: "",
          recentMessages: "",
          receivedMessage: "",
          focusInstruction: "",
          recentPosts: ""
        },
        text: "Error retrieving context."
      };
    }
  }
};
|
|
2308
|
+
|
|
2309
|
+
// src/providers/action-results.ts
|
|
2310
|
+
import { addHeader as addHeader3, logger as logger9 } from "@elizaos/core";
|
|
2311
|
+
// Read the action-results provider's configuration from runtime settings.
// Defaults to showing the 3 most recent action runs.
var getActionResultsConfig = (runtime) => {
  const rawLimit = runtime.getSetting("CONTEXT_ACTION_RESULTS_LIMIT") || "3";
  return { limit: Number.parseInt(rawLimit, 10) };
};
|
|
2314
|
+
// Render action-result memories as a "# Recent Action Executions" section.
// Results are grouped by run id, only the last `limit` runs are kept, and each
// run lists its actions chronologically with status, plan step, and either an
// error or a non-generic result text. Returns "" for empty input.
function formatActionResults(actionResultMessages, limit) {
  if (actionResultMessages.length === 0) return "";
  // Bucket memories by their originating action run (insertion order kept).
  const groupedByRun = new Map();
  for (const mem of actionResultMessages) {
    const runId = String(mem.content?.runId || "unknown");
    const bucket = groupedByRun.get(runId);
    if (bucket) {
      bucket.push(mem);
    } else {
      groupedByRun.set(runId, [mem]);
    }
  }
  // One bullet line per executed action within a run.
  const describeMemory = (mem) => {
    const actionName = mem.content?.actionName || "Unknown";
    const status = mem.content?.actionStatus || "unknown";
    const planStep = mem.content?.planStep || "";
    const text3 = mem.content?.text || "";
    const error = mem.content?.error || "";
    let memText = `  - ${actionName} (${status})`;
    if (planStep) memText += ` [${planStep}]`;
    if (error) {
      memText += `: Error - ${error}`;
    } else if (text3 && text3 !== `Executed action: ${actionName}`) {
      // Skip the auto-generated placeholder text; show real output only.
      memText += `: ${text3}`;
    }
    return memText;
  };
  const recentRuns = Array.from(groupedByRun.entries()).slice(-limit);
  const formattedActionResults = recentRuns.map(([runId, memories]) => {
    const sortedMemories = memories.sort((a, b) => (a.createdAt || 0) - (b.createdAt || 0));
    const thought = sortedMemories[0]?.content?.planThought || "";
    const runText = sortedMemories.map(describeMemory).join("\n");
    return `**Action Run ${runId.slice(0, 8)}**${thought ? ` - "${thought}"` : ""}\n${runText}`;
  }).join("\n\n");
  return formattedActionResults ? addHeader3("# Recent Action Executions", formattedActionResults) : "";
}
|
|
2353
|
+
// Provider that surfaces recent action executions (tool memory) in the prompt.
// Runs last (position 101) and scans only the latest 50 room messages for
// action-result records.
var actionResultsProvider = {
  name: "ACTION_RESULTS",
  description: "Recent action executions with their outcomes (tool memory)",
  position: 101,
  get: async (runtime, message) => {
    try {
      const { roomId } = message;
      const config = getActionResultsConfig(runtime);
      const allMessages = await runtime.getMemories({
        tableName: "messages",
        roomId,
        count: 50,
        unique: false
      });
      // Keep only action-result records (the inverse of the dialogue filter
      // used by the recent-context provider), oldest first.
      const actionResultMessages = allMessages.filter((msg) => msg.content?.type === "action_result" && msg.metadata?.type === "action_result");
      actionResultMessages.sort((a, b) => (a.createdAt || 0) - (b.createdAt || 0));
      const text3 = formatActionResults(actionResultMessages, config.limit);
      logger9.debug({
        actionResultCount: actionResultMessages.length,
        limit: config.limit
      }, "Action results provider assembled");
      return {
        data: {
          actionResults: actionResultMessages,
          config
        },
        values: {
          recentActionResults: text3
        },
        text: text3
      };
    } catch (error) {
      // Degrade to an empty section on failure; never break prompt assembly.
      logger9.error({ error }, "Error in actionResultsProvider");
      return {
        data: {
          actionResults: [],
          config: getActionResultsConfig(runtime)
        },
        values: {
          recentActionResults: ""
        },
        text: ""
      };
    }
  }
};
|
|
1269
|
-
|
|
1270
2399
|
// src/index.ts
|
|
1271
2400
|
// Plugin manifest wiring the memory system into the host runtime.
var memoryPlugin = {
  name: "memory",
  description: "State-of-the-art cognitive memory system with episodic, semantic, and procedural memory, " + "featuring hybrid retrieval (Vector + BM25 + Graph), exponential decay, contextual embeddings, " + "and contradiction detection",
  // Background service that owns storage, search, and configuration.
  services: [MemoryService],
  // Evaluators run after interactions: memory consolidation and summarization.
  evaluators: [consolidationEvaluator, summarizationEvaluator],
  // Context providers for prompt assembly (each declares its own `position`).
  providers: [longTermMemoryProvider, recentContextProvider, actionResultsProvider],
  // Database schema definitions re-exported for the host to manage.
  schema: exports_schemas
};
// Default export mirrors the named `memoryPlugin` export.
var src_default = memoryPlugin;
|
|
1280
2409
|
export {
|
|
1281
|
-
|
|
2410
|
+
trimToTokenBudget,
|
|
2411
|
+
recentContextProvider,
|
|
2412
|
+
mergeSearchResults,
|
|
1282
2413
|
memoryPlugin,
|
|
1283
|
-
|
|
2414
|
+
mapDbRowToLongTermMemory,
|
|
2415
|
+
mapDbRowToConversationSummary,
|
|
2416
|
+
longTermMemoryProvider,
|
|
2417
|
+
longTermMemoryEmbeddings,
|
|
1284
2418
|
longTermMemories,
|
|
2419
|
+
generateEmbedding,
|
|
2420
|
+
formatTokenCount,
|
|
2421
|
+
formatMemoriesForContext,
|
|
2422
|
+
estimateTokensInSummary,
|
|
2423
|
+
estimateTokenCountForArray,
|
|
2424
|
+
estimateTokenCount,
|
|
1285
2425
|
src_default as default,
|
|
2426
|
+
conversationSummaryEmbeddings,
|
|
2427
|
+
conversationSummaries,
|
|
2428
|
+
cleanEmbedding,
|
|
2429
|
+
calculateDecayFactor,
|
|
2430
|
+
calculateAccessBoost,
|
|
2431
|
+
buildLevel1SummaryPrompt,
|
|
2432
|
+
buildHigherLevelSummaryPrompt,
|
|
2433
|
+
buildExtractionPrompt,
|
|
2434
|
+
buildContradictionPrompt,
|
|
2435
|
+
applyDecayScoring,
|
|
2436
|
+
actionResultsProvider,
|
|
2437
|
+
SUMMARIZATION_SYSTEM_PROMPT,
|
|
2438
|
+
MemoryType,
|
|
1286
2439
|
MemoryService,
|
|
1287
|
-
|
|
2440
|
+
MEMORY_DIMENSION_MAP,
|
|
2441
|
+
LongTermMemoryRepository,
|
|
2442
|
+
HIGHER_LEVEL_SUMMARIZATION_SYSTEM_PROMPT,
|
|
2443
|
+
DecayFunction,
|
|
2444
|
+
ConversationSummaryRepository,
|
|
2445
|
+
CONSOLIDATION_SYSTEM_PROMPT
|
|
1288
2446
|
};
|
|
1289
2447
|
|
|
1290
|
-
//# debugId=
|
|
2448
|
+
//# debugId=925F6225A20E2D9B64756E2164756E21
|