@lumenflow/agent 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,358 @@
1
+ /**
2
+ * Feedback Review Core Logic (WU-1598)
3
+ *
4
+ * Aggregates .beacon/incidents/*.ndjson and .beacon/memory/memory.jsonl,
5
+ * clusters by title similarity, scores patterns (frequency x severity x recency),
6
+ * and outputs prioritised patterns for human review.
7
+ *
8
+ * @see {@link tools/__tests__/feedback-review.test.mjs} - Tests
9
+ * @see {@link tools/feedback-review.mjs} - CLI entry point
10
+ */
11
+ import fs from 'node:fs/promises';
12
+ import path from 'node:path';
13
+ import { INCIDENT_SEVERITY } from '@lumenflow/core/lib/wu-constants.js';
14
/**
 * Severity weights for scoring
 *
 * Higher severity = higher weight in the scoring formula
 * (frequency x average_severity x recency_factor).
 * Consumed by scorePattern() when averaging a cluster's severity;
 * unknown severities fall back to the INFO weight there.
 */
export const SEVERITY_WEIGHTS = {
  [INCIDENT_SEVERITY.BLOCKER]: 4,
  [INCIDENT_SEVERITY.MAJOR]: 3,
  [INCIDENT_SEVERITY.MINOR]: 2,
  [INCIDENT_SEVERITY.INFO]: 1,
};
25
/**
 * Default similarity threshold for title clustering
 *
 * Titles with similarity >= this threshold are grouped together.
 * Range: 0-1 where 1 = exact match (word-level Jaccard similarity,
 * see calculateSimilarity).
 */
const SIMILARITY_THRESHOLD = 0.7;
/**
 * Recency decay factor (in milliseconds)
 *
 * Patterns older than this are weighted less: scorePattern applies
 * exponential decay exp(-age / RECENCY_DECAY_MS) to the most recent
 * occurrence. Default: 30 days.
 */
const RECENCY_DECAY_MS = 30 * 24 * 60 * 60 * 1000;
39
/**
 * Millisecond multipliers per duration unit suffix.
 */
const DURATION_MULTIPLIERS = {
  m: 60 * 1000, // minutes
  h: 60 * 60 * 1000, // hours
  d: 24 * 60 * 60 * 1000, // days
  w: 7 * 24 * 60 * 60 * 1000, // weeks
};
/**
 * Parse a duration string into milliseconds.
 *
 * Accepted units (case-insensitive): m (minutes), h (hours), d (days), w (weeks).
 *
 * @param duration - Duration string such as "7d", "1w", "24h"
 * @returns Duration in milliseconds
 * @throws {Error} When the string does not match `<digits><unit>`
 */
function parseDuration(duration) {
  const parsed = /^(\d+)([dwhmDWHM])$/.exec(duration);
  if (parsed === null) {
    throw new Error(`Invalid duration format: ${duration}. Use format like "7d", "1w", "24h"`);
  }
  const [, digits, unitChar] = parsed;
  const amount = Number.parseInt(digits, 10);
  // eslint-disable-next-line security/detect-object-injection -- unit is validated by regex
  return amount * DURATION_MULTIPLIERS[unitChar.toLowerCase()];
}
66
/**
 * Read an NDJSON file and parse each line into an object.
 *
 * Malformed JSON lines are skipped silently; a missing file yields [].
 *
 * @param filePath - Path to the NDJSON file
 * @returns Array of parsed objects (empty when the file does not exist)
 */
async function loadNdjson(filePath) {
  let raw;
  try {
    // eslint-disable-next-line security/detect-non-literal-fs-filename -- CLI tool reads known paths
    raw = await fs.readFile(filePath, 'utf8');
  }
  catch (err) {
    if (err instanceof Error && 'code' in err && err.code === 'ENOENT') {
      return [];
    }
    throw err;
  }
  const parsed = [];
  for (const line of raw.trim().split('\n')) {
    if (!line) continue;
    try {
      parsed.push(JSON.parse(line));
    }
    catch {
      // Skip malformed lines
    }
  }
  return parsed;
}
96
/**
 * Load all incidents from .beacon/incidents/*.ndjson
 *
 * @param baseDir - Base directory
 * @returns All incident objects (empty when the incidents directory is missing)
 */
async function loadIncidents(baseDir) {
  const incidentsDir = path.join(baseDir, '.beacon', 'incidents');
  let entries;
  try {
    // eslint-disable-next-line security/detect-non-literal-fs-filename -- CLI tool reads known paths
    entries = await fs.readdir(incidentsDir);
  }
  catch (err) {
    if (err instanceof Error && 'code' in err && err.code === 'ENOENT') {
      return [];
    }
    throw err;
  }
  const results = [];
  for (const entry of entries.filter((name) => name.endsWith('.ndjson'))) {
    results.push(...(await loadNdjson(path.join(incidentsDir, entry))));
  }
  return results;
}
123
/**
 * Load memory nodes from .beacon/memory/memory.jsonl
 *
 * @param baseDir - Base directory
 * @returns Memory node objects (empty when the file does not exist)
 */
async function loadMemoryNodes(baseDir) {
  return loadNdjson(path.join(baseDir, '.beacon', 'memory', 'memory.jsonl'));
}
133
/**
 * Calculate simple Jaccard similarity between two strings
 *
 * Uses word-level comparison for better semantic matching.
 *
 * @param str1 - First string
 * @param str2 - Second string
 * @returns Similarity score 0-1 (1 only for identical strings or identical word sets)
 */
function calculateSimilarity(str1, str2) {
  if (!str1 || !str2) return 0;
  if (str1 === str2) return 1;
  // Normalize: lowercase, replace punctuation with spaces, split on whitespace
  const normalize = (s) => s
    .toLowerCase()
    .replace(/[^\w\s]/g, ' ')
    .split(/\s+/)
    .filter(Boolean);
  const words1 = new Set(normalize(str1));
  const words2 = new Set(normalize(str2));
  if (words1.size === 0 || words2.size === 0) return 0;
  // Jaccard similarity: |intersection| / |union|
  const intersection = new Set([...words1].filter((w) => words2.has(w)));
  const union = new Set([...words1, ...words2]);
  return intersection.size / union.size;
}
/**
 * Get display title for a node
 *
 * Falls back to content if title not present.
 *
 * @param node - Node object
 * @returns Title, content, or empty string when neither is present
 */
function getNodeTitle(node) {
  return node.title ?? node.content ?? '';
}
/**
 * Cluster nodes by title similarity
 *
 * Uses simple greedy clustering with Jaccard similarity: each node joins the
 * most similar existing cluster at or above the threshold, otherwise it starts
 * a new cluster seeded with its own title and category.
 *
 * Fix (WU-1598 review): only dedupe on truthy ids. The previous version added
 * `node.id` to the assigned set unconditionally, so nodes with a missing or
 * empty id (reviewFeedback normalizes missing ids to '') all collided on the
 * same key and every such node after the first was silently dropped.
 *
 * @param nodes - Nodes to cluster
 * @param threshold - Similarity threshold (defaults to SIMILARITY_THRESHOLD)
 * @returns Array of cluster objects { title, nodes, category }
 */
export function clusterByTitle(nodes, threshold = SIMILARITY_THRESHOLD) {
  if (!nodes || nodes.length === 0) {
    return [];
  }
  const clusters = [];
  const assigned = new Set();
  for (const node of nodes) {
    // Skip genuine duplicates only; falsy ids are never treated as duplicates
    if (node.id && assigned.has(node.id)) continue;
    const title = getNodeTitle(node);
    if (!title) {
      // Skip nodes without title/content
      if (node.id) assigned.add(node.id);
      continue;
    }
    // Find the most similar existing cluster above the threshold
    let bestCluster = null;
    let bestSimilarity = 0;
    for (const cluster of clusters) {
      const similarity = calculateSimilarity(title, cluster.title);
      if (similarity >= threshold && similarity > bestSimilarity) {
        bestSimilarity = similarity;
        bestCluster = cluster;
      }
    }
    if (bestCluster) {
      bestCluster.nodes.push(node);
    }
    else {
      clusters.push({
        title,
        nodes: [node],
        category: node.category ?? 'uncategorized',
      });
    }
    if (node.id) assigned.add(node.id);
  }
  return clusters;
}
221
/**
 * Score a pattern cluster
 *
 * Formula: frequency x average_severity x recency_factor, where the recency
 * factor decays exponentially from the most recent occurrence and is clamped
 * to a minimum of 0.1 so old patterns still count.
 *
 * @param cluster - Cluster with nodes
 * @returns Score value (0 for an empty or missing node list)
 */
export function scorePattern(cluster) {
  const members = cluster.nodes;
  if (!members || members.length === 0) {
    return 0;
  }
  const frequency = members.length;
  // Average severity weight; unknown or missing severities count as INFO
  const infoWeight = SEVERITY_WEIGHTS[INCIDENT_SEVERITY.INFO];
  let severityTotal = 0;
  for (const member of members) {
    severityTotal += member.severity
      ? (SEVERITY_WEIGHTS[member.severity] ?? infoWeight)
      : infoWeight;
  }
  const avgSeverity = severityTotal / frequency;
  // Recency factor: based on the age of the most recent valid timestamp
  const validTimes = members
    .map((m) => (m.created_at ? new Date(m.created_at).getTime() : 0))
    .filter((t) => t > 0);
  let recencyFactor = 1;
  if (validTimes.length > 0) {
    const age = Date.now() - Math.max(...validTimes);
    // Exponential decay: recent = ~1, old (30+ days) = ~0.37, floor 0.1
    recencyFactor = Math.max(0.1, Math.exp(-age / RECENCY_DECAY_MS));
  }
  return frequency * avgSeverity * recencyFactor;
}
259
/**
 * Review feedback from incidents and memory nodes
 *
 * Main entry point for feedback review logic: loads incidents and memory
 * nodes, normalizes them into a unified node shape, applies optional
 * since/category filters, clusters by title similarity, filters by minimum
 * frequency, scores each cluster, and returns patterns sorted by score.
 *
 * @param baseDir - Base directory containing .beacon
 * @param options - Review options:
 *   since - duration string (e.g. "7d", "1w") limiting nodes by created_at;
 *   minFrequency - minimum cluster size to keep;
 *   category - exact category string to filter nodes by
 * @returns Review result { success, patterns, summary }
 */
export async function reviewFeedback(baseDir, options = {}) {
  const { since, minFrequency, category } = options;
  // Load all data (incidents and memory are independent, fetched in parallel)
  const [incidents, memoryNodes] = await Promise.all([
    loadIncidents(baseDir),
    loadMemoryNodes(baseDir),
  ]);
  // Merge into unified nodes format; missing fields get string defaults
  // (note: missing ids become '' — duplicate empty ids are possible)
  let nodes = [
    ...incidents.map((inc) => ({
      id: String(inc.id ?? ''),
      source: 'incident',
      title: String(inc.title ?? inc.content ?? ''),
      content: String(inc.content ?? ''),
      category: String(inc.category ?? 'uncategorized'),
      severity: String(inc.severity ?? 'info'),
      created_at: inc.created_at ? String(inc.created_at) : undefined,
    })),
    ...memoryNodes.map((mem) => {
      const metadata = mem.metadata;
      const tags = mem.tags;
      return {
        id: String(mem.id ?? ''),
        source: 'memory',
        title: String(mem.content ?? ''), // Memory nodes use content as title
        content: String(mem.content ?? ''),
        severity: String(metadata?.severity ?? 'info'),
        category: String(mem.type ?? tags?.[0] ?? 'uncategorized'),
        created_at: mem.created_at ? String(mem.created_at) : undefined,
      };
    }),
  ];
  // Filter by since: drop nodes older than the cutoff.
  // Nodes without created_at get timestamp 0 and are excluded here.
  if (since) {
    const cutoffMs = Date.now() - parseDuration(since);
    nodes = nodes.filter((n) => {
      const timestamp = n.created_at ? new Date(n.created_at).getTime() : 0;
      return timestamp >= cutoffMs;
    });
  }
  // Filter by category (exact string match)
  if (category) {
    nodes = nodes.filter((n) => n.category === category);
  }
  const totalNodes = nodes.length;
  // Cluster by title similarity (default threshold)
  let clusters = clusterByTitle(nodes);
  // Filter by minimum frequency (cluster size)
  if (minFrequency && minFrequency > 0) {
    clusters = clusters.filter((c) => c.nodes.length >= minFrequency);
  }
  // Score and sort patterns (highest score first).
  // firstSeen/lastSeen rely on lexicographic sort of created_at strings,
  // which is chronological for ISO-8601 timestamps — presumably the stored
  // format; verify against the writers of these files.
  const patterns = clusters
    .map((cluster) => ({
      title: cluster.title,
      frequency: cluster.nodes.length,
      category: cluster.category,
      score: scorePattern(cluster),
      firstSeen: cluster.nodes
        .map((n) => n.created_at)
        .filter((c) => c !== undefined)
        .sort()[0],
      lastSeen: cluster.nodes
        .map((n) => n.created_at)
        .filter((c) => c !== undefined)
        .sort()
        .slice(-1)[0],
      examples: cluster.nodes.slice(0, 3).map((n) => ({
        id: n.id,
        severity: n.severity,
        source: n.source,
      })),
    }))
    .sort((a, b) => b.score - a.score);
  // Calculate summary: total node count per category, across surviving patterns
  const categoryCounts = new Map();
  for (const p of patterns) {
    const cat = p.category ?? 'uncategorized';
    categoryCounts.set(cat, (categoryCounts.get(cat) ?? 0) + p.frequency);
  }
  // Category with the highest node count, or null when there are no patterns
  const topCategory = [...categoryCounts.entries()].sort(([, a], [, b]) => b - a)[0]?.[0] ?? null;
  return {
    success: true,
    patterns,
    summary: {
      totalNodes,
      totalClusters: patterns.length,
      topCategory,
    },
  };
}
@@ -0,0 +1,10 @@
1
+ /**
2
+ * @lumenflow/agent - Agent session management
3
+ * @module @lumenflow/agent
4
+ */
5
+ export * from './agent-incidents.js';
6
+ export * from './agent-session.js';
7
+ export * from './agent-verification.js';
8
+ export * from './auto-session-integration.js';
9
+ export * from './feedback-promote-core.js';
10
+ export * from './feedback-review-core.js';
package/dist/index.js ADDED
@@ -0,0 +1,10 @@
1
+ /**
2
+ * @lumenflow/agent - Agent session management
3
+ * @module @lumenflow/agent
4
+ */
5
+ export * from './agent-incidents.js';
6
+ export * from './agent-session.js';
7
+ export * from './agent-verification.js';
8
+ export * from './auto-session-integration.js';
9
+ export * from './feedback-promote-core.js';
10
+ export * from './feedback-review-core.js';
package/package.json ADDED
@@ -0,0 +1,63 @@
1
+ {
2
+ "name": "@lumenflow/agent",
3
+ "version": "1.0.0",
4
+ "description": "Agent session management for LumenFlow workflow framework",
5
+ "keywords": [
6
+ "lumenflow",
7
+ "workflow",
8
+ "agent",
9
+ "session",
10
+ "logging"
11
+ ],
12
+ "homepage": "https://github.com/hellmai/os",
13
+ "repository": {
14
+ "type": "git",
15
+ "url": "https://github.com/hellmai/os.git",
16
+ "directory": "packages/@lumenflow/agent"
17
+ },
18
+ "license": "Apache-2.0",
19
+ "author": {
20
+ "name": "HellmAI",
21
+ "url": "https://hellm.ai"
22
+ },
23
+ "type": "module",
24
+ "exports": {
25
+ ".": "./dist/index.js",
26
+ "./session": "./dist/agent-session.js",
27
+ "./incidents": "./dist/agent-incidents.js",
28
+ "./verification": "./dist/agent-verification.js",
29
+ "./auto-session": "./dist/auto-session-integration.js",
30
+ "./feedback-promote": "./dist/feedback-promote-core.js",
31
+ "./feedback-review": "./dist/feedback-review-core.js",
32
+ "./dist/*": "./dist/*"
33
+ },
34
+ "main": "./dist/index.js",
35
+ "files": [
36
+ "dist",
37
+ "LICENSE",
38
+ "README.md"
39
+ ],
40
+ "dependencies": {
41
+ "simple-git": "^3.30.0",
42
+ "zod": "^4.3.5",
43
+ "@lumenflow/core": "1.0.0",
44
+ "@lumenflow/memory": "1.0.0"
45
+ },
46
+ "devDependencies": {
47
+ "typescript": "^5.7.0",
48
+ "vitest": "^2.1.0"
49
+ },
50
+ "engines": {
51
+ "node": ">=22"
52
+ },
53
+ "publishConfig": {
54
+ "access": "public"
55
+ },
56
+ "scripts": {
57
+ "build": "tsc",
58
+ "build:dist": "tsc -p tsconfig.build.json",
59
+ "pack:dist": "pnpm pack",
60
+ "clean": "rm -rf dist *.tgz",
61
+ "test": "vitest run"
62
+ }
63
+ }