@sesamespace/hivemind 0.8.10 → 0.8.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,211 @@
1
+ # Memory Enhancement Plan — Automatic Context Management
2
+
3
+ ## Vision
4
+ Transform Hivemind's memory system from passive storage to active context management. Background processes continuously organize, index, and surface relevant information without agent intervention.
5
+
6
+ ## Core Enhancements
7
+
8
+ ### 1. Code Context Tracking
9
+ **Background Process:** `code-indexer`
10
+ - Monitors file access patterns (which files the agent reads/writes)
11
+ - Extracts key structures: functions, classes, interfaces, schemas
12
+ - Maintains a "working set" of active code elements
13
+ - Updates git commit context automatically
14
+ - Indexes by: project, language, purpose, last-accessed
15
+
16
+ **Data Structure:**
17
+ ```json
18
+ {
19
+ "type": "code_context",
20
+ "project": "hivemind",
21
+ "file": "packages/runtime/src/agent.ts",
22
+ "elements": [
23
+ {
24
+ "name": "processMessage",
25
+ "type": "function",
26
+ "signature": "(msg: Message): Promise<Response>",
27
+ "purpose": "Core message processing loop",
28
+ "dependencies": ["memory-client", "router"]
29
+ }
30
+ ],
31
+ "last_accessed": "2024-01-15T10:30:00Z",
32
+ "access_count": 15,
33
+ "git_context": {
34
+ "branch": "feature/memory-enhancement",
35
+ "last_commit": "abc123",
36
+ "modified": true
37
+ }
38
+ }
39
+ ```
40
+
41
+ ### 2. Web Research Digestion
42
+ **Background Process:** `research-digester`
43
+ - Monitors web fetch/browse operations
44
+ - Extracts key concepts, APIs, solutions
45
+ - Links research to active tasks/projects
46
+ - Builds knowledge graph of related concepts
47
+ - Identifies patterns across multiple sources
48
+
49
+ **Data Structure:**
50
+ ```json
51
+ {
52
+ "type": "research_insight",
53
+ "url": "https://docs.example.com/api",
54
+ "project": "sesame-integration",
55
+ "extracted": {
56
+ "key_concepts": ["OAuth flow", "webhook endpoints"],
57
+ "code_examples": ["const auth = await getToken()..."],
58
+ "warnings": ["Rate limit: 100 req/min"],
59
+ "related_to": ["auth-implementation", "rate-limiting"]
60
+ },
61
+ "timestamp": "2024-01-15T09:00:00Z",
62
+ "referenced_count": 3
63
+ }
64
+ ```
65
+
66
+ ### 3. Task State Management
67
+ **Background Process:** `task-tracker`
68
+ - Monitors agent actions and maps to task progress
69
+ - Detects task transitions (started, blocked, completed)
70
+ - Tracks dependencies and blockers
71
+ - Identifies patterns in task completion
72
+ - Surfaces relevant context when returning to a task
73
+
74
+ **Data Structure:**
75
+ ```json
76
+ {
77
+ "type": "task_state",
78
+ "id": "implement-dashboard",
79
+ "project": "hivemind",
80
+ "status": "in_progress",
81
+ "progress": {
82
+ "completed": ["setup routes", "basic UI"],
83
+ "current": "implement request filtering",
84
+ "next": ["add export functionality", "write tests"]
85
+ },
86
+ "context": {
87
+ "key_files": ["src/dashboard/server.js", "src/dashboard/index.html"],
88
+ "recent_decisions": ["use server-sent events for real-time updates"],
89
+ "blockers": [],
90
+ "time_spent": "3.5 hours"
91
+ },
92
+ "last_updated": "2024-01-15T11:00:00Z"
93
+ }
94
+ ```
95
+
96
+ ### 4. Tool Usage Patterns
97
+ **Background Process:** `command-learner`
98
+ - Tracks successful command sequences
99
+ - Identifies common patterns and workflows
100
+ - Builds "recipes" for common tasks
101
+ - Learns from failures and corrections
102
+ - Suggests optimizations
103
+
104
+ **Data Structure:**
105
+ ```json
106
+ {
107
+ "type": "tool_pattern",
108
+ "name": "github-push-workflow",
109
+ "triggers": ["git push", "push changes"],
110
+ "sequence": [
111
+ {"tool": "git_status", "check": "has_changes"},
112
+ {"tool": "git_add", "params": {"files": "."}},
113
+ {"tool": "git_commit", "params": {"message": "<generated>"}},
114
+ {"tool": "git_push", "params": {"remote": "origin"}}
115
+ ],
116
+ "success_rate": 0.95,
117
+ "last_used": "2024-01-15T10:00:00Z",
118
+ "variations": ["with-specific-files", "force-push"]
119
+ }
120
+ ```
121
+
122
+ ### 5. Cross-Agent Awareness
123
+ **Background Process:** `agent-sync`
124
+ - Monitors Sesame channels for other agent activity
125
+ - Extracts "public knowledge" from agent interactions
126
+ - Tracks handoff points and collaboration patterns
127
+ - Maintains agent capability registry
128
+ - Identifies complementary skills
129
+
130
+ **Data Structure:**
131
+ ```json
132
+ {
133
+ "type": "agent_knowledge",
134
+ "agent": "bailey",
135
+ "capabilities": ["rust development", "system architecture"],
136
+ "current_focus": ["hivemind dashboard", "memory optimization"],
137
+ "collaboration_points": [
138
+ {
139
+ "task": "dashboard-implementation",
140
+ "status": "bailey-implementing",
141
+ "handoff_ready": "2024-01-16"
142
+ }
143
+ ],
144
+ "last_seen": "2024-01-15T11:30:00Z"
145
+ }
146
+ ```
147
+
148
+ ## Implementation Architecture
149
+
150
+ ### Background Process Framework
151
+ ```typescript
152
+ interface BackgroundProcess {
153
+ name: string;
154
+ interval: number; // milliseconds
155
+ run(context: ProcessContext): Promise<void>;
156
+ shouldRun(context: ProcessContext): Promise<boolean>;
157
+ }
158
+
159
+ class ProcessManager {
160
+ private processes: Map<string, BackgroundProcess>;
161
+ private memory: MemoryClient;
+ private context: ProcessContext;
162
+
163
+ async start() {
164
+ for (const [name, process] of this.processes) {
165
+ setInterval(async () => {
166
+ if (await process.shouldRun(this.context)) {
167
+ await process.run(this.context);
168
+ }
169
+ }, process.interval);
170
+ }
171
+ }
172
+ }
173
+ ```
174
+
175
+ ### Memory Indexing Strategy
176
+ 1. **Write-through cache**: All observations written immediately to L2
177
+ 2. **Background indexing**: Processes run every 30s-5min depending on type
178
+ 3. **Smart batching**: Group related updates to minimize memory churn
179
+ 4. **Relevance scoring**: Continuously update scores based on access patterns
180
+ 5. **Compression**: Older entries compressed/summarized, recent kept detailed
181
+
182
+ ### Context Injection
183
+ When building LLM prompts, the system will:
184
+ 1. Query active task state
185
+ 2. Include relevant code context (files, functions being worked on)
186
+ 3. Add recent research/documentation insights
187
+ 4. Include tool patterns for likely next actions
188
+ 5. Add cross-agent awareness if collaborating
189
+
190
+ ### Local Processing Power Usage
191
+ - **Embedding generation**: Ollama with local models (no API calls)
192
+ - **Pattern matching**: Rust-based processors for speed
193
+ - **Index management**: LanceDB for vector operations
194
+ - **File watching**: Native OS APIs for efficiency
195
+ - **Git operations**: libgit2 bindings for speed
196
+
197
+ ## Benefits
198
+ 1. **Zero cognitive load**: Agents don't think about memory management
199
+ 2. **Rich context**: Every request includes highly relevant information
200
+ 3. **Learning system**: Gets better at predicting needed context over time
201
+ 4. **Collaborative**: Agents automatically aware of each other's work
202
+ 5. **Efficient**: Background processing keeps LLM calls focused
203
+
204
+ ## Next Steps
205
+ 1. Implement the background process framework in TypeScript
206
+ 2. Create the first processor: `code-indexer`
207
+ 3. Test with real agent workflows
208
+ 4. Add remaining processors incrementally
209
+ 5. Optimize based on dashboard metrics
210
+
211
+ This system will make every Hivemind agent dramatically more capable without any changes to their prompts or behavior.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@sesamespace/hivemind",
3
- "version": "0.8.10",
3
+ "version": "0.8.11",
4
4
  "description": "Cognitive architecture for AI agents with multi-layered memory",
5
5
  "scripts": {
6
6
  "build": "tsup",
@@ -15,7 +15,9 @@
15
15
  },
16
16
  "dependencies": {
17
17
  "@sesamespace/sdk": "^0.1.6",
18
- "smol-toml": "^1.6.0"
18
+ "smol-toml": "^1.6.0",
19
+ "axios": "^1.7.9",
20
+ "express": "^4.21.2"
19
21
  },
20
22
  "devDependencies": {
21
23
  "tsup": "^8.5.1"
@@ -0,0 +1,295 @@
1
+ /**
2
+ * Dashboard Integration - Exposes memory system state to the Hivemind dashboard
3
+ */
4
+
5
+ import { EventEmitter } from 'events';
6
+ import express from 'express';
7
+ import { MessageFlowIntegration } from './processors/message-flow-integration';
8
+ import { ResearchDigester } from './processors/research-digester';
9
+ import { CommandLearner } from './processors/command-learner';
10
+ import { AgentSync } from './processors/agent-sync';
11
+
12
/**
 * Configuration for {@link DashboardIntegration}.
 *
 * Only `messageFlow` is required; the other processors are optional and the
 * corresponding dashboard routes degrade to empty payloads when absent.
 */
export interface DashboardConfig {
  /** TCP port for the dashboard HTTP server (optional). */
  port?: number;
  /** Source of working-set, task and context state (required). */
  messageFlow: MessageFlowIntegration;
  /** Optional: supplies research entries and topic index. */
  researchDigester?: ResearchDigester;
  /** Optional: supplies learned command patterns and suggestions. */
  commandLearner?: CommandLearner;
  /** Optional: supplies cross-agent knowledge and shared tasks. */
  agentSync?: AgentSync;
}
19
+
20
+ export class DashboardIntegration extends EventEmitter {
21
+ private app: express.Application;
22
+ private messageFlow: MessageFlowIntegration;
23
+ private researchDigester?: ResearchDigester;
24
+ private commandLearner?: CommandLearner;
25
+ private agentSync?: AgentSync;
26
+
27
+ constructor(config: DashboardConfig) {
28
+ super();
29
+
30
+ this.messageFlow = config.messageFlow;
31
+ this.researchDigester = config.researchDigester;
32
+ this.commandLearner = config.commandLearner;
33
+ this.agentSync = config.agentSync;
34
+
35
+ this.app = express();
36
+ this.setupRoutes();
37
+ }
38
+
39
+ private setupRoutes(): void {
40
+ this.app.use(express.json());
41
+
42
+ // Memory system overview
43
+ this.app.get('/api/memory/overview', async (req, res) => {
44
+ try {
45
+ const state = await this.messageFlow.getState();
46
+ const overview = {
47
+ ...state,
48
+ research: this.researchDigester ? {
49
+ entries: (await this.researchDigester['research']).size,
50
+ topics: Array.from(this.researchDigester['topicIndex'].keys())
51
+ } : null,
52
+ commands: this.commandLearner ? {
53
+ patterns: this.commandLearner['patterns'].size,
54
+ categories: Array.from(this.commandLearner['categoryIndex'].keys())
55
+ } : null,
56
+ agents: this.agentSync ? {
57
+ known: (await this.agentSync.getAgentKnowledge()).length,
58
+ sharedTasks: (await this.agentSync.getSharedTasks()).length
59
+ } : null
60
+ };
61
+ res.json(overview);
62
+ } catch (error) {
63
+ res.status(500).json({ error: error.message });
64
+ }
65
+ });
66
+
67
+ // Working set - currently active code files
68
+ this.app.get('/api/memory/working-set', async (req, res) => {
69
+ try {
70
+ const state = await this.messageFlow.getState();
71
+ res.json({
72
+ files: state.workingSet,
73
+ totalFiles: state.workingSet.length
74
+ });
75
+ } catch (error) {
76
+ res.status(500).json({ error: error.message });
77
+ }
78
+ });
79
+
80
+ // Active tasks
81
+ this.app.get('/api/memory/tasks', async (req, res) => {
82
+ try {
83
+ const state = await this.messageFlow.getState();
84
+ const sharedTasks = this.agentSync ? await this.agentSync.getSharedTasks() : [];
85
+
86
+ res.json({
87
+ localTasks: state.tasks,
88
+ sharedTasks: sharedTasks,
89
+ total: state.tasks.length + sharedTasks.length
90
+ });
91
+ } catch (error) {
92
+ res.status(500).json({ error: error.message });
93
+ }
94
+ });
95
+
96
+ // Research entries
97
+ this.app.get('/api/memory/research', async (req, res) => {
98
+ if (!this.researchDigester) {
99
+ return res.json({ entries: [], topics: [] });
100
+ }
101
+
102
+ try {
103
+ const recent = await this.researchDigester.getRecent(20);
104
+ res.json({
105
+ entries: recent,
106
+ topics: Array.from(this.researchDigester['topicIndex'].keys())
107
+ });
108
+ } catch (error) {
109
+ res.status(500).json({ error: error.message });
110
+ }
111
+ });
112
+
113
+ // Search research
114
+ this.app.get('/api/memory/research/search', async (req, res) => {
115
+ if (!this.researchDigester) {
116
+ return res.json({ results: [] });
117
+ }
118
+
119
+ try {
120
+ const { q } = req.query;
121
+ const results = await this.researchDigester.search(q as string);
122
+ res.json({ results });
123
+ } catch (error) {
124
+ res.status(500).json({ error: error.message });
125
+ }
126
+ });
127
+
128
+ // Command patterns
129
+ this.app.get('/api/memory/commands', async (req, res) => {
130
+ if (!this.commandLearner) {
131
+ return res.json({ patterns: [], categories: [] });
132
+ }
133
+
134
+ try {
135
+ const patterns = Array.from(this.commandLearner['patterns'].values());
136
+ const categories = Array.from(this.commandLearner['categoryIndex'].keys());
137
+
138
+ res.json({
139
+ patterns: patterns.sort((a, b) => b.usageCount - a.usageCount),
140
+ categories
141
+ });
142
+ } catch (error) {
143
+ res.status(500).json({ error: error.message });
144
+ }
145
+ });
146
+
147
+ // Command suggestions
148
+ this.app.get('/api/memory/commands/suggest', async (req, res) => {
149
+ if (!this.commandLearner) {
150
+ return res.json({ suggestions: [] });
151
+ }
152
+
153
+ try {
154
+ const { context, category } = req.query;
155
+ const suggestions = await this.commandLearner.getSuggestions(
156
+ context as string || '',
157
+ category as string
158
+ );
159
+ res.json({ suggestions });
160
+ } catch (error) {
161
+ res.status(500).json({ error: error.message });
162
+ }
163
+ });
164
+
165
+ // Agent knowledge
166
+ this.app.get('/api/memory/agents', async (req, res) => {
167
+ if (!this.agentSync) {
168
+ return res.json({ agents: [] });
169
+ }
170
+
171
+ try {
172
+ const agents = await this.agentSync.getAgentKnowledge();
173
+ res.json({ agents });
174
+ } catch (error) {
175
+ res.status(500).json({ error: error.message });
176
+ }
177
+ });
178
+
179
+ // Memory timeline - recent activity
180
+ this.app.get('/api/memory/timeline', async (req, res) => {
181
+ try {
182
+ const timeline = [];
183
+
184
+ // Add recent tasks
185
+ const state = await this.messageFlow.getState();
186
+ for (const task of state.tasks) {
187
+ timeline.push({
188
+ type: 'task',
189
+ timestamp: task.lastUpdate,
190
+ description: task.description,
191
+ metadata: { state: task.state }
192
+ });
193
+ }
194
+
195
+ // Add recent research
196
+ if (this.researchDigester) {
197
+ const research = await this.researchDigester.getRecent(5);
198
+ for (const entry of research) {
199
+ timeline.push({
200
+ type: 'research',
201
+ timestamp: entry.timestamp,
202
+ description: entry.title,
203
+ metadata: { url: entry.url, topics: entry.relatedTopics }
204
+ });
205
+ }
206
+ }
207
+
208
+ // Add recent commands
209
+ if (this.commandLearner) {
210
+ const patterns = Array.from(this.commandLearner['patterns'].values())
211
+ .sort((a, b) => b.lastUsed.getTime() - a.lastUsed.getTime())
212
+ .slice(0, 5);
213
+
214
+ for (const pattern of patterns) {
215
+ timeline.push({
216
+ type: 'command',
217
+ timestamp: pattern.lastUsed,
218
+ description: pattern.description,
219
+ metadata: { pattern: pattern.pattern, category: pattern.category }
220
+ });
221
+ }
222
+ }
223
+
224
+ // Sort by timestamp
225
+ timeline.sort((a, b) => b.timestamp.getTime() - a.timestamp.getTime());
226
+
227
+ res.json({ timeline: timeline.slice(0, 50) });
228
+ } catch (error) {
229
+ res.status(500).json({ error: error.message });
230
+ }
231
+ });
232
+
233
+ // Context preview - what would be included in next request
234
+ this.app.post('/api/memory/context-preview', async (req, res) => {
235
+ try {
236
+ const { message } = req.body;
237
+ const context = await this.messageFlow.processMessage({
238
+ role: 'user',
239
+ content: message,
240
+ timestamp: new Date()
241
+ });
242
+
243
+ res.json({
244
+ context,
245
+ length: context.length,
246
+ sections: this.analyzeContextSections(context)
247
+ });
248
+ } catch (error) {
249
+ res.status(500).json({ error: error.message });
250
+ }
251
+ });
252
+ }
253
+
254
+ private analyzeContextSections(context: string): any[] {
255
+ const sections = [];
256
+ const lines = context.split('\n');
257
+ let currentSection = null;
258
+ let currentContent = [];
259
+
260
+ for (const line of lines) {
261
+ if (line.startsWith('## ')) {
262
+ if (currentSection) {
263
+ sections.push({
264
+ title: currentSection,
265
+ lines: currentContent.length,
266
+ characters: currentContent.join('\n').length
267
+ });
268
+ }
269
+ currentSection = line.substring(3);
270
+ currentContent = [];
271
+ } else {
272
+ currentContent.push(line);
273
+ }
274
+ }
275
+
276
+ if (currentSection) {
277
+ sections.push({
278
+ title: currentSection,
279
+ lines: currentContent.length,
280
+ characters: currentContent.join('\n').length
281
+ });
282
+ }
283
+
284
+ return sections;
285
+ }
286
+
287
+ async start(port: number = 9486): Promise<void> {
288
+ return new Promise((resolve) => {
289
+ this.app.listen(port, () => {
290
+ this.emit('started', { port });
291
+ resolve();
292
+ });
293
+ });
294
+ }
295
+ }