@vectorize-io/hindsight-openclaw 0.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,35 @@
+ # Hindsight Memory Plugin for OpenClaw
+
+ Biomimetic long-term memory for [OpenClaw](https://openclaw.ai) using [Hindsight](https://vectorize.io/hindsight). Automatically captures conversations and intelligently recalls relevant context.
+
+ ## Quick Start
+
+ ```bash
+ # 1. Configure your LLM provider
+ export OPENAI_API_KEY="sk-your-key"
+ clawdbot config set 'agents.defaults.models."openai/gpt-4o-mini"' '{}'
+
+ # 2. Install and enable the plugin
+ clawdbot plugins install @vectorize-io/hindsight-openclaw
+
+ # 3. Start OpenClaw
+ clawdbot gateway
+ ```
+
+ That's it! The plugin will automatically start capturing and recalling memories.
+
+ ## Documentation
+
+ For full documentation, configuration options, troubleshooting, and development guide, see:
+
+ **[OpenClaw Integration Documentation](https://vectorize.io/hindsight/sdks/integrations/openclaw)**
+
+ ## Links
+
+ - [Hindsight Documentation](https://vectorize.io/hindsight)
+ - [OpenClaw Documentation](https://openclaw.ai)
+ - [GitHub Repository](https://github.com/vectorize-io/hindsight)
+
+ ## License
+
+ MIT
package/clawdbot.plugin.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "id": "hindsight-openclaw",
+   "name": "Hindsight Memory",
+   "kind": "memory",
+   "configSchema": {
+     "type": "object",
+     "properties": {
+       "daemonIdleTimeout": {
+         "type": "number",
+         "description": "Seconds before daemon shuts down from inactivity (0 = never)",
+         "default": 0
+       },
+       "embedPort": {
+         "type": "number",
+         "description": "Port for hindsight-embed server (auto-assigned if not specified)",
+         "default": 0
+       },
+       "bankMission": {
+         "type": "string",
+         "description": "Custom mission/context for the memory bank",
+         "default": "You are an AI assistant helping users across multiple communication channels (Telegram, Slack, Discord, etc.). Remember user preferences, instructions, and important context from conversations to provide personalized assistance."
+       },
+       "embedVersion": {
+         "type": "string",
+         "description": "hindsight-embed version to use (e.g. 'latest', '0.4.2', or empty for latest)",
+         "default": "latest"
+       }
+     },
+     "additionalProperties": false
+   },
+   "uiHints": {
+     "daemonIdleTimeout": {
+       "label": "Daemon Idle Timeout",
+       "placeholder": "0 (never timeout)"
+     },
+     "embedPort": {
+       "label": "Embed Server Port",
+       "placeholder": "0 (auto-assign)"
+     },
+     "bankMission": {
+       "label": "Bank Mission",
+       "placeholder": "Custom context for what this agent does..."
+     },
+     "embedVersion": {
+       "label": "Hindsight Embed Version",
+       "placeholder": "latest (or pin to specific version like 0.4.2)"
+     }
+   }
+ }
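For reference, `dist/index.js` below reads these options from `plugins.entries["hindsight-openclaw"].config`, so a user-level override could look like the sketch that follows. Only the option names, types, and defaults come from the schema above; the specific values and the surrounding config file layout are illustrative assumptions.

```json
{
  "plugins": {
    "entries": {
      "hindsight-openclaw": {
        "enabled": true,
        "config": {
          "daemonIdleTimeout": 600,
          "embedPort": 0,
          "bankMission": "Remember user preferences and project context for this workspace.",
          "embedVersion": "0.4.2"
        }
      }
    }
  }
}
```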
package/dist/client.d.ts ADDED
@@ -0,0 +1,14 @@
+ import type { RetainRequest, RetainResponse, RecallRequest, RecallResponse } from './types.js';
+ export declare class HindsightClient {
+     private bankId;
+     private llmProvider;
+     private llmApiKey;
+     private llmModel?;
+     private embedVersion;
+     constructor(llmProvider: string, llmApiKey: string, llmModel?: string, embedVersion?: string);
+     setBankId(bankId: string): void;
+     setBankMission(mission: string): Promise<void>;
+     private getEnv;
+     retain(request: RetainRequest): Promise<RetainResponse>;
+     recall(request: RecallRequest): Promise<RecallResponse>;
+ }
package/dist/client.js ADDED
@@ -0,0 +1,91 @@
+ import { exec } from 'child_process';
+ import { promisify } from 'util';
+ const execAsync = promisify(exec);
+ export class HindsightClient {
+     bankId = 'default'; // Always use default bank
+     llmProvider;
+     llmApiKey;
+     llmModel;
+     embedVersion;
+     constructor(llmProvider, llmApiKey, llmModel, embedVersion = 'latest') {
+         this.llmProvider = llmProvider;
+         this.llmApiKey = llmApiKey;
+         this.llmModel = llmModel;
+         this.embedVersion = embedVersion || 'latest';
+     }
+     setBankId(bankId) {
+         this.bankId = bankId;
+     }
+     async setBankMission(mission) {
+         if (!mission || mission.trim().length === 0) {
+             return;
+         }
+         const escapedMission = mission.replace(/'/g, "'\\''"); // Escape single quotes
+         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
+         const cmd = `uvx ${embedPackage} bank mission ${this.bankId} '${escapedMission}'`;
+         try {
+             const { stdout } = await execAsync(cmd, { env: this.getEnv() });
+             console.log(`[Hindsight] Bank mission set: ${stdout.trim()}`);
+         }
+         catch (error) {
+             // Don't fail if mission set fails - bank might not exist yet, will be created on first retain
+             console.warn(`[Hindsight] Could not set bank mission (bank may not exist yet): ${error}`);
+         }
+     }
+     getEnv() {
+         const env = {
+             ...process.env,
+             HINDSIGHT_EMBED_LLM_PROVIDER: this.llmProvider,
+             HINDSIGHT_EMBED_LLM_API_KEY: this.llmApiKey,
+         };
+         if (this.llmModel) {
+             env.HINDSIGHT_EMBED_LLM_MODEL = this.llmModel;
+         }
+         return env;
+     }
+     async retain(request) {
+         const content = request.content.replace(/'/g, "'\\''"); // Escape single quotes
+         const docId = request.document_id || 'conversation';
+         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
+         const cmd = `uvx ${embedPackage} memory retain ${this.bankId} '${content}' --doc-id '${docId}' --async`;
+         try {
+             const { stdout } = await execAsync(cmd, { env: this.getEnv() });
+             console.log(`[Hindsight] Retained (async): ${stdout.trim()}`);
+             // Return a simple response
+             return {
+                 message: 'Memory queued for background processing',
+                 document_id: docId,
+                 memory_unit_ids: [],
+             };
+         }
+         catch (error) {
+             throw new Error(`Failed to retain memory: ${error}`);
+         }
+     }
+     async recall(request) {
+         const query = request.query.replace(/'/g, "'\\''"); // Escape single quotes
+         const maxTokens = request.max_tokens || 1024;
+         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
+         const cmd = `uvx ${embedPackage} memory recall ${this.bankId} '${query}' --output json --max-tokens ${maxTokens}`;
+         try {
+             const { stdout } = await execAsync(cmd, { env: this.getEnv() });
+             // Parse JSON output - returns { entities: {...}, results: [...] }
+             const response = JSON.parse(stdout);
+             const results = response.results || [];
+             return {
+                 results: results.map((r) => ({
+                     content: r.text || r.content || '',
+                     score: 1.0, // CLI doesn't return scores
+                     metadata: {
+                         document_id: r.document_id,
+                         chunk_id: r.chunk_id,
+                         ...r.metadata,
+                     },
+                 })),
+             };
+         }
+         catch (error) {
+             throw new Error(`Failed to recall memories: ${error}`);
+         }
+     }
+ }
package/dist/embed-manager.d.ts ADDED
@@ -0,0 +1,18 @@
+ export declare class HindsightEmbedManager {
+     private process;
+     private port;
+     private baseUrl;
+     private embedDir;
+     private llmProvider;
+     private llmApiKey;
+     private llmModel?;
+     private daemonIdleTimeout;
+     private embedVersion;
+     constructor(port: number, llmProvider: string, llmApiKey: string, llmModel?: string, daemonIdleTimeout?: number, // Default: never timeout
+     embedVersion?: string);
+     start(): Promise<void>;
+     stop(): Promise<void>;
+     private waitForReady;
+     getBaseUrl(): string;
+     isRunning(): boolean;
+ }
package/dist/embed-manager.js ADDED
@@ -0,0 +1,120 @@
+ import { spawn } from 'child_process';
+ import { join } from 'path';
+ import { homedir } from 'os';
+ export class HindsightEmbedManager {
+     process = null;
+     port;
+     baseUrl;
+     embedDir;
+     llmProvider;
+     llmApiKey;
+     llmModel;
+     daemonIdleTimeout;
+     embedVersion;
+     constructor(port, llmProvider, llmApiKey, llmModel, daemonIdleTimeout = 0, // Default: never timeout
+     embedVersion = 'latest' // Default: latest
+     ) {
+         this.port = 8889; // hindsight-embed uses fixed port 8889
+         this.baseUrl = `http://127.0.0.1:8889`;
+         this.embedDir = join(homedir(), '.clawdbot', 'hindsight-embed');
+         this.llmProvider = llmProvider;
+         this.llmApiKey = llmApiKey;
+         this.llmModel = llmModel;
+         this.daemonIdleTimeout = daemonIdleTimeout;
+         this.embedVersion = embedVersion || 'latest';
+     }
+     async start() {
+         console.log(`[Hindsight] Starting hindsight-embed daemon...`);
+         // Build environment variables
+         const env = {
+             ...process.env,
+             HINDSIGHT_EMBED_LLM_PROVIDER: this.llmProvider,
+             HINDSIGHT_EMBED_LLM_API_KEY: this.llmApiKey,
+             HINDSIGHT_EMBED_DAEMON_IDLE_TIMEOUT: this.daemonIdleTimeout.toString(),
+         };
+         if (this.llmModel) {
+             env['HINDSIGHT_EMBED_LLM_MODEL'] = this.llmModel;
+         }
+         // Start hindsight-embed daemon (it manages itself)
+         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
+         const startDaemon = spawn('uvx', [embedPackage, 'daemon', 'start'], {
+             env,
+             stdio: 'pipe',
+         });
+         // Collect output
+         let output = '';
+         startDaemon.stdout?.on('data', (data) => {
+             const text = data.toString();
+             output += text;
+             console.log(`[Hindsight] ${text.trim()}`);
+         });
+         startDaemon.stderr?.on('data', (data) => {
+             const text = data.toString();
+             output += text;
+             console.error(`[Hindsight] ${text.trim()}`);
+         });
+         // Wait for daemon start command to complete
+         await new Promise((resolve, reject) => {
+             startDaemon.on('exit', (code) => {
+                 if (code === 0) {
+                     console.log('[Hindsight] Daemon start command completed');
+                     resolve();
+                 }
+                 else {
+                     reject(new Error(`Daemon start failed with code ${code}: ${output}`));
+                 }
+             });
+             startDaemon.on('error', (error) => {
+                 reject(error);
+             });
+         });
+         // Wait for server to be ready
+         await this.waitForReady();
+         console.log('[Hindsight] Daemon is ready');
+     }
+     async stop() {
+         console.log('[Hindsight] Stopping hindsight-embed daemon...');
+         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
+         const stopDaemon = spawn('uvx', [embedPackage, 'daemon', 'stop'], {
+             stdio: 'pipe',
+         });
+         await new Promise((resolve) => {
+             stopDaemon.on('exit', () => {
+                 console.log('[Hindsight] Daemon stopped');
+                 resolve();
+             });
+             stopDaemon.on('error', (error) => {
+                 console.error('[Hindsight] Error stopping daemon:', error);
+                 resolve(); // Resolve anyway
+             });
+             // Timeout after 5 seconds
+             setTimeout(() => {
+                 console.log('[Hindsight] Daemon stop timeout');
+                 resolve();
+             }, 5000);
+         });
+     }
+     async waitForReady(maxAttempts = 30) {
+         console.log('[Hindsight] Waiting for daemon to be ready...');
+         for (let i = 0; i < maxAttempts; i++) {
+             try {
+                 const response = await fetch(`${this.baseUrl}/health`);
+                 if (response.ok) {
+                     console.log('[Hindsight] Daemon health check passed');
+                     return;
+                 }
+             }
+             catch {
+                 // Not ready yet
+             }
+             await new Promise((resolve) => setTimeout(resolve, 1000));
+         }
+         throw new Error('Hindsight daemon failed to become ready within 30 seconds');
+     }
+     getBaseUrl() {
+         return this.baseUrl;
+     }
+     isRunning() {
+         return this.process !== null;
+     }
+ }
package/dist/index.d.ts ADDED
@@ -0,0 +1,4 @@
+ import type { MoltbotPluginAPI } from './types.js';
+ import { HindsightClient } from './client.js';
+ export default function (api: MoltbotPluginAPI): void;
+ export declare function getClient(): HindsightClient | null;
package/dist/index.js ADDED
@@ -0,0 +1,313 @@
+ import { HindsightEmbedManager } from './embed-manager.js';
+ import { HindsightClient } from './client.js';
+ import { dirname } from 'path';
+ import { fileURLToPath } from 'url';
+ // Module-level state
+ let embedManager = null;
+ let client = null;
+ let initPromise = null;
+ let isInitialized = false;
+ // Global access for hooks (Moltbot loads hooks separately)
+ if (typeof global !== 'undefined') {
+     global.__hindsightClient = {
+         getClient: () => client,
+         waitForReady: async () => {
+             if (isInitialized)
+                 return;
+             if (initPromise)
+                 await initPromise;
+         },
+     };
+ }
+ // Get directory of current module
+ const __filename = fileURLToPath(import.meta.url);
+ const __dirname = dirname(__filename);
+ // Default bank name
+ const BANK_NAME = 'openclaw';
+ // Provider mapping: moltbot provider name -> hindsight provider name
+ const PROVIDER_MAP = {
+     anthropic: 'anthropic',
+     openai: 'openai',
+     'openai-codex': 'openai',
+     gemini: 'gemini',
+     groq: 'groq',
+     ollama: 'ollama',
+ };
+ // Environment variable mapping
+ const ENV_KEY_MAP = {
+     anthropic: 'ANTHROPIC_API_KEY',
+     openai: 'OPENAI_API_KEY',
+     'openai-codex': 'OPENAI_API_KEY',
+     gemini: 'GEMINI_API_KEY',
+     groq: 'GROQ_API_KEY',
+     ollama: '', // No key needed for local ollama
+ };
+ function detectLLMConfig(api) {
+     // Get models from config (agents.defaults.models is a dictionary of models)
+     const models = api.config.agents?.defaults?.models;
+     if (!models || Object.keys(models).length === 0) {
+         throw new Error('No models configured in Moltbot. Please configure at least one model in agents.defaults.models');
+     }
+     // Try all configured models to find one with an available API key
+     const configuredModels = Object.keys(models);
+     for (const modelKey of configuredModels) {
+         const [moltbotProvider, ...modelParts] = modelKey.split('/');
+         const model = modelParts.join('/');
+         const hindsightProvider = PROVIDER_MAP[moltbotProvider];
+         if (!hindsightProvider) {
+             continue; // Skip unsupported providers
+         }
+         const envKey = ENV_KEY_MAP[moltbotProvider];
+         const apiKey = envKey ? process.env[envKey] || '' : '';
+         // For ollama, no key is needed
+         if (hindsightProvider === 'ollama') {
+             return { provider: hindsightProvider, apiKey: '', model, envKey: '' };
+         }
+         // If we found a key, use this provider
+         if (apiKey) {
+             return { provider: hindsightProvider, apiKey, model, envKey };
+         }
+     }
+     // No API keys found for any provider - show helpful error
+     const configuredProviders = configuredModels
+         .map(m => m.split('/')[0])
+         .filter(p => PROVIDER_MAP[p]);
+     const keyInstructions = configuredProviders
+         .map(p => {
+             const envVar = ENV_KEY_MAP[p];
+             return envVar ? ` • ${envVar} (for ${p})` : null;
+         })
+         .filter(Boolean)
+         .join('\n');
+     throw new Error(`No API keys found for Hindsight memory plugin.\n\n` +
+         `Configured providers in Moltbot: ${configuredProviders.join(', ')}\n\n` +
+         `Please set one of these environment variables:\n${keyInstructions}\n\n` +
+         `You can set them in your shell profile (~/.zshrc or ~/.bashrc):\n` +
+         ` export ANTHROPIC_API_KEY="your-key-here"\n\n` +
+         `Or run Moltbot with the environment variable:\n` +
+         ` ANTHROPIC_API_KEY="your-key" clawdbot start\n\n` +
+         `Alternatively, configure ollama provider which doesn't require an API key.`);
+ }
+ function getPluginConfig(api) {
+     const config = api.config.plugins?.entries?.['hindsight-openclaw']?.config || {};
+     const defaultMission = 'You are an AI assistant helping users across multiple communication channels (Telegram, Slack, Discord, etc.). Remember user preferences, instructions, and important context from conversations to provide personalized assistance.';
+     return {
+         bankMission: config.bankMission || defaultMission,
+         embedPort: config.embedPort || 0,
+         daemonIdleTimeout: config.daemonIdleTimeout !== undefined ? config.daemonIdleTimeout : 0,
+         embedVersion: config.embedVersion || 'latest',
+     };
+ }
+ export default function (api) {
+     try {
+         console.log('[Hindsight] Plugin loading...');
+         // Detect LLM configuration from Moltbot
+         console.log('[Hindsight] Detecting LLM config...');
+         const llmConfig = detectLLMConfig(api);
+         if (llmConfig.provider === 'ollama') {
+             console.log(`[Hindsight] ✓ Using provider: ${llmConfig.provider}, model: ${llmConfig.model || 'default'} (no API key required)`);
+         }
+         else {
+             console.log(`[Hindsight] ✓ Using provider: ${llmConfig.provider}, model: ${llmConfig.model || 'default'} (API key: ${llmConfig.envKey})`);
+         }
+         console.log('[Hindsight] Getting plugin config...');
+         const pluginConfig = getPluginConfig(api);
+         if (pluginConfig.bankMission) {
+             console.log(`[Hindsight] Custom bank mission configured: "${pluginConfig.bankMission.substring(0, 50)}..."`);
+         }
+         console.log(`[Hindsight] Daemon idle timeout: ${pluginConfig.daemonIdleTimeout}s (0 = never timeout)`);
+         // Determine port
+         const port = pluginConfig.embedPort || Math.floor(Math.random() * 10000) + 10000;
+         console.log(`[Hindsight] Port: ${port}`);
+         // Initialize in background (non-blocking)
+         console.log('[Hindsight] Starting initialization in background...');
+         initPromise = (async () => {
+             try {
+                 // Initialize embed manager
+                 console.log('[Hindsight] Creating HindsightEmbedManager...');
+                 embedManager = new HindsightEmbedManager(port, llmConfig.provider, llmConfig.apiKey, llmConfig.model, pluginConfig.daemonIdleTimeout, pluginConfig.embedVersion);
+                 // Start the embedded server
+                 console.log('[Hindsight] Starting embedded server...');
+                 await embedManager.start();
+                 // Initialize client
+                 console.log('[Hindsight] Creating HindsightClient...');
+                 client = new HindsightClient(llmConfig.provider, llmConfig.apiKey, llmConfig.model, pluginConfig.embedVersion);
+                 // Use openclaw bank
+                 console.log(`[Hindsight] Using bank: ${BANK_NAME}`);
+                 client.setBankId(BANK_NAME);
+                 // Set bank mission
+                 if (pluginConfig.bankMission) {
+                     console.log(`[Hindsight] Setting bank mission...`);
+                     await client.setBankMission(pluginConfig.bankMission);
+                 }
+                 isInitialized = true;
+                 console.log('[Hindsight] ✓ Ready');
+             }
+             catch (error) {
+                 console.error('[Hindsight] Initialization error:', error);
+                 throw error;
+             }
+         })();
+         // Don't await - let it initialize in background
+         // Register background service for cleanup
+         console.log('[Hindsight] Registering service...');
+         api.registerService({
+             id: 'hindsight-memory',
+             async start() {
+                 // Wait for background init if still pending
+                 console.log('[Hindsight] Service start called - ensuring initialization complete...');
+                 if (initPromise)
+                     await initPromise;
+             },
+             async stop() {
+                 try {
+                     console.log('[Hindsight] Service stopping...');
+                     if (embedManager) {
+                         await embedManager.stop();
+                         embedManager = null;
+                     }
+                     client = null;
+                     isInitialized = false;
+                     console.log('[Hindsight] Service stopped');
+                 }
+                 catch (error) {
+                     console.error('[Hindsight] Service stop error:', error);
+                     throw error;
+                 }
+             },
+         });
+         console.log('[Hindsight] Plugin loaded successfully');
+         // Register agent_end hook for auto-retention
+         console.log('[Hindsight] Registering agent_end hook...');
+         // Store session key for retention
+         let currentSessionKey;
+         // Auto-recall: Inject relevant memories before agent processes the message
+         api.on('before_agent_start', async (context) => {
+             try {
+                 // Capture session key
+                 if (context.sessionKey) {
+                     currentSessionKey = context.sessionKey;
+                     console.log('[Hindsight] Captured session key:', currentSessionKey);
+                 }
+                 // Get the user's latest message for recall
+                 let prompt = context.prompt;
+                 if (!prompt || typeof prompt !== 'string' || prompt.length < 5) {
+                     return; // Skip very short messages
+                 }
+                 // Extract actual message from Telegram format: [Telegram ... GMT+1] actual message
+                 const telegramMatch = prompt.match(/\[Telegram[^\]]+\]\s*(.+)$/);
+                 if (telegramMatch) {
+                     prompt = telegramMatch[1].trim();
+                 }
+                 if (prompt.length < 5) {
+                     return; // Skip very short messages after extraction
+                 }
+                 // Wait for client to be ready
+                 const clientGlobal = global.__hindsightClient;
+                 if (!clientGlobal) {
+                     console.log('[Hindsight] Client global not available, skipping auto-recall');
+                     return;
+                 }
+                 await clientGlobal.waitForReady();
+                 const client = clientGlobal.getClient();
+                 if (!client) {
+                     console.log('[Hindsight] Client not initialized, skipping auto-recall');
+                     return;
+                 }
+                 console.log('[Hindsight] Auto-recall for prompt:', prompt.substring(0, 50));
+                 // Recall relevant memories (up to 1024 tokens)
+                 const response = await client.recall({
+                     query: prompt,
+                     max_tokens: 1024,
+                 });
+                 if (!response.results || response.results.length === 0) {
+                     console.log('[Hindsight] No memories found for auto-recall');
+                     return;
+                 }
+                 // Format memories as JSON with all fields from recall
+                 const memoriesJson = JSON.stringify(response.results, null, 2);
+                 const contextMessage = `<hindsight_memories>
+ ${memoriesJson}
+ </hindsight_memories>`;
+                 console.log(`[Hindsight] Auto-recall: Injecting ${response.results.length} memories`);
+                 // Inject context before the user message
+                 return { prependContext: contextMessage };
+             }
+             catch (error) {
+                 console.error('[Hindsight] Auto-recall error:', error);
+                 return;
+             }
+         });
+         api.on('agent_end', async (event) => {
+             try {
+                 console.log('[Hindsight Hook] agent_end triggered');
+                 // Check event success and messages
+                 if (!event.success || !Array.isArray(event.messages) || event.messages.length === 0) {
+                     console.log('[Hindsight Hook] Skipping: success:', event.success, 'messages:', event.messages?.length);
+                     return;
+                 }
+                 // Wait for client to be ready
+                 const clientGlobal = global.__hindsightClient;
+                 if (!clientGlobal) {
+                     console.warn('[Hindsight] Client global not found, skipping retain');
+                     return;
+                 }
+                 await clientGlobal.waitForReady();
+                 const client = clientGlobal.getClient();
+                 if (!client) {
+                     console.warn('[Hindsight] Client not initialized, skipping retain');
+                     return;
+                 }
+                 // Format messages into a transcript
+                 const transcript = event.messages
+                     .map((msg) => {
+                         const role = msg.role || 'unknown';
+                         let content = '';
+                         // Handle different content formats
+                         if (typeof msg.content === 'string') {
+                             content = msg.content;
+                         }
+                         else if (Array.isArray(msg.content)) {
+                             content = msg.content
+                                 .filter((block) => block.type === 'text')
+                                 .map((block) => block.text)
+                                 .join('\n');
+                         }
+                         return `[role: ${role}]\n${content}\n[${role}:end]`;
+                     })
+                     .join('\n\n');
+                 if (!transcript.trim() || transcript.length < 10) {
+                     console.log('[Hindsight Hook] Transcript too short, skipping');
+                     return;
+                 }
+                 // Use session key as document ID
+                 const documentId = currentSessionKey || 'default-session';
+                 // Retain to Hindsight
+                 await client.retain({
+                     content: transcript,
+                     document_id: documentId,
+                     metadata: {
+                         retained_at: new Date().toISOString(),
+                         message_count: event.messages.length,
+                     },
+                 });
+                 console.log(`[Hindsight] Retained ${event.messages.length} messages for session ${documentId}`);
+             }
+             catch (error) {
+                 console.error('[Hindsight] Error retaining messages:', error);
+             }
+         });
+         console.log('[Hindsight] Hook registered');
+     }
+     catch (error) {
+         console.error('[Hindsight] Plugin loading error:', error);
+         if (error instanceof Error) {
+             console.error('[Hindsight] Error stack:', error.stack);
+         }
+         throw error;
+     }
+ }
+ // Export client getter for tools
+ export function getClient() {
+     return client;
+ }
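Retention and recall above run automatically through the `before_agent_start` and `agent_end` hooks. For completeness, here is a minimal sketch of driving the exported client directly from code running in the same OpenClaw process after the plugin has loaded; the import specifier and helper name are illustrative assumptions, while the `retain`/`recall` request shapes match the plugin's type declarations. Note that `getClient()` returns `null` until background initialization finishes.

```typescript
// Hypothetical direct use of the exported client from another extension
// running in the same process (import path assumed from package.json "main").
import { getClient } from '@vectorize-io/hindsight-openclaw';

export async function noteAndLookup(note: string, topic: string): Promise<void> {
  const client = getClient();
  if (!client) {
    return; // Plugin still initializing in the background
  }
  // Queue a memory for background processing (RetainRequest: content + optional document_id)
  await client.retain({ content: note, document_id: 'manual-notes' });
  // Pull back up to ~512 tokens of related memories (RecallRequest: query + optional max_tokens)
  const { results } = await client.recall({ query: topic, max_tokens: 512 });
  for (const r of results) {
    console.log(r.score, r.content);
  }
}
```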
@@ -0,0 +1,27 @@
+ declare module 'moltbot/plugin-sdk' {
+     interface HookEvent {
+         type: 'command' | 'session' | 'agent' | 'gateway' | 'tool_result_persist';
+         action?: string;
+         sessionKey?: string;
+         timestamp?: string;
+         messages?: string[];
+         context?: {
+             sessionEntry?: {
+                 messages?: Array<{
+                     role: string;
+                     content: string;
+                 }>;
+             };
+             sessionId?: string;
+             sessionKey?: string;
+             sessionFile?: string;
+             commandSource?: string;
+             senderId?: string;
+             workspaceDir?: string;
+             bootstrapFiles?: string[];
+             cfg?: any;
+         };
+     }
+     type HookHandler = (event: HookEvent) => Promise<void>;
+     function registerPluginHooksFromDir(api: any, dir: string): void;
+ }
@@ -0,0 +1,3 @@
+ "use strict";
+ // Type definitions for moltbot plugin SDK
+ // These are minimal types based on the documentation
package/dist/types.d.ts ADDED
@@ -0,0 +1,72 @@
+ export interface MoltbotPluginAPI {
+     config: MoltbotConfig;
+     registerService(config: ServiceConfig): void;
+     on(event: string, handler: (context: any) => void | Promise<void | {
+         prependContext?: string;
+     }>): void;
+ }
+ export interface MoltbotConfig {
+     agents?: {
+         defaults?: {
+             models?: {
+                 [modelName: string]: {
+                     alias?: string;
+                 };
+             };
+         };
+     };
+     plugins?: {
+         entries?: {
+             [pluginId: string]: {
+                 enabled?: boolean;
+                 config?: PluginConfig;
+             };
+         };
+     };
+ }
+ export interface PluginConfig {
+     bankMission?: string;
+     embedPort?: number;
+     daemonIdleTimeout?: number;
+     embedVersion?: string;
+ }
+ export interface ServiceConfig {
+     id: string;
+     start(): Promise<void>;
+     stop(): Promise<void>;
+ }
+ export interface RetainRequest {
+     content: string;
+     document_id?: string;
+     metadata?: Record<string, unknown>;
+ }
+ export interface RetainResponse {
+     message: string;
+     document_id: string;
+     memory_unit_ids: string[];
+ }
+ export interface RecallRequest {
+     query: string;
+     max_tokens?: number;
+ }
+ export interface RecallResponse {
+     results: MemoryResult[];
+ }
+ export interface MemoryResult {
+     content: string;
+     score: number;
+     metadata?: {
+         document_id?: string;
+         created_at?: string;
+         source?: string;
+     };
+ }
+ export interface CreateBankRequest {
+     name: string;
+     background_context?: string;
+ }
+ export interface CreateBankResponse {
+     bank_id: string;
+     name: string;
+     created_at: string;
+ }
package/dist/types.js ADDED
@@ -0,0 +1,2 @@
+ // Moltbot plugin API types (minimal subset needed for this plugin)
+ export {};
package/package.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "name": "@vectorize-io/hindsight-openclaw",
+   "version": "0.4.4",
+   "description": "Hindsight memory plugin for OpenClaw - biomimetic long-term memory with fact extraction",
+   "main": "dist/index.js",
+   "types": "dist/index.d.ts",
+   "type": "module",
+   "clawdbot": {
+     "extensions": [
+       "./dist/index.js"
+     ]
+   },
+   "keywords": [
+     "openclaw",
+     "memory",
+     "ai",
+     "agent",
+     "hindsight",
+     "long-term-memory"
+   ],
+   "author": "Vectorize <support@vectorize.io>",
+   "license": "MIT",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/vectorize-io/hindsight.git",
+     "directory": "hindsight-integrations/openclaw"
+   },
+   "files": [
+     "dist",
+     "clawdbot.plugin.json",
+     "README.md"
+   ],
+   "scripts": {
+     "build": "tsc",
+     "dev": "tsc --watch",
+     "clean": "rm -rf dist",
+     "test": "vitest run",
+     "test:watch": "vitest",
+     "prepublishOnly": "npm run clean && npm run build"
+   },
+   "dependencies": {
+     "node-fetch": "^3.3.2"
+   },
+   "devDependencies": {
+     "@types/node": "^20.0.0",
+     "@vitest/ui": "^4.0.18",
+     "typescript": "^5.3.0",
+     "vitest": "^4.0.18"
+   },
+   "engines": {
+     "node": ">=22"
+   }
+ }