@vectorize-io/hindsight-openclaw 0.4.6 → 0.4.8

This diff shows the changes between two publicly released versions of the package, exactly as they appear in their public registry; it is provided for informational purposes only.
package/README.md CHANGED
@@ -5,9 +5,15 @@ Biomimetic long-term memory for [OpenClaw](https://openclaw.ai) using [Hindsight
  ## Quick Start

  ```bash
- # 1. Configure your LLM provider
+ # 1. Configure your LLM provider for memory extraction
+ # Option A: OpenAI
  export OPENAI_API_KEY="sk-your-key"
- openclaw config set 'agents.defaults.models."openai/gpt-4o-mini"' '{}'
+
+ # Option B: Claude Code (no API key needed)
+ export HINDSIGHT_API_LLM_PROVIDER=claude-code
+
+ # Option C: OpenAI Codex (no API key needed)
+ export HINDSIGHT_API_LLM_PROVIDER=openai-codex

  # 2. Install and enable the plugin
  openclaw plugins install @vectorize-io/hindsight-openclaw
@@ -24,6 +30,40 @@ For full documentation, configuration options, troubleshooting, and development

  **[OpenClaw Integration Documentation](https://vectorize.io/hindsight/sdks/integrations/openclaw)**

+ ## Development
+
+ To test local changes to the Hindsight package before publishing:
+
+ 1. Add `embedPackagePath` to your plugin config in `~/.openclaw/openclaw.json`:
+ ```json
+ {
+   "plugins": {
+     "entries": {
+       "hindsight-openclaw": {
+         "enabled": true,
+         "config": {
+           "embedPackagePath": "/path/to/hindsight-wt3/hindsight-embed"
+         }
+       }
+     }
+   }
+ }
+ ```
+
+ 2. The plugin will then use `uv run --directory <path> hindsight-embed` instead of `uvx hindsight-embed@latest`.
+
+ 3. To use a specific profile for testing:
+ ```bash
+ # Check daemon status
+ uvx hindsight-embed@latest -p openclaw daemon status
+
+ # View logs
+ tail -f ~/.hindsight/profiles/openclaw.log
+
+ # List profiles
+ uvx hindsight-embed@latest profile list
+ ```
+
  ## Links

  - [Hindsight Documentation](https://vectorize.io/hindsight)
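
Since the Quick Start now offers three ways to select a provider, it is worth knowing how they interact. Per the `detectLLMConfig` rewrite in `dist/index.js` below, resolution is tiered: `HINDSIGHT_API_LLM_*` env vars win, then the plugin config, then auto-detection from standard provider keys. A condensed TypeScript sketch of that precedence (simplified to a single auto-detected provider; the published code walks a table covering OpenAI, Anthropic, Gemini, and Groq):

```ts
// Precedence sketch: env override > plugin config > auto-detect.
// Simplified; the real code also resolves default models, API keys,
// and base URLs per provider.
function resolveProvider(pluginProvider?: string): { provider: string; source: string } {
    const override = process.env.HINDSIGHT_API_LLM_PROVIDER;
    if (override) {
        // Options B and C in the Quick Start land here
        return { provider: override, source: 'HINDSIGHT_API_LLM_PROVIDER override' };
    }
    if (pluginProvider) {
        // llmProvider from openclaw.json
        return { provider: pluginProvider, source: 'plugin config' };
    }
    if (process.env.OPENAI_API_KEY) {
        // Option A in the Quick Start lands here
        return { provider: 'openai', source: 'auto-detected from OPENAI_API_KEY' };
    }
    throw new Error('No LLM configuration found for Hindsight memory plugin.');
}
```
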
package/dist/client.d.ts CHANGED
@@ -5,10 +5,14 @@ export declare class HindsightClient {
      private llmApiKey;
      private llmModel?;
      private embedVersion;
-     constructor(llmProvider: string, llmApiKey: string, llmModel?: string, embedVersion?: string);
+     private embedPackagePath?;
+     constructor(llmProvider: string, llmApiKey: string, llmModel?: string, embedVersion?: string, embedPackagePath?: string);
+     /**
+      * Get the command prefix to run hindsight-embed (either local or from PyPI)
+      */
+     private getEmbedCommandPrefix;
      setBankId(bankId: string): void;
      setBankMission(mission: string): Promise<void>;
-     private getEnv;
      retain(request: RetainRequest): Promise<RetainResponse>;
      recall(request: RecallRequest): Promise<RecallResponse>;
  }
package/dist/client.js CHANGED
@@ -7,11 +7,27 @@ export class HindsightClient {
      llmApiKey;
      llmModel;
      embedVersion;
-     constructor(llmProvider, llmApiKey, llmModel, embedVersion = 'latest') {
+     embedPackagePath;
+     constructor(llmProvider, llmApiKey, llmModel, embedVersion = 'latest', embedPackagePath) {
          this.llmProvider = llmProvider;
          this.llmApiKey = llmApiKey;
          this.llmModel = llmModel;
          this.embedVersion = embedVersion || 'latest';
+         this.embedPackagePath = embedPackagePath;
+     }
+     /**
+      * Get the command prefix to run hindsight-embed (either local or from PyPI)
+      */
+     getEmbedCommandPrefix() {
+         if (this.embedPackagePath) {
+             // Local package: uv run --directory <path> hindsight-embed
+             return `uv run --directory ${this.embedPackagePath} hindsight-embed`;
+         }
+         else {
+             // PyPI package: uvx hindsight-embed@version
+             const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
+             return `uvx ${embedPackage}`;
+         }
      }
      setBankId(bankId) {
          this.bankId = bankId;
@@ -21,10 +37,10 @@ export class HindsightClient {
              return;
          }
          const escapedMission = mission.replace(/'/g, "'\\''"); // Escape single quotes
-         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
-         const cmd = `uvx ${embedPackage} bank mission ${this.bankId} '${escapedMission}'`;
+         const embedCmd = this.getEmbedCommandPrefix();
+         const cmd = `${embedCmd} --profile openclaw bank mission ${this.bankId} '${escapedMission}'`;
          try {
-             const { stdout } = await execAsync(cmd, { env: this.getEnv() });
+             const { stdout } = await execAsync(cmd);
              console.log(`[Hindsight] Bank mission set: ${stdout.trim()}`);
          }
          catch (error) {
@@ -32,24 +48,13 @@ export class HindsightClient {
              console.warn(`[Hindsight] Could not set bank mission (bank may not exist yet): ${error}`);
          }
      }
-     getEnv() {
-         const env = {
-             ...process.env,
-             HINDSIGHT_EMBED_LLM_PROVIDER: this.llmProvider,
-             HINDSIGHT_EMBED_LLM_API_KEY: this.llmApiKey,
-         };
-         if (this.llmModel) {
-             env.HINDSIGHT_EMBED_LLM_MODEL = this.llmModel;
-         }
-         return env;
-     }
      async retain(request) {
          const content = request.content.replace(/'/g, "'\\''"); // Escape single quotes
          const docId = request.document_id || 'conversation';
-         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
-         const cmd = `uvx ${embedPackage} memory retain ${this.bankId} '${content}' --doc-id '${docId}' --async`;
+         const embedCmd = this.getEmbedCommandPrefix();
+         const cmd = `${embedCmd} --profile openclaw memory retain ${this.bankId} '${content}' --doc-id '${docId}' --async`;
          try {
-             const { stdout } = await execAsync(cmd, { env: this.getEnv() });
+             const { stdout } = await execAsync(cmd);
              console.log(`[Hindsight] Retained (async): ${stdout.trim()}`);
              // Return a simple response
              return {
@@ -65,10 +70,10 @@ export class HindsightClient {
      async recall(request) {
          const query = request.query.replace(/'/g, "'\\''"); // Escape single quotes
          const maxTokens = request.max_tokens || 1024;
-         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
-         const cmd = `uvx ${embedPackage} memory recall ${this.bankId} '${query}' --output json --max-tokens ${maxTokens}`;
+         const embedCmd = this.getEmbedCommandPrefix();
+         const cmd = `${embedCmd} --profile openclaw memory recall ${this.bankId} '${query}' --output json --max-tokens ${maxTokens}`;
          try {
-             const { stdout } = await execAsync(cmd, { env: this.getEnv() });
+             const { stdout } = await execAsync(cmd);
              // Parse JSON output - returns { entities: {...}, results: [...] }
              const response = JSON.parse(stdout);
              const results = response.results || [];
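
Two details in the new client code are worth calling out: every user-supplied string is single-quote-escaped before being spliced into the shell command, and the `uvx`-vs-`uv run` choice is centralized in `getEmbedCommandPrefix`. A minimal standalone sketch of both patterns (the helper names here are illustrative, not part of the package):

```ts
// Wrap a payload in single quotes, escaping embedded quotes as '\''
// (end quote, escaped literal quote, reopen quote) so arbitrary memory
// content survives the shell unchanged.
function shellSingleQuote(value: string): string {
    return `'${value.replace(/'/g, "'\\''")}'`;
}

// Same dispatch as getEmbedCommandPrefix: a local checkout via uv run,
// otherwise the published package via uvx.
function embedCommandPrefix(embedPackagePath?: string, embedVersion = 'latest'): string {
    return embedPackagePath
        ? `uv run --directory ${embedPackagePath} hindsight-embed` // local checkout
        : `uvx hindsight-embed@${embedVersion}`;                   // published package
}

// e.g. retaining a memory that itself contains a single quote:
const cmd = `${embedCommandPrefix()} --profile openclaw memory retain openclaw ${shellSingleQuote("user's preference")} --doc-id 'conversation' --async`;
console.log(cmd);
```
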
@@ -6,14 +6,22 @@ export declare class HindsightEmbedManager {
      private llmProvider;
      private llmApiKey;
      private llmModel?;
+     private llmBaseUrl?;
      private daemonIdleTimeout;
      private embedVersion;
-     constructor(port: number, llmProvider: string, llmApiKey: string, llmModel?: string, daemonIdleTimeout?: number, // Default: never timeout
-     embedVersion?: string);
+     private embedPackagePath?;
+     constructor(port: number, llmProvider: string, llmApiKey: string, llmModel?: string, llmBaseUrl?: string, daemonIdleTimeout?: number, // Default: never timeout
+     embedVersion?: string, // Default: latest
+     embedPackagePath?: string);
+     /**
+      * Get the command to run hindsight-embed (either local or from PyPI)
+      */
+     private getEmbedCommand;
      start(): Promise<void>;
      stop(): Promise<void>;
      private waitForReady;
      getBaseUrl(): string;
      isRunning(): boolean;
-     private writeConfigEnv;
+     checkHealth(): Promise<boolean>;
+     private configureProfile;
  }
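
Callers constructing the manager directly should note that `llmBaseUrl` was inserted into the middle of the positional parameter list. A usage sketch against the declaration above (the import path is an assumption, since the file header for this module did not survive in this diff; values are illustrative):

```ts
import { HindsightEmbedManager } from './embed-manager'; // path assumed

const manager = new HindsightEmbedManager(
    9077,           // port (now honored; 0.4.6 hardcoded 8889)
    'openai',       // llmProvider
    'sk-your-key',  // llmApiKey
    'gpt-4o-mini',  // llmModel
    undefined,      // llmBaseUrl (e.g. an OpenAI-compatible gateway)
    0,              // daemonIdleTimeout: never time out
    'latest',       // embedVersion
    undefined,      // embedPackagePath (dev only)
);
```
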
@@ -1,5 +1,4 @@
  import { spawn } from 'child_process';
- import { promises as fs } from 'fs';
  import { join } from 'path';
  import { homedir } from 'os';
  export class HindsightEmbedManager {
@@ -10,38 +9,67 @@ export class HindsightEmbedManager {
      llmProvider;
      llmApiKey;
      llmModel;
+     llmBaseUrl;
      daemonIdleTimeout;
      embedVersion;
-     constructor(port, llmProvider, llmApiKey, llmModel, daemonIdleTimeout = 0, // Default: never timeout
-     embedVersion = 'latest' // Default: latest
+     embedPackagePath;
+     constructor(port, llmProvider, llmApiKey, llmModel, llmBaseUrl, daemonIdleTimeout = 0, // Default: never timeout
+     embedVersion = 'latest', // Default: latest
+     embedPackagePath // Local path to hindsight package
      ) {
-         this.port = 8889; // hindsight-embed uses fixed port 8889
-         this.baseUrl = `http://127.0.0.1:8889`;
+         // Use the configured port (default: 9077 from config)
+         this.port = port;
+         this.baseUrl = `http://127.0.0.1:${port}`;
          this.embedDir = join(homedir(), '.openclaw', 'hindsight-embed');
          this.llmProvider = llmProvider;
          this.llmApiKey = llmApiKey;
          this.llmModel = llmModel;
+         this.llmBaseUrl = llmBaseUrl;
          this.daemonIdleTimeout = daemonIdleTimeout;
          this.embedVersion = embedVersion || 'latest';
+         this.embedPackagePath = embedPackagePath;
+     }
+     /**
+      * Get the command to run hindsight-embed (either local or from PyPI)
+      */
+     getEmbedCommand() {
+         if (this.embedPackagePath) {
+             // Local package: uv run --directory <path> hindsight-embed
+             return ['uv', 'run', '--directory', this.embedPackagePath, 'hindsight-embed'];
+         }
+         else {
+             // PyPI package: uvx hindsight-embed@version
+             const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
+             return ['uvx', embedPackage];
+         }
      }
      async start() {
          console.log(`[Hindsight] Starting hindsight-embed daemon...`);
-         // Build environment variables
+         // Build environment variables using standard HINDSIGHT_API_LLM_* variables
          const env = {
              ...process.env,
-             HINDSIGHT_EMBED_LLM_PROVIDER: this.llmProvider,
-             HINDSIGHT_EMBED_LLM_API_KEY: this.llmApiKey,
+             HINDSIGHT_API_LLM_PROVIDER: this.llmProvider,
+             HINDSIGHT_API_LLM_API_KEY: this.llmApiKey,
              HINDSIGHT_EMBED_DAEMON_IDLE_TIMEOUT: this.daemonIdleTimeout.toString(),
          };
          if (this.llmModel) {
-             env['HINDSIGHT_EMBED_LLM_MODEL'] = this.llmModel;
+             env['HINDSIGHT_API_LLM_MODEL'] = this.llmModel;
+         }
+         // Pass through base URL for OpenAI-compatible providers (OpenRouter, etc.)
+         if (this.llmBaseUrl) {
+             env['HINDSIGHT_API_LLM_BASE_URL'] = this.llmBaseUrl;
+         }
+         // On macOS, force CPU for embeddings/reranker to avoid MPS/Metal issues in daemon mode
+         if (process.platform === 'darwin') {
+             env['HINDSIGHT_API_EMBEDDINGS_LOCAL_FORCE_CPU'] = '1';
+             env['HINDSIGHT_API_RERANKER_LOCAL_FORCE_CPU'] = '1';
          }
-         // Write env vars to ~/.hindsight/config.env for daemon persistence
-         await this.writeConfigEnv(env);
-         // Start hindsight-embed daemon (it manages itself)
-         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
-         const startDaemon = spawn('uvx', [embedPackage, 'daemon', 'start'], {
-             env,
+         // Configure "openclaw" profile using hindsight-embed configure (non-interactive)
+         console.log('[Hindsight] Configuring "openclaw" profile...');
+         await this.configureProfile(env);
+         // Start hindsight-embed daemon with openclaw profile
+         const embedCmd = this.getEmbedCommand();
+         const startDaemon = spawn(embedCmd[0], [...embedCmd.slice(1), 'daemon', '--profile', 'openclaw', 'start'], {
              stdio: 'pipe',
          });
          // Collect output
@@ -77,8 +105,8 @@ export class HindsightEmbedManager {
      }
      async stop() {
          console.log('[Hindsight] Stopping hindsight-embed daemon...');
-         const embedPackage = this.embedVersion ? `hindsight-embed@${this.embedVersion}` : 'hindsight-embed@latest';
-         const stopDaemon = spawn('uvx', [embedPackage, 'daemon', 'stop'], {
+         const embedCmd = this.getEmbedCommand();
+         const stopDaemon = spawn(embedCmd[0], [...embedCmd.slice(1), 'daemon', '--profile', 'openclaw', 'stop'], {
              stdio: 'pipe',
          });
          await new Promise((resolve) => {
@@ -120,58 +148,63 @@ export class HindsightEmbedManager {
      isRunning() {
          return this.process !== null;
      }
-     async writeConfigEnv(env) {
-         const hindsightDir = join(homedir(), '.hindsight');
-         const embedConfigPath = join(hindsightDir, 'embed');
-         // Ensure directory exists
-         await fs.mkdir(hindsightDir, { recursive: true });
-         // Read existing config to preserve extra settings
-         let existingContent = '';
-         let extraSettings = [];
+     async checkHealth() {
          try {
-             existingContent = await fs.readFile(embedConfigPath, 'utf-8');
-             // Extract non-LLM settings (like FORCE_CPU flags)
-             const lines = existingContent.split('\n');
-             for (const line of lines) {
-                 const trimmed = line.trim();
-                 if (trimmed && !trimmed.startsWith('#') &&
-                     !trimmed.startsWith('HINDSIGHT_EMBED_LLM_') &&
-                     !trimmed.startsWith('HINDSIGHT_EMBED_BANK_ID') &&
-                     !trimmed.startsWith('HINDSIGHT_EMBED_DAEMON_IDLE_TIMEOUT')) {
-                     extraSettings.push(line);
-                 }
-             }
+             const response = await fetch(`${this.baseUrl}/health`, { signal: AbortSignal.timeout(2000) });
+             return response.ok;
          }
          catch {
-             // File doesn't exist yet, that's fine
+             return false;
          }
-         // Build config file with header
-         const configLines = [
-             '# Hindsight Embed Configuration',
-             '# Generated by OpenClaw Hindsight plugin',
-             '',
+     }
+     async configureProfile(env) {
+         // Build profile create command args with --merge, --port and --env flags
+         // Use --merge to allow updating existing profile
+         const createArgs = ['profile', 'create', 'openclaw', '--merge', '--port', this.port.toString()];
+         // Add all environment variables as --env flags
+         const envVars = [
+             'HINDSIGHT_API_LLM_PROVIDER',
+             'HINDSIGHT_API_LLM_MODEL',
+             'HINDSIGHT_API_LLM_API_KEY',
+             'HINDSIGHT_API_LLM_BASE_URL',
+             'HINDSIGHT_EMBED_DAEMON_IDLE_TIMEOUT',
+             'HINDSIGHT_API_EMBEDDINGS_LOCAL_FORCE_CPU',
+             'HINDSIGHT_API_RERANKER_LOCAL_FORCE_CPU',
          ];
-         // Add LLM config
-         if (env.HINDSIGHT_EMBED_LLM_PROVIDER) {
-             configLines.push(`HINDSIGHT_EMBED_LLM_PROVIDER=${env.HINDSIGHT_EMBED_LLM_PROVIDER}`);
-         }
-         if (env.HINDSIGHT_EMBED_LLM_MODEL) {
-             configLines.push(`HINDSIGHT_EMBED_LLM_MODEL=${env.HINDSIGHT_EMBED_LLM_MODEL}`);
-         }
-         if (env.HINDSIGHT_EMBED_LLM_API_KEY) {
-             configLines.push(`HINDSIGHT_EMBED_LLM_API_KEY=${env.HINDSIGHT_EMBED_LLM_API_KEY}`);
-         }
-         if (env.HINDSIGHT_EMBED_DAEMON_IDLE_TIMEOUT) {
-             configLines.push(`HINDSIGHT_EMBED_DAEMON_IDLE_TIMEOUT=${env.HINDSIGHT_EMBED_DAEMON_IDLE_TIMEOUT}`);
-         }
-         // Add extra settings if they exist
-         if (extraSettings.length > 0) {
-             configLines.push('');
-             configLines.push('# Additional settings');
-             configLines.push(...extraSettings);
+         for (const envVar of envVars) {
+             if (env[envVar]) {
+                 createArgs.push('--env', `${envVar}=${env[envVar]}`);
+             }
          }
-         // Write to file
-         await fs.writeFile(embedConfigPath, configLines.join('\n') + '\n', 'utf-8');
-         console.log(`[Hindsight] Wrote config to ${embedConfigPath}`);
+         // Run profile create command (non-interactive, overwrites if exists)
+         const embedCmd = this.getEmbedCommand();
+         const create = spawn(embedCmd[0], [...embedCmd.slice(1), ...createArgs], {
+             stdio: 'pipe',
+         });
+         let output = '';
+         create.stdout?.on('data', (data) => {
+             const text = data.toString();
+             output += text;
+             console.log(`[Hindsight] ${text.trim()}`);
+         });
+         create.stderr?.on('data', (data) => {
+             const text = data.toString();
+             output += text;
+             console.error(`[Hindsight] ${text.trim()}`);
+         });
+         await new Promise((resolve, reject) => {
+             create.on('exit', (code) => {
+                 if (code === 0) {
+                     console.log('[Hindsight] Profile "openclaw" configured successfully');
+                     resolve();
+                 }
+                 else {
+                     reject(new Error(`Profile create failed with code ${code}: ${output}`));
+                 }
+             });
+             create.on('error', (error) => {
+                 reject(error);
+             });
+         });
      }
  }
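
The largest behavioral change in this file: rather than persisting env vars to a config file under `~/.hindsight`, the manager now shells out to `hindsight-embed profile create --merge`, forwarding settings as `--env` flags. A condensed sketch of the argv construction (the helper name is illustrative; the flags and variable names are taken from the diff above):

```ts
// Build the argv for `hindsight-embed profile create`; only variables
// that are actually set in the env map become --env flags.
function buildProfileCreateArgs(port: number, env: Record<string, string | undefined>): string[] {
    const args = ['profile', 'create', 'openclaw', '--merge', '--port', String(port)];
    const passthrough = [
        'HINDSIGHT_API_LLM_PROVIDER',
        'HINDSIGHT_API_LLM_MODEL',
        'HINDSIGHT_API_LLM_API_KEY',
        'HINDSIGHT_API_LLM_BASE_URL',
        'HINDSIGHT_EMBED_DAEMON_IDLE_TIMEOUT',
    ];
    for (const name of passthrough) {
        if (env[name]) args.push('--env', `${name}=${env[name]}`);
    }
    return args;
}

// buildProfileCreateArgs(9077, { HINDSIGHT_API_LLM_PROVIDER: 'openai' })
// -> ['profile', 'create', 'openclaw', '--merge', '--port', '9077',
//     '--env', 'HINDSIGHT_API_LLM_PROVIDER=openai']
```

The `--merge` flag matters here: it lets repeated plugin startups update the existing "openclaw" profile in place instead of failing or clobbering settings added out of band.
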
package/dist/index.js CHANGED
@@ -24,69 +24,102 @@ const __filename = fileURLToPath(import.meta.url);
  const __dirname = dirname(__filename);
  // Default bank name
  const BANK_NAME = 'openclaw';
- // Provider mapping: moltbot provider name -> hindsight provider name
- const PROVIDER_MAP = {
-     anthropic: 'anthropic',
-     openai: 'openai',
-     'openai-codex': 'openai',
-     gemini: 'gemini',
-     groq: 'groq',
-     ollama: 'ollama',
- };
- // Environment variable mapping
- const ENV_KEY_MAP = {
-     anthropic: 'ANTHROPIC_API_KEY',
-     openai: 'OPENAI_API_KEY',
-     'openai-codex': 'OPENAI_API_KEY',
-     gemini: 'GEMINI_API_KEY',
-     groq: 'GROQ_API_KEY',
-     ollama: '', // No key needed for local ollama
- };
- function detectLLMConfig(api) {
-     // Get models from config (agents.defaults.models is a dictionary of models)
-     const models = api.config.agents?.defaults?.models;
-     if (!models || Object.keys(models).length === 0) {
-         throw new Error('No models configured in Moltbot. Please configure at least one model in agents.defaults.models');
+ // Provider detection from standard env vars
+ const PROVIDER_DETECTION = [
+     { name: 'openai', keyEnv: 'OPENAI_API_KEY', defaultModel: 'gpt-4o-mini' },
+     { name: 'anthropic', keyEnv: 'ANTHROPIC_API_KEY', defaultModel: 'claude-3-5-haiku-20241022' },
+     { name: 'gemini', keyEnv: 'GEMINI_API_KEY', defaultModel: 'gemini-2.5-flash' },
+     { name: 'groq', keyEnv: 'GROQ_API_KEY', defaultModel: 'openai/gpt-oss-20b' },
+     { name: 'ollama', keyEnv: '', defaultModel: 'llama3.2' },
+     { name: 'openai-codex', keyEnv: '', defaultModel: 'gpt-5.2-codex' },
+     { name: 'claude-code', keyEnv: '', defaultModel: 'claude-sonnet-4-5-20250929' },
+ ];
+ function detectLLMConfig(pluginConfig) {
+     // Override values from HINDSIGHT_API_LLM_* env vars (highest priority)
+     const overrideProvider = process.env.HINDSIGHT_API_LLM_PROVIDER;
+     const overrideModel = process.env.HINDSIGHT_API_LLM_MODEL;
+     const overrideKey = process.env.HINDSIGHT_API_LLM_API_KEY;
+     const overrideBaseUrl = process.env.HINDSIGHT_API_LLM_BASE_URL;
+     // Priority 1: If provider is explicitly set via env var, use that
+     if (overrideProvider) {
+         // Providers that don't require an API key (use OAuth or local models)
+         const noKeyRequired = ['ollama', 'openai-codex', 'claude-code'];
+         if (!overrideKey && !noKeyRequired.includes(overrideProvider)) {
+             throw new Error(`HINDSIGHT_API_LLM_PROVIDER is set to "${overrideProvider}" but HINDSIGHT_API_LLM_API_KEY is not set.\n` +
+                 `Please set: export HINDSIGHT_API_LLM_API_KEY=your-api-key`);
+         }
+         const providerInfo = PROVIDER_DETECTION.find(p => p.name === overrideProvider);
+         return {
+             provider: overrideProvider,
+             apiKey: overrideKey || '',
+             model: overrideModel || (providerInfo?.defaultModel),
+             baseUrl: overrideBaseUrl,
+             source: 'HINDSIGHT_API_LLM_PROVIDER override',
+         };
      }
-     // Try all configured models to find one with an available API key
-     const configuredModels = Object.keys(models);
-     for (const modelKey of configuredModels) {
-         const [moltbotProvider, ...modelParts] = modelKey.split('/');
-         const model = modelParts.join('/');
-         const hindsightProvider = PROVIDER_MAP[moltbotProvider];
-         if (!hindsightProvider) {
-             continue; // Skip unsupported providers
+     // Priority 2: Plugin config llmProvider/llmModel
+     if (pluginConfig?.llmProvider) {
+         const providerInfo = PROVIDER_DETECTION.find(p => p.name === pluginConfig.llmProvider);
+         // Resolve API key: llmApiKeyEnv > provider's standard keyEnv
+         let apiKey = '';
+         if (pluginConfig.llmApiKeyEnv) {
+             apiKey = process.env[pluginConfig.llmApiKeyEnv] || '';
+         }
+         else if (providerInfo?.keyEnv) {
+             apiKey = process.env[providerInfo.keyEnv] || '';
          }
-         const envKey = ENV_KEY_MAP[moltbotProvider];
-         const apiKey = envKey ? process.env[envKey] || '' : '';
-         // For ollama, no key is needed
-         if (hindsightProvider === 'ollama') {
-             return { provider: hindsightProvider, apiKey: '', model, envKey: '' };
+         // Providers that don't require an API key (use OAuth or local models)
+         const noKeyRequired = ['ollama', 'openai-codex', 'claude-code'];
+         if (!apiKey && !noKeyRequired.includes(pluginConfig.llmProvider)) {
+             const keySource = pluginConfig.llmApiKeyEnv || providerInfo?.keyEnv || 'unknown';
+             throw new Error(`Plugin config llmProvider is set to "${pluginConfig.llmProvider}" but no API key found.\n` +
+                 `Expected env var: ${keySource}\n` +
+                 `Set the env var or use llmApiKeyEnv in plugin config to specify a custom env var name.`);
+         }
+         return {
+             provider: pluginConfig.llmProvider,
+             apiKey,
+             model: pluginConfig.llmModel || overrideModel || providerInfo?.defaultModel,
+             baseUrl: overrideBaseUrl,
+             source: 'plugin config',
+         };
+     }
+     // Priority 3: Auto-detect from standard provider env vars
+     for (const providerInfo of PROVIDER_DETECTION) {
+         const apiKey = providerInfo.keyEnv ? process.env[providerInfo.keyEnv] : '';
+         // Skip providers that don't use API keys in auto-detection (must be explicitly requested)
+         const noKeyRequired = ['ollama', 'openai-codex', 'claude-code'];
+         if (noKeyRequired.includes(providerInfo.name)) {
+             continue;
         }
-         // If we found a key, use this provider
         if (apiKey) {
-             return { provider: hindsightProvider, apiKey, model, envKey };
+             return {
+                 provider: providerInfo.name,
+                 apiKey,
+                 model: overrideModel || providerInfo.defaultModel,
+                 baseUrl: overrideBaseUrl, // Only use explicit HINDSIGHT_API_LLM_BASE_URL
+                 source: `auto-detected from ${providerInfo.keyEnv}`,
+             };
         }
     }
-     // No API keys found for any provider - show helpful error
-     const configuredProviders = configuredModels
-         .map(m => m.split('/')[0])
-         .filter(p => PROVIDER_MAP[p]);
-     const keyInstructions = configuredProviders
-         .map(p => {
-             const envVar = ENV_KEY_MAP[p];
-             return envVar ? ` • ${envVar} (for ${p})` : null;
-         })
-         .filter(Boolean)
-         .join('\n');
-     throw new Error(`No API keys found for Hindsight memory plugin.\n\n` +
-         `Configured providers in Moltbot: ${configuredProviders.join(', ')}\n\n` +
-         `Please set one of these environment variables:\n${keyInstructions}\n\n` +
-         `You can set them in your shell profile (~/.zshrc or ~/.bashrc):\n` +
-         ` export ANTHROPIC_API_KEY="your-key-here"\n\n` +
-         `Or run OpenClaw with the environment variable:\n` +
-         ` ANTHROPIC_API_KEY="your-key" openclaw gateway\n\n` +
-         `Alternatively, configure ollama provider which doesn't require an API key.`);
+     // No configuration found - show helpful error
+     throw new Error(`No LLM configuration found for Hindsight memory plugin.\n\n` +
+         `Option 1: Set a standard provider API key (auto-detect):\n` +
+         ` export OPENAI_API_KEY=sk-your-key # Uses gpt-4o-mini\n` +
+         ` export ANTHROPIC_API_KEY=your-key # Uses claude-3-5-haiku\n` +
+         ` export GEMINI_API_KEY=your-key # Uses gemini-2.5-flash\n` +
+         ` export GROQ_API_KEY=your-key # Uses openai/gpt-oss-20b\n\n` +
+         `Option 2: Use Codex or Claude Code (no API key needed):\n` +
+         ` export HINDSIGHT_API_LLM_PROVIDER=openai-codex # Requires 'codex auth login'\n` +
+         ` export HINDSIGHT_API_LLM_PROVIDER=claude-code # Requires Claude Code CLI\n\n` +
+         `Option 3: Set llmProvider in openclaw.json plugin config:\n` +
+         ` "llmProvider": "openai", "llmModel": "gpt-4o-mini"\n\n` +
+         `Option 4: Override with Hindsight-specific env vars:\n` +
+         ` export HINDSIGHT_API_LLM_PROVIDER=openai\n` +
+         ` export HINDSIGHT_API_LLM_MODEL=gpt-4o-mini\n` +
+         ` export HINDSIGHT_API_LLM_API_KEY=sk-your-key\n` +
+         ` export HINDSIGHT_API_LLM_BASE_URL=https://openrouter.ai/api/v1 # Optional\n\n` +
+         `Tip: Use a cheap/fast model for memory extraction (e.g., gpt-4o-mini, claude-3-5-haiku, or free models on OpenRouter)`);
  }
  function getPluginConfig(api) {
      const config = api.config.plugins?.entries?.['hindsight-openclaw']?.config || {};
@@ -96,42 +129,49 @@ function getPluginConfig(api) {
          embedPort: config.embedPort || 0,
          daemonIdleTimeout: config.daemonIdleTimeout !== undefined ? config.daemonIdleTimeout : 0,
          embedVersion: config.embedVersion || 'latest',
+         embedPackagePath: config.embedPackagePath,
+         llmProvider: config.llmProvider,
+         llmModel: config.llmModel,
+         llmApiKeyEnv: config.llmApiKeyEnv,
      };
  }
  export default function (api) {
      try {
          console.log('[Hindsight] Plugin loading...');
-         // Detect LLM configuration from Moltbot
+         // Get plugin config first (needed for LLM detection)
+         console.log('[Hindsight] Getting plugin config...');
+         const pluginConfig = getPluginConfig(api);
+         // Detect LLM configuration (env vars > plugin config > auto-detect)
          console.log('[Hindsight] Detecting LLM config...');
-         const llmConfig = detectLLMConfig(api);
+         const llmConfig = detectLLMConfig(pluginConfig);
+         const baseUrlInfo = llmConfig.baseUrl ? `, base URL: ${llmConfig.baseUrl}` : '';
+         const modelInfo = llmConfig.model || 'default';
          if (llmConfig.provider === 'ollama') {
-             console.log(`[Hindsight] ✓ Using provider: ${llmConfig.provider}, model: ${llmConfig.model || 'default'} (no API key required)`);
+             console.log(`[Hindsight] ✓ Using provider: ${llmConfig.provider}, model: ${modelInfo} (${llmConfig.source})`);
          }
          else {
-             console.log(`[Hindsight] ✓ Using provider: ${llmConfig.provider}, model: ${llmConfig.model || 'default'} (API key: ${llmConfig.envKey})`);
+             console.log(`[Hindsight] ✓ Using provider: ${llmConfig.provider}, model: ${modelInfo} (${llmConfig.source}${baseUrlInfo})`);
          }
-         console.log('[Hindsight] Getting plugin config...');
-         const pluginConfig = getPluginConfig(api);
          if (pluginConfig.bankMission) {
             console.log(`[Hindsight] Custom bank mission configured: "${pluginConfig.bankMission.substring(0, 50)}..."`);
          }
          console.log(`[Hindsight] Daemon idle timeout: ${pluginConfig.daemonIdleTimeout}s (0 = never timeout)`);
-         // Determine port
-         const port = pluginConfig.embedPort || Math.floor(Math.random() * 10000) + 10000;
-         console.log(`[Hindsight] Port: ${port}`);
+         // Get API port from config (default: 9077)
+         const apiPort = pluginConfig.apiPort || 9077;
+         console.log(`[Hindsight] API Port: ${apiPort}`);
         // Initialize in background (non-blocking)
         console.log('[Hindsight] Starting initialization in background...');
         initPromise = (async () => {
             try {
                 // Initialize embed manager
                 console.log('[Hindsight] Creating HindsightEmbedManager...');
-                 embedManager = new HindsightEmbedManager(port, llmConfig.provider, llmConfig.apiKey, llmConfig.model, pluginConfig.daemonIdleTimeout, pluginConfig.embedVersion);
+                 embedManager = new HindsightEmbedManager(apiPort, llmConfig.provider, llmConfig.apiKey, llmConfig.model, llmConfig.baseUrl, pluginConfig.daemonIdleTimeout, pluginConfig.embedVersion, pluginConfig.embedPackagePath);
                 // Start the embedded server
                 console.log('[Hindsight] Starting embedded server...');
                 await embedManager.start();
                 // Initialize client
                 console.log('[Hindsight] Creating HindsightClient...');
-                 client = new HindsightClient(llmConfig.provider, llmConfig.apiKey, llmConfig.model, pluginConfig.embedVersion);
+                 client = new HindsightClient(llmConfig.provider, llmConfig.apiKey, llmConfig.model, pluginConfig.embedVersion, pluginConfig.embedPackagePath);
                 // Use openclaw bank
                 console.log(`[Hindsight] Using bank: ${BANK_NAME}`);
                 client.setBankId(BANK_NAME);
@@ -154,10 +194,46 @@ export default function (api) {
      api.registerService({
          id: 'hindsight-memory',
          async start() {
+             console.log('[Hindsight] Service start called - checking daemon health...');
              // Wait for background init if still pending
-             console.log('[Hindsight] Service start called - ensuring initialization complete...');
-             if (initPromise)
-                 await initPromise;
+             if (initPromise) {
+                 try {
+                     await initPromise;
+                 }
+                 catch (error) {
+                     console.error('[Hindsight] Initial initialization failed:', error);
+                     // Continue to health check below
+                 }
+             }
+             // Check if daemon is actually healthy (handles SIGUSR1 restart case)
+             if (embedManager && isInitialized) {
+                 const healthy = await embedManager.checkHealth();
+                 if (healthy) {
+                     console.log('[Hindsight] Daemon is healthy');
+                     return;
+                 }
+                 console.log('[Hindsight] Daemon is not responding - reinitializing...');
+                 // Reset state for reinitialization
+                 embedManager = null;
+                 client = null;
+                 isInitialized = false;
+             }
+             // Reinitialize if needed (fresh start or recovery from dead daemon)
+             if (!isInitialized) {
+                 console.log('[Hindsight] Reinitializing daemon...');
+                 const pluginConfig = getPluginConfig(api);
+                 const llmConfig = detectLLMConfig(pluginConfig);
+                 const apiPort = pluginConfig.apiPort || 9077;
+                 embedManager = new HindsightEmbedManager(apiPort, llmConfig.provider, llmConfig.apiKey, llmConfig.model, llmConfig.baseUrl, pluginConfig.daemonIdleTimeout, pluginConfig.embedVersion, pluginConfig.embedPackagePath);
+                 await embedManager.start();
+                 client = new HindsightClient(llmConfig.provider, llmConfig.apiKey, llmConfig.model, pluginConfig.embedVersion, pluginConfig.embedPackagePath);
+                 client.setBankId(BANK_NAME);
+                 if (pluginConfig.bankMission) {
+                     await client.setBankMission(pluginConfig.bankMission);
+                 }
+                 isInitialized = true;
+                 console.log('[Hindsight] Reinitialization complete');
+             }
          },
          async stop() {
              try {
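
The service `start()` hook above is now idempotent and self-healing: a two-second `/health` probe distinguishes a live daemon from a dead one, and a dead one triggers a full rebuild of the manager and client. A standalone sketch of that pattern (the `ensureDaemon` helper is illustrative; in the package the logic is inlined in `start()`):

```ts
// Probe the daemon, and rebuild it if the probe fails. Any fetch failure
// (timeout, connection refused) is treated the same as an unhealthy reply.
async function ensureDaemon(baseUrl: string, reinitialize: () => Promise<void>): Promise<void> {
    let healthy = false;
    try {
        const res = await fetch(`${baseUrl}/health`, { signal: AbortSignal.timeout(2000) });
        healthy = res.ok;
    } catch {
        healthy = false; // daemon gone; fall through to recovery
    }
    if (!healthy) {
        await reinitialize(); // rebuild manager + client from fresh config
    }
}
```
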
package/dist/types.d.ts CHANGED
@@ -29,6 +29,11 @@ export interface PluginConfig {
      embedPort?: number;
      daemonIdleTimeout?: number;
      embedVersion?: string;
+     embedPackagePath?: string;
+     llmProvider?: string;
+     llmModel?: string;
+     llmApiKeyEnv?: string;
+     apiPort?: number;
  }
  export interface ServiceConfig {
      id: string;
@@ -24,6 +24,28 @@
          "type": "string",
          "description": "hindsight-embed version to use (e.g. 'latest', '0.4.2', or empty for latest)",
          "default": "latest"
+     },
+     "llmProvider": {
+         "type": "string",
+         "description": "LLM provider for Hindsight memory (e.g. 'openai', 'anthropic', 'gemini', 'groq', 'ollama', 'openai-codex', 'claude-code'). Takes priority over auto-detection but not over HINDSIGHT_API_LLM_PROVIDER env var.",
+         "enum": ["openai", "anthropic", "gemini", "groq", "ollama", "openai-codex", "claude-code"]
+     },
+     "llmModel": {
+         "type": "string",
+         "description": "LLM model to use (e.g. 'gpt-4o-mini', 'claude-3-5-haiku-20241022'). Used with llmProvider."
+     },
+     "llmApiKeyEnv": {
+         "type": "string",
+         "description": "Name of the env var holding the API key (e.g. 'MY_CUSTOM_KEY'). If not set, uses the standard env var for the chosen provider."
+     },
+     "embedPackagePath": {
+         "type": "string",
+         "description": "Local path to hindsight package for development (e.g. '/path/to/hindsight'). When set, uses 'uv run --directory <path>' instead of 'uvx hindsight-embed@latest'."
+     },
+     "apiPort": {
+         "type": "number",
+         "description": "Port for the openclaw profile daemon (default: 9077)",
+         "default": 9077
      }
    },
    "additionalProperties": false
@@ -44,6 +66,26 @@
      "embedVersion": {
          "label": "Hindsight Embed Version",
          "placeholder": "latest (or pin to specific version like 0.4.2)"
+     },
+     "llmProvider": {
+         "label": "LLM Provider",
+         "placeholder": "e.g. openai, anthropic, gemini, groq"
+     },
+     "llmModel": {
+         "label": "LLM Model",
+         "placeholder": "e.g. gpt-4o-mini, claude-3-5-haiku-20241022"
+     },
+     "llmApiKeyEnv": {
+         "label": "API Key Env Var",
+         "placeholder": "e.g. MY_CUSTOM_API_KEY (optional)"
+     },
+     "embedPackagePath": {
+         "label": "Local Package Path (Dev)",
+         "placeholder": "/path/to/hindsight (for local development)"
+     },
+     "apiPort": {
+         "label": "API Port",
+         "placeholder": "9077 (default)"
      }
    }
  }
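
Pulling the new schema fields together, a sketch of what a plugin entry in `~/.openclaw/openclaw.json` can now carry, expressed as a TypeScript object against the `PluginConfig` interface from `dist/types.d.ts` (the import path is an assumption about the package layout; values are illustrative):

```ts
import type { PluginConfig } from '@vectorize-io/hindsight-openclaw/dist/types'; // path assumed

const hindsightConfig: PluginConfig = {
    llmProvider: 'openai',         // beats auto-detection, loses to HINDSIGHT_API_LLM_PROVIDER
    llmModel: 'gpt-4o-mini',
    llmApiKeyEnv: 'MY_CUSTOM_KEY', // read the key from a non-standard env var
    apiPort: 9077,                 // daemon port for the "openclaw" profile
    // embedPackagePath: '/path/to/hindsight',  // dev only: run from a local checkout
};
```
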
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@vectorize-io/hindsight-openclaw",
3
- "version": "0.4.6",
3
+ "version": "0.4.8",
4
4
  "description": "Hindsight memory plugin for OpenClaw - biomimetic long-term memory with fact extraction",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",