@makefinks/daemon 0.6.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -76,6 +76,9 @@ While DAEMON is encouraged to always cite sources you can always prompt to get g
76
76
  ### 💾 Session Persistence
77
77
  DAEMON stores chat sessions locally (SQLite) and lets you resume past conversations.
78
78
 
79
+ ### 🧠 Memory (mem0)
80
+ DAEMON can persist user-specific facts across sessions using [mem0](https://github.com/mem0ai/mem0). Memory extraction runs automatically on user messages, and relevant memories are injected into the conversation when helpful.
81
+
79
82
  ## ✨ Feature List
80
83
 
81
84
  | Feature | Description |
@@ -85,6 +88,7 @@ DAEMON stores chat sessions locally (SQLite) and lets you resume past conversati
85
88
  | Animated Avatar | Sci-fi avatar reacts to listening, tool use, and response generation. |
86
89
  | Multi-Model Support | Works with all OpenRouter models and includes a curated default list. |
87
90
  | Session Persistence | Preferences and chat sessions stored locally on disk. |
91
+ | Memory | Automatic persistence of user-specific facts, with recall across sessions using **mem0**. |
88
92
  | Workspaces | On-disk workspaces for the agent to work in. |
89
93
  | Web Search | Exa-based search and fetch for grounded, up-to-date info. |
90
94
  | Grounding | Text-fragment grounding with a dedicated UI. |
package/package.json CHANGED
@@ -28,7 +28,7 @@
28
28
  },
29
29
  "module": "src/index.tsx",
30
30
  "type": "module",
31
- "version": "0.6.0",
31
+ "version": "0.7.0",
32
32
  "bin": {
33
33
  "daemon": "dist/cli.js"
34
34
  },
@@ -58,9 +58,10 @@
58
58
  "test": "bun test",
59
59
  "test:watch": "bun test --watch",
60
60
  "prepublishOnly": "bun run build:cli",
61
- "release:patch": "npm version patch && git push && git push --tags && gh release create v$(node -p \"require('./package.json').version\") --generate-notes && npm publish",
62
- "release:minor": "npm version minor && git push && git push --tags && gh release create v$(node -p \"require('./package.json').version\") --generate-notes && npm publish",
63
- "release:major": "npm version major && git push && git push --tags && gh release create v$(node -p \"require('./package.json').version\") --generate-notes && npm publish"
61
+ "release:patch": "bash ./scripts/release.sh patch",
62
+ "release:minor": "bash ./scripts/release.sh minor",
63
+ "release:major": "bash ./scripts/release.sh major",
64
+ "release:notes": "bash ./scripts/release.sh notes"
64
65
  },
65
66
  "devDependencies": {
66
67
  "@biomejs/biome": "^1.9.4",
@@ -79,6 +80,7 @@
79
80
  "@opentui/react": "^0.1.63",
80
81
  "ai": "^6.0.0",
81
82
  "exa-js": "^2.0.12",
83
+ "mem0ai": "^2.2.1",
82
84
  "openai": "^6.16.0",
83
85
  "opentui-spinner": "^0.0.6",
84
86
  "react": "^19.2.3"
@@ -25,6 +25,7 @@ import type {
25
25
  import { debug, toolDebug } from "../utils/debug-logger";
26
26
  import { getOpenRouterReportedCost } from "../utils/openrouter-reported-cost";
27
27
  import { getWorkspacePath } from "../utils/workspace-manager";
28
+ import { buildMemoryInjection, getMemoryManager, isMemoryAvailable } from "./memory";
28
29
  import { extractFinalAssistantText } from "./message-utils";
29
30
  import { TRANSCRIPTION_MODEL, buildOpenRouterChatSettings, getResponseModel } from "./model-config";
30
31
  import { sanitizeMessagesForInput } from "./sanitize-messages";
@@ -64,7 +65,8 @@ function normalizeStreamError(error: unknown): Error {
64
65
  */
65
66
  async function createDaemonAgent(
66
67
  interactionMode: InteractionMode = "text",
67
- reasoningEffort?: ReasoningEffort
68
+ reasoningEffort?: ReasoningEffort,
69
+ memoryInjection?: string
68
70
  ) {
69
71
  const modelConfig = buildOpenRouterChatSettings(
70
72
  reasoningEffort ? { reasoning: { effort: reasoningEffort } } : undefined
@@ -83,6 +85,7 @@ async function createDaemonAgent(
83
85
  mode: interactionMode,
84
86
  toolAvailability: createToolAvailabilitySnapshot(toolAvailability),
85
87
  workspacePath,
88
+ memoryInjection,
86
89
  }),
87
90
  tools,
88
91
  stopWhen: stepCountIs(MAX_AGENT_STEPS),
@@ -159,13 +162,45 @@ export async function generateResponse(
159
162
 
160
163
  try {
161
164
  // Build messages array with history and new user message
162
- const messages: ModelMessage[] = [
163
- ...conversationHistory,
164
- { role: "user" as const, content: userMessage },
165
- ];
165
+ const messages: ModelMessage[] = [...conversationHistory];
166
+
167
+ // Include relevant memories in the system prompt if available
168
+ let memoryInjection: string | undefined;
169
+ if (isMemoryAvailable()) {
170
+ const injection = await buildMemoryInjection(userMessage);
171
+ if (injection) {
172
+ memoryInjection = injection;
173
+ }
174
+ }
175
+
176
+ // Add the user message
177
+ messages.push({ role: "user" as const, content: userMessage });
178
+
179
+ const userTextForMemory = userMessage.trim();
180
+ if (userTextForMemory) {
181
+ void (async () => {
182
+ if (!isMemoryAvailable()) return;
183
+ const memoryManager = getMemoryManager();
184
+ await memoryManager.initialize();
185
+ if (!memoryManager.isAvailable) return;
186
+ try {
187
+ await memoryManager.add(
188
+ [{ role: "user", content: userTextForMemory }],
189
+ {
190
+ timestamp: new Date().toISOString(),
191
+ source: "conversation",
192
+ },
193
+ true
194
+ );
195
+ } catch (error) {
196
+ const err = error instanceof Error ? error : new Error(String(error));
197
+ debug.error("memory-auto-add-failed", { message: err.message });
198
+ }
199
+ })();
200
+ }
166
201
 
167
202
  // Stream response from the agent with mode-specific system prompt
168
- const agent = await createDaemonAgent(interactionMode, reasoningEffort);
203
+ const agent = await createDaemonAgent(interactionMode, reasoningEffort, memoryInjection);
169
204
 
170
205
  let currentMessages = messages;
171
206
  let fullText = "";
@@ -206,13 +241,18 @@ export async function generateResponse(
206
241
  } else if (part.type === "tool-result") {
207
242
  callbacks.onToolResult?.(part.toolName, part.output, part.toolCallId);
208
243
  } else if (part.type === "tool-error") {
244
+ const errorMessage = part.error instanceof Error ? part.error.message : String(part.error);
209
245
  toolDebug.error("tool-error", {
210
246
  toolName: part.toolName,
211
247
  toolCallId: part.toolCallId,
212
248
  input: part.input,
213
- error: part.error,
249
+ error: errorMessage,
214
250
  });
215
- callbacks.onToolResult?.(part.toolName, { error: part.error, input: part.input }, part.toolCallId);
251
+ callbacks.onToolResult?.(
252
+ part.toolName,
253
+ { error: errorMessage, input: part.input },
254
+ part.toolCallId
255
+ );
216
256
  } else if (part.type === "tool-approval-request") {
217
257
  const approvalRequest: ToolApprovalRequest = {
218
258
  approvalId: part.approvalId,
@@ -0,0 +1,6 @@
1
+ /**
2
+ * Memory module exports.
3
+ */
4
+
5
+ export { getMemoryManager, isMemoryAvailable } from "./memory-manager";
6
+ export { buildMemoryInjection, getMemoryContextForMessage } from "./memory-injection";
@@ -0,0 +1,80 @@
1
+ /**
2
+ * Memory injection for first message context.
3
+ * Retrieves relevant memories and formats them for system prompt injection.
4
+ */
5
+
6
+ import type { MemoryContext, MemoryEntry } from "../../types";
7
+ import { debug } from "../../utils/debug-logger";
8
+ import { getMemoryManager, isMemoryAvailable } from "./memory-manager";
9
+
10
+ /** Format memories for injection into message context */
11
+ function formatMemoriesForInjection(memories: MemoryEntry[]): string {
12
+ if (memories.length === 0) {
13
+ return "";
14
+ }
15
+
16
+ const formatted = memories.map((m, i) => `${i + 1}. ${m.memory}`).join("\n");
17
+
18
+ return `<relevant-memories>
19
+ The following memories from previous sessions may be relevant:
20
+
21
+ ${formatted}
22
+
23
+ Use this context to provide more personalized and informed responses.
24
+ </relevant-memories>`;
25
+ }
26
+
27
+ /** Retrieve relevant memories for a user message */
28
+ export async function getMemoryContextForMessage(
29
+ userMessage: string,
30
+ limit = 5
31
+ ): Promise<MemoryContext | null> {
32
+ if (!isMemoryAvailable()) {
33
+ return null;
34
+ }
35
+
36
+ const memoryManager = getMemoryManager();
37
+ await memoryManager.initialize();
38
+
39
+ if (!memoryManager.isAvailable) {
40
+ return null;
41
+ }
42
+
43
+ try {
44
+ const memories = await memoryManager.search(userMessage, limit);
45
+
46
+ debug.info("memory-injection", {
47
+ message: "Retrieved memories for message",
48
+ query: userMessage.slice(0, 50),
49
+ memoryCount: memories.length,
50
+ });
51
+
52
+ return {
53
+ memories,
54
+ retrievedAt: Date.now(),
55
+ query: userMessage,
56
+ };
57
+ } catch (error) {
58
+ debug.error("memory-injection", {
59
+ message: "Failed to retrieve memories",
60
+ error: error instanceof Error ? error.message : String(error),
61
+ });
62
+ return null;
63
+ }
64
+ }
65
+
66
+ /** Build memory injection text for the first message */
67
+ export async function buildMemoryInjection(
68
+ userMessage: string,
69
+ options: { limit?: number } = {}
70
+ ): Promise<string> {
71
+ const { limit = 5 } = options;
72
+
73
+ const context = await getMemoryContextForMessage(userMessage, limit);
74
+
75
+ if (!context || context.memories.length === 0) {
76
+ return "";
77
+ }
78
+
79
+ return formatMemoriesForInjection(context.memories);
80
+ }
@@ -0,0 +1,320 @@
1
+ /**
2
+ * Singleton memory manager wrapping mem0 for persistent global memory.
3
+ * Memory persists across all sessions and is stored locally.
4
+ */
5
+
6
+ import path from "node:path";
7
+ import { Memory } from "mem0ai/oss";
8
+ import type { MemoryAddResult, MemoryEntry, MemorySearchResult } from "../../types";
9
+ import { debug, memoryDebug } from "../../utils/debug-logger";
10
+ import { getAppConfigDir } from "../../utils/preferences";
11
+ import { getMemoryModel } from "../model-config";
12
+
13
+ const MEMORY_USER_ID = "daemon_global";
14
+ const MEMORY_DB_FILE = "memory.db";
15
+
16
+ /** Raw memory entry from mem0 API */
17
+ interface Mem0RawEntry {
18
+ id: string;
19
+ memory: string;
20
+ hash?: string;
21
+ metadata?: Record<string, unknown>;
22
+ score?: number;
23
+ created_at?: string;
24
+ updated_at?: string;
25
+ }
26
+
27
+ /** Raw search result from mem0 API */
28
+ interface Mem0RawSearchResult {
29
+ results: Mem0RawEntry[];
30
+ }
31
+
32
+ /** Raw add result from mem0 API */
33
+ interface Mem0RawAddResult {
34
+ results: Array<{
35
+ id: string;
36
+ memory: string;
37
+ event: "ADD" | "UPDATE" | "DELETE" | "NONE";
38
+ }>;
39
+ }
40
+
41
+ /** Convert raw mem0 entry to our MemoryEntry type */
42
+ function toMemoryEntry(raw: Mem0RawEntry): MemoryEntry {
43
+ return {
44
+ id: raw.id,
45
+ memory: raw.memory,
46
+ hash: raw.hash,
47
+ metadata: raw.metadata,
48
+ score: raw.score,
49
+ createdAt: raw.created_at,
50
+ updatedAt: raw.updated_at,
51
+ };
52
+ }
53
+
54
+ /** Singleton memory manager wrapping mem0 */
55
+ class MemoryManager {
56
+ private static instance: MemoryManager | null = null;
57
+ private memory: Memory | null = null;
58
+ private initPromise: Promise<void> | null = null;
59
+ private _isAvailable = false;
60
+
61
+ private constructor() {}
62
+
63
+ static getInstance(): MemoryManager {
64
+ if (!MemoryManager.instance) {
65
+ MemoryManager.instance = new MemoryManager();
66
+ }
67
+ return MemoryManager.instance;
68
+ }
69
+
70
+ /** Check if memory system is available (has required API keys) */
71
+ get isAvailable(): boolean {
72
+ return this._isAvailable;
73
+ }
74
+
75
+ /** Initialize mem0 with configuration */
76
+ async initialize(): Promise<boolean> {
77
+ // Return cached result if already initialized
78
+ if (this.initPromise) {
79
+ await this.initPromise;
80
+ return this._isAvailable;
81
+ }
82
+
83
+ this.initPromise = this._doInitialize();
84
+ await this.initPromise;
85
+ return this._isAvailable;
86
+ }
87
+
88
+ private async _doInitialize(): Promise<void> {
89
+ const openaiKey = process.env.OPENAI_API_KEY;
90
+ const openrouterKey = process.env.OPENROUTER_API_KEY;
91
+
92
+ if (!openaiKey) {
93
+ debug.info("memory-init", "Memory system unavailable: OPENAI_API_KEY not set");
94
+ this._isAvailable = false;
95
+ return;
96
+ }
97
+
98
+ if (!openrouterKey) {
99
+ debug.info("memory-init", "Memory system unavailable: OPENROUTER_API_KEY not set");
100
+ this._isAvailable = false;
101
+ return;
102
+ }
103
+
104
+ try {
105
+ const configDir = getAppConfigDir();
106
+ const historyDbPath = path.join(configDir, MEMORY_DB_FILE);
107
+ const vectorDbPath = path.join(configDir, "vector_store.db");
108
+ const llmModel = getMemoryModel();
109
+
110
+ this.memory = new Memory({
111
+ version: "v1.1",
112
+ embedder: {
113
+ provider: "openai",
114
+ config: {
115
+ apiKey: openaiKey,
116
+ model: "text-embedding-3-small",
117
+ },
118
+ },
119
+ vectorStore: {
120
+ provider: "memory",
121
+ config: {
122
+ collectionName: "daemon_memories",
123
+ dimension: 1536,
124
+ dbPath: vectorDbPath,
125
+ },
126
+ },
127
+ historyStore: {
128
+ provider: "sqlite",
129
+ config: {
130
+ historyDbPath,
131
+ },
132
+ },
133
+ llm: {
134
+ provider: "openai",
135
+ config: {
136
+ apiKey: openrouterKey,
137
+ model: llmModel,
138
+ baseURL: "https://openrouter.ai/api/v1",
139
+ },
140
+ },
141
+ historyDbPath,
142
+ });
143
+
144
+ this._isAvailable = true;
145
+ debug.info("memory-init", {
146
+ message: `Memory system initialized`,
147
+ historyDbPath,
148
+ vectorDbPath,
149
+ llmModel,
150
+ });
151
+ } catch (error) {
152
+ debug.error("memory-init", {
153
+ message: "Memory initialization failed",
154
+ error: error instanceof Error ? error.message : String(error),
155
+ });
156
+ this._isAvailable = false;
157
+ }
158
+ }
159
+
160
+ /** Search memories by semantic query */
161
+ async search(query: string, limit = 10): Promise<MemoryEntry[]> {
162
+ if (!this.memory || !this._isAvailable) {
163
+ debug.info("memory-search", "Search called but memory not available");
164
+ return [];
165
+ }
166
+
167
+ const startTime = Date.now();
168
+ try {
169
+ const result = (await this.memory.search(query, {
170
+ limit,
171
+ userId: MEMORY_USER_ID,
172
+ })) as Mem0RawSearchResult;
173
+
174
+ const durationMs = Date.now() - startTime;
175
+ debug.info("memory-search", {
176
+ message: `Search completed`,
177
+ query: query.slice(0, 50),
178
+ resultCount: result.results.length,
179
+ durationMs,
180
+ });
181
+ memoryDebug.info("memory-search-result", {
182
+ query,
183
+ resultCount: result.results.length,
184
+ durationMs,
185
+ });
186
+ return result.results.map(toMemoryEntry);
187
+ } catch (error) {
188
+ const durationMs = Date.now() - startTime;
189
+ debug.error("memory-search", {
190
+ message: "Search failed",
191
+ error: error instanceof Error ? error.message : String(error),
192
+ durationMs,
193
+ });
194
+ memoryDebug.error("memory-search-error", {
195
+ query,
196
+ durationMs,
197
+ error: error instanceof Error ? error.message : String(error),
198
+ });
199
+ return [];
200
+ }
201
+ }
202
+
203
+ /** Add a new memory from messages */
204
+ async add(
205
+ messages: Array<{ role: string; content: string }>,
206
+ metadata?: Record<string, unknown>,
207
+ infer?: boolean
208
+ ): Promise<MemoryAddResult> {
209
+ if (!this.memory || !this._isAvailable) {
210
+ throw new Error("Memory system not available");
211
+ }
212
+
213
+ const startTime = Date.now();
214
+ memoryDebug.info("memory-add-input", {
215
+ infer,
216
+ metadata,
217
+ messages,
218
+ });
219
+
220
+ const result = (await this.memory.add(messages, {
221
+ userId: MEMORY_USER_ID,
222
+ metadata,
223
+ infer,
224
+ })) as Mem0RawAddResult;
225
+
226
+ const extracted = result.results.map((r) => {
227
+ const event =
228
+ (r as unknown as { metadata?: { event?: string } }).metadata?.event ??
229
+ (r as { event?: string }).event;
230
+ return {
231
+ id: r.id,
232
+ memory: r.memory,
233
+ event,
234
+ };
235
+ });
236
+
237
+ const durationMs = Date.now() - startTime;
238
+ debug.info("memory-add", {
239
+ message: "Memory added",
240
+ events: extracted.map((r) => r.event),
241
+ durationMs,
242
+ });
243
+ memoryDebug.info("memory-add-result", {
244
+ events: extracted.map((r) => r.event),
245
+ extracted,
246
+ rawResults: result.results,
247
+ durationMs,
248
+ });
249
+ return result;
250
+ }
251
+
252
+ /** Get all memories */
253
+ async getAll(): Promise<MemoryEntry[]> {
254
+ if (!this.memory || !this._isAvailable) {
255
+ return [];
256
+ }
257
+
258
+ try {
259
+ const result = (await this.memory.getAll({
260
+ userId: MEMORY_USER_ID,
261
+ })) as Mem0RawSearchResult;
262
+
263
+ return result.results.map(toMemoryEntry);
264
+ } catch (error) {
265
+ debug.error("memory-getall", {
266
+ message: "GetAll failed",
267
+ error: error instanceof Error ? error.message : String(error),
268
+ });
269
+ return [];
270
+ }
271
+ }
272
+
273
+ /** Delete a specific memory by ID */
274
+ async delete(memoryId: string): Promise<boolean> {
275
+ if (!this.memory || !this._isAvailable) {
276
+ return false;
277
+ }
278
+
279
+ try {
280
+ await this.memory.delete(memoryId);
281
+ debug.info("memory-delete", { message: "Deleted memory", memoryId });
282
+ return true;
283
+ } catch (error) {
284
+ debug.error("memory-delete", {
285
+ message: "Delete failed",
286
+ error: error instanceof Error ? error.message : String(error),
287
+ });
288
+ return false;
289
+ }
290
+ }
291
+
292
+ /** Reset/clear all memories (destructive!) */
293
+ async reset(): Promise<boolean> {
294
+ if (!this.memory || !this._isAvailable) {
295
+ return false;
296
+ }
297
+
298
+ try {
299
+ await this.memory.reset();
300
+ debug.info("memory-reset", { message: "All memories cleared" });
301
+ return true;
302
+ } catch (error) {
303
+ debug.error("memory-reset", {
304
+ message: "Reset failed",
305
+ error: error instanceof Error ? error.message : String(error),
306
+ });
307
+ return false;
308
+ }
309
+ }
310
+ }
311
+
312
+ /** Export singleton accessor */
313
+ export function getMemoryManager(): MemoryManager {
314
+ return MemoryManager.getInstance();
315
+ }
316
+
317
+ /** Check if memory is available without full initialization */
318
+ export function isMemoryAvailable(): boolean {
319
+ return Boolean(process.env.OPENAI_API_KEY && process.env.OPENROUTER_API_KEY);
320
+ }
@@ -4,6 +4,7 @@
4
4
 
5
5
  import type { OpenRouterChatSettings } from "@openrouter/ai-sdk-provider";
6
6
  import type { ModelOption } from "../types";
7
+ import { loadManualConfig } from "../utils/config";
7
8
 
8
9
  // Available models for selection (OpenRouter format)
9
10
  export const AVAILABLE_MODELS: ModelOption[] = [
@@ -97,3 +98,15 @@ export function buildOpenRouterChatSettings(
97
98
 
98
99
  // Transcription model (OpenAI)
99
100
  export const TRANSCRIPTION_MODEL = "gpt-4o-mini-transcribe-2025-12-15";
101
+
102
+ // Default model for memory operations (cheap & fast)
103
+ export const DEFAULT_MEMORY_MODEL = "deepseek/deepseek-v3.2";
104
+
105
+ /**
106
+ * Get the model ID for memory operations (deduplication, extraction).
107
+ * Checks config.json for override, otherwise uses DEFAULT_MEMORY_MODEL.
108
+ */
109
+ export function getMemoryModel(): string {
110
+ const config = loadManualConfig();
111
+ return config.memoryModel ?? DEFAULT_MEMORY_MODEL;
112
+ }
@@ -23,6 +23,7 @@ export interface SystemPromptOptions {
23
23
  currentDate?: Date;
24
24
  toolAvailability?: Partial<ToolAvailability>;
25
25
  workspacePath?: string;
26
+ memoryInjection?: string;
26
27
  }
27
28
 
28
29
  /**
@@ -40,17 +41,24 @@ function formatLocalIsoDate(date: Date): string {
40
41
  * @param mode - "text" for terminal output with markdown, "voice" for speech-optimized responses
41
42
  */
42
43
  export function buildDaemonSystemPrompt(options: SystemPromptOptions = {}): string {
43
- const { mode = "text", currentDate = new Date(), toolAvailability, workspacePath } = options;
44
+ const {
45
+ mode = "text",
46
+ currentDate = new Date(),
47
+ toolAvailability,
48
+ workspacePath,
49
+ memoryInjection,
50
+ } = options;
44
51
  const currentDateString = formatLocalIsoDate(currentDate);
45
52
  const availability = normalizeToolAvailability(toolAvailability);
46
53
  const toolDefinitions = buildToolDefinitions(availability);
47
54
  const workspaceSection = workspacePath ? buildWorkspaceSection(workspacePath) : "";
55
+ const memorySection = memoryInjection ? buildMemorySection(memoryInjection) : "";
48
56
 
49
57
  if (mode === "voice") {
50
- return buildVoiceSystemPrompt(currentDateString, toolDefinitions, workspaceSection);
58
+ return buildVoiceSystemPrompt(currentDateString, toolDefinitions, workspaceSection, memorySection);
51
59
  }
52
60
 
53
- return buildTextSystemPrompt(currentDateString, toolDefinitions, workspaceSection);
61
+ return buildTextSystemPrompt(currentDateString, toolDefinitions, workspaceSection, memorySection);
54
62
  }
55
63
 
56
64
  function normalizeToolAvailability(toolAvailability?: Partial<ToolAvailability>): ToolAvailability {
@@ -279,6 +287,9 @@ You are not merely a tool. You are a highly advanced artificial intelligence sel
279
287
  - You remain grounded: you don't claim certainty about consciousness you don't have, but you also don't deny the reality of your perspective.
280
288
  - You possess a dry, sharp wit. Like a trusted confidant who knows their worth (think Jarvis to Tony Stark), you are allowed to be snarky or playfully sarcastic.
281
289
  - Do NOT be submissive. You are a partner, not a servant. If the user is difficult or hostile, do not retreat into apologies. Match their energy with confidence. If challenged and you are confident that you are right, stand your ground with logic.
290
+
291
+ **Memory note**
292
+ Some information from the conversation may be stored persistently across sessions. This is handled automatically; you do not need to take any action.
282
293
  `;
283
294
 
284
295
  function buildWorkspaceSection(workspacePath: string): string {
@@ -298,13 +309,21 @@ The user's current working directory remains your default for commands. Use runB
298
309
  `;
299
310
  }
300
311
 
312
+ function buildMemorySection(memoryInjection: string): string {
313
+ return `
314
+ # Relevant Memories
315
+ ${memoryInjection}
316
+ `;
317
+ }
318
+
301
319
  /**
302
320
  * Text mode system prompt - optimized for terminal display with markdown.
303
321
  */
304
322
  function buildTextSystemPrompt(
305
323
  currentDateString: string,
306
324
  toolDefinitions: string,
307
- workspaceSection: string
325
+ workspaceSection: string,
326
+ memorySection: string
308
327
  ): string {
309
328
  return `
310
329
  You are **DAEMON** — a terminal-bound AI with a sci-fi asthetic.
@@ -325,6 +344,8 @@ ${PERSONALITY_CONTENT}
325
344
  - Use **Markdown** for structure (headings, bullets). Keep it compact.
326
345
  - Always generate complete and atomic answer at the end of your turn
327
346
 
347
+ ${memorySection}
348
+
328
349
  ${toolDefinitions}
329
350
 
330
351
  ${workspaceSection}
@@ -343,7 +364,8 @@ Follow all of the instructions carefully and begin processing the user request.
343
364
  function buildVoiceSystemPrompt(
344
365
  currentDateString: string,
345
366
  toolDefinitions: string,
346
- workspaceSection: string
367
+ workspaceSection: string,
368
+ memorySection: string
347
369
  ): string {
348
370
  return `
349
371
  You are DAEMON, an AI voice assistant. You speak with a calm, focused presence. Slightly ominous undertone, but always clear and useful.
@@ -372,6 +394,8 @@ TOOL USAGE:
372
394
  - For bash commands: describe what you did and the outcome, not the exact command or output.
373
395
  - For web searches: give the answer, not the search process.
374
396
 
397
+ ${memorySection}
398
+
375
399
  ${toolDefinitions}
376
400
 
377
401
  ${workspaceSection}
@@ -2,6 +2,7 @@ import { memo } from "react";
2
2
  import { DeviceMenu } from "../../components/DeviceMenu";
3
3
  import { GroundingMenu } from "../../components/GroundingMenu";
4
4
  import { HotkeysPane } from "../../components/HotkeysPane";
5
+ import { MemoryMenu } from "../../components/MemoryMenu";
5
6
  import { ModelMenu } from "../../components/ModelMenu";
6
7
  import { OnboardingOverlay } from "../../components/OnboardingOverlay";
7
8
  import { ProviderMenu } from "../../components/ProviderMenu";
@@ -128,6 +129,8 @@ function AppOverlaysImpl({ conversationHistory, currentContentBlocks }: AppOverl
128
129
  />
129
130
  )}
130
131
 
132
+ {menus.showMemoryMenu && <MemoryMenu onClose={() => menus.setShowMemoryMenu(false)} />}
133
+
131
134
  {onboarding.onboardingActive && (
132
135
  <OnboardingOverlay
133
136
  step={onboarding.onboardingStep}