claude-memory-agent 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100) hide show
  1. package/.env.example +107 -0
  2. package/README.md +200 -0
  3. package/agent_card.py +512 -0
  4. package/bin/cli.js +181 -0
  5. package/bin/postinstall.js +216 -0
  6. package/config.py +104 -0
  7. package/dashboard.html +2689 -0
  8. package/hooks/README.md +196 -0
  9. package/hooks/__pycache__/auto-detect-response.cpython-312.pyc +0 -0
  10. package/hooks/__pycache__/auto_capture.cpython-312.pyc +0 -0
  11. package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
  12. package/hooks/__pycache__/session_start.cpython-312.pyc +0 -0
  13. package/hooks/auto-detect-response.py +348 -0
  14. package/hooks/auto_capture.py +255 -0
  15. package/hooks/detect-correction.py +173 -0
  16. package/hooks/grounding-hook.py +348 -0
  17. package/hooks/log-tool-use.py +234 -0
  18. package/hooks/log-user-request.py +208 -0
  19. package/hooks/pre-tool-decision.py +218 -0
  20. package/hooks/problem-detector.py +343 -0
  21. package/hooks/session_end.py +192 -0
  22. package/hooks/session_start.py +227 -0
  23. package/install.py +887 -0
  24. package/main.py +2859 -0
  25. package/manager.py +997 -0
  26. package/package.json +55 -0
  27. package/requirements.txt +8 -0
  28. package/run_server.py +136 -0
  29. package/services/__init__.py +50 -0
  30. package/services/__pycache__/__init__.cpython-312.pyc +0 -0
  31. package/services/__pycache__/agent_registry.cpython-312.pyc +0 -0
  32. package/services/__pycache__/auth.cpython-312.pyc +0 -0
  33. package/services/__pycache__/auto_inject.cpython-312.pyc +0 -0
  34. package/services/__pycache__/claude_md_sync.cpython-312.pyc +0 -0
  35. package/services/__pycache__/cleanup.cpython-312.pyc +0 -0
  36. package/services/__pycache__/compaction_flush.cpython-312.pyc +0 -0
  37. package/services/__pycache__/confidence.cpython-312.pyc +0 -0
  38. package/services/__pycache__/daily_log.cpython-312.pyc +0 -0
  39. package/services/__pycache__/database.cpython-312.pyc +0 -0
  40. package/services/__pycache__/embeddings.cpython-312.pyc +0 -0
  41. package/services/__pycache__/insights.cpython-312.pyc +0 -0
  42. package/services/__pycache__/llm_analyzer.cpython-312.pyc +0 -0
  43. package/services/__pycache__/memory_md_sync.cpython-312.pyc +0 -0
  44. package/services/__pycache__/retry_queue.cpython-312.pyc +0 -0
  45. package/services/__pycache__/timeline.cpython-312.pyc +0 -0
  46. package/services/__pycache__/vector_index.cpython-312.pyc +0 -0
  47. package/services/__pycache__/websocket.cpython-312.pyc +0 -0
  48. package/services/agent_registry.py +753 -0
  49. package/services/auth.py +331 -0
  50. package/services/auto_inject.py +250 -0
  51. package/services/claude_md_sync.py +275 -0
  52. package/services/cleanup.py +667 -0
  53. package/services/compaction_flush.py +447 -0
  54. package/services/confidence.py +301 -0
  55. package/services/daily_log.py +333 -0
  56. package/services/database.py +2485 -0
  57. package/services/embeddings.py +358 -0
  58. package/services/insights.py +632 -0
  59. package/services/llm_analyzer.py +595 -0
  60. package/services/memory_md_sync.py +409 -0
  61. package/services/retry_queue.py +453 -0
  62. package/services/timeline.py +579 -0
  63. package/services/vector_index.py +398 -0
  64. package/services/websocket.py +257 -0
  65. package/skills/__init__.py +6 -0
  66. package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
  67. package/skills/__pycache__/admin.cpython-312.pyc +0 -0
  68. package/skills/__pycache__/checkpoint.cpython-312.pyc +0 -0
  69. package/skills/__pycache__/claude_md.cpython-312.pyc +0 -0
  70. package/skills/__pycache__/cleanup.cpython-312.pyc +0 -0
  71. package/skills/__pycache__/grounding.cpython-312.pyc +0 -0
  72. package/skills/__pycache__/insights.cpython-312.pyc +0 -0
  73. package/skills/__pycache__/natural_language.cpython-312.pyc +0 -0
  74. package/skills/__pycache__/retrieve.cpython-312.pyc +0 -0
  75. package/skills/__pycache__/search.cpython-312.pyc +0 -0
  76. package/skills/__pycache__/state.cpython-312.pyc +0 -0
  77. package/skills/__pycache__/store.cpython-312.pyc +0 -0
  78. package/skills/__pycache__/summarize.cpython-312.pyc +0 -0
  79. package/skills/__pycache__/timeline.cpython-312.pyc +0 -0
  80. package/skills/__pycache__/verification.cpython-312.pyc +0 -0
  81. package/skills/admin.py +469 -0
  82. package/skills/checkpoint.py +198 -0
  83. package/skills/claude_md.py +363 -0
  84. package/skills/cleanup.py +241 -0
  85. package/skills/grounding.py +801 -0
  86. package/skills/insights.py +231 -0
  87. package/skills/natural_language.py +277 -0
  88. package/skills/retrieve.py +67 -0
  89. package/skills/search.py +213 -0
  90. package/skills/state.py +182 -0
  91. package/skills/store.py +179 -0
  92. package/skills/summarize.py +588 -0
  93. package/skills/timeline.py +387 -0
  94. package/skills/verification.py +391 -0
  95. package/start_daemon.py +155 -0
  96. package/test_automation.py +221 -0
  97. package/test_complete.py +338 -0
  98. package/test_full.py +322 -0
  99. package/update_system.py +817 -0
  100. package/verify_db.py +134 -0
@@ -0,0 +1,391 @@
1
+ """Verification skills for anti-hallucination - Best-of-N and Quote Extraction."""
2
+ import os
3
+ import json
4
+ import asyncio
5
+ from typing import Dict, Any, Optional, List
6
+ from services.database import DatabaseService
7
+ from services.embeddings import EmbeddingService
8
+
9
+ # Check if LLM analysis is available
10
+ USE_LLM_ANALYSIS = os.getenv("USE_LLM_ANALYSIS", "true").lower() == "true"
11
+ OLLAMA_HOST = os.getenv("OLLAMA_HOST", "http://localhost:11434")
12
+ VERIFICATION_MODEL = os.getenv("VERIFICATION_MODEL", "llama3.2:3b")
13
+
14
+
15
async def best_of_n_verify(
    query: str,
    n: int = 3,
    context: Optional[str] = None,
    threshold: float = 0.7
) -> Dict[str, Any]:
    """
    Best-of-N verification: run the same query N times and check consistency.

    Inconsistent outputs across runs are treated as a hallucination signal.

    Args:
        query: The question/task to verify
        n: Number of runs (default 3)
        context: Optional context to include in the prompt
        threshold: Similarity threshold for consistency (0-1)

    Returns:
        Dict with verification results: the raw responses, a consistency
        score, a consensus answer, and a human-readable recommendation.
    """
    if not USE_LLM_ANALYSIS:
        return {
            "success": False,
            "error": "LLM analysis not available",
            "recommendation": "Enable USE_LLM_ANALYSIS or install Ollama"
        }

    try:
        import ollama
        client = ollama.Client(host=OLLAMA_HOST)
    except Exception as e:
        return {
            "success": False,
            "error": f"Ollama not available: {e}"
        }

    prompt_template = """Answer this question concisely and factually.
{context}
Question: {query}

Answer (be specific and factual):"""

    context_str = f"Context: {context}\n" if context else ""
    prompt = prompt_template.format(context=context_str, query=query)

    # Run N times. client.generate() is a synchronous blocking HTTP call, so
    # run it in a worker thread instead of stalling the event loop for the
    # whole batch of generations.
    responses = []
    for _ in range(n):
        try:
            response = await asyncio.to_thread(
                client.generate,
                model=VERIFICATION_MODEL,
                prompt=prompt,
                options={
                    "temperature": 0.7,  # Some variation to test consistency
                    "num_predict": 200
                }
            )
            responses.append(response.get("response", "").strip())
        except Exception as e:
            # Keep a placeholder so n_runs stays accurate; an error placeholder
            # will (correctly) drag the consistency score down.
            responses.append(f"[Error: {e}]")

    # Analyze consistency across the N generations
    consistency_result = await _analyze_consistency(responses, threshold)

    return {
        "success": True,
        "query": query,
        "n_runs": n,
        "responses": responses,
        "is_consistent": consistency_result["is_consistent"],
        "consistency_score": consistency_result["score"],
        "consensus_answer": consistency_result.get("consensus"),
        "inconsistencies": consistency_result.get("inconsistencies", []),
        "recommendation": (
            "Answers are consistent - likely reliable"
            if consistency_result["is_consistent"]
            else "INCONSISTENT answers detected - verify manually before trusting"
        )
    }
94
+
95
+
96
+ async def _analyze_consistency(responses: List[str], threshold: float) -> Dict[str, Any]:
97
+ """Analyze consistency across multiple responses."""
98
+ if len(responses) < 2:
99
+ return {"is_consistent": True, "score": 1.0, "consensus": responses[0] if responses else None}
100
+
101
+ # Simple word overlap consistency check
102
+ def get_key_words(text: str) -> set:
103
+ # Extract significant words (longer than 3 chars, not common)
104
+ common_words = {'the', 'and', 'for', 'that', 'this', 'with', 'are', 'was', 'were', 'been', 'have', 'has', 'will', 'would', 'could', 'should'}
105
+ words = set(w.lower() for w in text.split() if len(w) > 3 and w.lower() not in common_words)
106
+ return words
107
+
108
+ word_sets = [get_key_words(r) for r in responses]
109
+
110
+ # Calculate pairwise overlap
111
+ overlaps = []
112
+ for i in range(len(word_sets)):
113
+ for j in range(i + 1, len(word_sets)):
114
+ if word_sets[i] and word_sets[j]:
115
+ intersection = word_sets[i] & word_sets[j]
116
+ union = word_sets[i] | word_sets[j]
117
+ overlap = len(intersection) / len(union) if union else 0
118
+ overlaps.append(overlap)
119
+
120
+ avg_overlap = sum(overlaps) / len(overlaps) if overlaps else 0
121
+
122
+ # Find inconsistencies
123
+ inconsistencies = []
124
+ if avg_overlap < threshold:
125
+ # Find which responses differ most
126
+ all_words = set()
127
+ for ws in word_sets:
128
+ all_words.update(ws)
129
+
130
+ # Words that appear in some but not all responses
131
+ for word in all_words:
132
+ present_in = sum(1 for ws in word_sets if word in ws)
133
+ if 0 < present_in < len(word_sets):
134
+ inconsistencies.append(f"'{word}' appears in {present_in}/{len(word_sets)} responses")
135
+
136
+ # Find consensus (most common response pattern)
137
+ consensus = responses[0] if responses else None
138
+
139
+ return {
140
+ "is_consistent": avg_overlap >= threshold,
141
+ "score": round(avg_overlap, 3),
142
+ "consensus": consensus,
143
+ "inconsistencies": inconsistencies[:5] # Limit to 5
144
+ }
145
+
146
+
147
async def extract_quotes(
    document: str,
    query: str,
    max_quotes: int = 5,
    min_length: int = 20
) -> Dict[str, Any]:
    """
    Extract direct quotes from a document that are relevant to a query.

    Forces verbatim grounding - Claude must work from exact quotes. Each
    LLM-extracted quote is re-checked against the document, and quotes that
    cannot be found (even fuzzily) are flagged rather than silently trusted.

    Args:
        document: The source document text
        query: What we're looking for
        max_quotes: Maximum quotes to extract
        min_length: Minimum quote length (characters)

    Returns:
        Dict with extracted quotes (each marked verified/unverified), or the
        keyword-extraction fallback result when no LLM is available.
    """
    if not document or not query:
        return {
            "success": False,
            "error": "Document and query are required"
        }

    if not USE_LLM_ANALYSIS:
        # Fallback: simple keyword-based extraction
        return await _extract_quotes_keyword(document, query, max_quotes, min_length)

    try:
        import ollama
        client = ollama.Client(host=OLLAMA_HOST)
    except Exception:
        # Ollama missing/unreachable - degrade gracefully to keyword mode.
        return await _extract_quotes_keyword(document, query, max_quotes, min_length)

    # Document is truncated to 5000 chars for the prompt, but verification
    # below runs against the full document text.
    prompt = f"""Extract exact, word-for-word quotes from this document that are relevant to the query.

DOCUMENT:
{document[:5000]}

QUERY: {query}

Return ONLY a JSON array of exact quotes from the document. Do not paraphrase or modify.
Example format: ["exact quote 1", "exact quote 2"]

Quotes (JSON array only):"""

    try:
        response = client.generate(
            model=VERIFICATION_MODEL,
            prompt=prompt,
            options={
                "temperature": 0.1,  # Low temperature for accuracy
                "num_predict": 500
            }
        )

        result_text = response.get("response", "[]")

        # Locate the JSON array embedded in the model output
        json_start = result_text.find("[")
        json_end = result_text.rfind("]") + 1

        if json_start >= 0 and json_end > json_start:
            quotes = json.loads(result_text[json_start:json_end])

            # Verify quotes actually exist in the document. Lowercase the
            # document once, outside the loop.
            doc_lower = document.lower()
            verified_quotes = []
            for quote in quotes[:max_quotes]:
                if isinstance(quote, str) and len(quote) >= min_length:
                    # Check if quote (or a close match) exists in document
                    quote_lower = quote.lower()
                    if quote_lower in doc_lower or _fuzzy_match(quote_lower, doc_lower):
                        verified_quotes.append({
                            "quote": quote,
                            "verified": True
                        })
                    else:
                        verified_quotes.append({
                            "quote": quote,
                            "verified": False,
                            "warning": "Quote not found verbatim in document"
                        })

            return {
                "success": True,
                "query": query,
                "quotes": verified_quotes,
                "total_found": len(verified_quotes),
                "all_verified": all(q["verified"] for q in verified_quotes),
                "grounding_instruction": (
                    "Use ONLY these verified quotes to answer. "
                    "Do not add information not in the quotes."
                )
            }

    except Exception:
        # Generation or JSON parsing failed - fall through to the keyword
        # fallback below instead of surfacing an error (best-effort design).
        pass

    # Fallback to keyword extraction
    return await _extract_quotes_keyword(document, query, max_quotes, min_length)
250
+
251
+
252
+ def _fuzzy_match(quote: str, document: str, threshold: float = 0.8) -> bool:
253
+ """Check if quote approximately matches something in document."""
254
+ # Simple check: do most words appear in sequence?
255
+ words = quote.split()
256
+ if len(words) < 3:
257
+ return False
258
+
259
+ # Check if 80% of words appear near each other in document
260
+ matches = 0
261
+ for word in words:
262
+ if word in document:
263
+ matches += 1
264
+
265
+ return (matches / len(words)) >= threshold
266
+
267
+
268
+ async def _extract_quotes_keyword(
269
+ document: str,
270
+ query: str,
271
+ max_quotes: int,
272
+ min_length: int
273
+ ) -> Dict[str, Any]:
274
+ """Fallback keyword-based quote extraction."""
275
+ # Split query into keywords
276
+ keywords = [w.lower() for w in query.split() if len(w) > 3]
277
+
278
+ # Split document into sentences
279
+ sentences = []
280
+ for sep in ['. ', '.\n', '! ', '? ', '\n\n']:
281
+ if sep in document:
282
+ parts = document.split(sep)
283
+ for part in parts:
284
+ if len(part.strip()) >= min_length:
285
+ sentences.append(part.strip())
286
+
287
+ if not sentences:
288
+ sentences = [document[i:i+200] for i in range(0, len(document), 150)]
289
+
290
+ # Score sentences by keyword matches
291
+ scored = []
292
+ for sentence in sentences:
293
+ sentence_lower = sentence.lower()
294
+ score = sum(1 for kw in keywords if kw in sentence_lower)
295
+ if score > 0:
296
+ scored.append((score, sentence))
297
+
298
+ # Sort by score and take top N
299
+ scored.sort(reverse=True)
300
+ quotes = [{"quote": s, "verified": True, "keyword_matches": score} for score, s in scored[:max_quotes]]
301
+
302
+ return {
303
+ "success": True,
304
+ "query": query,
305
+ "quotes": quotes,
306
+ "total_found": len(quotes),
307
+ "method": "keyword_extraction",
308
+ "grounding_instruction": (
309
+ "Use these extracted sections to answer. "
310
+ "Cite specific quotes when making claims."
311
+ )
312
+ }
313
+
314
+
315
async def require_grounding(
    db: DatabaseService,
    session_id: str,
    statement: str,
    source_type: str = "any"
) -> Dict[str, Any]:
    """
    Require that a statement be grounded in stored facts before accepting it.

    Anchors (pinned timeline events) are always checked first; the semantic
    memory search runs only when source_type allows it and no anchor matched.

    Args:
        db: Database service
        session_id: Current session
        statement: The statement to verify
        source_type: Type of source required ("anchor", "memory", "any")

    Returns:
        Dict with grounding verification: the matching sources found and a
        human-readable recommendation.
    """
    grounding_sources = []

    # Check against anchors
    events = await db.get_timeline_events(
        session_id=session_id,
        limit=50,
        anchors_only=True
    )

    statement_lower = statement.lower()

    for event in events:
        if event.get("is_anchor"):
            summary_lower = event["summary"].lower()
            # Crude grounding test: at least two significant (>3 char)
            # words of the statement appear in the anchor summary.
            overlap = sum(1 for word in statement_lower.split() if len(word) > 3 and word in summary_lower)
            if overlap >= 2:
                grounding_sources.append({
                    "type": "anchor",
                    "content": event["summary"],
                    "match_strength": "keyword_overlap"
                })

    # Fall back to semantic memory search when anchors gave nothing
    if source_type in ["memory", "any"] and not grounding_sources:
        try:
            # EmbeddingService is imported at module level; no local
            # re-import needed.
            embeddings = EmbeddingService()
            embedding = await embeddings.generate_embedding(statement)

            memories = await db.search_similar(
                embedding=embedding,
                limit=3,
                threshold=0.7
            )

            for memory in memories:
                grounding_sources.append({
                    "type": "memory",
                    "content": memory.get("content", "")[:200],
                    "similarity": memory.get("similarity")
                })
        except Exception:
            # Embedding backend unavailable - grounding stays anchor-only
            # rather than failing the whole check (best-effort design).
            pass

    is_grounded = len(grounding_sources) > 0

    return {
        "success": True,
        "statement": statement,
        "is_grounded": is_grounded,
        "grounding_sources": grounding_sources,
        "source_count": len(grounding_sources),
        "recommendation": (
            "Statement is grounded in stored facts"
            if is_grounded
            else "WARNING: Statement has no grounding. Verify before using."
        )
    }
@@ -0,0 +1,155 @@
1
+ """Start the memory agent as a proper background daemon on Windows.
2
+
3
+ Uses msvcrt.locking() for a true Windows mutex to prevent multiple
4
+ simultaneous startup attempts. The server itself has its own mutex.
5
+ """
6
+ import subprocess
7
+ import sys
8
+ import os
9
+ import time
10
+ import msvcrt
11
+
12
+ AGENT_DIR = os.path.dirname(os.path.abspath(__file__))
13
+ LOG_FILE = os.path.join(AGENT_DIR, "memory-agent.log")
14
+ STARTUP_LOCK_FILE = os.path.join(AGENT_DIR, "memory-agent-startup.lock")
15
+ PID_FILE = os.path.join(AGENT_DIR, "memory-agent.pid")
16
+
17
+ # Global handle - must stay open for lock to persist
18
+ _startup_lock_handle = None
19
+
20
+
21
def acquire_startup_lock() -> bool:
    """Acquire startup mutex using Windows file locking (msvcrt.locking).

    Prevents multiple hooks from trying to start the agent simultaneously.
    The locked byte is held (and the file handle kept open, via the
    module-level _startup_lock_handle) until release_startup_lock() runs.

    Returns:
        bool: True if this process now owns the startup lock; False when
        another process holds it or acquisition failed.
    """
    global _startup_lock_handle

    try:
        # Open/create the lock file. The handle must remain open for the
        # lifetime of the lock, hence the module-level global.
        _startup_lock_handle = open(STARTUP_LOCK_FILE, 'w+')

        # Try non-blocking exclusive lock on the first byte
        try:
            msvcrt.locking(_startup_lock_handle.fileno(), msvcrt.LK_NBLCK, 1)
        except OSError:
            # Lock held by another process - they're already starting the
            # agent. (IOError has been an alias of OSError since Python 3.3,
            # so catching OSError alone covers both.)
            _startup_lock_handle.close()
            _startup_lock_handle = None
            return False

        # We have the lock - record our PID in the file for debugging
        _startup_lock_handle.seek(0)
        _startup_lock_handle.truncate()
        _startup_lock_handle.write(str(os.getpid()))
        _startup_lock_handle.flush()
        return True

    except Exception as e:
        print(f"[STARTUP] Failed to acquire lock: {e}")
        if _startup_lock_handle:
            try:
                _startup_lock_handle.close()
            except Exception:
                # Best-effort cleanup only; a bare except here would also
                # swallow SystemExit/KeyboardInterrupt.
                pass
            _startup_lock_handle = None
        return False
58
+
59
+
60
def release_startup_lock():
    """Release the startup mutex and drop the cached lock-file handle."""
    global _startup_lock_handle

    handle = _startup_lock_handle
    if not handle:
        return

    try:
        # Unlock is best-effort: the close below is what matters most.
        try:
            msvcrt.locking(handle.fileno(), msvcrt.LK_UNLCK, 1)
        except:
            pass
        handle.close()
        _startup_lock_handle = None
    except Exception:
        # Never let lock release take down the caller.
        pass
74
+
75
+
76
def is_running():
    """Probe the agent's /health endpoint; True only on HTTP 200."""
    base_url = os.getenv("MEMORY_AGENT_URL", "http://localhost:8102")
    try:
        import requests
        resp = requests.get(f"{base_url}/health", timeout=2)
    except Exception:
        # requests missing, connection refused, or timeout - not running.
        return False
    return resp.status_code == 200
85
+
86
+
87
def read_pid():
    """Return the PID recorded in PID_FILE, or None if unavailable.

    Returns:
        int | None: the stored PID, or None when the file is missing,
        unreadable, or does not contain an integer.
    """
    try:
        # EAFP: open directly instead of exists()+open(), which races with
        # a concurrent process deleting the file between the two calls.
        with open(PID_FILE, 'r') as f:
            return int(f.read().strip())
    except (OSError, ValueError):
        # OSError: file missing/unreadable; ValueError: non-integer content.
        return None
96
+
97
+
98
def start_daemon() -> bool:
    """Start the memory agent as a detached background process.

    Sequence: health-check first (fast path), then acquire the startup
    mutex, spawn the server detached, record its PID, and poll the health
    endpoint until it answers. The mutex is always released on exit.

    Returns:
        bool: True if the agent is running (already, started here, or
        started by a concurrent process); False if startup failed or the
        health check never succeeded.
    """
    # First check: is it already responding?
    if is_running():
        print("Memory agent is already running!")
        return True

    # Second check: try to acquire mutex lock
    if not acquire_startup_lock():
        # Another startup is in progress, wait for it
        print("Waiting for other startup to complete...")
        # Poll the health endpoint for up to ~5s (10 x 0.5s)
        for i in range(10):
            time.sleep(0.5)
            if is_running():
                print("Memory agent started by another process!")
                return True
        print("Other startup failed or timed out")
        return False

    try:
        # Windows-specific flags for detached process
        # (CreateProcess dwCreationFlags: keep the child alive after this
        # script exits, and suppress any console window)
        DETACHED_PROCESS = 0x00000008
        CREATE_NO_WINDOW = 0x08000000
        CREATE_NEW_PROCESS_GROUP = 0x00000200

        # NOTE: mode "w" truncates the previous log on every start
        with open(LOG_FILE, "w") as log:
            proc = subprocess.Popen(
                [sys.executable, "run_server.py"],
                cwd=AGENT_DIR,
                stdout=log,
                stderr=subprocess.STDOUT,
                creationflags=DETACHED_PROCESS | CREATE_NO_WINDOW | CREATE_NEW_PROCESS_GROUP,
                close_fds=True
            )

        # Save PID for future reference
        with open(PID_FILE, 'w') as f:
            f.write(str(proc.pid))

        print(f"Started memory agent (PID: {proc.pid})")

        # Wait for startup with health check (up to ~5s)
        for i in range(10):
            time.sleep(0.5)
            if is_running():
                print("Memory agent is now running!")
                return True

        # Process was spawned but never answered /health: leave it running
        # and report failure; the log file holds the server's own output.
        print("Warning: Agent started but health check failed. Check log file.")
        return False

    finally:
        # Always release lock when done (success or failure)
        release_startup_lock()
152
+
153
+
154
if __name__ == "__main__":
    # Propagate success/failure as the process exit status (0 = agent is
    # running, 1 = startup failed) so hooks/callers can detect failure;
    # previously the script always exited 0.
    raise SystemExit(0 if start_daemon() else 1)