@simbimbo/memory-ocmemog 0.1.4 → 0.1.5

package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # Changelog
 
+ ## 0.1.5 — 2026-03-18
+
+ Repair and hardening follow-up after the 0.1.4 publish.
+
+ ### Highlights
+ - Fixed vector reindex defaults so repair scripts use provider-backed Ollama embeddings instead of silently rebuilding weak local/hash vectors
+ - Added battery-aware sidecar defaults for macOS laptops (`OCMEMOG_LAPTOP_MODE=auto|ac|battery`)
+ - Fixed `record_reinforcement()` so new experiences preserve `memory_reference`, and added integrity repair to backfill legacy missing references
+ - Added incremental vector backfill tooling (`scripts/ocmemog-backfill-vectors.py`) for non-destructive backlog repair
+ - Cleaned freshness summaries so junk placeholders (`promoted`, `summary`, `No local memory summary available`) do not pollute advisories
+ - Improved integrity reporting to count duplicate promotion groups accurately
+
+ ### Notes
+ - Historical vector backlog still exists and should be burned down in staged backfills, especially for `knowledge`
+ - Detailed repair notes: `docs/notes/2026-03-18-memory-repair-and-backfill.md`
+
  ## 0.1.4 — 2026-03-18
 
  Package ownership + runtime safety release.
package/README.md CHANGED
@@ -83,14 +83,15 @@ Optional environment variables:
  - `OCMEMOG_TRANSCRIPT_ROOTS` (comma-separated allowed roots for transcript context retrieval; default: `~/.openclaw/workspace/memory`)
  - `OCMEMOG_API_TOKEN` (optional; if set, requests must include `x-ocmemog-token` or `Authorization: Bearer ...`)
  - `OCMEMOG_AUTO_HYDRATION` (`true` to re-enable prompt-time continuity prepending; defaults to `false` as a safety guard until the host runtime is verified not to persist prepended context into session history)
+ - `OCMEMOG_LAPTOP_MODE` (`auto` by default; on macOS battery power this slows watcher polling, reduces ingest batch size, and disables sentiment reinforcement unless explicitly overridden)
  - `OCMEMOG_USE_OLLAMA` (`true` to use Ollama for distill/inference)
  - `OCMEMOG_OLLAMA_HOST` (default: `http://127.0.0.1:11434`)
- - `OCMEMOG_OLLAMA_MODEL` (default: `phi3:latest`)
+ - `OCMEMOG_OLLAMA_MODEL` (default: `phi3:latest`; lightweight local fallback / cheap cognition)
  - `OCMEMOG_OLLAMA_EMBED_MODEL` (default: `nomic-embed-text:latest`)
  - `OCMEMOG_PROMOTION_THRESHOLD` (default: `0.5`)
  - `OCMEMOG_DEMOTION_THRESHOLD` (default: `0.2`)
  - `OCMEMOG_PONDER_ENABLED` (default: `true`)
- - `OCMEMOG_PONDER_MODEL` (default: `OCMEMOG_MEMORY_MODEL`)
+ - `OCMEMOG_PONDER_MODEL` (default via launcher: `qwen2.5:7b`; recommended for structured local memory refinement)
  - `OCMEMOG_LESSON_MINING_ENABLED` (default: `true`)
 
  ## Security
@@ -153,7 +154,7 @@ launchctl bootstrap gui/$UID scripts/launchagents/com.openclaw.ocmemog.guard.pli
  ## Recent changes
 
- ### 0.1.4 (unreleased / current main)
+ ### 0.1.5 (current main)
 
  Package ownership + runtime safety release:
  - Publish package under `@simbimbo/memory-ocmemog` instead of the unauthorized `@openclaw` scope
@@ -221,3 +222,4 @@ Current limitations before broader public rollout:
  - Public release/distribution metadata is still being tightened up
 
  When a richer path is unavailable, the sidecar is designed to fail soft with explicit warnings rather than crash.
+ soft with explicit warnings rather than crash.
@@ -102,17 +102,18 @@ def store_memory(
  def record_reinforcement(task_id: str, outcome: str, note: str, *, source_module: str | None = None) -> None:
      outcome = _sanitize(outcome)
      note = _sanitize(note)
+     memory_reference = f"reinforcement:{task_id or 'unknown'}:{source_module or 'unspecified'}"
      def _write() -> None:
          conn = store.connect()
          try:
              conn.execute(
-                 "INSERT INTO experiences (task_id, outcome, reward_score, confidence, experience_type, source_module, schema_version) "
-                 "VALUES (?, ?, ?, ?, ?, ?, ?)",
-                 (task_id, outcome, None, 1.0, "reinforcement", source_module, store.SCHEMA_VERSION),
+                 "INSERT INTO experiences (task_id, outcome, reward_score, confidence, memory_reference, experience_type, source_module, schema_version) "
+                 "VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
+                 (task_id, outcome, None, 1.0, memory_reference, "reinforcement", source_module, store.SCHEMA_VERSION),
              )
              conn.execute(
                  "INSERT INTO memory_events (event_type, source, details_json, schema_version) VALUES (?, ?, ?, ?)",
-                 ("reinforcement_note", source_module, json.dumps({"task_id": task_id, "note": note}), store.SCHEMA_VERSION),
+                 ("reinforcement_note", source_module, json.dumps({"task_id": task_id, "note": note, "memory_reference": memory_reference}), store.SCHEMA_VERSION),
              )
              conn.commit()
          finally:
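For reference, a hedged sketch of the deterministic reference string the fixed writer produces; the task/module values below are invented examples, not taken from the diff.

```python
# Hypothetical values illustrating the memory_reference format built above.
task_id = "task-42"
source_module = "transcript_watcher"
assert f"reinforcement:{task_id or 'unknown'}:{source_module or 'unspecified'}" == \
    "reinforcement:task-42:transcript_watcher"
# Missing fields fall back to placeholders instead of leaving the column empty.
assert f"reinforcement:{'' or 'unknown'}:{None or 'unspecified'}" == \
    "reinforcement:unknown:unspecified"
```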
@@ -12,6 +12,29 @@ DEFAULT_CONFIDENCE_THRESHOLD = 0.6
  DEFAULT_LIMIT = 25
 
 
+ _BAD_SUMMARY_PREFIXES = (
+     "promoted",
+     "candidate_promoted",
+     "no local memory summary available",
+     "summary",
+ )
+
+
+ def _summary_from_content(content: Any, limit: int = 120) -> str:
+     text = str(content or "").strip()
+     if not text:
+         return "(empty memory content)"
+     lines = [line.strip() for line in text.splitlines() if line.strip()]
+     for line in lines:
+         lowered = line.lower()
+         if lowered in _BAD_SUMMARY_PREFIXES:
+             continue
+         if any(lowered.startswith(prefix + ":") for prefix in _BAD_SUMMARY_PREFIXES):
+             continue
+         return line[:limit]
+     return "(needs summary cleanup)"
+
+
  def scan_freshness(
      stale_days: int = DEFAULT_STALE_DAYS,
      confidence_threshold: float = DEFAULT_CONFIDENCE_THRESHOLD,
@@ -66,7 +89,7 @@ def scan_freshness(
              "memory_id": row["id"],
              "timestamp": row["timestamp"],
              "confidence": confidence,
-             "summary": str(row["content"])[:120],
+             "summary": _summary_from_content(row["content"]),
              "freshness_score": round(freshness_score, 3),
              "refresh_recommended": refresh_recommended,
          }
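A hedged illustration of the placeholder filtering, assuming `_summary_from_content` as defined in the hunk above; the sample content is invented.

```python
sample = "promoted\nsummary: candidate_promoted\nUse staged backfills for the knowledge table."
# The first two lines match _BAD_SUMMARY_PREFIXES (exact match and "prefix:" form),
# so the first substantive line becomes the advisory summary.
assert _summary_from_content(sample) == "Use staged backfills for the knowledge table."
# Rows containing only junk placeholders are flagged rather than echoed.
assert _summary_from_content("promoted") == "(needs summary cleanup)"
```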
@@ -50,11 +50,11 @@ def run_integrity_check() -> Dict[str, Any]:
 
      # duplicate promotions
      try:
-         dup = conn.execute(
-             "SELECT COUNT(*) FROM promotions GROUP BY source, content HAVING COUNT(*) > 1",
-         ).fetchone()
-         if dup:
-             issues.append("duplicate_promotions")
+         dup_groups = conn.execute(
+             "SELECT COUNT(*) FROM (SELECT 1 FROM promotions GROUP BY source, content HAVING COUNT(*) > 1)",
+         ).fetchone()[0]
+         if dup_groups:
+             issues.append(f"duplicate_promotions:{dup_groups}")
              emit_event(state_store.reports_dir() / "brain_memory.log.jsonl", "brain_memory_integrity_issue", status="warn")
      except Exception:
          pass
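The wrapped subquery matters because a bare `GROUP BY ... HAVING` returns one row per duplicate group, each holding that group's size, so `fetchone()` previously saw an arbitrary group rather than a count of groups. A minimal, runnable sketch with an invented table:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE promotions (source TEXT, content TEXT)")
conn.executemany(
    "INSERT INTO promotions VALUES (?, ?)",
    [("a", "x"), ("a", "x"), ("b", "y"), ("b", "y"), ("c", "z")],
)
grouped = conn.execute(
    "SELECT COUNT(*) FROM promotions GROUP BY source, content HAVING COUNT(*) > 1"
).fetchall()      # -> [(2,), (2,)]: per-group sizes, not a group count
dup_groups = conn.execute(
    "SELECT COUNT(*) FROM (SELECT 1 FROM promotions GROUP BY source, content HAVING COUNT(*) > 1)"
).fetchone()[0]   # -> 2: the actual number of duplicate groups
print(grouped, dup_groups)
```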
@@ -66,6 +66,7 @@ def run_integrity_check() -> Dict[str, Any]:
          ).fetchone()[0]
          if missing_ref:
              issues.append(f"missing_memory_reference:{missing_ref}")
+             repairable.append("missing_memory_reference")
              emit_event(state_store.reports_dir() / "brain_memory.log.jsonl", "brain_memory_integrity_issue", status="warn")
      except Exception:
          pass
@@ -141,6 +142,7 @@ def repair_integrity() -> Dict[str, Any]:
      def _write() -> Dict[str, Any]:
          conn = store.connect()
          removed_orphans = 0
+         repaired_missing_refs = 0
          try:
              tables = {row[0] for row in conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()}
              if "vector_embeddings" in tables:
@@ -158,8 +160,19 @@ def repair_integrity() -> Dict[str, Any]:
                      """,
                      (table,),
                  ).rowcount
+             if "experiences" in tables:
+                 repaired_missing_refs += conn.execute(
+                     """
+                     UPDATE experiences
+                     SET memory_reference = 'legacy:' || COALESCE(experience_type, 'unknown') || ':' || id
+                     WHERE memory_reference IS NULL OR memory_reference = ''
+                     """
+                 ).rowcount
              conn.commit()
-             return {"removed_orphan_vectors": int(removed_orphans)}
+             return {
+                 "removed_orphan_vectors": int(removed_orphans),
+                 "repaired_missing_memory_references": int(repaired_missing_refs),
+             }
          finally:
              conn.close()
 
@@ -167,4 +180,7 @@ def repair_integrity() -> Dict[str, Any]:
      if int(result.get("removed_orphan_vectors") or 0) > 0:
          repaired.append(f"vector_orphan:{int(result['removed_orphan_vectors'])}")
          emit_event(state_store.reports_dir() / "brain_memory.log.jsonl", "brain_memory_integrity_repair", status="ok", repaired="vector_orphan", count=int(result["removed_orphan_vectors"]))
+     if int(result.get("repaired_missing_memory_references") or 0) > 0:
+         repaired.append(f"missing_memory_reference:{int(result['repaired_missing_memory_references'])}")
+         emit_event(state_store.reports_dir() / "brain_memory.log.jsonl", "brain_memory_integrity_repair", status="ok", repaired="missing_memory_reference", count=int(result["repaired_missing_memory_references"]))
      return {"ok": True, "repaired": repaired, **result}
@@ -1,5 +1,6 @@
  from __future__ import annotations
 
+ import hashlib
  import json
  import re
  import threading
@@ -214,13 +215,33 @@ def _load_continuity_candidates(limit: int) -> List[Dict[str, object]]:
      return items[:limit]
 
 
+ def _low_value_candidate(record: Dict[str, object]) -> bool:
+     content = str(record.get("content") or "").strip()
+     if not content:
+         return True
+     normalized = re.sub(r"\s+", " ", content.lower())
+     if normalized.startswith("202") and "[assistant]" in normalized and "[[reply_to_current]]" in normalized:
+         return True
+     if "**current target**" in normalized and "validation performed" in normalized:
+         return True
+     if normalized.startswith("recent memory worth reinforcing:"):
+         return True
+     if normalized.startswith("consolidated pattern:"):
+         return True
+     return False
+
+
  def _dedupe_candidates(items: List[Dict[str, object]], limit: int) -> List[Dict[str, object]]:
      deduped: List[Dict[str, object]] = []
      seen: set[str] = set()
      for item in items:
          reference = str(item.get("reference") or "")
          content = str(item.get("content") or "").strip()
-         key = reference or content.lower()
+         if _low_value_candidate(item):
+             continue
+         normalized = re.sub(r"\s+", " ", content.lower())[:1200]
+         content_key = hashlib.sha256(normalized.encode("utf-8", errors="ignore")).hexdigest() if normalized else ""
+         key = content_key or reference
          if not key or key in seen or not content:
              continue
          seen.add(key)
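The switch to a content hash means near-identical candidates that differ only in whitespace or case now collapse to one dedupe key, where the old reference-first key treated them as distinct. A small sketch with invented strings:

```python
import hashlib
import re

def content_key(content: str) -> str:
    # Same normalization as the hunk above: lowercase, collapse whitespace, cap length.
    normalized = re.sub(r"\s+", " ", content.lower())[:1200]
    return hashlib.sha256(normalized.encode("utf-8", errors="ignore")).hexdigest()

a = content_key("Ship the fix,  then rerun the   backfill.")
b = content_key("ship the fix, then rerun the backfill.")
assert a == b  # whitespace/case variants dedupe to the same candidate
```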
@@ -232,11 +253,66 @@ def _dedupe_candidates(items: List[Dict[str, object]], limit: int) -> List[Dict[
 
  def _heuristic_summary(text: str, limit: int = 220) -> str:
      collapsed = re.sub(r"\s+", " ", text or "").strip()
+     collapsed = re.sub(r"^\d{4}-\d{2}-\d{2}T[^ ]+\s+\[[^\]]+\]\s*", "", collapsed)
+     collapsed = re.sub(r"^\d{4}-\d{2}-\d{2}t[^ ]+\s+\[[^\]]+\]\s*", "", collapsed, flags=re.IGNORECASE)
+     collapsed = re.sub(r"^\[\[reply_to_current\]\]\s*", "", collapsed)
      if len(collapsed) <= limit:
          return collapsed
      return f"{collapsed[: limit - 1].rstrip()}…"
 
 
+ def _needs_unresolved_refine(summary: str) -> bool:
+     text = (summary or "").strip().lower()
+     if not text:
+         return True
+     if text.startswith(("## ", "### ", "1)", "2)", "- ", "* ")):
+         return True
+     trigger_phrases = (
+         "next steps",
+         "open questions",
+         "recommended next action",
+         "current status",
+         "quick recap",
+         "paused",
+         "todo:",
+     )
+     return any(phrase in text for phrase in trigger_phrases)
+
+
+ def _heuristic_unresolved_rewrite(raw: str) -> str:
+     text = _heuristic_summary(raw, limit=500).strip()
+     lowered = text.lower()
+     text = re.sub(r"^(##+\s*|\*\*|\d+\)\s*)", "", text).strip("* ")
+     if lowered.startswith("todo:"):
+         body = text.split(":", 1)[1].strip() if ":" in text else text[5:].strip()
+         return _heuristic_summary(f"Outstanding task: {body}", limit=180)
+     if "next steps / open questions" in lowered or "current status / next steps" in lowered or "recommended next action" in lowered:
+         return "Review the linked note and extract the concrete pending decision or next action."
+     if lowered.startswith("paused"):
+         return "Resume the paused work from its saved checkpoint and confirm the next concrete action."
+     return _heuristic_summary(text, limit=180)
+
+
+ def _refine_unresolved_summary(summary: str, reference: str = "") -> str:
+     raw = _heuristic_summary(summary, limit=500)
+     if not _needs_unresolved_refine(raw):
+         return _heuristic_summary(raw)
+     prompt = (
+         "Rewrite this unresolved item as one concise actionable unresolved summary. "
+         "Keep it under 180 characters. Focus on the decision, blocker, or next action. "
+         "Do not use markdown headings or numbering.\n\n"
+         f"Reference: {reference}\n"
+         f"Unresolved item: {raw}\n\n"
+         "Summary:"
+     )
+     result = _infer_with_timeout(prompt)
+     output = str(result.get("output") or "").strip()
+     cleaned = _SUMMARY_PREFIX_RE.sub("", output).strip()
+     if cleaned and len(cleaned) >= 12 and cleaned.lower() != raw.lower() and not _needs_unresolved_refine(cleaned):
+         return _heuristic_summary(cleaned, limit=180)
+     return _heuristic_unresolved_rewrite(raw)
+
+
  def _heuristic_ponder(record: Dict[str, object]) -> Dict[str, str]:
      text = str(record.get("content") or "").strip()
      reference = str(record.get("reference") or "")
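A hedged illustration of the heuristic fallback path, assuming the helpers defined in the hunk above; the inputs are invented. `_refine_unresolved_summary` only reaches these rewrites when the local model output is unusable or unavailable.

```python
print(_heuristic_unresolved_rewrite("TODO: wire the backfill script into the guard agent"))
# -> "Outstanding task: wire the backfill script into the guard agent"
print(_heuristic_unresolved_rewrite("Paused while waiting on the 0.1.5 publish"))
# -> "Resume the paused work from its saved checkpoint and confirm the next concrete action."
```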
@@ -256,12 +332,12 @@ def _heuristic_ponder(record: Dict[str, object]) -> Dict[str, str]:
      if kind == "turn":
          role = str(metadata.get("role") or "conversation")
          return {
-             "insight": f"Recent {role} turn may shape near-term continuity: {summary}",
-             "recommendation": "Retain the turn in short-horizon context and checkpoint if it changes the active branch or next action.",
+             "insight": f"Recent {role} turn changed active context: {summary}",
+             "recommendation": "Preserve only the decision, lesson, or next action from this turn instead of the full transcript wording.",
          }
      return {
-         "insight": f"Recent memory worth reinforcing: {summary}",
-         "recommendation": "Link the reflection back to its source memory so future retrieval can hydrate it with provenance.",
+         "insight": f"Potential durable learning: {summary}",
+         "recommendation": "Capture the concrete lesson, decision, or next action so this memory is reusable instead of just retrievable.",
      }
 
 
@@ -414,7 +490,7 @@ def _store_lesson_once(lesson: str, *, source_reference: str) -> Optional[str]:
 
  def _candidate_memories(max_items: int) -> List[Dict[str, object]]:
      base_candidates: List[Dict[str, object]] = []
-     for table in ("reflections", "knowledge", "tasks", "runbooks"):
+     for table in ("knowledge", "tasks", "runbooks", "lessons"):
          base_candidates.extend(_load_recent(table, max_items))
      base_candidates.extend(_load_continuity_candidates(max_items))
      return _dedupe_candidates(base_candidates, max_items)
@@ -439,10 +515,11 @@ def run_ponder_cycle(max_items: int = 5) -> Dict[str, object]:
 
      insights: List[Dict[str, object]] = []
      for item in unresolved[:max_items]:
-         summary = str(item.get("summary") or "").strip()
-         if not summary:
+         raw_summary = str(item.get("summary") or "").strip()
+         if not raw_summary:
              continue
          source_reference = str(item.get("reference") or "") or str(item.get("target_reference") or "")
+         summary = _refine_unresolved_summary(raw_summary, source_reference)
          reflection_ref = _store_reflection(
              f"Unresolved state remains active: {summary}",
              source_reference=source_reference or "unresolved_state",
@@ -1,7 +1,9 @@
  from __future__ import annotations
 
+ import hashlib
  import json
  import math
+ import re
  import threading
  from typing import Any, Dict, List, Iterable
 
@@ -22,6 +24,9 @@ EMBEDDING_TABLES: tuple[str, ...] = (
  )
  _REBUILD_LOCK = threading.Lock()
  _WRITE_CHUNK_SIZE = 64
+ _EMBEDDING_TEXT_LIMIT = 8000
+ _HTML_TAG_RE = re.compile(r"<[^>]+>")
+ _WHITESPACE_RE = re.compile(r"\s+")
 
 
  def _ensure_vector_table(conn) -> None:
@@ -94,30 +99,66 @@ def insert_memory(memory_id: int, content: str, confidence: float, *, source_typ
      store.submit_write(_write, timeout=30.0)
 
 
- def _load_table_rows(table: str, *, limit: int | None = None, descending: bool = False) -> List[Dict[str, Any]]:
+ def _load_table_rows(table: str, *, limit: int | None = None, descending: bool = False, missing_only: bool = False) -> List[Dict[str, Any]]:
      conn = store.connect()
      try:
          order = "DESC" if descending else "ASC"
+         where = ""
+         params: list[Any] = []
+         if missing_only:
+             where = " WHERE CAST(id AS TEXT) NOT IN (SELECT source_id FROM vector_embeddings WHERE source_type = ?)"
+             params.append(table)
          if limit is None:
              rows = conn.execute(
-                 f"SELECT id, content, confidence, metadata_json FROM {table} ORDER BY id {order}",
+                 f"SELECT id, content, confidence, metadata_json FROM {table}{where} ORDER BY id {order}",
+                 tuple(params),
              ).fetchall()
          else:
              rows = conn.execute(
-                 f"SELECT id, content, confidence, metadata_json FROM {table} ORDER BY id {order} LIMIT ?",
-                 (limit,),
+                 f"SELECT id, content, confidence, metadata_json FROM {table}{where} ORDER BY id {order} LIMIT ?",
+                 tuple(params + [limit]),
              ).fetchall()
      finally:
          conn.close()
      return [dict(row) for row in rows]
 
 
+ def _embedding_input(text: str, *, table: str = "knowledge") -> str:
+     cleaned = _HTML_TAG_RE.sub(" ", text)
+     cleaned = _WHITESPACE_RE.sub(" ", cleaned).strip()
+     lowered = cleaned.lower()
+     artifactish = (
+         "| chunk " in lowered
+         or ".sql" in lowered
+         or "topology/" in lowered
+         or cleaned.count("),(") >= 8
+     )
+     if table == "knowledge" and artifactish:
+         return cleaned[:500]
+     if table == "knowledge" and len(cleaned) > 9000:
+         return cleaned[:1000]
+     if table == "reflections" and len(cleaned) > 8000:
+         return cleaned[:1200]
+     if len(cleaned) > 20000:
+         return cleaned[:2000]
+     if len(cleaned) > 12000:
+         return cleaned[:4000]
+     return cleaned[:_EMBEDDING_TEXT_LIMIT]
+
+
  def _prepare_embedding_rows(rows: Iterable[Dict[str, Any]], *, table: str) -> List[Dict[str, Any]]:
      prepared: List[Dict[str, Any]] = []
+     embedding_cache: Dict[str, List[float] | None] = {}
      for row in rows:
          content = str(row.get("content") or "")
          redacted_content, changed = redaction.redact_text(content)
-         embedding = embedding_engine.generate_embedding(redacted_content)
+         embedding_input = _embedding_input(redacted_content, table=table)
+         cache_key = hashlib.sha256(embedding_input.encode("utf-8", errors="ignore")).hexdigest()
+         if cache_key in embedding_cache:
+             embedding = embedding_cache[cache_key]
+         else:
+             embedding = embedding_engine.generate_embedding(embedding_input)
+             embedding_cache[cache_key] = embedding
          if not embedding:
              continue
          try:
@@ -213,6 +254,27 @@ def rebuild_vector_index(*, tables: Iterable[str] | None = None) -> int:
      return count
 
 
+ def backfill_missing_vectors(*, tables: Iterable[str] | None = None, limit_per_table: int | None = None) -> int:
+     emit_event(LOGFILE, "brain_memory_vector_backfill_start", status="ok")
+     if not _REBUILD_LOCK.acquire(blocking=False):
+         emit_event(LOGFILE, "brain_memory_vector_backfill_complete", status="skipped", reason="already_running")
+         return 0
+     count = 0
+     try:
+         requested_tables = [table for table in (tables or EMBEDDING_TABLES) if table in EMBEDDING_TABLES]
+         for table in requested_tables:
+             prepared = _prepare_embedding_rows(
+                 _load_table_rows(table, limit=limit_per_table, missing_only=True),
+                 table=table,
+             )
+             for offset in range(0, len(prepared), _WRITE_CHUNK_SIZE):
+                 count += _write_embedding_chunk(table, prepared[offset: offset + _WRITE_CHUNK_SIZE])
+     finally:
+         _REBUILD_LOCK.release()
+     emit_event(LOGFILE, "brain_memory_vector_backfill_complete", status="ok", indexed=count)
+     return count
+
+
  def search_memory(query: str, limit: int = 5) -> List[Dict[str, Any]]:
      emit_event(LOGFILE, "brain_memory_vector_search_start", status="ok")
      conn = store.connect()
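A hedged usage sketch of the incremental backfill entrypoint added above; the table names and limit are illustrative, not prescriptive.

```python
from brain.runtime.memory import vector_index

# Backfill only rows that have no embedding yet, in laptop-sized batches,
# without clearing the existing index the way a full rebuild would.
indexed = vector_index.backfill_missing_vectors(
    tables=["directives", "tasks"],
    limit_per_table=500,
)
print(f"indexed {indexed} previously missing vectors")
```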
@@ -0,0 +1,70 @@
+ # 2026-03-18 — Memory repair, integrity cleanup, and backfill tooling
+
+ ## Summary
+ This pass focused on turning `ocmemog` from a noisy/fragile memory stack into a more repairable and laptop-safe system. The work addressed:
+ - bad default vector rebuild behavior
+ - misleading health/compat signals
+ - missing `memory_reference` writer debt
+ - poor freshness summaries
+ - lack of an incremental vector backfill path
+ - battery-unfriendly defaults in the sidecar launcher
+
+ ## Changes landed
+
+ ### Embedding and rebuild behavior
+ - Fixed the vector reindex entrypoint so it defaults to provider-backed Ollama embeddings instead of silently rebuilding weak hash/simple vectors.
+ - Confirmed local Ollama embeddings (`nomic-embed-text:latest`) are available and produce 768-dim vectors.
+ - Added a new incremental repair path:
+   - `backfill_missing_vectors()` in `brain/runtime/memory/vector_index.py`
+   - `scripts/ocmemog-backfill-vectors.py`
+ - This gives a non-destructive, table-by-table, chunkable way to backfill missing vectors without requiring a full destructive rebuild.
+
+ ### Integrity and writer correctness
+ - Fixed `record_reinforcement()` so new `experiences` rows preserve a deterministic `memory_reference`.
+ - Added repair support for legacy rows missing `memory_reference`.
+ - Ran integrity repair and backfilled `1807` missing references.
+ - Fixed duplicate promotion integrity reporting so grouped duplicate counts are reported accurately.
+
+ ### Health and output quality
+ - Fixed sidecar compat/health reporting so provider-backed embeddings do not falsely report local hash fallback warnings.
+ - Cleaned freshness summaries so placeholder content like `promoted`, `candidate_promoted`, `summary`, and `No local memory summary available` does not pollute advisories.
+ - Junk-only rows now surface as `(needs summary cleanup)` instead of pretending they contain a meaningful summary.
+
+ ### Laptop/battery-aware behavior
+ - Added battery-aware defaults to `scripts/ocmemog-sidecar.sh`.
+ - `OCMEMOG_LAPTOP_MODE=auto|ac|battery` now controls watcher/ingest aggressiveness.
+ - On battery, the sidecar uses slower polling and smaller batches, and disables sentiment reinforcement by default.
+
+ ## Current integrity state
+ After writer/reference repair:
+ - `missing_memory_reference` debt is cleared
+ - the remaining integrity issue is primarily vector backlog:
+   - `vector_missing:19935`
+
+ Observed coverage snapshot during staged backfill work:
+ - `knowledge`: 15999 rows, 0 vectors
+ - `runbooks`: 179 rows, 152 vectors
+ - `lessons`: 76 rows, 76 vectors
+ - `directives`: 233 rows, 206 vectors
+ - `reflections`: 3460 rows, 83 vectors
+ - `tasks`: 505 rows, 0 vectors
+
+ ## Why backlog remains
+ The remaining `vector_missing` debt is mostly historical backlog rather than an active write-path failure. New writes index correctly; the old corpus was simply never fully rebuilt under the corrected provider-backed embedding path.
+
+ ## Recommended staged follow-up
+ For laptop-friendly backlog burn-down, run staged backfills in roughly this order (a sketch follows this file's diff):
+ 1. directives
+ 2. tasks
+ 3. runbooks
+ 4. lessons
+ 5. reflections
+ 6. knowledge last
+
+ ## Commits from this sweep
+ - `f3d3dd9` — fix: default vector reindex to ollama embeddings
+ - `759d23d` — feat: add battery-aware sidecar defaults
+ - `4a102eb` — fix: clean memory freshness summaries
+ - `9ee7966` — fix: report duplicate promotion counts accurately
+ - `8704db9` — fix: preserve and repair experience memory references
+ - `5dc3cb9` — feat: add incremental vector backfill tooling
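A minimal sketch of the staged burn-down the note recommends, assuming the `backfill_missing_vectors` signature shown earlier in this diff; batch sizes are illustrative only.

```python
from brain.runtime.memory import vector_index

# Staged, laptop-friendly burn-down: small tables first, the large
# knowledge backlog last, in bounded chunks per pass. Limits are examples.
STAGES = ["directives", "tasks", "runbooks", "lessons", "reflections", "knowledge"]

for table in STAGES:
    done = vector_index.backfill_missing_vectors(tables=[table], limit_per_table=1000)
    print(f"{table}: backfilled {done} vectors this pass")
```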
@@ -0,0 +1,50 @@
+ # Local model role matrix — 2026-03-18
+
+ Purpose: document which installed local model is best suited for which `ocmemog` task, so background cognition can be smarter without putting heavy/slow models on every path.
+
+ Installed local models observed:
+ - `phi3:latest`
+ - `qwen2.5:7b`
+ - `llama3.1:8b`
+ - embeddings: `nomic-embed-text:latest`
+
+ ## Intended decision areas
+ - unresolved-state rewrite
+ - lesson extraction
+ - ponder/reflection shaping
+ - cluster recommendation wording
+ - fallback/speed path
+
+ ## Bakeoff results
+
+ ### Unresolved-state rewrite
+ - **Winner:** `qwen2.5:7b`
+ - Why: cleanest concise rewrite, best instruction-following, least rambling.
+ - Notes:
+   - `phi3:latest` tended to be verbose and occasionally hallucination-prone.
+   - `llama3.1:8b` produced one outright unusable response ("None found...").
+
+ ### Lesson extraction
+ - **Winner:** `qwen2.5:7b`
+ - Strong alternate: `llama3.1:8b`
+ - Why: `qwen2.5:7b` produced the clearest operational lesson with good cause/effect preservation.
+ - Notes:
+   - `phi3:latest` was weaker and more generic.
+
+ ### Cluster insight / recommendation shaping
+ - **Winner:** `qwen2.5:7b`
+ - Why: best structured output, least fluff, most concrete recommendation wording.
+ - Notes:
+   - `llama3.1:8b` was decent but wordier and more stylized.
+   - `phi3:latest` timed out or underperformed on this task.
+
+ ## Recommended model-role split
+ - embeddings: `nomic-embed-text:latest`
+ - fast fallback cognition: `phi3:latest`
+ - default structured memory refinement / ponder model: `qwen2.5:7b`
+ - richer optional background cognition: `llama3.1:8b`
+
+ ## Operational recommendation
+ - Keep `OCMEMOG_OLLAMA_MODEL=phi3:latest` for lightweight local fallback behavior.
+ - Set `OCMEMOG_PONDER_MODEL=qwen2.5:7b` for unresolved-state rewrite, lesson extraction, and cluster recommendation shaping.
+ - Consider `llama3.1:8b` for optional deeper background cognition passes where latency is acceptable.
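A hedged sketch of wiring this role split into a launcher script via overridable environment defaults, mirroring the `os.environ.setdefault` pattern the repo's scripts use elsewhere in this diff; the values are the matrix's recommendations, and the background-model variable at the end is hypothetical, not part of the package.

```python
import os

# Model-role split from the matrix above, applied as overridable defaults.
os.environ.setdefault("OCMEMOG_OLLAMA_EMBED_MODEL", "nomic-embed-text:latest")  # embeddings
os.environ.setdefault("OCMEMOG_OLLAMA_MODEL", "phi3:latest")                    # fast fallback cognition
os.environ.setdefault("OCMEMOG_PONDER_MODEL", "qwen2.5:7b")                     # structured refinement
# Deeper optional background passes could point a hypothetical
# OCMEMOG_BACKGROUND_MODEL at llama3.1:8b where latency is acceptable.
```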
package/docs/usage.md CHANGED
@@ -34,6 +34,8 @@ export OCMEMOG_TRANSCRIPT_WATCHER=true
  ./scripts/ocmemog-sidecar.sh
  ```
 
+ On macOS laptops, the launcher defaults to `OCMEMOG_LAPTOP_MODE=auto`, which detects battery power and uses lower-impact watcher settings automatically. Override with `OCMEMOG_LAPTOP_MODE=ac` for wall-power behavior or `OCMEMOG_LAPTOP_MODE=battery` to force conservative mode.
+
  Useful environment variables:
 
  ```bash
@@ -34,6 +34,32 @@ QUEUE_STATS = {
  }
 
 
+ def _queue_stats_path() -> Path:
+     path = state_store.data_dir() / "queue_stats.json"
+     path.parent.mkdir(parents=True, exist_ok=True)
+     return path
+
+
+ def _load_queue_stats() -> None:
+     path = _queue_stats_path()
+     try:
+         data = json.loads(path.read_text(encoding="utf-8"))
+     except Exception:
+         return
+     if not isinstance(data, dict):
+         return
+     for key in list(QUEUE_STATS.keys()):
+         if key in data:
+             QUEUE_STATS[key] = data[key]
+
+
+ def _save_queue_stats() -> None:
+     path = _queue_stats_path()
+     tmp = path.with_suffix('.tmp')
+     tmp.write_text(json.dumps(QUEUE_STATS, indent=2, sort_keys=True), encoding='utf-8')
+     tmp.replace(path)
+
+
  @app.middleware("http")
  async def _auth_middleware(request: Request, call_next):
      if API_TOKEN:
@@ -177,6 +203,7 @@ def _process_queue(limit: Optional[int] = None) -> Dict[str, Any]:
      QUEUE_STATS["errors"] += errors
      if last_error:
          QUEUE_STATS["last_error"] = last_error
+     _save_queue_stats()
      return {"processed": processed, "errors": errors, "last_error": last_error}
 
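The diff shows the save side (`_save_queue_stats()` after each queue pass) but not where `_load_queue_stats()` is invoked. A hedged sketch of the counterpart wiring one might expect in a FastAPI app; the startup registration here is an assumption, not shown in the diff.

```python
# Assumed wiring (not present in this diff): restore persisted queue stats
# when the sidecar process starts, so counters survive restarts.
@app.on_event("startup")
async def _restore_queue_stats() -> None:
    _load_queue_stats()  # repopulate QUEUE_STATS from queue_stats.json, if present
```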
@@ -388,7 +415,38 @@ def _fallback_search(query: str, limit: int, categories: List[str]) -> List[Dict
      conn.close()
 
 
+ _ALLOWED_MEMORY_REFERENCE_TYPES = {
+     "knowledge",
+     "reflections",
+     "directives",
+     "tasks",
+     "runbooks",
+     "lessons",
+     "conversation_turns",
+     "conversation_checkpoints",
+ }
+
+
+ def _parse_reference(reference: str) -> tuple[str, str] | None:
+     if not isinstance(reference, str) or ":" not in reference:
+         return None
+     prefix, identifier = reference.split(":", 1)
+     prefix = prefix.strip()
+     identifier = identifier.strip()
+     if not prefix or not identifier:
+         return None
+     return prefix, identifier
+
+
  def _get_row(reference: str) -> Optional[Dict[str, Any]]:
+     parsed = _parse_reference(reference)
+     if not parsed:
+         return None
+     prefix, identifier = parsed
+     if prefix not in _ALLOWED_MEMORY_REFERENCE_TYPES:
+         return None
+     if prefix in {"knowledge", "reflections", "directives", "tasks", "runbooks", "lessons", "conversation_turns", "conversation_checkpoints"} and not identifier.isdigit():
+         return None
      return provenance.hydrate_reference(reference, depth=2)
 
 
@@ -541,11 +599,38 @@ def memory_search(request: SearchRequest) -> dict[str, Any]:
  @app.post("/memory/get")
  def memory_get(request: GetRequest) -> dict[str, Any]:
      runtime = _runtime_payload()
-     row = _get_row(request.reference)
+     parsed = _parse_reference(request.reference)
+     if not parsed:
+         return {
+             "ok": False,
+             "error": "invalid_reference",
+             "message": "Reference must be in the form type:id",
+             "reference": request.reference,
+             **runtime,
+         }
+     prefix, identifier = parsed
+     if prefix not in _ALLOWED_MEMORY_REFERENCE_TYPES:
+         return {
+             "ok": False,
+             "error": "unsupported_reference_type",
+             "message": f"Unsupported memory reference type: {prefix}",
+             "reference": request.reference,
+             **runtime,
+         }
+     if prefix in {"knowledge", "reflections", "directives", "tasks", "runbooks", "lessons", "conversation_turns", "conversation_checkpoints"} and not identifier.isdigit():
+         return {
+             "ok": False,
+             "error": "invalid_reference_id",
+             "message": f"Reference id for {prefix} must be numeric",
+             "reference": request.reference,
+             **runtime,
+         }
+     row = provenance.hydrate_reference(request.reference, depth=2)
      if row is None:
          return {
              "ok": False,
-             "error": "TODO: memory reference was not found or is not yet supported by the sidecar.",
+             "error": "reference_not_found",
+             "message": "Reference was well-formed but no matching memory was found",
              "reference": request.reference,
              **runtime,
          }
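A hedged client-side sketch exercising the structured errors `memory_get` now returns. The host/port are taken from the ingest endpoint default elsewhere in this diff (`127.0.0.1:17890`), the references are invented, and it assumes no `OCMEMOG_API_TOKEN` is set.

```python
import json
import urllib.request

def memory_get(reference: str) -> dict:
    # POST {"reference": ...} to the sidecar's /memory/get endpoint.
    req = urllib.request.Request(
        "http://127.0.0.1:17890/memory/get",
        data=json.dumps({"reference": reference}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())

print(memory_get("not-a-reference")["error"])  # -> invalid_reference
print(memory_get("promotions:12")["error"])    # -> unsupported_reference_type
print(memory_get("knowledge:abc")["error"])    # -> invalid_reference_id
```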
@@ -761,11 +846,19 @@ def memory_ponder_latest(limit: int = 5) -> dict[str, Any]:
              meta = json.loads(row["metadata_json"] or "{}")
          except Exception:
              meta = {}
+         content = str(row["content"] or "")
+         summary = content
+         recommendation = meta.get("recommendation")
+         if "\nRecommendation:" in content:
+             summary, _, tail = content.partition("\nRecommendation:")
+             summary = summary.strip()
+             if not recommendation:
+                 recommendation = tail.strip()
          items.append({
              "reference": f"reflections:{row['id']}",
              "timestamp": row["timestamp"],
-             "summary": row["content"],
-             "recommendation": meta.get("recommendation"),
+             "summary": summary,
+             "recommendation": recommendation,
              "source_reference": meta.get("source_reference") or ((meta.get("provenance") or {}).get("source_reference") if isinstance(meta.get("provenance"), dict) else None),
              "provenance": provenance.preview_from_metadata(meta),
          })
@@ -958,6 +1051,20 @@ def metrics() -> dict[str, Any]:
      counts["queue_processed"] = QUEUE_STATS.get("processed", 0)
      counts["queue_errors"] = QUEUE_STATS.get("errors", 0)
      payload["counts"] = counts
+     coverage_tables = ["knowledge", "runbooks", "lessons", "directives", "reflections", "tasks"]
+     conn = store.connect()
+     try:
+         payload["coverage"] = [
+             {
+                 "table": table,
+                 "rows": int(counts.get(table, 0) or 0),
+                 "vectors": int(conn.execute("SELECT COUNT(*) FROM vector_embeddings WHERE source_type=?", (table,)).fetchone()[0] or 0),
+                 "missing": max(int(counts.get(table, 0) or 0) - int(conn.execute("SELECT COUNT(*) FROM vector_embeddings WHERE source_type=?", (table,)).fetchone()[0] or 0), 0),
+             }
+             for table in coverage_tables
+         ]
+     finally:
+         conn.close()
      payload["queue"] = QUEUE_STATS
      return {"ok": True, "metrics": payload, **runtime}
 
@@ -997,8 +1104,37 @@ def _tail_events(limit: int = 50) -> str:
  def dashboard() -> HTMLResponse:
      metrics_payload = health.get_memory_health()
      counts = metrics_payload.get("counts", {})
+     coverage_tables = ["knowledge", "runbooks", "lessons", "directives", "reflections", "tasks"]
+     conn = store.connect()
+     try:
+         coverage_rows = []
+         for table in coverage_tables:
+             total = int(counts.get(table, 0) or 0)
+             vectors = int(
+                 conn.execute(
+                     "SELECT COUNT(*) FROM vector_embeddings WHERE source_type=?",
+                     (table,),
+                 ).fetchone()[0]
+                 or 0
+             )
+             missing = max(total - vectors, 0)
+             coverage_rows.append({"table": table, "rows": total, "vectors": vectors, "missing": missing})
+     finally:
+         conn.close()
+
+     metrics_cards = [{"label": key, "value": value} for key, value in counts.items()]
+     metrics_cards.extend(
+         [
+             {"label": "vector_index_count", "value": metrics_payload.get("vector_index_count", 0)},
+             {"label": "vector_index_coverage", "value": metrics_payload.get("vector_index_coverage", 0)},
+         ]
+     )
      metrics_html = "".join(
-         f"<div class='card'><strong>{key}</strong><br/>{value}</div>" for key, value in counts.items()
+         f"<div class='card'><strong>{card['label']}</strong><br/>{card['value']}</div>" for card in metrics_cards
+     )
+     coverage_html = "".join(
+         f"<div class='card'><strong>{row['table']}</strong><br/>rows: {row['rows']}<br/>vectors: {row['vectors']}<br/>missing: {row['missing']}</div>"
+         for row in coverage_rows
      )
      events_html = _tail_events()
 
@@ -1018,6 +1154,8 @@ def dashboard() -> HTMLResponse:
      <body>
        <h2>ocmemog realtime</h2>
        <div class="metrics" id="metrics">{metrics_html}</div>
+       <h3>Vector coverage</h3>
+       <div class="metrics" id="coverage">{coverage_html}</div>
        <h3>Ponder recommendations</h3>
        <div id="ponder-meta" style="margin-bottom:8px; color:#666;"></div>
        <div id="ponder"></div>
@@ -1025,6 +1163,7 @@ def dashboard() -> HTMLResponse:
      <pre id="events">{events_html}</pre>
      <script>
        const metricsEl = document.getElementById('metrics');
+       const coverageEl = document.getElementById('coverage');
        const ponderEl = document.getElementById('ponder');
        const ponderMetaEl = document.getElementById('ponder-meta');
        const eventsEl = document.getElementById('events');
@@ -1033,8 +1172,17 @@ def dashboard() -> HTMLResponse:
          const res = await fetch('/metrics');
          const data = await res.json();
          const counts = data.metrics?.counts || {{}};
-         metricsEl.innerHTML = Object.entries(counts).map(([k,v]) =>
-           `<div class=\"card\"><strong>${{k}}</strong><br/>${{v}}</div>`
+         const cards = [
+           ...Object.entries(counts).map(([k, v]) => ({{ label: k, value: v }})),
+           {{ label: 'vector_index_count', value: data.metrics?.vector_index_count ?? 0 }},
+           {{ label: 'vector_index_coverage', value: data.metrics?.vector_index_coverage ?? 0 }},
+         ];
+         metricsEl.innerHTML = cards.map((card) =>
+           `<div class="card"><strong>${{card.label}}</strong><br/>${{card.value}}</div>`
+         ).join('');
+         const coverage = data.metrics?.coverage || [];
+         coverageEl.innerHTML = coverage.map((row) =>
+           `<div class="card"><strong>${{row.table}}</strong><br/>rows: ${{row.rows}}<br/>vectors: ${{row.vectors}}<br/>missing: ${{row.missing}}</div>`
          ).join('');
        }}
 
@@ -1047,7 +1195,7 @@ def dashboard() -> HTMLResponse:
          const mode = data.mode || 'n/a';
          ponderMetaEl.textContent = `Last update: ${{lastTs}} • Mode: ${{mode}}${{warnings ? ' • ' + warnings : ''}}`;
          ponderEl.innerHTML = items.map((item) =>
-           `<div class=\"card\"><strong>${{item.summary}}</strong><br/><em>${{item.recommendation || ''}}</em><br/><small>${{item.timestamp || ''}} • ${{item.reference || ''}}</small></div>`
+           `<div class="card"><strong>${{item.summary}}</strong><br/><em>${{item.recommendation || ''}}</em><br/><small>${{item.timestamp || ''}} • ${{item.reference || ''}}</small></div>`
          ).join('');
        }}
 
@@ -2,6 +2,7 @@ from __future__ import annotations
 
  import importlib
  import importlib.util
+ import os
  from dataclasses import dataclass
  from typing import Any
 
@@ -35,7 +36,8 @@ def probe_runtime() -> RuntimeStatus:
      except Exception as exc:
          missing_deps.append(f"{module_name}: {exc}")
 
-     if importlib.util.find_spec("sentence_transformers") is None:
+     provider = os.environ.get("BRAIN_EMBED_MODEL_PROVIDER", "").strip().lower()
+     if importlib.util.find_spec("sentence_transformers") is None and provider not in {"ollama", "openai", "openai_compatible", "openai-compatible", "local-ollama"}:
          warnings.append("Optional dependency missing: sentence-transformers; using local hash embeddings.")
 
      try:
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@simbimbo/memory-ocmemog",
-   "version": "0.1.4",
+   "version": "0.1.5",
    "description": "Advanced OpenClaw memory plugin with durable recall, transcript-backed continuity, and sidecar APIs",
    "license": "MIT",
    "repository": {
@@ -0,0 +1,33 @@
+ #!/usr/bin/env python3
+ from __future__ import annotations
+
+ import argparse
+ import os
+ import sys
+ from pathlib import Path
+
+ REPO_ROOT = Path(__file__).resolve().parents[1]
+ sys.path.insert(0, str(REPO_ROOT))
+
+ os.environ.setdefault("OCMEMOG_USE_OLLAMA", "true")
+ os.environ.setdefault("OCMEMOG_OLLAMA_MODEL", "phi3:latest")
+ os.environ.setdefault("OCMEMOG_OLLAMA_EMBED_MODEL", "nomic-embed-text:latest")
+ os.environ.setdefault("BRAIN_EMBED_MODEL_PROVIDER", "ollama")
+ os.environ.setdefault("BRAIN_EMBED_MODEL_LOCAL", "")
+ os.environ.setdefault("OCMEMOG_STATE_DIR", str(REPO_ROOT / ".ocmemog-state"))
+
+ from brain.runtime.memory import vector_index
+
+
+ def main() -> int:
+     parser = argparse.ArgumentParser(description="Backfill missing vector embeddings without clearing existing ones")
+     parser.add_argument("--table", dest="tables", action="append", help="Table to backfill (repeatable)")
+     parser.add_argument("--limit-per-table", type=int, default=None, help="Optional max missing rows per table")
+     args = parser.parse_args()
+     count = vector_index.backfill_missing_vectors(tables=args.tables, limit_per_table=args.limit_per_table)
+     print(f"backfilled: {count}")
+     return 0
+
+
+ if __name__ == "__main__":
+     raise SystemExit(main())
@@ -1,12 +1,20 @@
  #!/usr/bin/env python3
  from __future__ import annotations
 
+ import os
  import sys
  from pathlib import Path
 
  REPO_ROOT = Path(__file__).resolve().parents[1]
  sys.path.insert(0, str(REPO_ROOT))
 
+ os.environ.setdefault("OCMEMOG_USE_OLLAMA", "true")
+ os.environ.setdefault("OCMEMOG_OLLAMA_MODEL", "phi3:latest")
+ os.environ.setdefault("OCMEMOG_OLLAMA_EMBED_MODEL", "nomic-embed-text:latest")
+ os.environ.setdefault("BRAIN_EMBED_MODEL_PROVIDER", "ollama")
+ os.environ.setdefault("BRAIN_EMBED_MODEL_LOCAL", "")
+ os.environ.setdefault("OCMEMOG_STATE_DIR", str(REPO_ROOT / ".ocmemog-state"))
+
  from brain.runtime.memory import vector_index
 
  if __name__ == "__main__":
@@ -13,21 +13,47 @@ export OCMEMOG_STATE_DIR="${OCMEMOG_STATE_DIR:-${ROOT_DIR}/.ocmemog-state}"
  export PYTHONPATH="${ROOT_DIR}${PYTHONPATH:+:${PYTHONPATH}}"
  mkdir -p "${OCMEMOG_STATE_DIR}" "${OCMEMOG_STATE_DIR}/logs"
 
+ is_on_battery() {
+   if [[ "$(uname -s)" != "Darwin" ]]; then
+     return 1
+   fi
+   command -v pmset >/dev/null 2>&1 || return 1
+   pmset -g batt 2>/dev/null | grep -q "Battery Power"
+ }
+
+ LAPTOP_MODE="${OCMEMOG_LAPTOP_MODE:-auto}"
+ if [[ "$LAPTOP_MODE" == "auto" ]]; then
+   if is_on_battery; then
+     LAPTOP_MODE="battery"
+   else
+     LAPTOP_MODE="ac"
+   fi
+ fi
+ export OCMEMOG_LAPTOP_MODE="$LAPTOP_MODE"
+
  # defaults for local ollama-backed inference/embeddings
  export OCMEMOG_USE_OLLAMA="${OCMEMOG_USE_OLLAMA:-true}"
  export OCMEMOG_OLLAMA_MODEL="${OCMEMOG_OLLAMA_MODEL:-phi3:latest}"
  export OCMEMOG_OLLAMA_EMBED_MODEL="${OCMEMOG_OLLAMA_EMBED_MODEL:-nomic-embed-text:latest}"
+ export OCMEMOG_PONDER_MODEL="${OCMEMOG_PONDER_MODEL:-qwen2.5:7b}"
  export BRAIN_EMBED_MODEL_PROVIDER="${BRAIN_EMBED_MODEL_PROVIDER:-ollama}"
  export BRAIN_EMBED_MODEL_LOCAL="${BRAIN_EMBED_MODEL_LOCAL:-}"
 
- # always-on transcript watcher defaults
+ # battery-aware transcript watcher defaults
  export OCMEMOG_TRANSCRIPT_WATCHER="${OCMEMOG_TRANSCRIPT_WATCHER:-true}"
  export OCMEMOG_SESSION_DIR="${OCMEMOG_SESSION_DIR:-$HOME/.openclaw/agents/main/sessions}"
- export OCMEMOG_TRANSCRIPT_POLL_SECONDS="${OCMEMOG_TRANSCRIPT_POLL_SECONDS:-30}"
- export OCMEMOG_INGEST_BATCH_SECONDS="${OCMEMOG_INGEST_BATCH_SECONDS:-30}"
- export OCMEMOG_INGEST_BATCH_MAX="${OCMEMOG_INGEST_BATCH_MAX:-25}"
+ if [[ "$LAPTOP_MODE" == "battery" ]]; then
+   export OCMEMOG_TRANSCRIPT_POLL_SECONDS="${OCMEMOG_TRANSCRIPT_POLL_SECONDS:-120}"
+   export OCMEMOG_INGEST_BATCH_SECONDS="${OCMEMOG_INGEST_BATCH_SECONDS:-120}"
+   export OCMEMOG_INGEST_BATCH_MAX="${OCMEMOG_INGEST_BATCH_MAX:-10}"
+   export OCMEMOG_REINFORCE_SENTIMENT="${OCMEMOG_REINFORCE_SENTIMENT:-false}"
+ else
+   export OCMEMOG_TRANSCRIPT_POLL_SECONDS="${OCMEMOG_TRANSCRIPT_POLL_SECONDS:-30}"
+   export OCMEMOG_INGEST_BATCH_SECONDS="${OCMEMOG_INGEST_BATCH_SECONDS:-30}"
+   export OCMEMOG_INGEST_BATCH_MAX="${OCMEMOG_INGEST_BATCH_MAX:-25}"
+   export OCMEMOG_REINFORCE_SENTIMENT="${OCMEMOG_REINFORCE_SENTIMENT:-true}"
+ fi
  export OCMEMOG_INGEST_ENDPOINT="${OCMEMOG_INGEST_ENDPOINT:-http://127.0.0.1:17890/memory/ingest_async}"
- export OCMEMOG_REINFORCE_SENTIMENT="${OCMEMOG_REINFORCE_SENTIMENT:-true}"
  export OCMEMOG_INGEST_SOURCE="${OCMEMOG_INGEST_SOURCE:-transcript}"
  export OCMEMOG_INGEST_MEMORY_TYPE="${OCMEMOG_INGEST_MEMORY_TYPE:-reflections}"
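For cross-checking the shell detection above, a hedged Python equivalent of the launcher's `is_on_battery` helper; macOS-only, and it keys off the same "Battery Power" phrase that `pmset -g batt` prints when running unplugged.

```python
import platform
import shutil
import subprocess

def is_on_battery() -> bool:
    # Mirror of the shell helper: Darwin-only, requires pmset, and matches
    # the "Battery Power" phrase in `pmset -g batt` output.
    if platform.system() != "Darwin":
        return False
    if shutil.which("pmset") is None:
        return False
    out = subprocess.run(
        ["pmset", "-g", "batt"], capture_output=True, text=True, check=False
    ).stdout
    return "Battery Power" in out
```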