@simbimbo/memory-ocmemog 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81) hide show
  1. package/CHANGELOG.md +59 -0
  2. package/LICENSE +21 -0
  3. package/README.md +223 -0
  4. package/brain/__init__.py +1 -0
  5. package/brain/runtime/__init__.py +13 -0
  6. package/brain/runtime/config.py +21 -0
  7. package/brain/runtime/inference.py +83 -0
  8. package/brain/runtime/instrumentation.py +17 -0
  9. package/brain/runtime/memory/__init__.py +13 -0
  10. package/brain/runtime/memory/api.py +152 -0
  11. package/brain/runtime/memory/artifacts.py +33 -0
  12. package/brain/runtime/memory/candidate.py +89 -0
  13. package/brain/runtime/memory/context_builder.py +87 -0
  14. package/brain/runtime/memory/conversation_state.py +1825 -0
  15. package/brain/runtime/memory/distill.py +198 -0
  16. package/brain/runtime/memory/embedding_engine.py +94 -0
  17. package/brain/runtime/memory/freshness.py +91 -0
  18. package/brain/runtime/memory/health.py +42 -0
  19. package/brain/runtime/memory/integrity.py +170 -0
  20. package/brain/runtime/memory/interaction_memory.py +57 -0
  21. package/brain/runtime/memory/memory_consolidation.py +60 -0
  22. package/brain/runtime/memory/memory_gate.py +38 -0
  23. package/brain/runtime/memory/memory_graph.py +54 -0
  24. package/brain/runtime/memory/memory_links.py +109 -0
  25. package/brain/runtime/memory/memory_salience.py +235 -0
  26. package/brain/runtime/memory/memory_synthesis.py +33 -0
  27. package/brain/runtime/memory/memory_taxonomy.py +35 -0
  28. package/brain/runtime/memory/person_identity.py +83 -0
  29. package/brain/runtime/memory/person_memory.py +138 -0
  30. package/brain/runtime/memory/pondering_engine.py +577 -0
  31. package/brain/runtime/memory/promote.py +237 -0
  32. package/brain/runtime/memory/provenance.py +356 -0
  33. package/brain/runtime/memory/reinforcement.py +73 -0
  34. package/brain/runtime/memory/retrieval.py +153 -0
  35. package/brain/runtime/memory/semantic_search.py +66 -0
  36. package/brain/runtime/memory/sentiment_memory.py +67 -0
  37. package/brain/runtime/memory/store.py +400 -0
  38. package/brain/runtime/memory/tool_catalog.py +68 -0
  39. package/brain/runtime/memory/unresolved_state.py +93 -0
  40. package/brain/runtime/memory/vector_index.py +270 -0
  41. package/brain/runtime/model_roles.py +11 -0
  42. package/brain/runtime/model_router.py +22 -0
  43. package/brain/runtime/providers.py +59 -0
  44. package/brain/runtime/security/__init__.py +3 -0
  45. package/brain/runtime/security/redaction.py +14 -0
  46. package/brain/runtime/state_store.py +25 -0
  47. package/brain/runtime/storage_paths.py +41 -0
  48. package/docs/architecture/memory.md +118 -0
  49. package/docs/release-checklist.md +34 -0
  50. package/docs/reports/ocmemog-code-audit-2026-03-14.md +155 -0
  51. package/docs/usage.md +223 -0
  52. package/index.ts +726 -0
  53. package/ocmemog/__init__.py +1 -0
  54. package/ocmemog/sidecar/__init__.py +1 -0
  55. package/ocmemog/sidecar/app.py +1068 -0
  56. package/ocmemog/sidecar/compat.py +74 -0
  57. package/ocmemog/sidecar/transcript_watcher.py +425 -0
  58. package/openclaw.plugin.json +18 -0
  59. package/package.json +60 -0
  60. package/scripts/install-ocmemog.sh +277 -0
  61. package/scripts/launchagents/com.openclaw.ocmemog.guard.plist +22 -0
  62. package/scripts/launchagents/com.openclaw.ocmemog.ponder.plist +22 -0
  63. package/scripts/launchagents/com.openclaw.ocmemog.sidecar.plist +27 -0
  64. package/scripts/ocmemog-context.sh +15 -0
  65. package/scripts/ocmemog-continuity-benchmark.py +178 -0
  66. package/scripts/ocmemog-demo.py +122 -0
  67. package/scripts/ocmemog-failover-test.sh +17 -0
  68. package/scripts/ocmemog-guard.sh +11 -0
  69. package/scripts/ocmemog-install.sh +93 -0
  70. package/scripts/ocmemog-load-test.py +106 -0
  71. package/scripts/ocmemog-ponder.sh +30 -0
  72. package/scripts/ocmemog-recall-test.py +58 -0
  73. package/scripts/ocmemog-reindex-vectors.py +14 -0
  74. package/scripts/ocmemog-reliability-soak.py +177 -0
  75. package/scripts/ocmemog-sidecar.sh +46 -0
  76. package/scripts/ocmemog-soak-report.py +58 -0
  77. package/scripts/ocmemog-soak-test.py +44 -0
  78. package/scripts/ocmemog-test-rig.py +345 -0
  79. package/scripts/ocmemog-transcript-append.py +45 -0
  80. package/scripts/ocmemog-transcript-watcher.py +8 -0
  81. package/scripts/ocmemog-transcript-watcher.sh +7 -0
@@ -0,0 +1,74 @@
1
+ from __future__ import annotations
2
+
3
+ import importlib
4
+ import importlib.util
5
+ from dataclasses import dataclass
6
+ from typing import Any
7
+
8
+
9
@dataclass(frozen=True)
class RuntimeStatus:
    """Immutable snapshot of the runtime's readiness, produced by probe_runtime().

    mode is "ready" when no required module failed to import, else "degraded".
    """

    # "ready" or "degraded" (see probe_runtime()).
    mode: str
    # Human-readable "module: error" strings for required imports that failed,
    # plus shim-only compatibility findings.
    missing_deps: list[str]
    # Copy of TODO_ITEMS: known follow-up work surfaced to callers.
    todo: list[str]
    # Non-fatal findings, e.g. optional dependencies that are absent.
    warnings: list[str]
15
+
16
+
17
# Known follow-up work; probe_runtime() copies this list into
# RuntimeStatus.todo so callers can surface it without importing this module's
# internals.
TODO_ITEMS = [
    "Add a role registry (brain.runtime.roles) if you want role-prioritized context building.",
    "Add non-OpenAI embedding providers if required.",
]
21
+
22
+
23
def probe_runtime() -> RuntimeStatus:
    """Import-probe the memory runtime and report readiness.

    Attempts to import each required memory module, checks for the optional
    sentence-transformers package, and detects shim-only stand-ins for the
    inference/provider layer. Returns a RuntimeStatus whose mode is
    "degraded" if anything required is missing, "ready" otherwise.
    """
    broken: list[str] = []
    notes: list[str] = []

    required = (
        "brain.runtime.memory.store",
        "brain.runtime.memory.retrieval",
        "brain.runtime.memory.vector_index",
        "brain.runtime.memory.memory_links",
    )
    for name in required:
        try:
            importlib.import_module(name)
        except Exception as exc:
            broken.append(f"{name}: {exc}")

    # Optional embedding backend: absence is only a warning, not degradation.
    if importlib.util.find_spec("sentence_transformers") is None:
        notes.append("Optional dependency missing: sentence-transformers; using local hash embeddings.")

    try:
        from brain.runtime import inference, providers

        # Modules may be installed as shims; treat those as missing capability.
        if getattr(inference, "__shim__", False):
            broken.append("brain.runtime.inference (shim only)")
        if getattr(getattr(providers, "provider_execute", None), "__shim__", False):
            broken.append("brain.runtime.providers.provider_execute (shim only)")
    except Exception as exc:
        broken.append(f"brain.runtime compatibility probe failed: {exc}")

    return RuntimeStatus(
        mode="degraded" if broken else "ready",
        missing_deps=broken,
        todo=list(TODO_ITEMS),
        warnings=notes,
    )
53
+
54
+
55
def flatten_results(results: dict[str, list[dict[str, Any]]]) -> list[dict[str, Any]]:
    """Flatten bucketed retrieval results into a single score-sorted list.

    Each entry's "memory_reference" of the form "table:id" is split into
    "table" and "id" fields (falling back to the bucket name when the table
    part is empty). Rows are returned sorted by "score", highest first.
    """

    def _row(bucket: str, entry: dict[str, Any]) -> dict[str, Any]:
        # Normalize the reference; entries without one yield empty table/id.
        reference = str(entry.get("memory_reference") or "")
        table, _, raw_id = reference.partition(":")
        return {
            "bucket": bucket,
            "reference": reference,
            "table": table or bucket,
            "id": raw_id,
            "content": entry.get("content", ""),
            "score": float(entry.get("score", 0.0) or 0.0),
            "links": entry.get("links", []),
            "provenance": entry.get("provenance_preview") or {},
        }

    rows = [_row(bucket, entry) for bucket, entries in results.items() for entry in entries]
    # Stable sort keeps insertion order among equal scores.
    rows.sort(key=lambda row: row["score"], reverse=True)
    return rows
@@ -0,0 +1,425 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ import time
6
+ from pathlib import Path
7
+ from typing import Optional
8
+ from urllib import request as urlrequest
9
+
10
# Default sidecar endpoint for batched, asynchronous memory ingest; overridable
# via OCMEMOG_INGEST_ENDPOINT in watch_forever().
DEFAULT_ENDPOINT = "http://127.0.0.1:17890/memory/ingest_async"
# Filename patterns used when a directory (rather than a single file) is
# watched: plain transcript logs vs. OpenClaw session JSONL files.
DEFAULT_GLOB = "*.log"
DEFAULT_SESSION_GLOB = "*.jsonl"
# Substrings of a user message that trigger a positive reinforcement signal
# (matched case-insensitively in _maybe_reinforce inside watch_forever()).
DEFAULT_REINFORCE_POSITIVE = [
    "good job",
    "nice job",
    "well done",
    "great work",
    "awesome",
    "thanks",
    "thank you",
    "love it",
]
# Substrings that trigger a negative reinforcement signal.
DEFAULT_REINFORCE_NEGATIVE = [
    "not good",
    "bad job",
    "this sucks",
    "terrible",
    "awful",
    "wrong",
    "disappointed",
    "frustrated",
]
33
+
34
+
35
+ def _pick_latest(path: Path, pattern: str) -> Optional[Path]:
36
+ if path.is_file():
37
+ return path
38
+ if not path.exists():
39
+ return None
40
+ files = sorted(path.glob(pattern), key=lambda p: p.stat().st_mtime)
41
+ return files[-1] if files else None
42
+
43
+
44
def _post_ingest(endpoint: str, payload: dict) -> None:
    """POST *payload* as JSON to the ingest *endpoint*, ignoring failures.

    Same contract as _post_json; the body was previously a byte-for-byte
    duplicate of it, so it now delegates. Kept as a named alias so call sites
    read as "ingest" rather than generic JSON posting.
    """
    _post_json(endpoint, payload)
53
+
54
+
55
+ def _post_json(endpoint: str, payload: dict) -> None:
56
+ data = json.dumps(payload).encode("utf-8")
57
+ req = urlrequest.Request(endpoint, data=data, method="POST")
58
+ req.add_header("Content-Type", "application/json")
59
+ try:
60
+ with urlrequest.urlopen(req, timeout=10) as resp:
61
+ resp.read()
62
+ except Exception:
63
+ return
64
+
65
+
66
def _post_turn(endpoint: str, payload: dict) -> None:
    """Send a single conversation turn to the sidecar (best-effort POST)."""
    _post_json(endpoint, payload)
68
+
69
+
70
+ def _extract_user_text(text: str) -> str:
71
+ # Prefer the final user line: "[Sat ...] message"
72
+ candidate = ""
73
+ for line in text.splitlines():
74
+ line = line.strip()
75
+ if line.startswith("[") and "]" in line:
76
+ tail = line.split("]", 1)[-1].strip()
77
+ if tail:
78
+ candidate = tail
79
+ return candidate or text
80
+
81
+
82
+ def _extract_conversation_info(text: str) -> dict:
83
+ marker = "Conversation info (untrusted metadata):"
84
+ if marker not in text:
85
+ return {}
86
+ tail = text.split(marker, 1)[1]
87
+ start = tail.find("```")
88
+ if start < 0:
89
+ return {}
90
+ tail = tail[start + 3 :]
91
+ if tail.startswith("json"):
92
+ tail = tail[4:]
93
+ end = tail.find("```")
94
+ if end < 0:
95
+ return {}
96
+ raw = tail[:end].strip()
97
+ try:
98
+ payload = json.loads(raw)
99
+ except Exception:
100
+ return {}
101
+ return payload if isinstance(payload, dict) else {}
102
+
103
+
104
+ def _parse_transcript_line(text: str) -> tuple[Optional[str], str]:
105
+ stripped = text.strip()
106
+ if not stripped:
107
+ return None, ""
108
+ if "[" in stripped and "]" in stripped:
109
+ prefix, suffix = stripped.split("[", 1)
110
+ role_part, remainder = suffix.split("]", 1)
111
+ role = role_part.strip().lower()
112
+ if role in {"user", "assistant", "system", "tool"}:
113
+ return role, remainder.strip()
114
+ return None, stripped
115
+
116
+
117
+ def _count_lines(path: Path) -> int:
118
+ if not path.exists() or not path.is_file():
119
+ return 0
120
+ with path.open("r", encoding="utf-8", errors="ignore") as handle:
121
+ return sum(1 for _ in handle)
122
+
123
+
124
+
125
def _append_transcript(transcripts_dir: Path, timestamp: str, role: str, text: str) -> tuple[Path, int]:
    """Append one "<timestamp> [role] text" line to the per-day transcript log.

    The log file is named after the date portion of *timestamp* (falling back
    to today's date when the timestamp has no "T" separator). Returns the log
    path and the 1-based line number just written.
    """
    if "T" in timestamp:
        day = timestamp.split("T")[0]
    else:
        day = time.strftime("%Y-%m-%d")
    transcripts_dir.mkdir(parents=True, exist_ok=True)
    log_path = transcripts_dir / f"{day}.log"
    # Line number is computed before appending, so it points at the new line.
    written_at = _count_lines(log_path) + 1
    entry = f"{timestamp} [{role}] {text}\n"
    with log_path.open("a", encoding="utf-8") as sink:
        sink.write(entry)
    return log_path, written_at
133
+
134
+
135
def watch_forever() -> None:
    """Poll transcript logs and OpenClaw session JSONL files forever,
    forwarding turns and batched content to the ocmemog sidecar.

    Two sources are watched each cycle:
      1) plain transcript ``*.log`` files (newest file wins), whose lines are
         posted individually as turns and buffered for batched ingest;
      2) OpenClaw session ``*.jsonl`` files, whose "message" entries are
         normalized, mirrored into the transcript log, posted as turns, and
         buffered for batched ingest.
    Buffers flush when they reach OCMEMOG_INGEST_BATCH_MAX entries or when
    OCMEMOG_INGEST_BATCH_SECONDS have elapsed. All configuration comes from
    OCMEMOG_* environment variables; all HTTP posting is best-effort.
    """
    # --- configuration from environment -----------------------------------
    transcript_path = os.environ.get("OCMEMOG_TRANSCRIPT_PATH", "").strip()
    transcript_dir = os.environ.get("OCMEMOG_TRANSCRIPT_DIR", "").strip()
    glob_pattern = os.environ.get("OCMEMOG_TRANSCRIPT_GLOB", DEFAULT_GLOB)
    session_dir = os.environ.get("OCMEMOG_SESSION_DIR", "").strip()
    session_glob = os.environ.get("OCMEMOG_SESSION_GLOB", DEFAULT_SESSION_GLOB)

    endpoint = os.environ.get("OCMEMOG_INGEST_ENDPOINT", DEFAULT_ENDPOINT)
    # Default turn endpoint is derived from the ingest endpoint by rewriting
    # either the async or the sync ingest path to the turn-ingest path.
    turn_endpoint = os.environ.get("OCMEMOG_TURN_INGEST_ENDPOINT", endpoint.replace("/memory/ingest_async", "/conversation/ingest_turn").replace("/memory/ingest", "/conversation/ingest_turn"))
    poll_seconds = float(os.environ.get("OCMEMOG_TRANSCRIPT_POLL_SECONDS", "30"))
    batch_seconds = float(os.environ.get("OCMEMOG_INGEST_BATCH_SECONDS", "30"))
    batch_max = int(os.environ.get("OCMEMOG_INGEST_BATCH_MAX", "25"))
    # When true (default), skip content already present at startup.
    start_at_end = os.environ.get("OCMEMOG_TRANSCRIPT_START_AT_END", "true").lower() in {"1", "true", "yes"}

    kind = os.environ.get("OCMEMOG_INGEST_KIND", "memory").strip() or "memory"
    source = os.environ.get("OCMEMOG_INGEST_SOURCE", "transcript").strip() or "transcript"
    memory_type = os.environ.get("OCMEMOG_INGEST_MEMORY_TYPE", "knowledge").strip() or "knowledge"

    reinforce_enabled = os.environ.get("OCMEMOG_REINFORCE_SENTIMENT", "true").lower() in {"1", "true", "yes"}
    reinforce_endpoint = os.environ.get(
        "OCMEMOG_REINFORCE_ENDPOINT", "http://127.0.0.1:17890/memory/reinforce"
    ).strip()
    pos_terms = os.environ.get("OCMEMOG_REINFORCE_POSITIVE", ",".join(DEFAULT_REINFORCE_POSITIVE))
    neg_terms = os.environ.get("OCMEMOG_REINFORCE_NEGATIVE", ",".join(DEFAULT_REINFORCE_NEGATIVE))
    positive_terms = [t.strip().lower() for t in pos_terms.split(",") if t.strip()]
    negative_terms = [t.strip().lower() for t in neg_terms.split(",") if t.strip()]

    # --- watch targets (explicit env paths, else OpenClaw defaults) --------
    if transcript_path or transcript_dir:
        transcript_target = Path(transcript_path or transcript_dir).expanduser().resolve()
    else:
        transcript_target = (Path.home() / ".openclaw" / "workspace" / "memory" / "transcripts").expanduser().resolve()

    if session_dir:
        session_target = Path(session_dir).expanduser().resolve()
    else:
        session_target = (Path.home() / ".openclaw" / "agents" / "main" / "sessions").expanduser().resolve()

    # --- per-source cursor state -------------------------------------------
    current_file: Optional[Path] = None  # transcript file currently tailed
    position = 0                         # byte offset into current_file
    current_line_number = 0              # 1-based line counter for offsets
    session_file: Optional[Path] = None  # session JSONL currently tailed
    session_pos = 0                      # byte offset into session_file

    # Pending lines awaiting a batched ingest, plus metadata describing the
    # transcript span each buffer covers.
    transcript_buffer: list[str] = []
    session_buffer: list[str] = []
    transcript_last_path: Optional[Path] = None
    session_last_path: Optional[Path] = None
    transcript_last_timestamp: Optional[str] = None
    session_last_timestamp: Optional[str] = None
    transcript_start_line: Optional[int] = None
    transcript_end_line: Optional[int] = None
    session_start_line: Optional[int] = None
    session_end_line: Optional[int] = None
    last_transcript_flush = time.time()
    last_session_flush = time.time()

    def _flush_buffer(
        buffer: list[str],
        *,
        source_label: str,
        transcript_path: Optional[Path],
        timestamp: Optional[str],
        start_line: Optional[int],
        end_line: Optional[int],
    ) -> None:
        """Post the buffered lines as one ingest payload and clear the buffer.

        Transcript path/offset fields are attached only when known, so the
        sidecar can trace the ingested content back to its source span.
        """
        if not buffer:
            return
        payload = {
            "content": "\n".join(buffer),
            "kind": kind,
            "memory_type": memory_type,
            "source": source_label,
        }
        if transcript_path is not None:
            payload["transcript_path"] = str(transcript_path)
        if start_line is not None:
            payload["transcript_offset"] = start_line
        if end_line is not None:
            payload["transcript_end_offset"] = end_line
        if timestamp:
            # "YYYY-MM-DDTHH:MM:SS..." -> "YYYY-MM-DD HH:MM:SS"
            payload["timestamp"] = timestamp.replace("T", " ")[:19]
        _post_ingest(endpoint, payload)
        buffer.clear()

    def _maybe_reinforce(text: str, timestamp: str) -> None:
        """Post a reinforcement signal when *text* matches a sentiment term.

        Positive terms win over negative ones (checked first); both payloads
        share the same shape and differ only in outcome/reward_score.
        """
        if not reinforce_enabled:
            return
        lowered = text.lower()
        if any(term in lowered for term in positive_terms):
            payload = {
                "task_id": f"feedback:{timestamp}",
                "outcome": "positive feedback",
                "reward_score": 1.0,
                "confidence": 1.0,
                "memory_reference": "feedback:chat",
                "experience_type": "reinforcement",
                "source_module": "sentiment",
                "note": text,
            }
            _post_json(reinforce_endpoint, payload)
        elif any(term in lowered for term in negative_terms):
            payload = {
                "task_id": f"feedback:{timestamp}",
                "outcome": "negative feedback",
                "reward_score": 0.0,
                "confidence": 1.0,
                "memory_reference": "feedback:chat",
                "experience_type": "reinforcement",
                "source_module": "sentiment",
                "note": text,
            }
            _post_json(reinforce_endpoint, payload)

    while True:
        # 1) Watch transcript logs (if any)
        latest = _pick_latest(transcript_target, glob_pattern)
        if latest is not None:
            if current_file is None or latest != current_file:
                # New (or rotated) file: reset cursors; optionally jump to the
                # end so pre-existing content is not re-ingested.
                current_file = latest
                position = 0
                current_line_number = 0
                if start_at_end:
                    try:
                        position = current_file.stat().st_size
                    except Exception:
                        position = 0
                    try:
                        current_line_number = _count_lines(current_file)
                    except Exception:
                        current_line_number = 0

            try:
                with current_file.open("r", encoding="utf-8", errors="ignore") as handle:
                    handle.seek(position)
                    for line in handle:
                        text = line.rstrip("\n")
                        current_line_number += 1
                        if not text.strip():
                            continue
                        transcript_buffer.append(text)
                        transcript_last_path = current_file
                        if transcript_start_line is None:
                            transcript_start_line = current_line_number
                        transcript_end_line = current_line_number
                        # First whitespace-delimited token is treated as the
                        # line's timestamp (matches _append_transcript output).
                        timestamp_value = None
                        if text and " " in text:
                            timestamp_value = text.split(" ", 1)[0]
                        transcript_last_timestamp = timestamp_value
                        role, turn_text = _parse_transcript_line(text)
                        if role and turn_text:
                            _post_turn(
                                turn_endpoint,
                                {
                                    "role": role,
                                    "content": turn_text,
                                    "source": source,
                                    "transcript_path": str(current_file),
                                    "transcript_offset": current_line_number,
                                    "transcript_end_offset": current_line_number,
                                    "timestamp": timestamp_value.replace("T", " ")[:19] if timestamp_value else None,
                                },
                            )
                        if len(transcript_buffer) >= batch_max:
                            _flush_buffer(
                                transcript_buffer,
                                source_label=source,
                                transcript_path=transcript_last_path,
                                timestamp=transcript_last_timestamp,
                                start_line=transcript_start_line,
                                end_line=transcript_end_line,
                            )
                            transcript_start_line = None
                            transcript_end_line = None
                            last_transcript_flush = time.time()
                    # Remember where we stopped; next cycle resumes here.
                    position = handle.tell()
            except Exception:
                # Best-effort tailing: a transient read error (rotation,
                # permissions) is retried on the next poll cycle.
                pass

        # 2) Watch OpenClaw session jsonl (verbatim capture)
        session_latest = _pick_latest(session_target, session_glob)
        if session_latest is not None:
            if session_file is None or session_latest != session_file:
                session_file = session_latest
                session_pos = 0
                if start_at_end:
                    try:
                        session_pos = session_file.stat().st_size
                    except Exception:
                        session_pos = 0
            try:
                with session_file.open("r", encoding="utf-8", errors="ignore") as handle:
                    handle.seek(session_pos)
                    for line in handle:
                        try:
                            entry = json.loads(line)
                        except Exception:
                            # Skip partial/corrupt JSONL lines.
                            continue
                        if entry.get("type") != "message":
                            continue
                        msg = entry.get("message") or {}
                        role = msg.get("role")
                        if role not in {"user", "assistant"}:
                            continue
                        content = msg.get("content")
                        # NOTE(review): assumes OpenClaw's message content is
                        # either a string or a list of {"type": "text", ...}
                        # parts — confirm against the session schema.
                        if isinstance(content, list):
                            text = next((c.get("text") for c in content if c.get("type") == "text"), "")
                        else:
                            text = content or ""
                        text = str(text).strip()
                        # Extract metadata before the user-text trimming below
                        # discards the surrounding prompt scaffolding.
                        conversation_info = _extract_conversation_info(text)
                        if role == "user":
                            text = _extract_user_text(text)
                        # Transcript lines are single-line records.
                        text = text.replace("\n", " ").strip()
                        if not text:
                            continue
                        timestamp = entry.get("timestamp") or time.strftime("%Y-%m-%dT%H:%M:%S")
                        if role == "user":
                            _maybe_reinforce(text, timestamp)
                        # Mirror the turn into the per-day transcript log so
                        # ingest offsets reference a stable file.
                        transcript_path, transcript_line_no = _append_transcript(transcript_target, timestamp, role, text)
                        session_id = session_file.stem if session_file is not None else None
                        message_id = entry.get("id") or conversation_info.get("message_id")
                        conversation_id = conversation_info.get("conversation_id") or session_id
                        thread_id = conversation_info.get("thread_id") or session_id
                        _post_turn(
                            turn_endpoint,
                            {
                                "role": role,
                                "content": text,
                                "conversation_id": conversation_id,
                                "session_id": session_id,
                                "thread_id": thread_id,
                                "message_id": message_id,
                                "source": "session",
                                "transcript_path": str(transcript_path),
                                "transcript_offset": transcript_line_no,
                                "transcript_end_offset": transcript_line_no,
                                "timestamp": timestamp.replace("T", " ")[:19],
                                "metadata": {
                                    "parent_message_id": entry.get("parentId"),
                                },
                            },
                        )
                        session_buffer.append(f"{timestamp} [{role}] {text}")
                        session_last_path = transcript_path
                        session_last_timestamp = timestamp
                        if session_start_line is None:
                            session_start_line = transcript_line_no
                        session_end_line = transcript_line_no
                        if len(session_buffer) >= batch_max:
                            _flush_buffer(
                                session_buffer,
                                source_label="session",
                                transcript_path=session_last_path,
                                timestamp=session_last_timestamp,
                                start_line=session_start_line,
                                end_line=session_end_line,
                            )
                            session_start_line = None
                            session_end_line = None
                            last_session_flush = time.time()
                    session_pos = handle.tell()
            except Exception:
                pass

        # Time-based flush for whatever is still buffered in either source.
        now = time.time()
        if transcript_buffer and (now - last_transcript_flush) >= batch_seconds:
            _flush_buffer(
                transcript_buffer,
                source_label=source,
                transcript_path=transcript_last_path,
                timestamp=transcript_last_timestamp,
                start_line=transcript_start_line,
                end_line=transcript_end_line,
            )
            transcript_start_line = None
            transcript_end_line = None
            last_transcript_flush = now
        if session_buffer and (now - last_session_flush) >= batch_seconds:
            _flush_buffer(
                session_buffer,
                source_label="session",
                transcript_path=session_last_path,
                timestamp=session_last_timestamp,
                start_line=session_start_line,
                end_line=session_end_line,
            )
            session_start_line = None
            session_end_line = None
            last_session_flush = now

        time.sleep(poll_seconds)
@@ -0,0 +1,18 @@
1
+ {
2
+ "id": "memory-ocmemog",
3
+ "kind": "memory",
4
+ "configSchema": {
5
+ "type": "object",
6
+ "additionalProperties": false,
7
+ "properties": {
8
+ "endpoint": {
9
+ "type": "string",
10
+ "description": "Optional HTTP endpoint for the ocmemog memory sidecar."
11
+ },
12
+ "timeoutMs": {
13
+ "type": "number",
14
+ "description": "Request timeout in milliseconds."
15
+ }
16
+ }
17
+ }
18
+ }
package/package.json ADDED
@@ -0,0 +1,60 @@
1
+ {
2
+ "name": "@simbimbo/memory-ocmemog",
3
+ "version": "0.1.4",
4
+ "description": "Advanced OpenClaw memory plugin with durable recall, transcript-backed continuity, and sidecar APIs",
5
+ "license": "MIT",
6
+ "repository": {
7
+ "type": "git",
8
+ "url": "https://github.com/simbimbo/ocmemog.git"
9
+ },
10
+ "homepage": "https://github.com/simbimbo/ocmemog",
11
+ "keywords": [
12
+ "openclaw",
13
+ "plugin",
14
+ "memory",
15
+ "ocmemog",
16
+ "sidecar"
17
+ ],
18
+ "type": "module",
19
+ "files": [
20
+ "index.ts",
21
+ "openclaw.plugin.json",
22
+ "ocmemog/**",
23
+ "brain/**",
24
+ "scripts/**",
25
+ "docs/**",
26
+ "README.md",
27
+ "CHANGELOG.md",
28
+ "LICENSE",
29
+ "!**/__pycache__/**",
30
+ "!**/*.pyc",
31
+ "!reports/**",
32
+ "!tests/**",
33
+ "!REVIEW.md"
34
+ ],
35
+ "dependencies": {
36
+ "@sinclair/typebox": "^0.34.41"
37
+ },
38
+ "devDependencies": {
39
+ "openclaw": "workspace:*"
40
+ },
41
+ "peerDependencies": {
42
+ "openclaw": ">=2026.3.11"
43
+ },
44
+ "peerDependenciesMeta": {
45
+ "openclaw": {
46
+ "optional": true
47
+ }
48
+ },
49
+ "scripts": {
50
+ "sidecar": "./scripts/ocmemog-sidecar.sh"
51
+ },
52
+ "openclaw": {
53
+ "extensions": [
54
+ "./index.ts"
55
+ ]
56
+ },
57
+ "publishConfig": {
58
+ "access": "public"
59
+ }
60
+ }