@simbimbo/memory-ocmemog 0.1.5 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
1
1
  # Changelog
2
2
 
3
+ ## 0.1.6 — 2026-03-19
4
+
5
+ Port-separation and publish-stability follow-up.
6
+
7
+ ### Highlights
8
+ - Split ocmemog sidecar onto dedicated loopback port `17891` to avoid collision with the OpenClaw gateway/dashboard on `17890`
9
+ - Restored the plain realtime dashboard on `/dashboard` and fixed the `local_html` template crash
10
+ - Updated plugin/runtime defaults, scripts, and documentation to use the dedicated sidecar endpoint on `17891`
11
+ - Added governance-retrieval and governance-policy hardening plus expanded regression coverage for duplicate, contradiction, supersession, queue, audit, rollback, and auto-resolve flows
12
+ - Aligned package/version metadata across npm, Python, and FastAPI surfaces
13
+
3
14
  ## 0.1.5 — 2026-03-18
4
15
 
5
16
  Repair and hardening follow-up after the 0.1.4 publish.
package/README.md CHANGED
@@ -32,7 +32,7 @@ pip install -r requirements.txt
32
32
  ./scripts/ocmemog-sidecar.sh
33
33
 
34
34
  # then open
35
- # http://127.0.0.1:17890/dashboard
35
+ # http://127.0.0.1:17891/dashboard
36
36
  ```
37
37
 
38
38
  ## Optional: transcript watcher (auto-ingest)
@@ -45,8 +45,8 @@ export OCMEMOG_TRANSCRIPT_DIR="$HOME/.openclaw/workspace/memory/transcripts"
45
45
 
46
46
  Default bind:
47
47
 
48
- - endpoint: `http://127.0.0.1:17890`
49
- - health: `http://127.0.0.1:17890/healthz`
48
+ - endpoint: `http://127.0.0.1:17891`
49
+ - health: `http://127.0.0.1:17891/healthz`
50
50
 
51
51
  ## Continuity proof / benchmark harness
52
52
 
@@ -193,7 +193,7 @@ plugins:
193
193
  memory-ocmemog:
194
194
  enabled: true
195
195
  config:
196
- endpoint: http://127.0.0.1:17890
196
+ endpoint: http://127.0.0.1:17891
197
197
  timeoutMs: 30000
198
198
  ```
199
199
 
@@ -33,13 +33,43 @@ def _infer_ollama(prompt: str, model: str | None = None) -> dict[str, str]:
33
33
  return {"status": "ok", "output": str(output).strip()}
34
34
 
35
35
 
36
+ def _looks_like_ollama_model(name: str) -> bool:
37
+ if not name:
38
+ return False
39
+ lowered = name.strip().lower()
40
+ if lowered.startswith("ollama:"):
41
+ return True
42
+ if "/" in lowered:
43
+ return False
44
+ return ":" in lowered
45
+
46
+
47
+ def stats() -> dict[str, object]:
48
+ materialized_local = int(_LOCAL_INFER_STATS.get("local_success", 0)) + int(_LOCAL_INFER_STATS.get("cache_hits", 0))
49
+ est_prompt_tokens_saved = materialized_local * _AVG_PROMPT_TOKENS_SAVED
50
+ est_completion_tokens_saved = materialized_local * _AVG_COMPLETION_TOKENS_SAVED
51
+ est_cost_saved = (
52
+ (est_prompt_tokens_saved / 1000.0) * _EST_FRONTIER_INPUT_COST_PER_1K
53
+ + (est_completion_tokens_saved / 1000.0) * _EST_FRONTIER_OUTPUT_COST_PER_1K
54
+ )
55
+ return {
56
+ "cache_entries": len(_LOCAL_INFER_CACHE),
57
+ "warm_models": sorted(_MODEL_WARM_STATE.keys()),
58
+ "frontier_calls_avoided_est": materialized_local,
59
+ "prompt_tokens_saved_est": est_prompt_tokens_saved,
60
+ "completion_tokens_saved_est": est_completion_tokens_saved,
61
+ "cost_saved_usd_est": round(est_cost_saved, 4),
62
+ **{k: int(v) for k, v in _LOCAL_INFER_STATS.items()},
63
+ }
64
+
65
+
36
66
  def infer(prompt: str, provider_name: str | None = None) -> dict[str, str]:
37
67
  if not isinstance(prompt, str) or not prompt.strip():
38
68
  return {"status": "error", "error": "empty_prompt"}
39
69
 
40
70
  use_ollama = os.environ.get("OCMEMOG_USE_OLLAMA", "").lower() in {"1", "true", "yes"}
41
71
  model_override = provider_name or config.OCMEMOG_MEMORY_MODEL
42
- if use_ollama or model_override.startswith("ollama:"):
72
+ if use_ollama or _looks_like_ollama_model(model_override):
43
73
  model = model_override.split(":", 1)[-1] if model_override.startswith("ollama:") else model_override
44
74
  return _infer_ollama(prompt, model)
45
75