@simbimbo/memory-ocmemog 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +59 -0
- package/LICENSE +21 -0
- package/README.md +223 -0
- package/brain/__init__.py +1 -0
- package/brain/runtime/__init__.py +13 -0
- package/brain/runtime/config.py +21 -0
- package/brain/runtime/inference.py +83 -0
- package/brain/runtime/instrumentation.py +17 -0
- package/brain/runtime/memory/__init__.py +13 -0
- package/brain/runtime/memory/api.py +152 -0
- package/brain/runtime/memory/artifacts.py +33 -0
- package/brain/runtime/memory/candidate.py +89 -0
- package/brain/runtime/memory/context_builder.py +87 -0
- package/brain/runtime/memory/conversation_state.py +1825 -0
- package/brain/runtime/memory/distill.py +198 -0
- package/brain/runtime/memory/embedding_engine.py +94 -0
- package/brain/runtime/memory/freshness.py +91 -0
- package/brain/runtime/memory/health.py +42 -0
- package/brain/runtime/memory/integrity.py +170 -0
- package/brain/runtime/memory/interaction_memory.py +57 -0
- package/brain/runtime/memory/memory_consolidation.py +60 -0
- package/brain/runtime/memory/memory_gate.py +38 -0
- package/brain/runtime/memory/memory_graph.py +54 -0
- package/brain/runtime/memory/memory_links.py +109 -0
- package/brain/runtime/memory/memory_salience.py +235 -0
- package/brain/runtime/memory/memory_synthesis.py +33 -0
- package/brain/runtime/memory/memory_taxonomy.py +35 -0
- package/brain/runtime/memory/person_identity.py +83 -0
- package/brain/runtime/memory/person_memory.py +138 -0
- package/brain/runtime/memory/pondering_engine.py +577 -0
- package/brain/runtime/memory/promote.py +237 -0
- package/brain/runtime/memory/provenance.py +356 -0
- package/brain/runtime/memory/reinforcement.py +73 -0
- package/brain/runtime/memory/retrieval.py +153 -0
- package/brain/runtime/memory/semantic_search.py +66 -0
- package/brain/runtime/memory/sentiment_memory.py +67 -0
- package/brain/runtime/memory/store.py +400 -0
- package/brain/runtime/memory/tool_catalog.py +68 -0
- package/brain/runtime/memory/unresolved_state.py +93 -0
- package/brain/runtime/memory/vector_index.py +270 -0
- package/brain/runtime/model_roles.py +11 -0
- package/brain/runtime/model_router.py +22 -0
- package/brain/runtime/providers.py +59 -0
- package/brain/runtime/security/__init__.py +3 -0
- package/brain/runtime/security/redaction.py +14 -0
- package/brain/runtime/state_store.py +25 -0
- package/brain/runtime/storage_paths.py +41 -0
- package/docs/architecture/memory.md +118 -0
- package/docs/release-checklist.md +34 -0
- package/docs/reports/ocmemog-code-audit-2026-03-14.md +155 -0
- package/docs/usage.md +223 -0
- package/index.ts +726 -0
- package/ocmemog/__init__.py +1 -0
- package/ocmemog/sidecar/__init__.py +1 -0
- package/ocmemog/sidecar/app.py +1068 -0
- package/ocmemog/sidecar/compat.py +74 -0
- package/ocmemog/sidecar/transcript_watcher.py +425 -0
- package/openclaw.plugin.json +18 -0
- package/package.json +60 -0
- package/scripts/install-ocmemog.sh +277 -0
- package/scripts/launchagents/com.openclaw.ocmemog.guard.plist +22 -0
- package/scripts/launchagents/com.openclaw.ocmemog.ponder.plist +22 -0
- package/scripts/launchagents/com.openclaw.ocmemog.sidecar.plist +27 -0
- package/scripts/ocmemog-context.sh +15 -0
- package/scripts/ocmemog-continuity-benchmark.py +178 -0
- package/scripts/ocmemog-demo.py +122 -0
- package/scripts/ocmemog-failover-test.sh +17 -0
- package/scripts/ocmemog-guard.sh +11 -0
- package/scripts/ocmemog-install.sh +93 -0
- package/scripts/ocmemog-load-test.py +106 -0
- package/scripts/ocmemog-ponder.sh +30 -0
- package/scripts/ocmemog-recall-test.py +58 -0
- package/scripts/ocmemog-reindex-vectors.py +14 -0
- package/scripts/ocmemog-reliability-soak.py +177 -0
- package/scripts/ocmemog-sidecar.sh +46 -0
- package/scripts/ocmemog-soak-report.py +58 -0
- package/scripts/ocmemog-soak-test.py +44 -0
- package/scripts/ocmemog-test-rig.py +345 -0
- package/scripts/ocmemog-transcript-append.py +45 -0
- package/scripts/ocmemog-transcript-watcher.py +8 -0
- package/scripts/ocmemog-transcript-watcher.sh +7 -0
package/CHANGELOG.md
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
# Changelog
|
|
2
|
+
|
|
3
|
+
## 0.1.4 — 2026-03-18
|
|
4
|
+
|
|
5
|
+
Package ownership + runtime safety release.
|
|
6
|
+
|
|
7
|
+
### Highlights
|
|
8
|
+
- Renamed npm package to `@simbimbo/memory-ocmemog` so it can be published under Steven's own npm scope
|
|
9
|
+
- Updated installer/docs to use the `@simbimbo` package while keeping plugin id `memory-ocmemog` unchanged
|
|
10
|
+
- Preserved the OpenClaw runtime safety hardening from 0.1.3 (sync-safe ingest + auto-hydration opt-in guard)
|
|
11
|
+
|
|
12
|
+
## 0.1.3 — 2026-03-18
|
|
13
|
+
|
|
14
|
+
OpenClaw runtime safety hardening release.
|
|
15
|
+
|
|
16
|
+
### Highlights
|
|
17
|
+
- Made `before_message_write` continuity ingest sync-safe for OpenClaw's synchronous hook contract
|
|
18
|
+
- Disabled automatic prompt hydration by default unless `OCMEMOG_AUTO_HYDRATION=true` is explicitly set
|
|
19
|
+
- Kept sidecar-backed memory search/ingest/checkpoint flows active while guarding against context-window blowups from prepended continuity wrappers
|
|
20
|
+
- Added startup logging so hosts can see when auto hydration is intentionally disabled
|
|
21
|
+
|
|
22
|
+
## 0.1.2 — 2026-03-17
|
|
23
|
+
|
|
24
|
+
Continuity hydration hardening release.
|
|
25
|
+
|
|
26
|
+
### Highlights
|
|
27
|
+
- Prevented recursive re-ingest of auto-hydrated continuity wrappers into conversational state
|
|
28
|
+
- Kept short confirmation replies like `ok`, `yes`, and `sure` compact in `latest_user_ask` / `latest_user_intent`
|
|
29
|
+
- Changed hydration to prefer unresolved assistant commitments only
|
|
30
|
+
- Ignored oversized/noisy checkpoint summaries during hydration selection
|
|
31
|
+
Normalized sender envelopes, reply tags, and multi-timestamp wrapper text before they could pollute state
|
|
32
|
+
- Added self-healing cleanup for legacy poisoned turns/checkpoints during refresh
|
|
33
|
+
- Hardened `memory_links` unique-index setup against duplicate legacy rows
|
|
34
|
+
- Added `.DS_Store` ignore hygiene for the repo
|
|
35
|
+
|
|
36
|
+
### Included commits
|
|
37
|
+
- `231cfcb` — fix: harden continuity hydration against recursive contamination
|
|
38
|
+
- `3db6891` — fix: keep short reply intent compact in hydration state
|
|
39
|
+
- `fe49663` — fix: prefer unresolved assistant commitments in hydration
|
|
40
|
+
- `74a44fc` — fix: drop oversized checkpoint summaries from hydration
|
|
41
|
+
- `4b89fc1` — chore: ignore macOS Finder metadata files
|
|
42
|
+
|
|
43
|
+
## 0.1.1 — 2026-03-16
|
|
44
|
+
|
|
45
|
+
Publish-prep release.
|
|
46
|
+
|
|
47
|
+
### Highlights
|
|
48
|
+
- Cleaned package metadata for public release
|
|
49
|
+
- Corrected repository and homepage links to the actual `simbimbo/ocmemog` repo
|
|
50
|
+
- Removed "scaffold" positioning from release-facing package metadata
|
|
51
|
+
- Tightened README wording around current status and install flow
|
|
52
|
+
- Excluded Python cache artifacts, tests, reports, and review notes from the published package
|
|
53
|
+
- Verified clean package output with `npm pack --dry-run`
|
|
54
|
+
- Re-ran continuity benchmark with passing score (`overall_score: 1.0`)
|
|
55
|
+
|
|
56
|
+
### Intended publish command
|
|
57
|
+
```bash
|
|
58
|
+
clawhub publish . --slug memory-ocmemog --name "ocmemog" --version 0.1.1 --changelog "Initial public release: durable memory, transcript-backed continuity, packaging cleanup, and publish-ready metadata"
|
|
59
|
+
```
|
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 OpenClaw
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
# ocmemog
|
|
2
|
+
|
|
3
|
+
**ocmemog** is an advanced memory engine for OpenClaw that combines durable long-term memory, transcript-backed continuity, conversation hydration, checkpoint expansion, and pondering inside a sidecar-based plugin architecture.
|
|
4
|
+
|
|
5
|
+
It is designed to go beyond simple memory search by providing:
|
|
6
|
+
- **durable memory and semantic retrieval**
|
|
7
|
+
- **lossless-style conversation continuity**
|
|
8
|
+
- **checkpointing, branch-aware hydration, and turn expansion**
|
|
9
|
+
- **transcript ingestion with anchored context recovery**
|
|
10
|
+
- **pondering and reflection generation**
|
|
11
|
+
|
|
12
|
+
Architecture at a glance:
|
|
13
|
+
- **OpenClaw plugin (`index.ts`)** handles tools and hook integration
|
|
14
|
+
- **FastAPI sidecar (`ocmemog/sidecar/`)** exposes memory and continuity APIs
|
|
15
|
+
- **SQLite-backed runtime (`brain/runtime/memory/`)** powers storage, hydration, checkpoints, salience ranking, and pondering
|
|
16
|
+
|
|
17
|
+
## Repo layout
|
|
18
|
+
|
|
19
|
+
- `openclaw.plugin.json`, `index.ts`, `package.json`: OpenClaw plugin package and manifest.
|
|
20
|
+
- `ocmemog/sidecar/`: FastAPI sidecar with `/memory/search` and `/memory/get`.
|
|
21
|
+
- `brain/runtime/memory/`: copied brAIn memory package.
|
|
22
|
+
- `brain/runtime/`: compatibility shims for state store, instrumentation, redaction, storage paths, and a few placeholder runtime modules needed for importability.
|
|
23
|
+
- `scripts/ocmemog-sidecar.sh`: convenience launcher.
|
|
24
|
+
|
|
25
|
+
## Run the sidecar
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
cd /path/to/ocmemog
|
|
29
|
+
python3 -m venv .venv
|
|
30
|
+
. .venv/bin/activate
|
|
31
|
+
pip install -r requirements.txt
|
|
32
|
+
./scripts/ocmemog-sidecar.sh
|
|
33
|
+
|
|
34
|
+
# then open
|
|
35
|
+
# http://127.0.0.1:17890/dashboard
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
## Optional: transcript watcher (auto-ingest)
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
# defaults to ~/.openclaw/workspace/memory/transcripts if not set
|
|
42
|
+
export OCMEMOG_TRANSCRIPT_DIR="$HOME/.openclaw/workspace/memory/transcripts"
|
|
43
|
+
./scripts/ocmemog-transcript-watcher.sh
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
Default bind:
|
|
47
|
+
|
|
48
|
+
- endpoint: `http://127.0.0.1:17890`
|
|
49
|
+
- health: `http://127.0.0.1:17890/healthz`
|
|
50
|
+
|
|
51
|
+
## Continuity proof / benchmark harness
|
|
52
|
+
|
|
53
|
+
Run the fixture-driven continuity benchmark:
|
|
54
|
+
|
|
55
|
+
```bash
|
|
56
|
+
cd /path/to/ocmemog
|
|
57
|
+
./.venv/bin/python scripts/ocmemog-continuity-benchmark.py \
|
|
58
|
+
--fixture tests/fixtures/continuity_benchmark.json \
|
|
59
|
+
--report reports/continuity-benchmark-latest.json
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
This exercises:
|
|
63
|
+
- restart/recovery hydration from persisted SQLite state
|
|
64
|
+
- long-thread + ambiguous reply continuity
|
|
65
|
+
- salience-ranked checkpoint expansion
|
|
66
|
+
- salience-ranked turn expansion
|
|
67
|
+
|
|
68
|
+
A passing run writes a JSON report with per-scenario checks and an `overall_score` that must meet the configured `continuity_bar`.
|
|
69
|
+
|
|
70
|
+
Optional environment variables:
|
|
71
|
+
|
|
72
|
+
- `OCMEMOG_HOST`
|
|
73
|
+
- `OCMEMOG_PORT`
|
|
74
|
+
- `OCMEMOG_STATE_DIR` (defaults to `<repo>/.ocmemog-state`)
|
|
75
|
+
- `OCMEMOG_DB_PATH`
|
|
76
|
+
- `OCMEMOG_MEMORY_MODEL` (default: `gpt-4o-mini`)
|
|
77
|
+
- `OCMEMOG_OPENAI_API_KEY` (required for model-backed distill)
|
|
78
|
+
- `OCMEMOG_OPENAI_API_BASE` (default: `https://api.openai.com/v1`)
|
|
79
|
+
- `OCMEMOG_OPENAI_EMBED_MODEL` (default: `text-embedding-3-small`)
|
|
80
|
+
- `BRAIN_EMBED_MODEL_LOCAL` (`simple` by default)
|
|
81
|
+
- `BRAIN_EMBED_MODEL_PROVIDER` (`openai` to enable provider embeddings)
|
|
82
|
+
- `OCMEMOG_TRANSCRIPT_WATCHER` (`true` to auto-start transcript watcher inside the sidecar)
|
|
83
|
+
- `OCMEMOG_TRANSCRIPT_ROOTS` (comma-separated allowed roots for transcript context retrieval; default: `~/.openclaw/workspace/memory`)
|
|
84
|
+
- `OCMEMOG_API_TOKEN` (optional; if set, requests must include `x-ocmemog-token` or `Authorization: Bearer ...`)
|
|
85
|
+
- `OCMEMOG_AUTO_HYDRATION` (`true` to re-enable prompt-time continuity prepending; defaults to `false` as a safety guard until the host runtime is verified not to persist prepended context into session history)
|
|
86
|
+
- `OCMEMOG_USE_OLLAMA` (`true` to use Ollama for distill/inference)
|
|
87
|
+
- `OCMEMOG_OLLAMA_HOST` (default: `http://127.0.0.1:11434`)
|
|
88
|
+
- `OCMEMOG_OLLAMA_MODEL` (default: `phi3:latest`)
|
|
89
|
+
- `OCMEMOG_OLLAMA_EMBED_MODEL` (default: `nomic-embed-text:latest`)
|
|
90
|
+
- `OCMEMOG_PROMOTION_THRESHOLD` (default: `0.5`)
|
|
91
|
+
- `OCMEMOG_DEMOTION_THRESHOLD` (default: `0.2`)
|
|
92
|
+
- `OCMEMOG_PONDER_ENABLED` (default: `true`)
|
|
93
|
+
- `OCMEMOG_PONDER_MODEL` (default: `OCMEMOG_MEMORY_MODEL`)
|
|
94
|
+
- `OCMEMOG_LESSON_MINING_ENABLED` (default: `true`)
|
|
95
|
+
|
|
96
|
+
## Security
|
|
97
|
+
|
|
98
|
+
- Sidecar binds to **127.0.0.1** by default. Keep it local unless you add auth + firewall rules.
|
|
99
|
+
- If you expose the sidecar, set `OCMEMOG_API_TOKEN` and pass the header `x-ocmemog-token`.
|
|
100
|
+
|
|
101
|
+
## One-shot installer (macOS / local dev)
|
|
102
|
+
|
|
103
|
+
```bash
|
|
104
|
+
./scripts/install-ocmemog.sh
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
Optional target checkout directory:
|
|
108
|
+
|
|
109
|
+
```bash
|
|
110
|
+
./scripts/install-ocmemog.sh /custom/path/ocmemog
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
Optional prereq auto-install on macOS/Homebrew systems:
|
|
114
|
+
|
|
115
|
+
```bash
|
|
116
|
+
OCMEMOG_INSTALL_PREREQS=true ./scripts/install-ocmemog.sh
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
Quick help:
|
|
120
|
+
|
|
121
|
+
```bash
|
|
122
|
+
./scripts/install-ocmemog.sh --help
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
This installer will try to:
|
|
126
|
+
- clone/update the repo when a custom target directory is provided
|
|
127
|
+
- create `.venv`
|
|
128
|
+
- install Python requirements
|
|
129
|
+
- install/enable the OpenClaw plugin when the `openclaw` CLI is available
|
|
130
|
+
- install/load LaunchAgents via `scripts/ocmemog-install.sh`
|
|
131
|
+
- pull required local Ollama models when Ollama is already installed
|
|
132
|
+
- validate `/healthz`
|
|
133
|
+
|
|
134
|
+
Notes:
|
|
135
|
+
- If `OCMEMOG_INSTALL_PREREQS=true` and Homebrew is present, the installer will try to install missing `ollama` and `ffmpeg` automatically.
|
|
136
|
+
- If Ollama is not installed and prereq auto-install is off or unavailable, the installer warns and continues; local model support will remain unavailable until Ollama is installed.
|
|
137
|
+
- If package install is unavailable in the local OpenClaw build, the installer falls back to local-path plugin install.
|
|
138
|
+
- Advanced flags are available for local debugging/CI (`--skip-plugin-install`, `--skip-launchagents`, `--skip-model-pulls`, `--endpoint`, `--repo-url`).
|
|
139
|
+
|
|
140
|
+
## LaunchAgents (macOS)
|
|
141
|
+
|
|
142
|
+
Templates are included under `scripts/launchagents/`:
|
|
143
|
+
- `com.openclaw.ocmemog.sidecar.plist`
|
|
144
|
+
- `com.openclaw.ocmemog.ponder.plist`
|
|
145
|
+
- `com.openclaw.ocmemog.guard.plist`
|
|
146
|
+
|
|
147
|
+
You can load them with:
|
|
148
|
+
```bash
|
|
149
|
+
launchctl bootstrap gui/$UID scripts/launchagents/com.openclaw.ocmemog.sidecar.plist
|
|
150
|
+
launchctl bootstrap gui/$UID scripts/launchagents/com.openclaw.ocmemog.ponder.plist
|
|
151
|
+
launchctl bootstrap gui/$UID scripts/launchagents/com.openclaw.ocmemog.guard.plist
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
## Recent changes
|
|
155
|
+
|
|
156
|
+
### 0.1.4
|
|
157
|
+
|
|
158
|
+
Package ownership + runtime safety release:
|
|
159
|
+
- Publish package under `@simbimbo/memory-ocmemog` instead of the unauthorized `@openclaw` scope
|
|
160
|
+
- Keep `memory-ocmemog` as the plugin id for OpenClaw config and enable flows
|
|
161
|
+
- Make `before_message_write` ingest sync-safe for OpenClaw's synchronous hook contract
|
|
162
|
+
- Default auto prompt hydration to opt-in via `OCMEMOG_AUTO_HYDRATION=true`
|
|
163
|
+
- Preserve prior continuity self-healing and polluted-wrapper cleanup behavior
|
|
164
|
+
|
|
165
|
+
## Release prep / publish
|
|
166
|
+
|
|
167
|
+
Current intended ClawHub publish command:
|
|
168
|
+
|
|
169
|
+
```bash
|
|
170
|
+
clawhub publish . --slug memory-ocmemog --name "ocmemog" --version 0.1.4 --changelog "Package ownership fix: publish under @simbimbo scope plus runtime safety hardening for sync-safe ingest and auto-hydration guard"
|
|
171
|
+
```
|
|
172
|
+
|
|
173
|
+
## Install from npm (after publish)
|
|
174
|
+
|
|
175
|
+
```bash
|
|
176
|
+
openclaw plugins install @simbimbo/memory-ocmemog
|
|
177
|
+
openclaw plugins enable memory-ocmemog
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
## Enable in OpenClaw (local dev)
|
|
181
|
+
|
|
182
|
+
Add the plugin to your OpenClaw config. The key setting is selecting `memory-ocmemog` in the `memory` slot and pointing the plugin entry at this repo.
|
|
183
|
+
|
|
184
|
+
```yaml
|
|
185
|
+
plugins:
|
|
186
|
+
load:
|
|
187
|
+
paths:
|
|
188
|
+
- /path/to/ocmemog
|
|
189
|
+
slots:
|
|
190
|
+
memory: memory-ocmemog
|
|
191
|
+
entries:
|
|
192
|
+
memory-ocmemog:
|
|
193
|
+
enabled: true
|
|
194
|
+
config:
|
|
195
|
+
endpoint: http://127.0.0.1:17890
|
|
196
|
+
timeoutMs: 30000
|
|
197
|
+
```
|
|
198
|
+
|
|
199
|
+
Development install:
|
|
200
|
+
|
|
201
|
+
```bash
|
|
202
|
+
openclaw plugins install -l /path/to/ocmemog
|
|
203
|
+
openclaw plugins enable memory-ocmemog
|
|
204
|
+
```
|
|
205
|
+
|
|
206
|
+
If your local OpenClaw build also documents a separate `memory.backend` setting, keep that at its current default unless your build explicitly requires a plugin-backed override. The slot selection above is what activates this plugin.
|
|
207
|
+
|
|
208
|
+
## Current status
|
|
209
|
+
|
|
210
|
+
ocmemog is usable today for local OpenClaw installs that want a stronger memory layer with durable recall and transcript-backed continuity.
|
|
211
|
+
|
|
212
|
+
What is working now:
|
|
213
|
+
- Search/get against the local SQLite-backed memory store
|
|
214
|
+
- Transcript ingestion and anchored context recovery
|
|
215
|
+
- Continuity hydration, checkpoint expansion, and salience-ranked expansion flows
|
|
216
|
+
- Local sidecar deployment for macOS/OpenClaw development setups
|
|
217
|
+
|
|
218
|
+
Current limitations before broader public rollout:
|
|
219
|
+
- Some advanced inference- and embedding-dependent paths still depend on environment configuration and may degrade to simpler local behavior if provider access is unavailable
|
|
220
|
+
- Packaging and install UX are aimed primarily at power users and local developers today
|
|
221
|
+
- Public release/distribution metadata is still being tightened up
|
|
222
|
+
|
|
223
|
+
When a richer path is unavailable, the sidecar is designed to fail soft with explicit warnings rather than crash.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Local compatibility package for the copied brAIn memory runtime."""
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""Minimal runtime shims required by the copied brAIn memory package."""

from . import config, inference, instrumentation, model_roles, model_router, state_store, storage_paths

# Public surface of this shim package; keep this list in sync with the
# submodule import line above so `from brain.runtime import *` stays accurate.
__all__ = [
    "config",
    "inference",
    "instrumentation",
    "model_roles",
    "model_router",
    "state_store",
    "storage_paths",
]
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from __future__ import annotations

import os

# Embedding backend selection: local fallback model vs provider embeddings.
BRAIN_EMBED_MODEL_LOCAL = os.getenv("BRAIN_EMBED_MODEL_LOCAL", "simple")
BRAIN_EMBED_MODEL_PROVIDER = os.getenv("BRAIN_EMBED_MODEL_PROVIDER", "")

# OpenAI-compatible provider settings.
OCMEMOG_MEMORY_MODEL = os.getenv("OCMEMOG_MEMORY_MODEL", "gpt-4o-mini")
OCMEMOG_OPENAI_API_BASE = os.getenv("OCMEMOG_OPENAI_API_BASE", "https://api.openai.com/v1")
OCMEMOG_OPENAI_EMBED_MODEL = os.getenv("OCMEMOG_OPENAI_EMBED_MODEL", "text-embedding-3-small")

# Local Ollama settings.
OCMEMOG_OLLAMA_HOST = os.getenv("OCMEMOG_OLLAMA_HOST", "http://127.0.0.1:11434")
OCMEMOG_OLLAMA_MODEL = os.getenv("OCMEMOG_OLLAMA_MODEL", "phi3:latest")
OCMEMOG_OLLAMA_EMBED_MODEL = os.getenv("OCMEMOG_OLLAMA_EMBED_MODEL", "nomic-embed-text:latest")

# Promotion/demotion score thresholds for memory candidates.
OCMEMOG_PROMOTION_THRESHOLD = float(os.getenv("OCMEMOG_PROMOTION_THRESHOLD", "0.5"))
OCMEMOG_DEMOTION_THRESHOLD = float(os.getenv("OCMEMOG_DEMOTION_THRESHOLD", "0.2"))

# Pondering / lesson-mining toggles. Kept as raw strings here, not parsed
# into booleans, to match the original module's behavior.
OCMEMOG_PONDER_ENABLED = os.getenv("OCMEMOG_PONDER_ENABLED", "true")
OCMEMOG_PONDER_MODEL = os.getenv("OCMEMOG_PONDER_MODEL", OCMEMOG_MEMORY_MODEL)
OCMEMOG_LESSON_MINING_ENABLED = os.getenv("OCMEMOG_LESSON_MINING_ENABLED", "true")
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import re
|
|
6
|
+
import urllib.request
|
|
7
|
+
|
|
8
|
+
from brain.runtime import config, state_store
|
|
9
|
+
from brain.runtime.instrumentation import emit_event
|
|
10
|
+
|
|
11
|
+
# Shared JSONL event-log path for this module's instrumentation events.
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def _infer_ollama(prompt: str, model: str | None = None) -> dict[str, str]:
    """Send *prompt* to the local Ollama ``/api/generate`` endpoint.

    Returns ``{"status": "ok", "output": ...}`` on success, or a
    ``{"status": "error", "error": ...}`` dict on any failure. Errors are
    also logged via ``emit_event``.
    """
    body = json.dumps(
        {
            "model": model or config.OCMEMOG_OLLAMA_MODEL,
            "prompt": prompt,
            "stream": False,
        }
    ).encode("utf-8")
    endpoint = f"{config.OCMEMOG_OLLAMA_HOST.rstrip('/')}/api/generate"
    request = urllib.request.Request(endpoint, data=body, method="POST")
    request.add_header("Content-Type", "application/json")
    try:
        with urllib.request.urlopen(request, timeout=60) as resp:
            parsed = json.loads(resp.read().decode("utf-8"))
    except Exception as exc:
        emit_event(LOGFILE, "brain_infer_error", status="error", provider="ollama", error=str(exc))
        return {"status": "error", "error": f"ollama_failed:{exc}"}
    text = parsed.get("response")
    if not text:
        emit_event(LOGFILE, "brain_infer_error", status="error", provider="ollama", error="invalid_response")
        return {"status": "error", "error": "invalid_response"}
    return {"status": "ok", "output": str(text).strip()}
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def infer(prompt: str, provider_name: str | None = None) -> dict[str, str]:
|
|
37
|
+
if not isinstance(prompt, str) or not prompt.strip():
|
|
38
|
+
return {"status": "error", "error": "empty_prompt"}
|
|
39
|
+
|
|
40
|
+
use_ollama = os.environ.get("OCMEMOG_USE_OLLAMA", "").lower() in {"1", "true", "yes"}
|
|
41
|
+
model_override = provider_name or config.OCMEMOG_MEMORY_MODEL
|
|
42
|
+
if use_ollama or model_override.startswith("ollama:"):
|
|
43
|
+
model = model_override.split(":", 1)[-1] if model_override.startswith("ollama:") else model_override
|
|
44
|
+
return _infer_ollama(prompt, model)
|
|
45
|
+
|
|
46
|
+
api_key = os.environ.get("OCMEMOG_OPENAI_API_KEY") or os.environ.get("OPENAI_API_KEY")
|
|
47
|
+
if not api_key:
|
|
48
|
+
# fall back to local ollama if configured
|
|
49
|
+
return _infer_ollama(prompt, config.OCMEMOG_OLLAMA_MODEL)
|
|
50
|
+
|
|
51
|
+
model = model_override
|
|
52
|
+
url = f"{config.OCMEMOG_OPENAI_API_BASE.rstrip('/')}/chat/completions"
|
|
53
|
+
payload = {
|
|
54
|
+
"model": model,
|
|
55
|
+
"messages": [{"role": "user", "content": prompt}],
|
|
56
|
+
"temperature": 0.2,
|
|
57
|
+
}
|
|
58
|
+
data = json.dumps(payload).encode("utf-8")
|
|
59
|
+
req = urllib.request.Request(url, data=data, method="POST")
|
|
60
|
+
req.add_header("Authorization", f"Bearer {api_key}")
|
|
61
|
+
req.add_header("Content-Type", "application/json")
|
|
62
|
+
|
|
63
|
+
try:
|
|
64
|
+
with urllib.request.urlopen(req, timeout=30) as resp:
|
|
65
|
+
response = json.loads(resp.read().decode("utf-8"))
|
|
66
|
+
except Exception as exc:
|
|
67
|
+
emit_event(LOGFILE, "brain_infer_error", status="error", provider="openai", error=str(exc))
|
|
68
|
+
return {"status": "error", "error": f"request_failed:{exc}"}
|
|
69
|
+
|
|
70
|
+
try:
|
|
71
|
+
output = response["choices"][0]["message"]["content"]
|
|
72
|
+
except Exception as exc:
|
|
73
|
+
emit_event(LOGFILE, "brain_infer_error", status="error", provider="openai", error=str(exc))
|
|
74
|
+
return {"status": "error", "error": "invalid_response"}
|
|
75
|
+
|
|
76
|
+
return {"status": "ok", "output": str(output).strip()}
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def parse_operator_name(text: str) -> dict[str, str] | None:
|
|
80
|
+
match = re.search(r"\bmy name is ([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)\b", text, flags=re.IGNORECASE)
|
|
81
|
+
if not match:
|
|
82
|
+
return None
|
|
83
|
+
return {"name": match.group(1)}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from datetime import datetime, timezone
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def emit_event(path: Path, event: str, **fields: Any) -> None:
    """Append one JSON event record to the JSONL log file at *path*.

    Creates parent directories as needed. Each record carries a UTC
    ``timestamp`` and the ``event`` name; caller-supplied *fields* are merged
    in afterwards and take precedence on key collision. Keys are written in
    sorted order, one record per line.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    record: dict[str, Any] = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "event": event,
    }
    record.update(fields)
    line = json.dumps(record, sort_keys=True)
    with path.open("a", encoding="utf-8") as handle:
        handle.write(line + "\n")
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from typing import List, Dict, Any
|
|
5
|
+
|
|
6
|
+
from brain.runtime.memory import provenance, store
|
|
7
|
+
from brain.runtime.instrumentation import emit_event
|
|
8
|
+
from brain.runtime.security import redaction
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _sanitize(text: str) -> str:
    """Return *text* with sensitive content redacted; findings are discarded."""
    cleaned, _findings = redaction.redact_text(text)
    return cleaned
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _emit(event: str) -> None:
    """Log a successful memory-API operation to the shared JSONL log."""
    log_path = store.state_store.reports_dir() / "brain_memory.log.jsonl"
    emit_event(log_path, event, status="ok")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def record_event(event_type: str, payload: str, *, source: str | None = None) -> None:
    """Persist a sanitized memory event row via the serialized write queue."""
    safe_payload = _sanitize(payload)
    details_json = json.dumps({"payload": safe_payload})

    def _write() -> None:
        conn = store.connect()
        try:
            conn.execute(
                "INSERT INTO memory_events (event_type, source, details_json, schema_version) VALUES (?, ?, ?, ?)",
                (event_type, source, details_json, store.SCHEMA_VERSION),
            )
            conn.commit()
        finally:
            conn.close()

    store.submit_write(_write, timeout=30.0)
    _emit("record_event")
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def record_task(task_id: str, status: str, *, source: str | None = None) -> None:
    """Persist a task row (status text sanitized) via the serialized write queue."""
    clean_status = _sanitize(status)
    metadata_json = json.dumps({"task_id": task_id})

    def _write() -> None:
        conn = store.connect()
        try:
            conn.execute(
                "INSERT INTO tasks (source, confidence, metadata_json, content, schema_version) VALUES (?, ?, ?, ?, ?)",
                (source, 1.0, metadata_json, clean_status, store.SCHEMA_VERSION),
            )
            conn.commit()
        finally:
            conn.close()

    store.submit_write(_write, timeout=30.0)
    _emit("record_task")
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def store_memory(
    memory_type: str,
    content: str,
    *,
    source: str | None = None,
    metadata: Dict[str, Any] | None = None,
    timestamp: str | None = None,
) -> int:
    """Persist one sanitized memory row and index it for vector search.

    Args:
        memory_type: Target table name; unknown values fall back to "knowledge".
        content: Memory text (redacted before storage).
        source: Optional origin identifier.
        metadata: Optional metadata dict, normalized via provenance.
        timestamp: Optional explicit timestamp; omitted to use the table default.

    Returns:
        The row id of the inserted memory.
    """
    content = _sanitize(content)
    table = memory_type.strip().lower() if memory_type else "knowledge"
    allowed = {"knowledge", "reflections", "directives", "tasks", "runbooks", "lessons"}
    if table not in allowed:
        table = "knowledge"
    normalized_metadata = provenance.normalize_metadata(metadata, source=source)
    metadata_json = json.dumps(normalized_metadata, ensure_ascii=False)

    # Build a single INSERT instead of two near-duplicate branches. `table` is
    # safe to interpolate because it was validated against the allowlist above.
    columns = ["source", "confidence", "metadata_json", "content", "schema_version"]
    values = [source, 1.0, metadata_json, content, store.SCHEMA_VERSION]
    if timestamp:
        columns.append("timestamp")
        values.append(timestamp)
    placeholders = ", ".join("?" for _ in columns)
    sql = f"INSERT INTO {table} ({', '.join(columns)}) VALUES ({placeholders})"

    def _write() -> int:
        conn = store.connect()
        try:
            cur = conn.execute(sql, tuple(values))
            conn.commit()
            return int(cur.lastrowid)
        finally:
            conn.close()

    last_row_id = store.submit_write(_write, timeout=30.0)
    reference = f"{table}:{last_row_id}"
    provenance.apply_links(reference, normalized_metadata)
    try:
        from brain.runtime.memory import vector_index

        vector_index.insert_memory(last_row_id, content, 1.0, source_type=table)
    except Exception as exc:
        # Indexing is best-effort: a failed vector insert is logged, not fatal.
        emit_event(
            store.state_store.reports_dir() / "brain_memory.log.jsonl",
            "store_memory_index_failed",
            status="error",
            error=str(exc),
            memory_type=table,
        )
    _emit("store_memory")
    return last_row_id
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def record_reinforcement(task_id: str, outcome: str, note: str, *, source_module: str | None = None) -> None:
    """Store one reinforcement experience plus its companion note event.

    Both the outcome and the note are redacted before storage; the two rows
    are written in a single transaction on the serialized write queue.
    """
    clean_outcome = _sanitize(outcome)
    clean_note = _sanitize(note)

    def _write() -> None:
        conn = store.connect()
        try:
            conn.execute(
                "INSERT INTO experiences (task_id, outcome, reward_score, confidence, experience_type, source_module, schema_version) "
                "VALUES (?, ?, ?, ?, ?, ?, ?)",
                (task_id, clean_outcome, None, 1.0, "reinforcement", source_module, store.SCHEMA_VERSION),
            )
            conn.execute(
                "INSERT INTO memory_events (event_type, source, details_json, schema_version) VALUES (?, ?, ?, ?)",
                ("reinforcement_note", source_module, json.dumps({"task_id": task_id, "note": clean_note}), store.SCHEMA_VERSION),
            )
            conn.commit()
        finally:
            conn.close()

    store.submit_write(_write, timeout=30.0)
    _emit("record_reinforcement")
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def get_recent_events(limit: int = 10) -> List[Dict[str, Any]]:
    """Return the newest *limit* rows from memory_events, newest first.

    FIX: close the connection in a finally block so it is not leaked when
    the query raises (the write paths in this module already do this).
    """
    conn = store.connect()
    try:
        rows = conn.execute(
            "SELECT id, timestamp, event_type, source, details_json FROM memory_events ORDER BY id DESC LIMIT ?",
            (limit,),
        ).fetchall()
    finally:
        conn.close()
    return [dict(row) for row in rows]
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def get_recent_tasks(limit: int = 10) -> List[Dict[str, Any]]:
    """Return the newest *limit* rows from tasks, newest first.

    FIX: close the connection in a finally block so it is not leaked when
    the query raises (the write paths in this module already do this).
    """
    conn = store.connect()
    try:
        rows = conn.execute(
            "SELECT id, timestamp, source, confidence, metadata_json, content FROM tasks ORDER BY id DESC LIMIT ?",
            (limit,),
        ).fetchall()
    finally:
        conn.close()
    return [dict(row) for row in rows]
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def get_memories(limit: int = 10) -> List[Dict[str, Any]]:
    """Return the newest *limit* rows from knowledge, newest first.

    FIX: close the connection in a finally block so it is not leaked when
    the query raises (the write paths in this module already do this).
    """
    conn = store.connect()
    try:
        rows = conn.execute(
            "SELECT id, timestamp, source, confidence, metadata_json, content FROM knowledge ORDER BY id DESC LIMIT ?",
            (limit,),
        ).fetchall()
    finally:
        conn.close()
    return [dict(row) for row in rows]
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Dict, Any
|
|
6
|
+
|
|
7
|
+
from brain.runtime import state_store
|
|
8
|
+
from brain.runtime.memory import store
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _artifact_dir() -> Path:
    """Return the artifacts directory under the memory store, creating it if needed."""
    artifacts = state_store.memory_dir() / "artifacts"
    artifacts.mkdir(parents=True, exist_ok=True)
    return artifacts
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def store_artifact(artifact_id: str, content: bytes, metadata: Dict[str, Any]) -> Path:
    """Write artifact bytes to disk and record a row in the artifacts table.

    Args:
        artifact_id: Identifier used as the on-disk filename stem.
        content: Raw artifact bytes.
        metadata: Arbitrary metadata; ``artifact_type`` and ``source_path``
            keys are lifted into dedicated columns when present.

    Returns:
        Path to the written ``<artifact_id>.bin`` file.

    FIX: the content hash previously used the built-in ``hash()`` on bytes,
    which is randomized per process (PYTHONHASHSEED), so the stored hash was
    not reproducible across runs; use a SHA-256 hex digest instead. Also
    close the DB connection in a finally block so it is not leaked on error.
    """
    import hashlib  # stdlib; local import keeps this fix self-contained

    path = _artifact_dir() / f"{artifact_id}.bin"
    path.write_bytes(content)
    content_hash = hashlib.sha256(content).hexdigest()
    conn = store.connect()
    try:
        conn.execute(
            "INSERT INTO artifacts (artifact_id, artifact_type, source_path, content_hash, metadata) VALUES (?, ?, ?, ?, ?)",
            (artifact_id, metadata.get("artifact_type", "unknown"), metadata.get("source_path", ""), content_hash, json.dumps(metadata)),
        )
        conn.commit()
    finally:
        conn.close()
    return path
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def load_artifact(artifact_id: str) -> bytes:
    """Read back the raw bytes previously stored for *artifact_id*."""
    return (_artifact_dir() / f"{artifact_id}.bin").read_bytes()
|