@parkgogogo/openclaw-reflection 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/INSTALL.md CHANGED
@@ -1,78 +1,90 @@
1
- # OpenClaw Install Guide
1
+ # OpenClaw Plugin Install Skill
2
2
 
3
- This file is for OpenClaw itself to follow when installing `@parkgogogo/openclaw-reflection`.
3
+ Use this file when installing `@parkgogogo/openclaw-reflection`.
4
4
 
5
- ## Goal
5
+ ## Role
6
6
 
7
- Install the packaged plugin, enable it under the `openclaw-reflection` plugin id, provide a valid LLM config, and restart the gateway so the plugin becomes active.
7
+ Install the package, collect only missing config, write the plugin entry, validate config, and restart the gateway.
8
8
 
9
- ## Install From npm
9
+ ## Ask Only If Missing
10
10
 
11
- Use the published package, not a linked workspace checkout:
11
+ - `workspaceDir`
12
+ Recommendation: use OpenClaw's current workspace for the active profile.
13
+ Only ask if the current workspace is unknown or the operator wants a different one.
14
+ - `llm.baseURL`
15
+ - `llm.apiKey`
16
+ - `llm.model`
17
+ - whether the profile already uses `plugins.allow`
12
18
 
13
- ```bash
14
- openclaw plugins install @parkgogogo/openclaw-reflection@0.1.0
15
- ```
19
+ ## Defaults
20
+
21
+ - `bufferSize`: `50`
22
+ - `logLevel`: `info`
23
+ - `memoryGate.enabled`: `true`
24
+ - `memoryGate.windowSize`: `10`
25
+ - `consolidation.enabled`: `false`
26
+ - `consolidation.schedule`: `0 2 * * *`
27
+ - recommended model: `x-ai/grok-4.1-fast`
28
+
29
+ ## Rules
16
30
 
17
- If you are installing from a local release artifact instead:
31
+ - Install from npm, not a linked checkout.
32
+ - Write config under `plugins.entries.openclaw-reflection`.
33
+ - Do not leave `llm.baseURL`, `llm.apiKey`, or `llm.model` empty.
34
+ - Only add `openclaw-reflection` to `plugins.allow` if the profile already uses an allowlist.
35
+
36
+ ## Steps
37
+
38
+ ### 1. Install
18
39
 
19
40
  ```bash
20
- openclaw plugins install ./parkgogogo-openclaw-reflection-0.1.0.tgz
41
+ openclaw plugins install @parkgogogo/openclaw-reflection@0.1.0
21
42
  ```
22
43
 
23
- ## Write Config
44
+ ### 2. Write Config
24
45
 
25
- Add this under `plugins.entries.openclaw-reflection` in the active OpenClaw profile config:
46
+ Use the current OpenClaw workspace as `workspaceDir` unless the operator explicitly wants another workspace.
26
47
 
27
- ```json
48
+ ```jsonc
28
49
  {
29
50
  "enabled": true,
30
51
  "config": {
31
- "workspaceDir": "/absolute/path/to/agent-workspace",
52
+ "workspaceDir": "/absolute/path/to/current-openclaw-workspace",
32
53
  "bufferSize": 50,
33
54
  "logLevel": "info",
34
55
  "llm": {
35
- "baseURL": "https://api.openai.com/v1",
56
+ "baseURL": "https://openrouter.ai/api/v1",
36
57
  "apiKey": "YOUR_API_KEY",
37
- "model": "gpt-4.1-mini"
58
+ "model": "x-ai/grok-4.1-fast"
38
59
  },
39
60
  "memoryGate": {
40
61
  "enabled": true,
41
62
  "windowSize": 10
42
63
  },
43
64
  "consolidation": {
44
- "enabled": true,
65
+ "enabled": false,
45
66
  "schedule": "0 2 * * *"
46
67
  }
47
68
  }
48
69
  }
49
70
  ```
50
71
 
51
- Make sure `plugins.allow` includes `openclaw-reflection` if the profile uses an allowlist.
72
+ If the profile uses `plugins.allow`, ensure it contains:
52
73
 
53
- ## Validate And Restart
74
+ ```json
75
+ ["openclaw-reflection"]
76
+ ```
54
77
 
55
- Run:
78
+ ### 3. Validate
56
79
 
57
80
  ```bash
58
81
  openclaw config validate --json
59
- openclaw gateway run --force --verbose
60
82
  ```
61
83
 
62
- In another shell, confirm the gateway is live:
84
+ Stop if validation fails.
85
+
86
+ ### 4. Restart
63
87
 
64
88
  ```bash
65
- curl --fail http://127.0.0.1:PORT/health
89
+ openclaw gateway restart
66
90
  ```
67
-
68
- ## Expected Success Signals
69
-
70
- - The gateway starts without plugin registration errors
71
- - The plugin log contains `Plugin registered successfully, all hooks active`
72
- - `GET /health` returns JSON containing `"ok": true`
73
-
74
- ## Do Not Do This
75
-
76
- - Do not use `openclaw plugins install -l` unless you are actively developing this repository
77
- - Do not leave `llm.baseURL`, `llm.apiKey`, or `llm.model` empty
78
- - Do not configure the plugin under the old id `reflection-plugin`
package/README.md CHANGED
@@ -2,13 +2,24 @@
2
2
 
3
3
  ![OpenClaw Plugin](https://img.shields.io/badge/OpenClaw-Plugin-111111?style=flat-square)
4
4
  ![TypeScript](https://img.shields.io/badge/TypeScript-5.x-3178c6?style=flat-square)
5
- ![memoryGate 16/16](https://img.shields.io/badge/memoryGate-16%2F16%20passed-2ea043?style=flat-square)
6
- ![writer guardian 16/16](https://img.shields.io/badge/writer%20guardian-16%2F16%20passed-2ea043?style=flat-square)
5
+ ![memory_gate 18 cases](https://img.shields.io/badge/memory_gate-18%20benchmark%20cases-2ea043?style=flat-square)
6
+ ![write_guardian 14 cases](https://img.shields.io/badge/write_guardian-14%20benchmark%20cases-2ea043?style=flat-square)
7
+
8
+ Chinese version: [README.zh-CN.md](./README.zh-CN.md)
7
9
 
8
10
  **Make OpenClaw's native memory system sharper without replacing it.**
9
11
 
10
12
  OpenClaw Reflection is an additive layer on top of OpenClaw's built-in Markdown memory system. It captures message flow, keeps thread noise out of long-term memory, writes durable knowledge into the same human-readable memory files OpenClaw already uses, and periodically consolidates them so your agent gets sharper over time instead of messier.
11
13
 
14
+ ## Current Scope
15
+
16
+ Reflection currently supports:
17
+
18
+ - a single agent
19
+ - multiple sessions for that same agent
20
+
21
+ Reflection does not currently support multi-agent memory coordination or per-agent routing across multiple agents in one OpenClaw setup.
22
+
12
23
  ## Built On OpenClaw Memory
13
24
 
14
25
  OpenClaw memory is already workspace-native: the source of truth is Markdown files in the agent workspace, not a hidden database. In the official model, daily logs live under `memory/YYYY-MM-DD.md`, while `MEMORY.md` is the curated long-term layer.
@@ -19,17 +30,15 @@ Reflection builds on top of that system instead of replacing it.
19
30
  - It does **not** require replacing OpenClaw's default `memory-core`
20
31
  - It does **not** take over the active `plugins.slots.memory` role
21
32
  - It works by listening to message hooks and curating the same workspace memory files
33
+ - It analyzes and curates `USER.md`, `MEMORY.md`, `TOOLS.md`, `IDENTITY.md`, and `SOUL.md` based on conversation flow
22
34
 
23
35
  In practice, that means low migration risk and low conceptual overhead: you keep OpenClaw's native MEMORY workflow, and Reflection enhances the capture, filtering, routing, and consolidation steps around it.
24
36
 
25
37
  ## Why People Install It
26
38
 
27
- Most chat memory systems fail in one of two ways:
28
-
29
- - they forget too much, so you keep re-explaining the same context
30
- - they remember too much, so temporary thread noise pollutes long-term memory
39
+ OpenClaw's core long-term files such as `USER.md`, `TOOLS.md`, `IDENTITY.md`, and `SOUL.md` are hard to improve continuously in the default setup.
31
40
 
32
- Reflection is built to fix both.
41
+ Reflection is built to solve that.
33
42
 
34
43
  - Keep stable user preferences and collaboration habits
35
44
  - Preserve durable shared context across sessions
@@ -37,15 +46,20 @@ Reflection is built to fix both.
37
46
  - Refuse one-off tasks, active thread chatter, and misrouted writes
38
47
  - Periodically consolidate memory so it stays usable
39
48
 
49
+ ## Core Mechanism
50
+
51
+ Reflection uses LLM analysis over recent conversation context and adds two control points: `memory_gate` and `write_guardian`.
52
+
53
+ - `memory_gate` analyzes the conversation and decides which durable fact, if any, should be written and which target file it belongs to
54
+ - `write_guardian` acts as the write gate and follows OpenClaw's file responsibilities to decide whether a write should be accepted, rejected, or merged into the target file
55
+
40
56
  ## Install
41
57
 
42
58
  ### Recommended for users: install the plugin package
43
59
 
44
- OpenClaw can install plugins directly from a package source. That is the right distribution path for Reflection, because users should not need to clone the repository or run `pnpm install` just to use the plugin.
60
+ For an install script written for OpenClaw itself to follow, including which config questions to ask first, see [INSTALL.md](./INSTALL.md).
45
61
 
46
- For a step-by-step installation flow that OpenClaw can follow directly, see [INSTALL.md](./INSTALL.md).
47
-
48
- Registry install after publishing:
62
+ Install:
49
63
 
50
64
  ```bash
51
65
  openclaw plugins install <npm-spec>
@@ -61,25 +75,25 @@ openclaw plugins install @parkgogogo/openclaw-reflection
61
75
 
62
76
  Put the following under `plugins.entries.openclaw-reflection` in your OpenClaw config:
63
77
 
64
- ```json
78
+ ```jsonc
65
79
  {
66
- "enabled": true,
80
+ "enabled": true, // Enable the plugin entry
67
81
  "config": {
68
- "workspaceDir": "/absolute/path/to/your-agent-workspace",
69
- "bufferSize": 50,
70
- "logLevel": "info",
82
+ "workspaceDir": "/absolute/path/to/your-agent-workspace", // Workspace where MEMORY.md, USER.md, TOOLS.md, IDENTITY.md, and SOUL.md live
83
+ "bufferSize": 50, // Session buffer size used to collect recent messages
84
+ "logLevel": "info", // Runtime log verbosity: debug, info, warn, or error
71
85
  "llm": {
72
- "baseURL": "https://api.openai.com/v1",
73
- "apiKey": "YOUR_API_KEY",
74
- "model": "gpt-4.1-mini"
86
+ "baseURL": "https://openrouter.ai/api/v1", // OpenAI-compatible provider base URL
87
+ "apiKey": "YOUR_API_KEY", // Provider API key used for analysis and writing
88
+ "model": "x-ai/grok-4.1-fast" // Recommended model for plugin runtime
75
89
  },
76
90
  "memoryGate": {
77
- "enabled": true,
78
- "windowSize": 10
91
+ "enabled": true, // Enable durable-memory filtering before any write
92
+ "windowSize": 10 // Number of recent messages included in memory_gate analysis
79
93
  },
80
94
  "consolidation": {
81
- "enabled": true,
82
- "schedule": "0 2 * * *"
95
+ "enabled": false, // Keep disabled by default; enable only if you want scheduled cleanup
96
+ "schedule": "0 2 * * *" // Cron expression used when consolidation is enabled
83
97
  }
84
98
  }
85
99
  }
@@ -96,24 +110,15 @@ Once the gateway restarts, Reflection will begin listening to `message_received`
96
110
  | A memory system you can inspect | Plain Markdown files you can open, edit, diff, and version |
97
111
  | Better continuity across sessions | Durable facts routed into the right long-term file |
98
112
  | Less memory pollution | Gatekeeping that refuses temporary or misrouted content |
99
- | A system that stays usable over time | Scheduled consolidation for existing memory files |
100
-
101
- ## Why This Beats Naive Memory
102
-
103
- | Naive memory | Reflection |
104
- | -------------------------------- | ------------------------------------------------ |
105
- | Appends whatever seems memorable | Filters for durable signal before writing |
106
- | Hides memory in a black box | Stores memory in readable Markdown files |
107
- | Mixes all facts together | Routes facts into purpose-specific files |
108
- | Lets bad writes accumulate | Adds writer guarding and scheduled consolidation |
113
+ | A system that stays usable over time | Optional scheduled consolidation for existing memory files |
109
114
 
110
115
  ## How It Works
111
116
 
112
117
  ```mermaid
113
118
  flowchart LR
114
119
  A["Incoming conversation"] --> B["Session buffer"]
115
- B --> C["memoryGate"]
116
- C -->|durable fact| D["Writer guardian"]
120
+ B --> C["memory_gate"]
121
+ C -->|durable fact| D["write_guardian"]
117
122
  C -->|thread noise| E["No write"]
118
123
  D --> F["MEMORY.md / USER.md / SOUL.md / IDENTITY.md / TOOLS.md"]
119
124
  F --> G["Scheduled consolidation"]
@@ -122,16 +127,21 @@ flowchart LR
122
127
  In practice, the pipeline is simple:
123
128
 
124
129
  1. Reflection captures conversation context from OpenClaw hooks.
125
- 2. `memoryGate` decides whether the candidate fact is durable enough to keep.
130
+ 2. `memory_gate` decides whether the candidate fact is durable enough to keep.
126
131
  3. A file-specific guardian either rewrites the target memory file or refuses the write.
127
- 4. Scheduled consolidation keeps `MEMORY.md`, `USER.md`, `SOUL.md`, and `TOOLS.md` compact over time.
132
+ 4. When enabled, scheduled consolidation keeps `MEMORY.md`, `USER.md`, `SOUL.md`, and `TOOLS.md` compact over time.
128
133
 
129
134
  ## Proof, Not Just Promises
130
135
 
131
- This repo already includes offline eval coverage for the two hardest parts of the system:
136
+ The active default offline benchmark currently includes:
137
+
138
+ - `memory_gate`: `18` benchmark cases
139
+ - `write_guardian`: `14` benchmark cases
140
+
141
+ The most recent archived result snapshots in this repo are:
132
142
 
133
- - [`memoryGate`: 16/16 passed on V2](./evals/results/2026-03-08-memory-gate-v2-16-of-16.md)
134
- - [`writer guardian`: 16/16 passed on V2](./evals/results/2026-03-08-writer-guardian-v2-16-of-16.md)
143
+ - [`memory_gate`: 16/16 passed on V2](./evals/results/2026-03-08-memory-gate-v2-16-of-16.md)
144
+ - [`write_guardian`: 16/16 passed on V2](./evals/results/2026-03-08-write-guardian-v2-16-of-16.md)
135
145
 
136
146
  These evals focus on the failure modes that make long-term memory systems unreliable:
137
147
 
@@ -163,33 +173,37 @@ These evals focus on the failure modes that make long-term memory systems unreli
163
173
  | `llm.model` | `gpt-4.1-mini` | Model used for analysis and consolidation |
164
174
  | `memoryGate.enabled` | `true` | Enable long-term memory filtering |
165
175
  | `memoryGate.windowSize` | `10` | Message window used during analysis |
166
- | `consolidation.enabled` | `true` | Enable scheduled consolidation |
176
+ | `consolidation.enabled` | `false` | Enable scheduled consolidation |
167
177
  | `consolidation.schedule` | `0 2 * * *` | Cron expression for consolidation |
168
178
 
169
179
  ## Built For
170
180
 
171
181
  - personal agents that should get better over weeks, not just one session
182
+ - single-agent OpenClaw setups with many sessions
172
183
  - teams that want memory with reviewability and version control
173
184
  - OpenClaw users who do not want a black-box memory store
174
185
  - agents that need stronger continuity without turning every chat into permanent history
175
186
 
176
187
  ## Development And Evals
177
188
 
189
+ Recommended model for real plugin use:
190
+
191
+ - `x-ai/grok-4.1-fast`
192
+
193
+ The development eval setup in this repository currently uses:
194
+
195
+ - eval model: `x-ai/grok-4.1-fast`
196
+ - judge model: `openai/gpt-5.4`
197
+
178
198
  ```bash
179
199
  pnpm run typecheck
180
200
  pnpm run eval:memory-gate
181
- pnpm run eval:writer-guardian
201
+ pnpm run eval:write-guardian
182
202
  pnpm run eval:all
183
203
  ```
184
204
 
185
205
  More eval details: [evals/README.md](./evals/README.md)
186
206
 
187
- Fast packaged-plugin regression on a reused local OpenClaw profile:
188
-
189
- ```bash
190
- pnpm run e2e:openclaw-plugin
191
- ```
192
-
193
207
  ## Links
194
208
 
195
209
  - OpenClaw plugin docs: [docs.openclaw.ai/tools/plugin](https://docs.openclaw.ai/tools/plugin)
@@ -0,0 +1,183 @@
1
+ # OpenClaw Reflection
2
+
3
+ 英文版: [README.md](./README.md)
4
+
5
+ ![OpenClaw Plugin](https://img.shields.io/badge/OpenClaw-Plugin-111111?style=flat-square)
6
+ ![TypeScript](https://img.shields.io/badge/TypeScript-5.x-3178c6?style=flat-square)
7
+ ![memory_gate 18 cases](https://img.shields.io/badge/memory_gate-18%20benchmark%20cases-2ea043?style=flat-square)
8
+ ![write_guardian 14 cases](https://img.shields.io/badge/write_guardian-14%20benchmark%20cases-2ea043?style=flat-square)
9
+
10
+ **在不替换 OpenClaw 原生记忆体系的前提下,让 Markdown 记忆更干净、更稳定、更可持续。**
11
+
12
+ OpenClaw Reflection 是叠加在 OpenClaw 原生 Markdown memory 之上的一层增强插件。它负责监听消息流,过滤线程噪音,把真正长期有效的信息写回 OpenClaw 的核心记忆文件,并定期整理这些文件,避免长期使用后越记越乱。
13
+
14
+ ## 当前支持范围
15
+
16
+ Reflection 当前支持:
17
+
18
+ - 单一 agent
19
+ - 同一个 agent 下的多 sessions
20
+
21
+ 目前还不支持多 agent 之间的记忆协调,也不支持在一个 OpenClaw 多 agent 环境里做按 agent 分流的长期记忆管理。
22
+
23
+ ## 它建立在 OpenClaw 原生 Memory 之上
24
+
25
+ OpenClaw 的 memory 本来就是 workspace-native 的:事实源头是 agent workspace 中的 Markdown 文件,而不是隐藏数据库。官方模型里,日常记录通常在 `memory/YYYY-MM-DD.md`,而 `MEMORY.md` 是长期整理层。
26
+
27
+ Reflection 的定位不是替换,而是增强:
28
+
29
+ - 不引入新的私有 memory store
30
+ - 不要求替换 OpenClaw 默认的 `memory-core`
31
+ - 不接管 `plugins.slots.memory`
32
+ - 直接围绕现有 Markdown memory 文件做捕获、过滤、路由和整理
33
+ - 根据对话,分析整理 `USER.md` `MEMORY.md` `TOOLS.md` `IDENTITY.md` `SOUL.md`
34
+
35
+ 这意味着迁移成本低、概念负担低,也更容易人工检查和版本管理。
36
+
37
+ ## 为什么要装它
38
+
39
+ OpenClaw 默认状态下核心的 `USER.md` `TOOLS.md` `IDENTITY.md` `SOUL.md` 是很难自我迭代改进的
40
+
41
+ Reflection 就是为了解决这个问题:
42
+
43
+ - 保留稳定的用户偏好和协作习惯
44
+ - 沉淀跨会话仍然有价值的长期上下文
45
+ - 将长期记忆拆分到 `MEMORY.md`、`USER.md`、`SOUL.md`、`IDENTITY.md`、`TOOLS.md`
46
+ - 拒绝一次性任务、短期线程聊天、错路由内容
47
+ - 周期性整理长期记忆,防止文件持续膨胀和失真
48
+
49
+ ## 原理
50
+
51
+ 我们使用 LLM 的能力对最近的对话进行分析,设置了 `memory_gate` 和 `write_guardian` 两个工具
52
+
53
+ - `memory_gate` 通过对话分析,判断有哪些事实应该被记录到哪个文件
54
+
55
+ - `write_guardian` 设置为写入门禁,会根据 OpenClaw 官方的指引,来判断是否要写入,并进行事实整合
56
+
57
+ ## 安装
58
+
59
+ ### 推荐方式:安装打包后的插件
60
+
61
+ 更详细的安装指引见 [INSTALL.md](./INSTALL.md)。这个文件现在按“给 OpenClaw 自己执行的安装技能”来写,包含安装前应该向操作者询问哪些配置。
62
+
63
+ 手动直接安装:
64
+
65
+ ```bash
66
+ openclaw plugins install @parkgogogo/openclaw-reflection
67
+ ```
68
+
69
+ ### 添加插件配置
70
+
71
+ 把下面这段配置写到 OpenClaw profile 的 `plugins.entries.openclaw-reflection` 下:
72
+
73
+ ```jsonc
74
+ {
75
+ "enabled": true, // 启用这个插件入口
76
+ "config": {
77
+ "workspaceDir": "/absolute/path/to/your-agent-workspace", // 长期记忆文件所在的 agent workspace 目录
78
+ "bufferSize": 50, // 会话缓冲区大小,用来保留最近消息上下文
79
+ "logLevel": "info", // 运行日志级别:debug、info、warn、error
80
+ "llm": {
81
+ "baseURL": "https://openrouter.ai/api/v1", // OpenAI 兼容接口的 provider base URL
82
+ "apiKey": "YOUR_API_KEY", // 用于分析和写入决策的 provider API key
83
+ "model": "x-ai/grok-4.1-fast" // 推荐用于插件运行时的模型
84
+ },
85
+ "memoryGate": {
86
+ "enabled": true, // 启用长期记忆写入前的过滤
87
+ "windowSize": 10 // memory_gate 分析时使用的最近消息窗口大小
88
+ },
89
+ "consolidation": {
90
+ "enabled": false, // 默认禁用;只有需要定时整理时再开启
91
+ "schedule": "0 2 * * *" // 启用 consolidation 后使用的 cron 表达式
92
+ }
93
+ }
94
+ }
95
+ ```
96
+
97
+ ### 重启 OpenClaw Gateway
98
+
99
+ Gateway 重启后,Reflection 就会开始监听 `message_received` 和 `before_message_write`,并把整理后的长期信息写入你配置的 `workspaceDir`。
100
+
101
+ ## 你会得到什么
102
+
103
+ | 你想要的能力 | Reflection 提供的结果 |
104
+ | ------------------------ | ---------------------------------------------- |
105
+ | 可检查、可编辑的记忆系统 | 直接落到 Markdown 文件,能打开、diff、版本管理 |
106
+ | 更稳定的跨会话连续性 | 长期事实会被路由到正确的文件 |
107
+ | 更少的记忆污染 | 会过滤临时线程内容和错路由写入 |
108
+ | 长期使用后仍然可维护 | 可选的定期 consolidation,避免文件越来越乱 |
109
+
110
+ ## 它如何工作
111
+
112
+ ```mermaid
113
+ flowchart LR
114
+ A["Incoming conversation"] --> B["Session buffer"]
115
+ B --> C["memory_gate"]
116
+ C -->|durable fact| D["write_guardian"]
117
+ C -->|thread noise| E["No write"]
118
+ D --> F["MEMORY.md / USER.md / SOUL.md / IDENTITY.md / TOOLS.md"]
119
+ F --> G["Scheduled consolidation"]
120
+ ```
121
+
122
+ 流程很直接:
123
+
124
+ 1. Reflection 从 OpenClaw hook 中捕获会话上下文。
125
+ 2. `memory_gate` 判断候选事实是否足够长期、足够稳定。
126
+ 3. file-specific `write_guardian` 决定是否写入目标文件,并在需要时重写目标文件内容。
127
+ 4. 在启用时,`consolidation` 会定期整理长期文件,控制冗余和过时信息。
128
+
129
+ ## 评测覆盖
130
+
131
+ 我们设置了一个小型人工校验过的数据集,使用 x-ai/grok-4.1-fast 来优化 prompt,测试完善 `memory_gate` 和 `write_guardian`
132
+
133
+ 当前默认离线 benchmark 包含:
134
+
135
+ - `memory_gate`:`18` 个 benchmark case
136
+ - `write_guardian`:`14` 个 benchmark case
137
+
138
+ 仓库中最近一次归档结果快照是:
139
+
140
+ - [`memory_gate`: 16/16 passed on V2](./evals/results/2026-03-08-memory-gate-v2-16-of-16.md)
141
+ - [`write_guardian`: 16/16 passed on V2](./evals/results/2026-03-08-write-guardian-v2-16-of-16.md)
142
+
143
+ 这些评测重点覆盖:
144
+
145
+ - 拒绝当前线程噪音
146
+ - 防止用户事实写错文件
147
+ - 保持 `SOUL` 连续性规则
148
+ - 正确替换过时的 `IDENTITY` 元数据
149
+ - 让 `TOOLS.md` 只保存本地工具映射,而不是把它误当工具注册表
150
+
151
+ ## 长期记忆文件
152
+
153
+ | 文件 | 作用 |
154
+ | ------------- | ---------------------------------------------- |
155
+ | `MEMORY.md` | 持久共享上下文、关键结论、长期背景事实 |
156
+ | `USER.md` | 稳定的用户偏好、协作风格、长期有帮助的个人背景 |
157
+ | `SOUL.md` | 助手原则、边界、连续性规则 |
158
+ | `IDENTITY.md` | 显式身份元数据,例如名字、气质、形象描述 |
159
+ | `TOOLS.md` | 环境特定的工具别名、端点、设备名、本地工具映射 |
160
+
161
+ ## 开发和评测命令
162
+
163
+ 实际插件使用时,推荐模型:
164
+
165
+ - `x-ai/grok-4.1-fast`
166
+
167
+ 当前这个仓库里的开发评测配置使用的是:
168
+
169
+ - eval model: `x-ai/grok-4.1-fast`
170
+ - judge model: `openai/gpt-5.4`
171
+
172
+ ```bash
173
+ pnpm run typecheck
174
+ pnpm run eval:memory-gate
175
+ pnpm run eval:write-guardian
176
+ pnpm run eval:all
177
+ ```
178
+
179
+ 更多评测说明见 [evals/README.md](./evals/README.md)。
180
+
181
+ ## 链接
182
+
183
+ - OpenClaw plugin docs: [docs.openclaw.ai/tools/plugin](https://docs.openclaw.ai/tools/plugin)
@@ -54,7 +54,7 @@
54
54
  "properties": {
55
55
  "enabled": {
56
56
  "type": "boolean",
57
- "default": true
57
+ "default": false
58
58
  },
59
59
  "schedule": {
60
60
  "type": "string",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@parkgogogo/openclaw-reflection",
3
- "version": "0.1.0",
3
+ "version": "0.1.1",
4
4
  "description": "OpenClaw plugin that enhances native Markdown memory with filtering, curation, and consolidation",
5
5
  "type": "module",
6
6
  "main": "src/index.ts",
@@ -8,6 +8,7 @@
8
8
  "src/",
9
9
  "openclaw.plugin.json",
10
10
  "README.md",
11
+ "README.zh-CN.md",
11
12
  "INSTALL.md"
12
13
  ],
13
14
  "repository": {
@@ -24,7 +25,7 @@
24
25
  "typecheck": "tsc --noEmit",
25
26
  "e2e:openclaw-plugin": "bash scripts/e2e-openclaw-plugin.sh",
26
27
  "eval:memory-gate": "pnpm exec tsc && node evals/run.mjs --suite memory-gate",
27
- "eval:writer-guardian": "pnpm exec tsc && node evals/run.mjs --suite writer-guardian",
28
+ "eval:write-guardian": "pnpm exec tsc && node evals/run.mjs --suite write-guardian",
28
29
  "eval:all": "pnpm exec tsc && node evals/run.mjs --suite all"
29
30
  },
30
31
  "keywords": [
package/src/config.ts CHANGED
@@ -21,7 +21,7 @@ const DEFAULT_CONFIG: PluginConfig = {
21
21
  windowSize: 10,
22
22
  },
23
23
  consolidation: {
24
- enabled: true,
24
+ enabled: false,
25
25
  schedule: "0 2 * * *",
26
26
  },
27
27
  };
package/src/evals/cli.ts CHANGED
@@ -1,4 +1,4 @@
1
- export type EvalSuite = "all" | "memory-gate" | "writer-guardian";
1
+ export type EvalSuite = "all" | "memory-gate" | "write-guardian";
2
2
 
3
3
  export interface EvalCliOptions {
4
4
  suite: EvalSuite;
@@ -6,7 +6,7 @@ export interface EvalCliOptions {
6
6
  datasetRoot?: string;
7
7
  sharedDatasetPath?: string;
8
8
  memoryGateDatasetPath?: string;
9
- writerGuardianDatasetPath?: string;
9
+ writeGuardianDatasetPath?: string;
10
10
  }
11
11
 
12
12
  function getArgValue(argv: string[], flag: string): string | undefined {
@@ -23,13 +23,13 @@ function parseSuite(value: string | undefined): EvalSuite {
23
23
  if (
24
24
  suite === "all" ||
25
25
  suite === "memory-gate" ||
26
- suite === "writer-guardian"
26
+ suite === "write-guardian"
27
27
  ) {
28
28
  return suite;
29
29
  }
30
30
 
31
31
  throw new Error(
32
- `Unsupported suite: ${suite}. Expected one of: all, memory-gate, writer-guardian`
32
+ `Unsupported suite: ${suite}. Expected one of: all, memory-gate, write-guardian`
33
33
  );
34
34
  }
35
35
 
@@ -40,6 +40,6 @@ export function parseEvalCliOptions(argv: string[]): EvalCliOptions {
40
40
  datasetRoot: getArgValue(argv, "--dataset-root"),
41
41
  sharedDatasetPath: getArgValue(argv, "--shared-dataset"),
42
42
  memoryGateDatasetPath: getArgValue(argv, "--memory-gate-dataset"),
43
- writerGuardianDatasetPath: getArgValue(argv, "--writer-guardian-dataset"),
43
+ writeGuardianDatasetPath: getArgValue(argv, "--write-guardian-dataset"),
44
44
  };
45
45
  }
@@ -5,13 +5,13 @@ export interface ResolveEvalDatasetPathsInput {
5
5
  datasetRoot?: string;
6
6
  sharedDatasetPath?: string;
7
7
  memoryGateDatasetPath?: string;
8
- writerGuardianDatasetPath?: string;
8
+ writeGuardianDatasetPath?: string;
9
9
  }
10
10
 
11
11
  export interface EvalDatasetPaths {
12
12
  sharedDatasetPath: string;
13
13
  memoryGateDatasetPath: string;
14
- writerGuardianDatasetPath: string;
14
+ writeGuardianDatasetPath: string;
15
15
  }
16
16
 
17
17
  function resolvePath(rootDir: string, targetPath: string): string {
@@ -32,8 +32,8 @@ export function resolveEvalDatasetPaths(
32
32
  memoryGateDatasetPath: input.memoryGateDatasetPath
33
33
  ? resolvePath(input.rootDir, input.memoryGateDatasetPath)
34
34
  : path.join(datasetRoot, "memory-gate/benchmark.jsonl"),
35
- writerGuardianDatasetPath: input.writerGuardianDatasetPath
36
- ? resolvePath(input.rootDir, input.writerGuardianDatasetPath)
37
- : path.join(datasetRoot, "writer-guardian/benchmark.jsonl"),
35
+ writeGuardianDatasetPath: input.writeGuardianDatasetPath
36
+ ? resolvePath(input.rootDir, input.writeGuardianDatasetPath)
37
+ : path.join(datasetRoot, "write-guardian/benchmark.jsonl"),
38
38
  };
39
39
  }
@@ -2,9 +2,9 @@ import os from "node:os";
2
2
  import path from "node:path";
3
3
  import { mkdtemp, readFile, rm, writeFile } from "node:fs/promises";
4
4
 
5
- import { FileCurator } from "../file-curator/index.js";
6
5
  import { LLMService } from "../llm/service.js";
7
6
  import { MemoryGateAnalyzer } from "../memory-gate/analyzer.js";
7
+ import { WriteGuardian } from "../write-guardian/index.js";
8
8
  import type {
9
9
  AgentStep,
10
10
  LLMService as LLMServiceContract,
@@ -14,7 +14,7 @@ import type {
14
14
 
15
15
  export interface SharedScenario {
16
16
  scenario_id: string;
17
- task_type?: "memory_gate" | "writer_guardian";
17
+ task_type?: "memory_gate" | "write_guardian";
18
18
  title: string;
19
19
  recent_messages?: Array<{
20
20
  role: "user" | "agent";
@@ -39,7 +39,7 @@ export interface MemoryGateBenchmarkCase {
39
39
  tags: string[];
40
40
  }
41
41
 
42
- export interface WriterGuardianBenchmarkCase {
42
+ export interface WriteGuardianBenchmarkCase {
43
43
  scenario_id: string;
44
44
  expected_should_write: boolean;
45
45
  expected_outcome_type: string;
@@ -62,7 +62,7 @@ export interface MemoryGateCaseResult {
62
62
  error?: string;
63
63
  }
64
64
 
65
- export interface WriterGuardianCaseResult {
65
+ export interface WriteGuardianCaseResult {
66
66
  scenarioId: string;
67
67
  pass: boolean;
68
68
  shouldWritePass: boolean;
@@ -156,7 +156,7 @@ export async function evaluateMemoryGateBenchmark(input: {
156
156
  }
157
157
 
158
158
  try {
159
- logger.info("EvalRunner", "Starting memory gate case", {
159
+ logger.info("EvalRunner", "Starting memory_gate case", {
160
160
  scenarioId: benchmarkCase.scenario_id,
161
161
  expectedDecision: benchmarkCase.expected_decision,
162
162
  });
@@ -199,7 +199,7 @@ export async function evaluateMemoryGateBenchmark(input: {
199
199
  actualCandidateFact: actual.candidateFact,
200
200
  expectedCandidateFact: benchmarkCase.expected_candidate_fact,
201
201
  });
202
- logger.info("EvalRunner", "Completed memory gate case", {
202
+ logger.info("EvalRunner", "Completed memory_gate case", {
203
203
  scenarioId: benchmarkCase.scenario_id,
204
204
  pass,
205
205
  decisionPass,
@@ -220,7 +220,7 @@ export async function evaluateMemoryGateBenchmark(input: {
220
220
  expectedCandidateFact: benchmarkCase.expected_candidate_fact,
221
221
  error: reason,
222
222
  });
223
- logger.error("EvalRunner", "Memory gate case failed", {
223
+ logger.error("EvalRunner", "memory_gate case failed", {
224
224
  scenarioId: benchmarkCase.scenario_id,
225
225
  reason,
226
226
  });
@@ -236,18 +236,18 @@ export async function evaluateMemoryGateBenchmark(input: {
236
236
  };
237
237
  }
238
238
 
239
- export async function evaluateWriterGuardianBenchmark(input: {
239
+ export async function evaluateWriteGuardianBenchmark(input: {
240
240
  scenarios: SharedScenario[];
241
- benchmarkCases: WriterGuardianBenchmarkCase[];
241
+ benchmarkCases: WriteGuardianBenchmarkCase[];
242
242
  executeCase: (scenario: SharedScenario) => Promise<{
243
243
  shouldWrite: boolean;
244
244
  toolTrace: string[];
245
245
  finalContent: string;
246
246
  }>;
247
247
  logger?: Logger;
248
- }): Promise<{ summary: BenchmarkSummary; results: WriterGuardianCaseResult[] }> {
248
+ }): Promise<{ summary: BenchmarkSummary; results: WriteGuardianCaseResult[] }> {
249
249
  const scenarioMap = buildScenarioMap(input.scenarios);
250
- const results: WriterGuardianCaseResult[] = [];
250
+ const results: WriteGuardianCaseResult[] = [];
251
251
  const logger = input.logger ?? createNoopLogger();
252
252
 
253
253
  for (const benchmarkCase of input.benchmarkCases) {
@@ -261,7 +261,7 @@ export async function evaluateWriterGuardianBenchmark(input: {
261
261
  }
262
262
 
263
263
  try {
264
- logger.info("EvalRunner", "Starting writer guardian case", {
264
+ logger.info("EvalRunner", "Starting write_guardian case", {
265
265
  scenarioId: benchmarkCase.scenario_id,
266
266
  targetFile: scenario.target_file,
267
267
  expectedShouldWrite: benchmarkCase.expected_should_write,
@@ -293,7 +293,7 @@ export async function evaluateWriterGuardianBenchmark(input: {
293
293
  actualToolTrace: actual.toolTrace,
294
294
  targetFile: scenario.target_file,
295
295
  });
296
- logger.info("EvalRunner", "Completed writer guardian case", {
296
+ logger.info("EvalRunner", "Completed write_guardian case", {
297
297
  scenarioId: benchmarkCase.scenario_id,
298
298
  pass,
299
299
  shouldWritePass,
@@ -315,7 +315,7 @@ export async function evaluateWriterGuardianBenchmark(input: {
315
315
  targetFile: scenario.target_file,
316
316
  error: reason,
317
317
  });
318
- logger.error("EvalRunner", "Writer guardian case failed", {
318
+ logger.error("EvalRunner", "write_guardian case failed", {
319
319
  scenarioId: benchmarkCase.scenario_id,
320
320
  targetFile: scenario.target_file,
321
321
  reason,
@@ -341,7 +341,7 @@ export async function runMemoryGateCase(input: {
341
341
  !input.scenario.current_user_message ||
342
342
  typeof input.scenario.current_agent_reply !== "string"
343
343
  ) {
344
- throw new Error(`Memory gate scenario is missing current turn fields: ${input.scenario.scenario_id}`);
344
+ throw new Error(`memory_gate scenario is missing current turn fields: ${input.scenario.scenario_id}`);
345
345
  }
346
346
 
347
347
  const analyzer = new MemoryGateAnalyzer(
@@ -360,7 +360,7 @@ export async function runMemoryGateCase(input: {
360
360
  });
361
361
  }
362
362
 
363
- export async function runWriterGuardianCase(input: {
363
+ export async function runWriteGuardianCase(input: {
364
364
  scenario: SharedScenario;
365
365
  llmService: LLMServiceContract;
366
366
  logger?: Logger;
@@ -373,7 +373,7 @@ export async function runWriterGuardianCase(input: {
373
373
  !scenario.candidate_fact ||
374
374
  typeof scenario.current_file_content !== "string"
375
375
  ) {
376
- throw new Error(`Writer guardian scenario is missing required fields: ${scenario.scenario_id}`);
376
+ throw new Error(`write_guardian scenario is missing required fields: ${scenario.scenario_id}`);
377
377
  }
378
378
 
379
379
  const workspaceDir = await mkdtemp(path.join(os.tmpdir(), "reflection-eval-"));
@@ -396,8 +396,8 @@ export async function runWriterGuardianCase(input: {
396
396
  };
397
397
 
398
398
  try {
399
- const curator = new FileCurator({ workspaceDir }, logger, recordingService);
400
- await curator.write({
399
+ const writeGuardian = new WriteGuardian({ workspaceDir }, logger, recordingService);
400
+ await writeGuardian.write({
401
401
  decision: scenario.gate_decision,
402
402
  reason: scenario.gate_reason,
403
403
  candidateFact: scenario.candidate_fact,
package/src/index.ts CHANGED
@@ -6,12 +6,12 @@ import {
6
6
  resolveWorkspaceDir,
7
7
  } from "./config.js";
8
8
  import { ConsolidationScheduler } from "./consolidation/index.js";
9
- import { FileCurator } from "./file-curator/index.js";
10
9
  import { LLMService as SharedLLMService } from "./llm/service.js";
11
10
  import { FileLogger } from "./logger.js";
12
11
  import {
13
12
  MemoryGateAnalyzer,
14
13
  } from "./memory-gate/index.js";
14
+ import { WriteGuardian } from "./write-guardian/index.js";
15
15
  import {
16
16
  handleBeforeMessageWrite,
17
17
  handleMessageReceived,
@@ -205,24 +205,24 @@ export default function activate(api: PluginAPI): void {
205
205
  : undefined;
206
206
 
207
207
  let memoryGate: MemoryGateAnalyzer | undefined;
208
- let fileCurator: FileCurator | undefined;
208
+ let writeGuardian: WriteGuardian | undefined;
209
209
 
210
210
  if (config.memoryGate.enabled && llmService) {
211
211
  memoryGate = new MemoryGateAnalyzer(llmService, logger);
212
- logger.info("PluginLifecycle", "MemoryGateAnalyzer initialized", {
212
+ logger.info("PluginLifecycle", "memory_gate initialized", {
213
213
  model: config.llm.model,
214
214
  });
215
215
  } else {
216
- logger.info("PluginLifecycle", "MemoryGateAnalyzer disabled");
216
+ logger.info("PluginLifecycle", "memory_gate disabled");
217
217
  }
218
218
 
219
219
  if (llmService && workspaceDir) {
220
- fileCurator = new FileCurator({ workspaceDir }, logger, llmService);
221
- logger.info("PluginLifecycle", "FileCurator initialized", {
220
+ writeGuardian = new WriteGuardian({ workspaceDir }, logger, llmService);
221
+ logger.info("PluginLifecycle", "write_guardian initialized", {
222
222
  workspaceDir,
223
223
  });
224
224
  } else if (llmService) {
225
- logger.warn("PluginLifecycle", "FileCurator disabled: workspace unavailable", {
225
+ logger.warn("PluginLifecycle", "write_guardian disabled: workspace unavailable", {
226
226
  source: workspaceResolution.source,
227
227
  reason: workspaceResolution.reason,
228
228
  });
@@ -266,7 +266,7 @@ export default function activate(api: PluginAPI): void {
266
266
  logger,
267
267
  context,
268
268
  memoryGate,
269
- fileCurator,
269
+ writeGuardian,
270
270
  config.memoryGate.windowSize
271
271
  );
272
272
  } else {
@@ -65,7 +65,7 @@ export class MemoryGateAnalyzer {
65
65
  async analyze(input: MemoryGateInput): Promise<MemoryGateOutput> {
66
66
  const prompt = this.buildPrompt(input);
67
67
 
68
- this.logger.debug("MemoryGateAnalyzer", "Starting memory gate analysis", {
68
+ this.logger.debug("memory_gate", "Starting memory_gate analysis", {
69
69
  recentMessages: input.recentMessages.length,
70
70
  hasCurrentUserMessage: input.currentUserMessage.trim() !== "",
71
71
  hasCurrentAgentReply: input.currentAgentReply.trim() !== "",
@@ -85,7 +85,7 @@ export class MemoryGateAnalyzer {
85
85
  });
86
86
  } catch (error) {
87
87
  const reason = `LLM request failed: ${getErrorMessage(error)}`;
88
- this.logger.error("MemoryGateAnalyzer", "Memory gate LLM request failed", {
88
+ this.logger.error("memory_gate", "memory_gate LLM request failed", {
89
89
  reason,
90
90
  });
91
91
  return {
@@ -96,7 +96,7 @@ export class MemoryGateAnalyzer {
96
96
 
97
97
  const output = this.normalizeOutput(response);
98
98
 
99
- this.logger.info("MemoryGateAnalyzer", "Memory gate decision generated", {
99
+ this.logger.info("memory_gate", "memory_gate decision generated", {
100
100
  decision: output.decision,
101
101
  reason: output.reason,
102
102
  hasCandidateFact: Boolean(output.candidateFact),
@@ -160,7 +160,7 @@ export class MemoryGateAnalyzer {
160
160
  if (!VALID_DECISIONS.has(decision)) {
161
161
  return {
162
162
  decision: "NO_WRITE",
163
- reason: "Invalid decision returned by memory gate",
163
+ reason: "Invalid decision returned by memory_gate",
164
164
  };
165
165
  }
166
166
 
@@ -1,4 +1,4 @@
1
- export const MEMORY_GATE_SYSTEM_PROMPT = `You are the assistant's Memory Gate.
1
+ export const MEMORY_GATE_SYSTEM_PROMPT = `You are the assistant's memory_gate.
2
2
 
3
3
  After each turn, output exactly one decision:
4
4
  - NO_WRITE
@@ -1,7 +1,7 @@
1
1
  import type { SessionBufferManager } from "./session-manager.js";
2
2
  import type { Logger, ReflectionMessage } from "./types.js";
3
3
  import { MemoryGateAnalyzer, type MemoryGateOutput } from "./memory-gate/index.js";
4
- import { FileCurator } from "./file-curator/index.js";
4
+ import { WriteGuardian } from "./write-guardian/index.js";
5
5
  import { ulid } from "ulid";
6
6
 
7
7
  const DEFAULT_MEMORY_GATE_WINDOW_SIZE = 10;
@@ -528,7 +528,7 @@ async function triggerMemoryGate(
528
528
  sessionKey: string,
529
529
  bufferManager: SessionBufferManager,
530
530
  memoryGate: MemoryGateAnalyzer,
531
- fileCurator: FileCurator | undefined,
531
+ writeGuardian: WriteGuardian | undefined,
532
532
  logger: Logger,
533
533
  memoryGateWindowSize: number
534
534
  ): Promise<void> {
@@ -557,22 +557,22 @@ async function triggerMemoryGate(
557
557
 
558
558
  logger.info(
559
559
  "MessageHandler",
560
- "Memory gate decision evaluated",
560
+ "memory_gate decision evaluated",
561
561
  {
562
562
  decision: output.decision,
563
563
  reason: output.reason,
564
564
  hasCandidateFact: Boolean(output.candidateFact),
565
565
  },
566
566
  sessionKey
567
- );
567
+ );
568
568
 
569
569
  if (isUpdateDecision(output.decision)) {
570
- if (fileCurator) {
571
- const writeResult = await fileCurator.write(output);
570
+ if (writeGuardian) {
571
+ const writeResult = await writeGuardian.write(output);
572
572
  if (writeResult.status === "written") {
573
573
  logger.info(
574
574
  "MessageHandler",
575
- "Writer guardian applied update",
575
+ "write_guardian applied update",
576
576
  {
577
577
  decision: output.decision,
578
578
  },
@@ -581,7 +581,7 @@ async function triggerMemoryGate(
581
581
  } else if (writeResult.status === "refused") {
582
582
  logger.info(
583
583
  "MessageHandler",
584
- "Writer guardian refused update",
584
+ "write_guardian refused update",
585
585
  {
586
586
  decision: output.decision,
587
587
  reason: writeResult.reason,
@@ -591,7 +591,7 @@ async function triggerMemoryGate(
591
591
  } else if (writeResult.status === "failed") {
592
592
  logger.error(
593
593
  "MessageHandler",
594
- "Writer guardian failed",
594
+ "write_guardian failed",
595
595
  {
596
596
  decision: output.decision,
597
597
  reason: writeResult.reason,
@@ -601,7 +601,7 @@ async function triggerMemoryGate(
601
601
  } else {
602
602
  logger.warn(
603
603
  "MessageHandler",
604
- "Writer guardian skipped update",
604
+ "write_guardian skipped update",
605
605
  {
606
606
  decision: output.decision,
607
607
  reason: writeResult.reason,
@@ -612,7 +612,7 @@ async function triggerMemoryGate(
612
612
  } else {
613
613
  logger.warn(
614
614
  "MessageHandler",
615
- "UPDATE_* skipped because FileCurator is unavailable",
615
+ "UPDATE_* skipped because write_guardian is unavailable",
616
616
  {
617
617
  decision: output.decision,
618
618
  },
@@ -624,7 +624,7 @@ async function triggerMemoryGate(
624
624
  const reason = error instanceof Error ? error.message : String(error);
625
625
  logger.error(
626
626
  "MessageHandler",
627
- "Memory gate trigger failed",
627
+ "memory_gate trigger failed",
628
628
  { reason },
629
629
  sessionKey
630
630
  );
@@ -697,7 +697,7 @@ function handleAgentMessage(
697
697
  hookName: string,
698
698
  hookContext?: unknown,
699
699
  memoryGate?: MemoryGateAnalyzer,
700
- fileCurator?: FileCurator,
700
+ writeGuardian?: WriteGuardian,
701
701
  memoryGateWindowSize = DEFAULT_MEMORY_GATE_WINDOW_SIZE
702
702
  ): void {
703
703
  const normalizedEvent = normalizeSentEvent(event, hookContext);
@@ -771,7 +771,7 @@ function handleAgentMessage(
771
771
  sessionKey,
772
772
  bufferManager,
773
773
  memoryGate,
774
- fileCurator,
774
+ writeGuardian,
775
775
  logger,
776
776
  memoryGateWindowSize
777
777
  )
@@ -785,7 +785,7 @@ export function handleMessageSent(
785
785
  logger: Logger,
786
786
  hookContext?: unknown,
787
787
  memoryGate?: MemoryGateAnalyzer,
788
- fileCurator?: FileCurator,
788
+ writeGuardian?: WriteGuardian,
789
789
  memoryGateWindowSize = DEFAULT_MEMORY_GATE_WINDOW_SIZE
790
790
  ): void {
791
791
  handleAgentMessage(
@@ -795,7 +795,7 @@ export function handleMessageSent(
795
795
  "message:sent",
796
796
  hookContext,
797
797
  memoryGate,
798
- fileCurator,
798
+ writeGuardian,
799
799
  memoryGateWindowSize
800
800
  );
801
801
  }
@@ -806,7 +806,7 @@ export function handleBeforeMessageWrite(
806
806
  logger: Logger,
807
807
  hookContext?: unknown,
808
808
  memoryGate?: MemoryGateAnalyzer,
809
- fileCurator?: FileCurator,
809
+ writeGuardian?: WriteGuardian,
810
810
  memoryGateWindowSize = DEFAULT_MEMORY_GATE_WINDOW_SIZE
811
811
  ): void {
812
812
  const normalizedEvent = normalizeBeforeMessageWriteEvent(event, hookContext);
@@ -833,7 +833,7 @@ export function handleBeforeMessageWrite(
833
833
  "before_message_write",
834
834
  hookContext,
835
835
  memoryGate,
836
- fileCurator,
836
+ writeGuardian,
837
837
  memoryGateWindowSize
838
838
  );
839
839
  }
@@ -16,16 +16,16 @@ type CuratedFilename =
16
16
  | "IDENTITY.md"
17
17
  | "TOOLS.md";
18
18
 
19
- interface FileCuratorConfig {
19
+ interface WriteGuardianConfig {
20
20
  workspaceDir: string;
21
21
  }
22
22
 
23
- export interface FileCuratorWriteResult {
23
+ export interface WriteGuardianWriteResult {
24
24
  status: "written" | "refused" | "failed" | "skipped";
25
25
  reason?: string;
26
26
  }
27
27
 
28
- const FILE_CURATOR_SYSTEM_PROMPT = `You are the assistant's Writer Guardian.
28
+ const WRITE_GUARDIAN_SYSTEM_PROMPT = `You are the assistant's write_guardian.
29
29
 
30
30
  Your job:
31
31
  - Decide whether the candidate fact should update the target memory file
@@ -89,25 +89,25 @@ function normalizeFileContent(content: string): string {
89
89
  return normalized.endsWith("\n") ? normalized : `${normalized}\n`;
90
90
  }
91
91
 
92
- export class FileCurator {
93
- private config: FileCuratorConfig;
92
+ export class WriteGuardian {
93
+ private config: WriteGuardianConfig;
94
94
  private logger: Logger;
95
95
  private llmService: LLMService;
96
96
 
97
- constructor(config: FileCuratorConfig, logger: Logger, llmService: LLMService) {
97
+ constructor(config: WriteGuardianConfig, logger: Logger, llmService: LLMService) {
98
98
  this.config = config;
99
99
  this.logger = logger;
100
100
  this.llmService = llmService;
101
101
  }
102
102
 
103
- async write(output: MemoryGateOutput): Promise<FileCuratorWriteResult> {
103
+ async write(output: MemoryGateOutput): Promise<WriteGuardianWriteResult> {
104
104
  if (!isUpdateDecision(output.decision)) {
105
105
  return { status: "skipped", reason: "not an update decision" };
106
106
  }
107
107
 
108
108
  const candidateFact = output.candidateFact?.trim();
109
109
  if (!candidateFact) {
110
- this.logger.warn("FileCurator", "Skip UPDATE_* without candidate fact", {
110
+ this.logger.warn("WriteGuardian", "Skip UPDATE_* without candidate fact", {
111
111
  decision: output.decision,
112
112
  reason: output.reason,
113
113
  });
@@ -121,9 +121,9 @@ export class FileCurator {
121
121
 
122
122
  try {
123
123
  const result = await this.llmService.runAgent({
124
- systemPrompt: FILE_CURATOR_SYSTEM_PROMPT,
124
+ systemPrompt: WRITE_GUARDIAN_SYSTEM_PROMPT,
125
125
  userPrompt: [
126
- `Memory Gate decision: ${output.decision}`,
126
+ `memory_gate decision: ${output.decision}`,
127
127
  `Reason from gate: ${output.reason}`,
128
128
  `Candidate fact: ${candidateFact}`,
129
129
  `Target file: ${targetFile}`,
@@ -135,8 +135,8 @@ export class FileCurator {
135
135
  });
136
136
 
137
137
  if (!result.didWrite) {
138
- const reason = result.finalMessage ?? "Writer guardian finished without write";
139
- this.logger.info("FileCurator", "Guardian refused update", {
138
+ const reason = result.finalMessage ?? "write_guardian finished without write";
139
+ this.logger.info("WriteGuardian", "write_guardian refused update", {
140
140
  decision: output.decision,
141
141
  filePath,
142
142
  reason,
@@ -144,14 +144,14 @@ export class FileCurator {
144
144
  return { status: "refused", reason };
145
145
  }
146
146
 
147
- this.logger.info("FileCurator", "Writer guardian rewrote target file", {
147
+ this.logger.info("WriteGuardian", "write_guardian rewrote target file", {
148
148
  decision: output.decision,
149
149
  filePath,
150
150
  });
151
151
  return { status: "written" };
152
152
  } catch (error) {
153
153
  const reason = getErrorMessage(error);
154
- this.logger.error("FileCurator", "Writer guardian execution failed", {
154
+ this.logger.error("WriteGuardian", "write_guardian execution failed", {
155
155
  decision: output.decision,
156
156
  filePath,
157
157
  reason,