@joshuaswarren/openclaw-engram 9.3.16 → 9.3.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/openclaw.plugin.json +160 -82
  2. package/package.json +3 -3
@@ -1,9 +1,23 @@
1
1
  {
2
2
  "id": "openclaw-engram",
3
3
  "name": "Remnic OpenClaw Plugin",
4
- "version": "9.3.16",
4
+ "version": "9.3.18",
5
5
  "kind": "memory",
6
- "description": "Local semantic memory for OpenClaw. Requires plugins.slots.memory set to this plugin id for hooks to fire.",
6
+ "description": "Local semantic memory for OpenClaw with bundled Remnic core runtime. Requires plugins.slots.memory set to this plugin id for hooks to fire.",
7
+ "setup": {
8
+ "providers": [
9
+ {
10
+ "id": "openai",
11
+ "authMethods": [
12
+ "api-key"
13
+ ],
14
+ "envVars": [
15
+ "OPENAI_API_KEY"
16
+ ]
17
+ }
18
+ ],
19
+ "requiresRuntime": false
20
+ },
7
21
  "providerAuthEnvVars": {
8
22
  "openai": [
9
23
  "OPENAI_API_KEY"
@@ -14,19 +28,39 @@
14
28
  "provider": "openai",
15
29
  "method": "api-key",
16
30
  "choiceId": "remnic-openai-api-key",
17
- "choiceLabel": "OpenAI API key for Remnic memory extraction",
18
- "choiceHint": "Remnic sends memory extraction, consolidation, and embedding requests to OpenAI or the configured OpenAI-compatible endpoint unless you route those tasks through OpenClaw gateway/local LLM settings.",
31
+ "choiceLabel": "Optional OpenAI API key for Remnic plugin-mode extraction",
32
+ "choiceHint": "Not needed when Remnic uses the OpenClaw gateway model source. Set only if you intentionally use plugin mode with OpenAI or an OpenAI-compatible endpoint.",
19
33
  "groupId": "remnic-memory",
20
34
  "groupLabel": "Remnic memory",
21
35
  "optionKey": "openaiApiKey",
22
36
  "cliFlag": "--openai-api-key",
23
37
  "cliOption": "--openai-api-key <key>",
24
- "cliDescription": "OpenAI API key used by Remnic memory extraction, consolidation, and embedding flows.",
38
+ "cliDescription": "Optional OpenAI API key used by Remnic plugin-mode extraction, consolidation, and embedding flows.",
25
39
  "onboardingScopes": [
26
40
  "text-inference"
27
41
  ]
28
42
  }
29
43
  ],
44
+ "securityDisclosure": {
45
+ "conversationAccess": "As a memory-slot plugin, Remnic hooks can observe conversation turns, tool-use metadata, LLM output metadata, and local memory files so they can be indexed, summarized, recalled, and exposed through memory tools.",
46
+ "modelProviderCredentials": {
47
+ "recommendedMode": "Use modelSource=gateway to route LLM-backed memory work through OpenClaw gateway agents instead of a Remnic-owned API key.",
48
+ "dynamicCredentialSources": [
49
+ "OpenClaw runtime auth resolver for configured model providers",
50
+ "~/.openclaw/agents/main/agent/models.json provider entries",
51
+ "provider-specific environment variables such as <PROVIDER>_API_KEY and <PROVIDER>_TOKEN"
52
+ ],
53
+ "providerModeData": [
54
+ "conversation excerpts",
55
+ "memory excerpts",
56
+ "summaries",
57
+ "embedding inputs",
58
+ "active-recall query inputs",
59
+ "benchmark and evaluation inputs"
60
+ ],
61
+ "externalProviderGuidance": "Do not set openaiApiKey or provider environment variables for Remnic if all LLM-backed memory operations should stay on the OpenClaw gateway path. When plugin/provider mode is configured, selected conversation and memory excerpts may be sent to the configured provider for extraction, consolidation, summarization, embeddings, active recall, or benchmark judging."
62
+ }
63
+ },
30
64
  "supports": {
31
65
  "memorySlot": true,
32
66
  "dreamingSlot": true,
@@ -36,6 +70,34 @@
36
70
  "beforeReset": true
37
71
  },
38
72
  "contracts": {
73
+ "commands": [
74
+ "remnic"
75
+ ],
76
+ "hooks": [
77
+ "before_prompt_build",
78
+ "before_agent_start",
79
+ "agent_end",
80
+ "before_compaction",
81
+ "after_compaction",
82
+ "before_reset",
83
+ "session_start",
84
+ "session_end",
85
+ "before_tool_call",
86
+ "after_tool_call",
87
+ "llm_output",
88
+ "subagent_spawning",
89
+ "subagent_ended",
90
+ "commands.list"
91
+ ],
92
+ "memoryCapabilities": [
93
+ "openclaw-remnic"
94
+ ],
95
+ "memoryPromptSections": [
96
+ "engram-memory"
97
+ ],
98
+ "services": [
99
+ "openclaw-remnic"
100
+ ],
39
101
  "tools": [
40
102
  "compounding_promote_candidate",
41
103
  "compounding_weekly_synthesize",
@@ -93,11 +155,11 @@
93
155
  "properties": {
94
156
  "openaiApiKey": {
95
157
  "type": "string",
96
- "description": "OpenAI API key (or set OPENAI_API_KEY env var). Remnic may send conversation and memory content to OpenAI or the configured OpenAI-compatible endpoint for extraction, consolidation, summarization, and embeddings."
158
+ "description": "Optional OpenAI API key for plugin mode (or set OPENAI_API_KEY env var). Ignored by default gateway-mode installs; in plugin mode, Remnic may send conversation and memory content to OpenAI or the configured OpenAI-compatible endpoint for extraction, consolidation, summarization, and embeddings."
97
159
  },
98
160
  "openaiBaseUrl": {
99
161
  "type": "string",
100
- "description": "Override the OpenAI API base URL for OpenAI-compatible providers (Scryr, Together, OpenRouter, etc.)"
162
+ "description": "Set the OpenAI API base URL for OpenAI-compatible providers (Scryr, Together, OpenRouter, etc.)"
101
163
  },
102
164
  "model": {
103
165
  "type": "string",
@@ -180,7 +242,7 @@
180
242
  "maxMemoryTokens": {
181
243
  "type": "number",
182
244
  "default": 2000,
183
- "description": "Max tokens injected per turn"
245
+ "description": "Max memory-context tokens per turn"
184
246
  },
185
247
  "memoryOsPreset": {
186
248
  "type": "string",
@@ -191,7 +253,7 @@
191
253
  "research-max",
192
254
  "local-llm-heavy"
193
255
  ],
194
- "description": "Optional named preset that seeds Engram's advanced config surface before explicit per-setting overrides are applied. `research` is accepted as a backward-compatible alias for `research-max`."
256
+ "description": "Optional named preset that seeds Engram's advanced config surface before explicit per-setting values are applied. `research` is accepted as a backward-compatible alias for `research-max`."
195
257
  },
196
258
  "qmdEnabled": {
197
259
  "type": "boolean",
@@ -283,7 +345,7 @@
283
345
  },
284
346
  "entitySchemas": {
285
347
  "type": "object",
286
- "description": "Optional per-entity-type structured section schema overrides.",
348
+ "description": "Optional per-entity-type structured section schema customizations.",
287
349
  "additionalProperties": {
288
350
  "type": "object",
289
351
  "additionalProperties": false,
@@ -624,7 +686,7 @@
624
686
  "minimum": 1,
625
687
  "maximum": 10,
626
688
  "default": 2,
627
- "description": "Maximum procedure memories to inject on task-initiation recall."
689
+ "description": "Maximum procedure memories to add on task-initiation recall."
628
690
  },
629
691
  "proceduralMiningCronAutoRegister": {
630
692
  "type": "boolean",
@@ -712,7 +774,7 @@
712
774
  "recallTranscriptsEnabled": {
713
775
  "type": "boolean",
714
776
  "default": false,
715
- "description": "Write JSONL recall audit transcripts for runtime-surface injections."
777
+ "description": "Write JSONL recall audit transcripts for runtime recall-context assembly."
716
778
  },
717
779
  "recallTranscriptRetentionDays": {
718
780
  "type": "integer",
@@ -729,7 +791,7 @@
729
791
  "activeRecallEnabled": {
730
792
  "type": "boolean",
731
793
  "default": false,
732
- "description": "Enable the OpenClaw active-recall prompt surface."
794
+ "description": "Enable the OpenClaw active-recall context surface."
733
795
  },
734
796
  "activeRecallAgents": {
735
797
  "type": "array",
@@ -776,15 +838,15 @@
776
838
  "preference-only"
777
839
  ],
778
840
  "default": "balanced",
779
- "description": "Prompt assembly style for the active-recall surface."
841
+ "description": "Context assembly style for the active-recall surface."
780
842
  },
781
- "activeRecallPromptOverride": {
843
+ "activeRecallCustomInstruction": {
782
844
  "type": "string",
783
- "description": "Optional full prompt override for the active-recall surface."
845
+ "description": "Optional custom guidance for the active-recall builder."
784
846
  },
785
847
  "activeRecallPromptAppend": {
786
848
  "type": "string",
787
- "description": "Optional prompt suffix appended to the active-recall system prompt."
849
+ "description": "Optional additional guidance for the active-recall builder."
788
850
  },
789
851
  "activeRecallMaxSummaryChars": {
790
852
  "type": "integer",
@@ -850,7 +912,7 @@
850
912
  },
851
913
  "activeRecallModel": {
852
914
  "type": "string",
853
- "description": "Optional model override for active recall."
915
+ "description": "Optional model selection for active recall."
854
916
  },
855
917
  "activeRecallModelFallbackPolicy": {
856
918
  "type": "string",
@@ -1166,7 +1228,7 @@
1166
1228
  "memoryExtensionsEnabled": {
1167
1229
  "type": "boolean",
1168
1230
  "default": true,
1169
- "description": "Whether third-party memory extensions are discovered and injected into consolidation prompts."
1231
+ "description": "Whether third-party memory extensions are discovered and included in consolidation instructions."
1170
1232
  },
1171
1233
  "memoryExtensionsRoot": {
1172
1234
  "type": "string",
@@ -1250,12 +1312,12 @@
1250
1312
  "full"
1251
1313
  ],
1252
1314
  "default": "recovery_only",
1253
- "description": "Controls when identity continuity context is injected into recall assembly"
1315
+ "description": "Controls when identity continuity context is added to recall assembly"
1254
1316
  },
1255
1317
  "identityMaxInjectChars": {
1256
1318
  "type": "number",
1257
1319
  "default": 1200,
1258
- "description": "Maximum characters allowed for identity continuity context injection"
1320
+ "description": "Maximum characters allowed for identity continuity context"
1259
1321
  },
1260
1322
  "continuityIncidentLoggingEnabled": {
1261
1323
  "type": "boolean",
@@ -1320,7 +1382,7 @@
1320
1382
  "injectQuestions": {
1321
1383
  "type": "boolean",
1322
1384
  "default": false,
1323
- "description": "Inject the most relevant open question into the system prompt"
1385
+ "description": "Include the most relevant open question in generated memory context"
1324
1386
  },
1325
1387
  "commitmentDecayDays": {
1326
1388
  "type": "number",
@@ -1456,12 +1518,12 @@
1456
1518
  "maxResults": {
1457
1519
  "type": "number",
1458
1520
  "default": 4,
1459
- "description": "Maximum native knowledge chunks to inject into recall."
1521
+ "description": "Maximum native knowledge chunks to add to recall."
1460
1522
  },
1461
1523
  "maxChars": {
1462
1524
  "type": "number",
1463
1525
  "default": 2400,
1464
- "description": "Maximum total characters to inject from native knowledge recall."
1526
+ "description": "Maximum total characters to add from native knowledge recall."
1465
1527
  },
1466
1528
  "stateDir": {
1467
1529
  "type": "string",
@@ -1735,7 +1797,7 @@
1735
1797
  "recordEmptyRecallImpressions": {
1736
1798
  "type": "boolean",
1737
1799
  "default": false,
1738
- "description": "Record recall impressions with empty memoryIds when no memory context is injected."
1800
+ "description": "Record recall impressions with empty memoryIds when no memory context is added."
1739
1801
  },
1740
1802
  "recallPlannerEnabled": {
1741
1803
  "type": "boolean",
@@ -1810,7 +1872,7 @@
1810
1872
  "verbatimArtifactsMaxRecall": {
1811
1873
  "type": "number",
1812
1874
  "default": 5,
1813
- "description": "Maximum artifact anchors injected per recall."
1875
+ "description": "Maximum artifact anchors added per recall."
1814
1876
  },
1815
1877
  "verbatimArtifactCategories": {
1816
1878
  "type": "array",
@@ -1876,7 +1938,7 @@
1876
1938
  "boxRecallDays": {
1877
1939
  "type": "number",
1878
1940
  "default": 3,
1879
- "description": "Number of recent days of boxes to inject during recall."
1941
+ "description": "Number of recent days of boxes to add during recall."
1880
1942
  },
1881
1943
  "episodeNoteModeEnabled": {
1882
1944
  "type": "boolean",
@@ -1941,7 +2003,7 @@
1941
2003
  "tagRecallMaxMatches": {
1942
2004
  "type": "number",
1943
2005
  "default": 10,
1944
- "description": "Maximum number of tag-matched memories injected per recall."
2006
+ "description": "Maximum number of tag-matched memories added per recall."
1945
2007
  },
1946
2008
  "multiGraphMemoryEnabled": {
1947
2009
  "type": "boolean",
@@ -1971,7 +2033,7 @@
1971
2033
  "graphRecallShadowEnabled": {
1972
2034
  "type": "boolean",
1973
2035
  "default": false,
1974
- "description": "Run graph recall in shadow mode evaluate but do not inject results."
2036
+ "description": "Run graph recall in shadow mode - evaluate but do not add results."
1975
2037
  },
1976
2038
  "graphRecallSnapshotEnabled": {
1977
2039
  "type": "boolean",
@@ -2085,12 +2147,12 @@
2085
2147
  "graphRecallEntityHintMax": {
2086
2148
  "type": "number",
2087
2149
  "default": 3,
2088
- "description": "Maximum number of entity hints injected per graph recall result."
2150
+ "description": "Maximum number of entity hints added per graph recall result."
2089
2151
  },
2090
2152
  "graphRecallEntityHintMaxChars": {
2091
2153
  "type": "number",
2092
2154
  "default": 200,
2093
- "description": "Maximum characters per entity hint injected during graph recall."
2155
+ "description": "Maximum characters per entity hint added during graph recall."
2094
2156
  },
2095
2157
  "graphRecallSnapshotDir": {
2096
2158
  "type": "string",
@@ -2119,7 +2181,7 @@
2119
2181
  "graphAssistShadowEvalEnabled": {
2120
2182
  "type": "boolean",
2121
2183
  "default": false,
2122
- "description": "In full mode, compute graph assist for comparison telemetry and snapshots but keep injected recall output baseline-identical."
2184
+ "description": "In full mode, compute graph assist for comparison telemetry and snapshots but keep recall output baseline-identical."
2123
2185
  },
2124
2186
  "graphAssistMinSeedResults": {
2125
2187
  "type": "number",
@@ -2197,12 +2259,12 @@
2197
2259
  "recallConfidenceGateEnabled": {
2198
2260
  "type": "boolean",
2199
2261
  "default": false,
2200
- "description": "Synapse-inspired confidence gate: skip memory injection when top recall score is below threshold"
2262
+ "description": "Synapse-inspired confidence gate: skip memory context when top recall score is below threshold"
2201
2263
  },
2202
2264
  "recallConfidenceGateThreshold": {
2203
2265
  "type": "number",
2204
2266
  "default": 0.12,
2205
- "description": "Minimum top recall score to inject memories (0-1). Below this, memories are rejected as too uncertain"
2267
+ "description": "Minimum top recall score to include memories (0-1). Below this, memories are rejected as too uncertain"
2206
2268
  },
2207
2269
  "causalRuleExtractionEnabled": {
2208
2270
  "type": "boolean",
@@ -2808,12 +2870,12 @@
2808
2870
  "maxTranscriptTurns": {
2809
2871
  "type": "number",
2810
2872
  "default": 50,
2811
- "description": "Maximum transcript turns to inject"
2873
+ "description": "Maximum transcript turns to include"
2812
2874
  },
2813
2875
  "maxTranscriptTokens": {
2814
2876
  "type": "number",
2815
2877
  "default": 1000,
2816
- "description": "Maximum tokens for transcript injection"
2878
+ "description": "Maximum tokens for transcript context"
2817
2879
  },
2818
2880
  "checkpointEnabled": {
2819
2881
  "type": "boolean",
@@ -2828,7 +2890,7 @@
2828
2890
  "compactionResetEnabled": {
2829
2891
  "type": "boolean",
2830
2892
  "default": false,
2831
- "description": "Trigger session reset after compaction with BOOT.md injection (requires OC fork with api.resetSession)"
2893
+ "description": "Trigger session reset after compaction with BOOT.md recovery context (requires OC fork with api.resetSession)"
2832
2894
  },
2833
2895
  "hourlySummariesEnabled": {
2834
2896
  "type": "boolean",
@@ -2848,7 +2910,7 @@
2848
2910
  "maxSummaryCount": {
2849
2911
  "type": "number",
2850
2912
  "default": 6,
2851
- "description": "Maximum number of summaries to inject"
2913
+ "description": "Maximum number of summaries to include"
2852
2914
  },
2853
2915
  "summaryModel": {
2854
2916
  "type": "string",
@@ -2860,8 +2922,8 @@
2860
2922
  "plugin",
2861
2923
  "gateway"
2862
2924
  ],
2863
- "default": "plugin",
2864
- "description": "LLM source: 'plugin' uses Engram's own openai/localLlm config; 'gateway' delegates to a gateway agent's model chain (agents.list[])."
2925
+ "default": "gateway",
2926
+ "description": "LLM source: 'gateway' delegates to a gateway agent's model chain (agents.list[]); 'plugin' uses Engram's own openai/localLlm config."
2865
2927
  },
2866
2928
  "gatewayAgentId": {
2867
2929
  "type": "string",
@@ -2939,7 +3001,7 @@
2939
3001
  "traceRecallContent": {
2940
3002
  "type": "boolean",
2941
3003
  "default": false,
2942
- "description": "If true, include the full recalled memory text in RecallTraceEvent.recalledContent emitted to __openclawEngramTrace subscribers (e.g. Langfuse). Disabled by default only enable when you want external trace collectors to capture injected memory context."
3004
+ "description": "If true, include the full recalled memory text in RecallTraceEvent.recalledContent emitted to __openclawEngramTrace subscribers (e.g. Langfuse). Disabled by default - only enable when you want external trace collectors to capture memory context."
2943
3005
  },
2944
3006
  "profilingEnabled": {
2945
3007
  "type": "boolean",
@@ -3156,7 +3218,7 @@
3156
3218
  "localLlmDisableThinking": {
3157
3219
  "type": "boolean",
3158
3220
  "default": true,
3159
- "description": "When true (default), request chain-of-thought / thinking-mode suppression on the main local LLM (issue #548). The `chat_template_kwargs: { enable_thinking: false }` field is only injected when the detected backend is known to support it (LM Studio, vLLM); strict OpenAI-compat backends fail open to avoid the 400-cooldown path. Structured-output tasks like extraction and consolidation gain nothing from reasoning tokens and thinking-capable models (Qwen 3.5, Gemma 4, DeepSeek) often blow the 60s timeout before emitting content. Set to false to restore thinking for narrative tasks. The fast-tier client always disables thinking and is not affected by this flag."
3221
+ "description": "When true (default), request chain-of-thought / thinking-mode suppression on the main local LLM (issue #548). The `chat_template_kwargs: { enable_thinking: false }` field is sent only when the detected backend is known to support it (LM Studio, vLLM); strict OpenAI-compat backends fail open to avoid the 400-cooldown path. Structured-output tasks like extraction and consolidation gain nothing from reasoning tokens and thinking-capable models (Qwen 3.5, Gemma 4, DeepSeek) often blow the 60s timeout before emitting content. Set to false to restore thinking for narrative tasks. The fast-tier client always disables thinking and is not affected by this flag."
3160
3222
  },
3161
3223
  "hourlySummaryCronAutoRegister": {
3162
3224
  "type": "boolean",
@@ -3270,12 +3332,12 @@
3270
3332
  "conversationRecallTopK": {
3271
3333
  "type": "number",
3272
3334
  "default": 3,
3273
- "description": "Top-K conversation chunks to inject."
3335
+ "description": "Top-K conversation chunks to include."
3274
3336
  },
3275
3337
  "conversationRecallMaxChars": {
3276
3338
  "type": "number",
3277
3339
  "default": 2500,
3278
- "description": "Max characters of semantic conversation recall to inject."
3340
+ "description": "Max characters of semantic conversation recall to include."
3279
3341
  },
3280
3342
  "conversationRecallTimeoutMs": {
3281
3343
  "type": "number",
@@ -3324,7 +3386,7 @@
3324
3386
  "objectiveStateRecallEnabled": {
3325
3387
  "type": "boolean",
3326
3388
  "default": false,
3327
- "description": "Inject prompt-relevant objective-state snapshots into recall context."
3389
+ "description": "Add recall-relevant objective-state snapshots to recall context."
3328
3390
  },
3329
3391
  "objectiveStateStoreDir": {
3330
3392
  "type": "string",
@@ -3342,7 +3404,7 @@
3342
3404
  "causalTrajectoryRecallEnabled": {
3343
3405
  "type": "boolean",
3344
3406
  "default": false,
3345
- "description": "Inject prompt-relevant causal trajectories into recall context."
3407
+ "description": "Add recall-relevant causal trajectories to recall context."
3346
3408
  },
3347
3409
  "trustZonesEnabled": {
3348
3410
  "type": "boolean",
@@ -3361,7 +3423,7 @@
3361
3423
  "trustZoneRecallEnabled": {
3362
3424
  "type": "boolean",
3363
3425
  "default": false,
3364
- "description": "Inject prompt-relevant working and trusted trust-zone records into recall context."
3426
+ "description": "Add recall-relevant working and trusted trust-zone records to recall context."
3365
3427
  },
3366
3428
  "memoryPoisoningDefenseEnabled": {
3367
3429
  "type": "boolean",
@@ -3376,7 +3438,7 @@
3376
3438
  "harmonicRetrievalEnabled": {
3377
3439
  "type": "boolean",
3378
3440
  "default": false,
3379
- "description": "Enable harmonic retrieval blending over abstraction nodes and cue anchors, including recall-section injection and harmonic-search diagnostics."
3441
+ "description": "Enable harmonic retrieval blending over abstraction nodes and cue anchors, including recall-section assembly and harmonic-search diagnostics."
3380
3442
  },
3381
3443
  "abstractionAnchorsEnabled": {
3382
3444
  "type": "boolean",
@@ -3390,7 +3452,7 @@
3390
3452
  "verifiedRecallEnabled": {
3391
3453
  "type": "boolean",
3392
3454
  "default": false,
3393
- "description": "Inject prompt-relevant memory boxes only when their cited source memories verify as non-archived episodes."
3455
+ "description": "Add recall-relevant memory boxes only when their cited source memories verify as non-archived episodes."
3394
3456
  },
3395
3457
  "semanticRulePromotionEnabled": {
3396
3458
  "type": "boolean",
@@ -3447,7 +3509,7 @@
3447
3509
  "operatorAwareConsolidationEnabled": {
3448
3510
  "type": "boolean",
3449
3511
  "default": false,
3450
- "description": "Opt in to operator-aware consolidation prompts so the LLM returns structured {operator, output} JSON and SPLIT/MERGE/UPDATE is recorded on derived_via. When disabled (default), derived_via still populates via the cluster-shape heuristic."
3512
+ "description": "Opt in to operator-aware consolidation instructions so the LLM returns structured {operator, output} JSON and SPLIT/MERGE/UPDATE is recorded on derived_via. When disabled (default), derived_via still populates via the cluster-shape heuristic."
3451
3513
  },
3452
3514
  "peerProfileReasonerEnabled": {
3453
3515
  "type": "boolean",
@@ -3474,13 +3536,13 @@
3474
3536
  "peerProfileRecallEnabled": {
3475
3537
  "type": "boolean",
3476
3538
  "default": false,
3477
- "description": "When true, inject the active peer's profile fields into the recall context as a '## Peer Profile' section (issue #679 PR 3/5). Requires the session's peer ID to be registered before recall. Default off (opt-in)."
3539
+ "description": "When true, add the active peer's profile fields to the recall context as a '## Peer Profile' section (issue #679 PR 3/5). Requires the session's peer ID to be registered before recall. Default off (opt-in)."
3478
3540
  },
3479
3541
  "peerProfileRecallMaxFields": {
3480
3542
  "type": "number",
3481
3543
  "default": 5,
3482
3544
  "minimum": 0,
3483
- "description": "Maximum number of peer profile fields to inject per recall call. Only the most-recently-updated N fields are included. Set to 0 to disable field injection even when peerProfileRecallEnabled is true."
3545
+ "description": "Maximum number of peer profile fields to add per recall call. Only the most-recently-updated N fields are included. Set to 0 to disable field inclusion even when peerProfileRecallEnabled is true."
3484
3546
  },
3485
3547
  "creationMemoryEnabled": {
3486
3548
  "type": "boolean",
@@ -3528,7 +3590,7 @@
3528
3590
  "workProductRecallEnabled": {
3529
3591
  "type": "boolean",
3530
3592
  "default": false,
3531
- "description": "Inject prompt-relevant work-product ledger entries into recall context and expose artifact-recovery search tooling."
3593
+ "description": "Add recall-relevant work-product ledger entries to recall context and expose artifact-recovery search tooling."
3532
3594
  },
3533
3595
  "workProductLedgerDir": {
3534
3596
  "type": "string",
@@ -3768,7 +3830,7 @@
3768
3830
  "sharedContextEnabled": {
3769
3831
  "type": "boolean",
3770
3832
  "default": false,
3771
- "description": "Enable shared-context injection + tools (v4.0). Default off."
3833
+ "description": "Enable shared-context assembly and tools (v4.0). Default off."
3772
3834
  },
3773
3835
  "sharedContextDir": {
3774
3836
  "type": "string",
@@ -3777,7 +3839,7 @@
3777
3839
  "sharedContextMaxInjectChars": {
3778
3840
  "type": "number",
3779
3841
  "default": 4000,
3780
- "description": "Max characters of shared-context to inject into each prompt."
3842
+ "description": "Max characters of shared-context to include in each recall context."
3781
3843
  },
3782
3844
  "crossSignalsSemanticEnabled": {
3783
3845
  "type": "boolean",
@@ -4050,17 +4112,17 @@
4050
4112
  "calibrationEnabled": {
4051
4113
  "type": "boolean",
4052
4114
  "default": false,
4053
- "description": "Enable recall calibration rules injection (v9.0, default off)."
4115
+ "description": "Enable recall calibration rules (v9.0, default off)."
4054
4116
  },
4055
4117
  "calibrationMaxRulesPerRecall": {
4056
4118
  "type": "number",
4057
4119
  "default": 10,
4058
- "description": "Maximum number of calibration rules to inject per recall pass."
4120
+ "description": "Maximum number of calibration rules to include per recall pass."
4059
4121
  },
4060
4122
  "calibrationMaxChars": {
4061
4123
  "type": "number",
4062
4124
  "default": 1200,
4063
- "description": "Maximum characters of calibration content to inject per recall pass."
4125
+ "description": "Maximum characters of calibration content to include per recall pass."
4064
4126
  },
4065
4127
  "lancedbEnabled": {
4066
4128
  "type": "boolean",
@@ -4225,7 +4287,7 @@
4225
4287
  },
4226
4288
  "recallBudgetChars": {
4227
4289
  "type": "number",
4228
- "description": "Hard character cap for total recall injection. Defaults to maxMemoryTokens * 4."
4290
+ "description": "Hard character cap for total recall context. Defaults to maxMemoryTokens * 4."
4229
4291
  },
4230
4292
  "recallOuterTimeoutMs": {
4231
4293
  "type": "number",
@@ -4245,7 +4307,7 @@
4245
4307
  "recallMmrEnabled": {
4246
4308
  "type": "boolean",
4247
4309
  "default": true,
4248
- "description": "Apply Maximal Marginal Relevance to the final recall selection per-section so one redundant cluster cannot dominate the injected context."
4310
+ "description": "Apply Maximal Marginal Relevance to the final recall selection per-section so one redundant cluster cannot dominate the included context."
4249
4311
  },
4250
4312
  "recallMmrLambda": {
4251
4313
  "type": "number",
@@ -4386,7 +4448,7 @@
4386
4448
  "messagePartsRecallMaxResults": {
4387
4449
  "type": "number",
4388
4450
  "default": 6,
4389
- "description": "Maximum structured message-part matches to inject into recall when messagePartsEnabled is true."
4451
+ "description": "Maximum structured message-part matches to include in recall when messagePartsEnabled is true."
4390
4452
  },
4391
4453
  "ircEnabled": {
4392
4454
  "type": "boolean",
@@ -4396,7 +4458,7 @@
4396
4458
  "ircMaxPreferences": {
4397
4459
  "type": "number",
4398
4460
  "default": 20,
4399
- "description": "Maximum number of preferences to include in IRC rule injection."
4461
+ "description": "Maximum number of preferences to include in IRC rules."
4400
4462
  },
4401
4463
  "ircIncludeCorrections": {
4402
4464
  "type": "boolean",
@@ -4451,7 +4513,7 @@
4451
4513
  "cmcRetrievalEnabled": {
4452
4514
  "type": "boolean",
4453
4515
  "default": false,
4454
- "description": "Enable CMC retrieval injection at recall time (default off)."
4516
+ "description": "Enable CMC retrieval at recall time (default off)."
4455
4517
  },
4456
4518
  "cmcRetrievalMaxDepth": {
4457
4519
  "type": "number",
@@ -4461,7 +4523,7 @@
4461
4523
  "cmcRetrievalMaxChars": {
4462
4524
  "type": "number",
4463
4525
  "default": 800,
4464
- "description": "Maximum characters of CMC content injected per recall pass."
4526
+ "description": "Maximum characters of CMC content included per recall pass."
4465
4527
  },
4466
4528
  "cmcRetrievalCounterfactualBoost": {
4467
4529
  "type": "number",
@@ -4596,7 +4658,7 @@
4596
4658
  "label": "OpenAI API Key",
4597
4659
  "sensitive": true,
4598
4660
  "placeholder": "sk-...",
4599
- "help": "API key for OpenAI (or use ${OPENAI_API_KEY})"
4661
+ "help": "Optional API key for plugin mode only. Not needed when Model Source is gateway."
4600
4662
  },
4601
4663
  "openaiBaseUrl": {
4602
4664
  "label": "OpenAI Base URL",
@@ -4737,13 +4799,13 @@
4737
4799
  "help": "Enable identity continuity workflows (anchor, incidents, audits)"
4738
4800
  },
4739
4801
  "identityInjectionMode": {
4740
- "label": "Identity Injection Mode",
4802
+ "label": "Identity Context Mode",
4741
4803
  "advanced": true,
4742
4804
  "placeholder": "recovery_only",
4743
- "help": "When to inject identity continuity context: recovery_only, minimal, or full"
4805
+ "help": "When to add identity continuity context: recovery_only, minimal, or full"
4744
4806
  },
4745
4807
  "identityMaxInjectChars": {
4746
- "label": "Identity Max Inject Chars",
4808
+ "label": "Identity Max Context Chars",
4747
4809
  "advanced": true,
4748
4810
  "placeholder": "1200"
4749
4811
  },
@@ -4774,7 +4836,7 @@
4774
4836
  },
4775
4837
  "injectQuestions": {
4776
4838
  "label": "Inject Questions",
4777
- "help": "Include the most relevant open question in the system prompt"
4839
+ "help": "Include the most relevant open question in generated memory context"
4778
4840
  },
4779
4841
  "commitmentDecayDays": {
4780
4842
  "label": "Commitment Decay (days)",
@@ -5020,7 +5082,7 @@
5020
5082
  },
5021
5083
  "compactionResetEnabled": {
5022
5084
  "label": "Compaction Reset",
5023
- "help": "Reset session after compaction and inject BOOT.md for recovery",
5085
+ "help": "Reset session after compaction and add BOOT.md recovery context",
5024
5086
  "advanced": true
5025
5087
  },
5026
5088
  "hourlySummariesEnabled": {
@@ -5044,7 +5106,7 @@
5044
5106
  },
5045
5107
  "modelSource": {
5046
5108
  "label": "Model Source",
5047
- "help": "Route LLM calls through the gateway's agent model chain instead of Engram's own config. When set to 'gateway', localLlm and openai settings are ignored."
5109
+ "help": "Route LLM calls through the gateway's agent model chain instead of Engram's own config. Default for OpenClaw installs. When set to 'gateway', localLlm and openai settings are ignored."
5048
5110
  },
5049
5111
  "gatewayAgentId": {
5050
5112
  "label": "Gateway Agent ID",
@@ -5182,7 +5244,7 @@
5182
5244
  "objectiveStateRecallEnabled": {
5183
5245
  "label": "Objective-State Recall",
5184
5246
  "advanced": true,
5185
- "help": "Inject prompt-relevant objective-state snapshots into recall context."
5247
+ "help": "Add recall-relevant objective-state snapshots to recall context."
5186
5248
  },
5187
5249
  "objectiveStateStoreDir": {
5188
5250
  "label": "Objective-State Store Directory",
@@ -5203,7 +5265,7 @@
5203
5265
  "causalTrajectoryRecallEnabled": {
5204
5266
  "label": "Causal Trajectory Recall",
5205
5267
  "advanced": true,
5206
- "help": "Inject prompt-relevant causal trajectories into recall context."
5268
+ "help": "Add recall-relevant causal trajectories to recall context."
5207
5269
  },
5208
5270
  "trustZonesEnabled": {
5209
5271
  "label": "Trust Zones",
@@ -5223,7 +5285,7 @@
5223
5285
  "trustZoneRecallEnabled": {
5224
5286
  "label": "Trust-Zone Recall",
5225
5287
  "advanced": true,
5226
- "help": "Inject prompt-relevant working and trusted trust-zone records into recall context."
5288
+ "help": "Add recall-relevant working and trusted trust-zone records to recall context."
5227
5289
  },
5228
5290
  "memoryPoisoningDefenseEnabled": {
5229
5291
  "label": "Memory Poisoning Defense",
@@ -5237,7 +5299,7 @@
5237
5299
  },
5238
5300
  "harmonicRetrievalEnabled": {
5239
5301
  "label": "Harmonic Retrieval",
5240
- "help": "Enable harmonic retrieval blending over abstraction nodes and cue anchors, including recall-section injection and harmonic-search diagnostics."
5302
+ "help": "Enable harmonic retrieval blending over abstraction nodes and cue anchors, including recall-section assembly and harmonic-search diagnostics."
5241
5303
  },
5242
5304
  "abstractionAnchorsEnabled": {
5243
5305
  "label": "Abstraction Anchors",
@@ -5252,7 +5314,7 @@
5252
5314
  },
5253
5315
  "verifiedRecallEnabled": {
5254
5316
  "label": "Verified Recall",
5255
- "help": "Inject prompt-relevant memory boxes only when their cited source memories verify as non-archived episodes."
5317
+ "help": "Add recall-relevant memory boxes only when their cited source memories verify as non-archived episodes."
5256
5318
  },
5257
5319
  "semanticRulePromotionEnabled": {
5258
5320
  "label": "Semantic Rule Promotion",
@@ -5300,9 +5362,9 @@
5300
5362
  "help": "Max memories to consolidate per run to limit LLM cost."
5301
5363
  },
5302
5364
  "operatorAwareConsolidationEnabled": {
5303
- "label": "Operator-Aware Consolidation Prompt",
5365
+ "label": "Operator-Aware Consolidation",
5304
5366
  "advanced": true,
5305
- "help": "Opt in to operator-aware consolidation prompts (default off). When enabled, the LLM returns structured {operator, output} JSON and we record SPLIT/MERGE/UPDATE on derived_via. When disabled (default), derived_via still populates via the cluster-shape heuristic."
5367
+ "help": "Opt in to operator-aware consolidation instructions (default off). When enabled, the LLM returns structured {operator, output} JSON and we record SPLIT/MERGE/UPDATE on derived_via. When disabled (default), derived_via still populates via the cluster-shape heuristic."
5306
5368
  },
5307
5369
  "peerProfileReasonerEnabled": {
5308
5370
  "label": "Peer Profile Reasoner",
@@ -5325,14 +5387,14 @@
5325
5387
  "help": "Hard cap on the total number of profile fields the reasoner will apply across all peers in a single run. Set to 0 to disable applying any fields."
5326
5388
  },
5327
5389
  "peerProfileRecallEnabled": {
5328
- "label": "Peer Profile Recall Injection",
5390
+ "label": "Peer Profile Recall",
5329
5391
  "advanced": true,
5330
- "help": "When enabled, injects the active peer's profile fields into recall context as a '## Peer Profile' section. Requires the session peer ID to be registered. Default off."
5392
+ "help": "When enabled, adds the active peer's profile fields to recall context as a '## Peer Profile' section. Requires the session peer ID to be registered. Default off."
5331
5393
  },
5332
5394
  "peerProfileRecallMaxFields": {
5333
5395
  "label": "Peer Profile Recall Max Fields",
5334
5396
  "advanced": true,
5335
- "help": "Maximum number of peer profile fields to inject per recall. Only the most-recently-updated N fields are included. Set to 0 to disable injection."
5397
+ "help": "Maximum number of peer profile fields to add per recall. Only the most-recently-updated N fields are included. Set to 0 to disable this feature."
5336
5398
  },
5337
5399
  "creationMemoryEnabled": {
5338
5400
  "label": "Creation Memory",
@@ -5384,7 +5446,7 @@
5384
5446
  "workProductRecallEnabled": {
5385
5447
  "label": "Work-Product Recall",
5386
5448
  "advanced": true,
5387
- "help": "Inject prompt-relevant work-product ledger entries into recall context and expose artifact-recovery search tooling."
5449
+ "help": "Add recall-relevant work-product ledger entries to recall context and expose artifact-recovery search tooling."
5388
5450
  },
5389
5451
  "workProductLedgerDir": {
5390
5452
  "label": "Work-Product Ledger Directory",
@@ -5650,5 +5712,21 @@
5650
5712
  "advanced": true,
5651
5713
  "help": "Promote stored reasoning_trace memories to the top of recall results when the incoming query reads like a problem-solving ask. Default off; enable after benchmarking (issue #564)."
5652
5714
  }
5715
+ },
5716
+ "commandAliases": [
5717
+ {
5718
+ "name": "remnic",
5719
+ "kind": "runtime-slash",
5720
+ "cliCommand": "remnic"
5721
+ }
5722
+ ],
5723
+ "activation": {
5724
+ "onCommands": [
5725
+ "remnic"
5726
+ ],
5727
+ "onCapabilities": [
5728
+ "tool",
5729
+ "hook"
5730
+ ]
5653
5731
  }
5654
5732
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@joshuaswarren/openclaw-engram",
3
- "version": "9.3.16",
3
+ "version": "9.3.18",
4
4
  "description": "Deprecated compatibility shim for Engram installs. Re-exports @remnic/plugin-openclaw and forwards engram-access to @remnic/core.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -31,8 +31,8 @@
31
31
  ]
32
32
  },
33
33
  "dependencies": {
34
- "@remnic/plugin-openclaw": "^1.0.20",
35
- "@remnic/core": "^1.1.8"
34
+ "@remnic/core": "^1.1.9",
35
+ "@remnic/plugin-openclaw": "^1.0.32"
36
36
  },
37
37
  "peerDependencies": {
38
38
  "openclaw": ">=2026.4.8"