memory-crystal 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +20 -0
- package/CHANGELOG.md +6 -0
- package/LETTERS.md +22 -0
- package/LICENSE +21 -0
- package/README-ENTERPRISE.md +162 -0
- package/README-old.md +275 -0
- package/README.md +91 -0
- package/RELAY.md +88 -0
- package/TECHNICAL.md +379 -0
- package/ai/dev-updates/2026-02-25--cc-air--phase2-architecture-pivot.md +70 -0
- package/ai/dev-updates/2026-02-25--cc-air--phase2-worker-build.md +72 -0
- package/ai/dev-updates/2026-02-26--10-25-16--cc-mini--phase2-implementation.md +49 -0
- package/ai/dev-updates/2026-02-27--20-30-00--cc-mini--readme-overhaul-and-public-deploy.md +69 -0
- package/ai/notes/2026-02-26--cc-air--notes.md +412 -0
- package/ai/notes/2026-02-27--cc-mini--grok-feedback.md +44 -0
- package/ai/notes/2026-02-27--cc-mini--lesa-feedback.md +45 -0
- package/ai/notes/RESEARCH.md +1185 -0
- package/ai/notes/salience-research/README.md +29 -0
- package/ai/notes/salience-research/eurosla-salience-review.md +64 -0
- package/ai/notes/salience-research/full-research-summary.md +269 -0
- package/ai/notes/salience-research/salience-levels-diagram.png +0 -0
- package/ai/plan/2026-02-27--cc-mini--qr-pairing-spec.md +203 -0
- package/ai/plan/_archive/PLAN.md +194 -0
- package/ai/plan/_archive/PRD.md +1014 -0
- package/ai/plan/cc-plans-duplicates-from-dot-claude/2026-02-26--cc-mini--phase2-implementation-plan.md +245 -0
- package/ai/plan/dev-conventions-note.md +70 -0
- package/ai/plan/ldm-os-install-and-boot-architecture.md +285 -0
- package/ai/plan/memory-crystal-phase2-plan.md +192 -0
- package/ai/plan/memory-system-lay-of-the-land.md +214 -0
- package/ai/plan/phase2-ephemeral-relay.md +238 -0
- package/ai/plan/readme-first.md +68 -0
- package/ai/plan/roadmap.md +159 -0
- package/ai/todos/PUNCHLIST.md +44 -0
- package/ai/todos/README.md +31 -0
- package/ai/todos/inboxes/cc-air/2026-02-26--cc-air--post-relay-todos.md +85 -0
- package/ai/todos/inboxes/cc-mini/2026-02-26--cc-mini--phase2-status.md +100 -0
- package/ai/todos/inboxes/cc-mini/_archive/TODO.md +25 -0
- package/ai/todos/inboxes/parker/2026-02-25--cc-air--setup-checklist.md +139 -0
- package/ai/todos/inboxes/parker/2026-02-26--cc-mini--phase2-your-moves.md +72 -0
- package/dist/cc-hook.d.ts +1 -0
- package/dist/cc-hook.js +349 -0
- package/dist/chunk-3VFIJYS4.js +818 -0
- package/dist/chunk-52QE3YI3.js +1169 -0
- package/dist/chunk-AA3OPP4Z.js +432 -0
- package/dist/chunk-D3I3ZSE2.js +411 -0
- package/dist/chunk-EKSACBTJ.js +1070 -0
- package/dist/chunk-F3Y7EL7K.js +83 -0
- package/dist/chunk-JWZXYVET.js +1068 -0
- package/dist/chunk-KYVWO6ZM.js +1069 -0
- package/dist/chunk-L3VHARQH.js +413 -0
- package/dist/chunk-LOVAHSQV.js +411 -0
- package/dist/chunk-LQOYCAGG.js +446 -0
- package/dist/chunk-MK42FMEG.js +147 -0
- package/dist/chunk-NIJCVN3O.js +147 -0
- package/dist/chunk-O2UITJGH.js +465 -0
- package/dist/chunk-PEK6JH65.js +432 -0
- package/dist/chunk-PJ6FFKEX.js +77 -0
- package/dist/chunk-PLUBBZYR.js +800 -0
- package/dist/chunk-SGL6ISBJ.js +1061 -0
- package/dist/chunk-UNHVZB5G.js +411 -0
- package/dist/chunk-VAFTWSTE.js +1061 -0
- package/dist/chunk-XZ3S56RQ.js +1061 -0
- package/dist/chunk-Y72C7F6O.js +148 -0
- package/dist/cli.d.ts +1 -0
- package/dist/cli.js +325 -0
- package/dist/core.d.ts +188 -0
- package/dist/core.js +12 -0
- package/dist/crypto.d.ts +16 -0
- package/dist/crypto.js +18 -0
- package/dist/dev-update-SZ2Z4WCQ.js +6 -0
- package/dist/ldm.d.ts +17 -0
- package/dist/ldm.js +12 -0
- package/dist/mcp-server.d.ts +1 -0
- package/dist/mcp-server.js +250 -0
- package/dist/migrate.d.ts +1 -0
- package/dist/migrate.js +89 -0
- package/dist/mirror-sync.d.ts +1 -0
- package/dist/mirror-sync.js +130 -0
- package/dist/openclaw.d.ts +5 -0
- package/dist/openclaw.js +349 -0
- package/dist/poller.d.ts +1 -0
- package/dist/poller.js +272 -0
- package/dist/summarize.d.ts +19 -0
- package/dist/summarize.js +10 -0
- package/dist/worker.js +137 -0
- package/openclaw.plugin.json +11 -0
- package/package.json +40 -0
- package/scripts/migrate-lance-to-sqlite.mjs +217 -0
- package/skills/memory/SKILL.md +61 -0
- package/src/cc-hook.ts +447 -0
- package/src/cli.ts +356 -0
- package/src/core.ts +1472 -0
- package/src/crypto.ts +113 -0
- package/src/dev-update.ts +178 -0
- package/src/ldm.ts +117 -0
- package/src/mcp-server.ts +274 -0
- package/src/migrate.ts +104 -0
- package/src/mirror-sync.ts +175 -0
- package/src/openclaw.ts +250 -0
- package/src/poller.ts +345 -0
- package/src/summarize.ts +210 -0
- package/src/worker.ts +208 -0
- package/tsconfig.json +18 -0
- package/wrangler.toml +20 -0
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# Salience Research Artifacts
|
|
2
|
+
|
|
3
|
+
Research into linguistic and cognitive salience frameworks for improving Memory Crystal's recall architecture.
|
|
4
|
+
|
|
5
|
+
**Started:** 2026-02-20
|
|
6
|
+
**Origin:** Parker found the EuroSLA salience paper while exploring how visual salience maps to language salience. Led to mapping how salience research applies to the gap between memory capture (works) and spontaneous recall (doesn't).
|
|
7
|
+
|
|
8
|
+
## Files
|
|
9
|
+
|
|
10
|
+
| File | What |
|
|
11
|
+
|------|------|
|
|
12
|
+
| `README.md` | This index |
|
|
13
|
+
| `eurosla-salience-review.md` | Summary of the Knell et al. EuroSLA paper (three-level salience taxonomy) |
|
|
14
|
+
| `salience-levels-diagram.png` | Screenshot of the perceptual/psycholinguistic/experiential diagram |
|
|
15
|
+
| `full-research-summary.md` | Complete research synthesis: 7 papers, 7 architecture principles, all sources |
|
|
16
|
+
|
|
17
|
+
## Key Insight
|
|
18
|
+
|
|
19
|
+
The problem isn't storage. It's not even retrieval. It's **associative recall**. Salience determines what gets noticed, encoded deeply, and spontaneously surfaced. Our current system treats all 150K chunks equally. It needs a salience layer.
|
|
20
|
+
|
|
21
|
+
## Seven Principles (from the research)
|
|
22
|
+
|
|
23
|
+
1. Salience is multi-dimensional (structural, contextual, experiential)
|
|
24
|
+
2. Prediction error drives encoding priority (surprise > confirmation)
|
|
25
|
+
3. Retrieval needs spreading activation (graph, not flat vector store)
|
|
26
|
+
4. Blocking is the enemy (summaries prevent deeper encoding)
|
|
27
|
+
5. Temporal decay with reactivation (use it or lose it)
|
|
28
|
+
6. Salience contagion (important events boost nearby memories)
|
|
29
|
+
7. Consolidation needs a "sleep" phase (salience-aware compaction)
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
# Stand-out: A Systematic Review of the Role of Salience in Second Language Acquisition
|
|
2
|
+
|
|
3
|
+
**Source:** https://euroslajournal.org/articles/10.22599/jesla.131
|
|
4
|
+
**DOI:** 10.22599/jesla.131
|
|
5
|
+
**Journal:** Journal of the European Second Language Association (EuroSLA)
|
|
6
|
+
**Authors:** Georgia Knell, Saioa Cipitria, Ludovic de Cuypere, Alex Housen, Esli Struys (Vrije Universiteit Brussel)
|
|
7
|
+
**Saved:** 2026-02-20
|
|
8
|
+
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
## Summary
|
|
12
|
+
|
|
13
|
+
Systematic review of 42 empirical studies examining how linguistic salience ("the degree to which a linguistic element stands out from its environment") affects second language (L2) acquisition.
|
|
14
|
+
|
|
15
|
+
## Key Framework: Three Salience Categories
|
|
16
|
+
|
|
17
|
+
The authors organize salience into three nested levels:
|
|
18
|
+
|
|
19
|
+
### 1. Perceptual Salience (Narrow definition)
|
|
20
|
+
- Physical properties of the linguistic form itself
|
|
21
|
+
- Examples: substance (phonological weight), syllabicity, position in utterance
|
|
22
|
+
- Does it physically stand out?
|
|
23
|
+
|
|
24
|
+
### 2. Psycholinguistic Salience (Medium definition)
|
|
25
|
+
- How linguistic context affects prominence
|
|
26
|
+
- Examples: semantic redundancy, formal redundancy/transparency
|
|
27
|
+
- Key insight: tense markers become invisible when temporal adverbs already carry the meaning. Redundancy reduces salience.
|
|
28
|
+
|
|
29
|
+
### 3. Experiential Salience (Wide definition)
|
|
30
|
+
- The learner's prior linguistic experience
|
|
31
|
+
- Examples: L1 experience (cross-linguistic influence), L2 experience (frequency, exposure)
|
|
32
|
+
- Prior experience shapes what stands out
|
|
33
|
+
|
|
34
|
+
## Key Findings
|
|
35
|
+
|
|
36
|
+
- **79% of tested variables showed fully or partially positive effects** ... increased salience correlates with better learning outcomes
|
|
37
|
+
- Most research has focused on perceptual properties; psycholinguistic manifestations are understudied
|
|
38
|
+
- The interaction between salience types is poorly understood
|
|
39
|
+
|
|
40
|
+
## Why This Matters for Agent Memory
|
|
41
|
+
|
|
42
|
+
The parallel to AI agent memory architecture:
|
|
43
|
+
|
|
44
|
+
| Salience Level | Language Acquisition | Agent Memory |
|
|
45
|
+
|---|---|---|
|
|
46
|
+
| Perceptual | Does the form physically stand out? | Is a memory chunk distinctive, or does it blend in with 150K others? |
|
|
47
|
+
| Psycholinguistic | Does context make it redundant? | If the same info exists in 5 places, the agent stops noticing any of them |
|
|
48
|
+
| Experiential | Does prior experience make it stand out? | Memories tied to identity or recent work should be more salient than generic tasks |
|
|
49
|
+
|
|
50
|
+
The core insight: **you can't recall what you didn't notice.** Storage is not enough. The memory system needs a salience layer that determines what "stands out" at the moment of retrieval, not just at the moment of capture.
|
|
51
|
+
|
|
52
|
+
## Key References to Follow
|
|
53
|
+
|
|
54
|
+
- **Schmidt (1990, 2001)** ... The Noticing Hypothesis. You can't learn what you don't notice. Attention is the gateway to memory.
|
|
55
|
+
- **Goldschneider & DeKeyser (2001)** ... Foundational work on morpheme acquisition and salience
|
|
56
|
+
- **Gass et al. (2017)** ... Comprehensive volume on salience in SLA
|
|
57
|
+
- **Ellis (2006, 2022)** ... Contingency, associative learning, and salience interactions
|
|
58
|
+
- **DeKeyser et al. (2017)** ... Stratification of salience definitions
|
|
59
|
+
|
|
60
|
+
## Diagram
|
|
61
|
+
|
|
62
|
+
The paper includes a nested diagram showing the three levels (saved as screenshot):
|
|
63
|
+
- Narrow (Perceptual) nested inside Medium (Psycholinguistic) nested inside Wide (Experiential)
|
|
64
|
+
- Source image: `/Users/lesa/Desktop/67dd4edc63fc6.png`
|
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
# Linguistic Salience and Memory: Research Summary for AI Agent Architecture
|
|
2
|
+
|
|
3
|
+
**Date:** 2026-02-20
|
|
4
|
+
**Context:** Parker found the EuroSLA salience paper while exploring how visual salience maps to language salience. This research explores how salience frameworks from linguistics and cognitive science can inform Memory Crystal's architecture... specifically the gap between memory capture (works) and spontaneous recall (doesn't work).
|
|
5
|
+
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
## 1. What Is Salience? A Working Taxonomy
|
|
9
|
+
|
|
10
|
+
Salience is the property of a stimulus that makes it stand out, attract attention, and get processed more deeply. Research distinguishes several interacting dimensions:
|
|
11
|
+
|
|
12
|
+
**Bottom-up (stimulus-driven) salience:** The stimulus itself attracts attention through physical properties. In language: phonological stress, syllable length, sentence position (first-mention advantage), acoustic prominence.
|
|
13
|
+
|
|
14
|
+
**Top-down (expectation-driven) salience:** Attention directed by prior knowledge, current goals, or recent activation. A stimulus is salient because the perceiver expects it, has been primed for it, or is actively searching for it. Breaks when something violates expectations (surprisal).
|
|
15
|
+
|
|
16
|
+
**Experiential salience:** A stimulus stands out because of the perceiver's personal history. Prior emotional or motivational associations amplify encoding.
|
|
17
|
+
|
|
18
|
+
**Key insight for AI memory:** Salience is not a fixed property of information. It emerges from the interaction between the incoming signal, the current context, and the agent's history. This is why a flat vector store with cosine similarity fails at spontaneous recall. It only captures one dimension (semantic similarity) while ignoring context, recency, emotional weight, structural position, and relational importance.
|
|
19
|
+
|
|
20
|
+
**Source:** [Editorial: Perceptual Linguistic Salience (PMC)](https://pmc.ncbi.nlm.nih.gov/articles/PMC5360726/)
|
|
21
|
+
|
|
22
|
+
---
|
|
23
|
+
|
|
24
|
+
## 2. The Knell et al. Framework (EuroSLA): Three Levels of Salience
|
|
25
|
+
|
|
26
|
+
**Paper:** Georgia Knell et al. "Stand-out: A Systematic Review of the Role of Salience in Second Language Acquisition." *Journal of the European Second Language Association* (2025).
|
|
27
|
+
**Link:** https://euroslajournal.org/articles/10.22599/jesla.131
|
|
28
|
+
|
|
29
|
+
After reviewing 42 empirical studies, the authors propose a three-level taxonomy:
|
|
30
|
+
|
|
31
|
+
### 2a. Perceptual salience (narrow)
|
|
32
|
+
Physical properties of the form. Length, stress, position, boundedness, sonority. Longer morphemes get noticed more than shorter ones. Sentence-initial items get an encoding advantage.
|
|
33
|
+
|
|
34
|
+
### 2b. Psycholinguistic salience (medium)
|
|
35
|
+
Relationship between form and linguistic context. When a grammatical marker is semantically redundant (e.g., past tense "-ed" appearing alongside "yesterday"), learners process the meaning word and ignore the morphology. Learners process input for meaning before form.
|
|
36
|
+
|
|
37
|
+
### 2c. Experiential salience (wide)
|
|
38
|
+
Relationship between form and the learner's prior language experience. L1 transfer, frequency of prior exposure, familiarity.
|
|
39
|
+
|
|
40
|
+
**Finding:** 79% of studies showed positive relationships between higher salience and better learning outcomes.
|
|
41
|
+
|
|
42
|
+
### Application to agent memory
|
|
43
|
+
|
|
44
|
+
| Salience Level | Language Acquisition | Agent Memory |
|
|
45
|
+
|---|---|---|
|
|
46
|
+
| Perceptual | Does the form physically stand out? | Structural position: is this in a header, first message, explicit decision, or buried mid-paragraph? |
|
|
47
|
+
| Psycholinguistic | Does context make it redundant? | If the same info exists in 5 places, the agent stops noticing any of them |
|
|
48
|
+
| Experiential | Does prior experience make it stand out? | Memories tied to identity or recent work should be more salient than generic tasks |
|
|
49
|
+
|
|
50
|
+
---
|
|
51
|
+
|
|
52
|
+
## 3. Nick Ellis: Blocking, Overshadowing, and Learned Attention
|
|
53
|
+
|
|
54
|
+
**Researcher:** Nick C. Ellis, University of Michigan
|
|
55
|
+
|
|
56
|
+
Three critical phenomena from associative learning:
|
|
57
|
+
|
|
58
|
+
**Blocking:** When a reliable cue (e.g., "yesterday") is already learned, it blocks acquisition of a less salient cue that predicts the same thing (e.g., "-ed"). The learner already has a working solution, so the redundant cue gets no associative weight. Not laziness... optimal resource allocation given bounded attention.
|
|
59
|
+
|
|
60
|
+
**Overshadowing:** When two cues are presented simultaneously, the more salient one captures more associative strength. The less salient cue is "overshadowed" even though it's equally informative.
|
|
61
|
+
|
|
62
|
+
**Learned attention:** Prior experience tunes what you attend to. Chinese speakers (L1 lacks tense morphology) were less able to acquire English tense markers than Spanish/Russian speakers (rich morphology). Prior attentional habits transfer.
|
|
63
|
+
|
|
64
|
+
**His core claim:** "What we attend to is determined by our prior experience, and salience is as much a psychological as a physical property."
|
|
65
|
+
|
|
66
|
+
### Application to agent memory
|
|
67
|
+
|
|
68
|
+
- **Blocking:** If the agent has a "good enough" summary, it may never encode the richer original. The summary blocks deeper processing.
|
|
69
|
+
- **Overshadowing:** Dramatic emotional exchanges overshadow quiet but important technical decisions in encoding.
|
|
70
|
+
- **Learned attention:** An agent primarily asked about scheduling develops attentional biases toward temporal information, under-encoding other facts.
|
|
71
|
+
- **Implication:** A memory system needs anti-blocking mechanisms. Redundancy should trigger comparison, not dismissal.
|
|
72
|
+
|
|
73
|
+
**Sources:**
|
|
74
|
+
- [Blocking and Learned Attention in Language Acquisition](https://sites.lsa.umich.edu/nickellis-new/wp-content/uploads/sites/1284/2021/07/pp400-ellis.pdf)
|
|
75
|
+
- [Selective Attention and Transfer in L2 Acquisition (Oxford Academic)](https://academic.oup.com/applij/article-abstract/27/2/164/185787)
|
|
76
|
+
|
|
77
|
+
---
|
|
78
|
+
|
|
79
|
+
## 4. The Rescorla-Wagner Model: Computational Salience Through Prediction Error
|
|
80
|
+
|
|
81
|
+
**Model:** Rescorla & Wagner (1972). Foundational mathematical model of associative learning.
|
|
82
|
+
|
|
83
|
+
Learning occurs proportionally to **surprise**. Change in associative strength depends on:
|
|
84
|
+
- **Alpha (salience of the cue):** intrinsic property of the stimulus
|
|
85
|
+
- **Beta (salience of the outcome):** how strong the unconditioned stimulus is
|
|
86
|
+
- **Prediction error:** difference between expected and actual outcomes. Learning is maximal when the outcome is unexpected.
|
|
87
|
+
|
|
88
|
+
When multiple cues are present, their combined associative strengths determine the prediction error. This creates cue competition. If cue A already predicts the outcome well, adding cue B produces no surprise, so B gains no associative weight. This is the formal mechanism behind blocking.
|
|
89
|
+
|
|
90
|
+
**Extensions:** The CompAct model adds dynamic attention allocation. Features compete for attention. A "familiarity principle" reduces attention to repeatedly observed, unreinforced cues.
|
|
91
|
+
|
|
92
|
+
### Application to agent memory
|
|
93
|
+
- **Prediction error as a signal:** Information that surprises the agent (contradicts existing memories, introduces genuinely new facts) should receive higher encoding priority
|
|
94
|
+
- **Cue competition:** Multiple pieces of information in a conversation turn should compete for encoding weight. Not everything deserves equal allocation
|
|
95
|
+
- **Decay of attention to familiar stimuli:** Repeatedly encountered, unchanged information should receive declining encoding weight
|
|
96
|
+
|
|
97
|
+
**Source:** [Rescorla-Wagner Model (Wikipedia)](https://en.wikipedia.org/wiki/Rescorla%E2%80%93Wagner_model)
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## 5. Emotional Salience and Memory Consolidation
|
|
102
|
+
|
|
103
|
+
Memory is a selective system biased toward motivationally significant information.
|
|
104
|
+
|
|
105
|
+
- Emotionally salient experiences trigger norepinephrine surges that create "hotspots" of long-term potentiation. Emotional events don't just get remembered better... they **enhance encoding of temporally adjacent mundane information** through graded prioritization.
|
|
106
|
+
|
|
107
|
+
- Intentional instruction to remember ("this is important") can compete with and sometimes override emotional salience. Top-down goals modulate consolidation.
|
|
108
|
+
|
|
109
|
+
- During sleep consolidation, salient experiences are preferentially reactivated. Multiple salience cues compete for dominance.
|
|
110
|
+
|
|
111
|
+
### Application to agent memory
|
|
112
|
+
- **Salience contagion:** Important events should boost encoding weight of temporally adjacent memories
|
|
113
|
+
- **Explicit importance marking:** When the user says "remember this," that overrides default encoding weights
|
|
114
|
+
- **Consolidation phase:** Periodic "sleep-like" process reactivating high-salience memories while low-salience ones decay. This is what compaction should be, guided by salience not just recency.
|
|
115
|
+
|
|
116
|
+
**Source:** [Salient experiences enhance mundane memories (Science Advances)](https://www.science.org/doi/10.1126/sciadv.ady1704)
|
|
117
|
+
|
|
118
|
+
---
|
|
119
|
+
|
|
120
|
+
## 6. Spontaneous Retrieval and Spreading Activation
|
|
121
|
+
|
|
122
|
+
Spontaneous retrieval (memories surfacing without deliberate search) relies on associative processes, not executive search. Mediated by hippocampus, triggered by contextual overlap between current situation and encoded memory.
|
|
123
|
+
|
|
124
|
+
- **Spreading activation:** Accessing one concept automatically activates related concepts. Activation spreads along associative links, decaying over distance.
|
|
125
|
+
- **Context-dependent memory:** Memories retrieved more easily when retrieval context matches encoding context (encoding specificity principle).
|
|
126
|
+
- **Involuntary autobiographical memories:** Surface without conscious search, triggered by environmental cues that overlap with encoded features. Less cognitively demanding than deliberate recall.
|
|
127
|
+
|
|
128
|
+
### Application to agent memory
|
|
129
|
+
This is the missing piece. The agent has 150K+ chunks but no spreading activation. When processing a new message, the agent should:
|
|
130
|
+
1. Activate nodes that semantically match current input
|
|
131
|
+
2. Propagate activation along relational links to associated memories
|
|
132
|
+
3. Surface memories exceeding an activation threshold, even without direct query match
|
|
133
|
+
|
|
134
|
+
Without this, the agent can only retrieve what it explicitly searches for.
|
|
135
|
+
|
|
136
|
+
**Source:** [Contextually Mediated Spontaneous Retrieval (PMC)](https://pmc.ncbi.nlm.nih.gov/articles/PMC5789776/)
|
|
137
|
+
|
|
138
|
+
---
|
|
139
|
+
|
|
140
|
+
## 7. Computational Architectures Applying These Principles
|
|
141
|
+
|
|
142
|
+
### 7a. SYNAPSE (2025): Spreading Activation for LLM Agents
|
|
143
|
+
|
|
144
|
+
**Paper:** "SYNAPSE: Empowering LLM Agents with Episodic-Semantic Memory via Spreading Activation"
|
|
145
|
+
**Link:** https://arxiv.org/abs/2601.02744
|
|
146
|
+
|
|
147
|
+
Unified graph with episodic nodes (specific interactions) and semantic nodes (abstract concepts). Three edge types: temporal, abstraction, association. Activation propagates with fan effect (dilution), lateral inhibition (winner-take-all), and temporal decay. Final scoring fuses semantic similarity + graph activation + PageRank importance.
|
|
148
|
+
|
|
149
|
+
Includes "feeling of knowing" gate: refuses to answer when confidence is low rather than hallucinating.
|
|
150
|
+
|
|
151
|
+
**Results:** 40.5 F1 on LoCoMo (outperforms A-Mem by 7.2 points). 23% improvement in multi-hop reasoning. 95% token reduction vs. full-context.
|
|
152
|
+
|
|
153
|
+
**Most directly applicable architecture to our problem.**
|
|
154
|
+
|
|
155
|
+
### 7b. ACT-R-Inspired Memory (2025)
|
|
156
|
+
|
|
157
|
+
**Paper:** "Human-Like Remembering and Forgetting in LLM Agents"
|
|
158
|
+
**Link:** https://dl.acm.org/doi/10.1145/3765766.3765803
|
|
159
|
+
|
|
160
|
+
Implements ACT-R declarative memory equations. Each chunk has activation from:
|
|
161
|
+
- Base-level activation (frequency + recency with temporal decay)
|
|
162
|
+
- Spreading activation (contextual relevance via cosine similarity)
|
|
163
|
+
- Stochastic noise (Gaussian variability)
|
|
164
|
+
|
|
165
|
+
A chunk is recalled only if total activation exceeds retrieval threshold. Naturally models both remembering and forgetting.
|
|
166
|
+
|
|
167
|
+
**Activation formula:** `Activation = BLA(frequency, recency) + SpreadingActivation(context) + Noise`
|
|
168
|
+
|
|
169
|
+
Could replace or augment pure cosine similarity in Memory Crystal.
|
|
170
|
+
|
|
171
|
+
### 7c. A-Mem (2025): Zettelkasten-Inspired Agentic Memory
|
|
172
|
+
|
|
173
|
+
**Paper:** "A-Mem: Agentic Memory for LLM Agents"
|
|
174
|
+
**Link:** https://arxiv.org/html/2502.12110v1
|
|
175
|
+
|
|
176
|
+
Each memory note: original content, timestamp, LLM-generated keywords, tags, contextual descriptions, embeddings, and links. New memories trigger updates to contextual representations of related existing memories. Enables higher-order pattern emergence.
|
|
177
|
+
|
|
178
|
+
**Key feature:** Memory evolution. New experiences modify representations of related existing memories (analogous to reconsolidation in neuroscience).
|
|
179
|
+
|
|
180
|
+
### 7d. Agent Cognitive Compressor (ACC) (2025)
|
|
181
|
+
|
|
182
|
+
**Paper:** "AI Agents Need Memory Control Over More Context"
|
|
183
|
+
**Link:** https://arxiv.org/html/2601.11653
|
|
184
|
+
|
|
185
|
+
Replaces transcript accumulation with bounded Compressed Cognitive State (CCS). Typed fields: episodic trace, semantic gist, focal entities, relational map, goal orientation, constraints, predictive cues, uncertainty signals. Retrieved evidence merely proposes information; only decision-critical content enters through a qualification gate.
|
|
186
|
+
|
|
187
|
+
**Addresses opposite failure mode:** Where Crystal captures everything and fails at recall, ACC argues for aggressive compression and curation at write time.
|
|
188
|
+
|
|
189
|
+
### 7e. Salience Maps in Computational Neuroscience
|
|
190
|
+
|
|
191
|
+
**Paper:** Veale et al. (2020). "Salience Models: A Computational Cognitive Neuroscience Review"
|
|
192
|
+
**Link:** https://pmc.ncbi.nlm.nih.gov/articles/PMC6969943/
|
|
193
|
+
|
|
194
|
+
Extract features in parallel, apply center-surround filtering (what stands out relative to surroundings), normalize via competition, generate salience map. Includes lateral inhibition and inhibition of return.
|
|
195
|
+
|
|
196
|
+
**Key principle:** A memory is salient not because of absolute properties but because it contrasts with its surroundings. A quiet technical decision in a conversation full of small talk is salient precisely because it differs.
|
|
197
|
+
|
|
198
|
+
---
|
|
199
|
+
|
|
200
|
+
## 8. Synthesis: Seven Principles for Salience-Aware Agent Memory
|
|
201
|
+
|
|
202
|
+
### Principle 1: Salience is multi-dimensional
|
|
203
|
+
Compute from at least three independent dimensions: structural (position, explicitness), contextual (uniqueness, contrast), experiential (frequency, recency, personal relevance).
|
|
204
|
+
|
|
205
|
+
### Principle 2: Prediction error drives encoding priority
|
|
206
|
+
Information that surprises should get higher encoding weight. The current system gives equal weight to everything.
|
|
207
|
+
|
|
208
|
+
### Principle 3: Retrieval needs spreading activation
|
|
209
|
+
A graph with lateral connections enables spontaneous surfacing. Flat vector stores can only answer "what is similar to X?" not "what is related through causal, temporal, or structural connections?"
|
|
210
|
+
|
|
211
|
+
### Principle 4: Blocking is the enemy
|
|
212
|
+
Once the agent has a "good enough" summary, it stops encoding deeper representations. Redundancy should trigger comparison, not dismissal.
|
|
213
|
+
|
|
214
|
+
### Principle 5: Temporal decay with reactivation
|
|
215
|
+
Every memory should have activation that decays with time but increases with each access. Unused memories fall below threshold. Frequently accessed ones stay available.
|
|
216
|
+
|
|
217
|
+
### Principle 6: Salience contagion
|
|
218
|
+
Important events boost encoding of temporally adjacent information. Critical decisions elevate surrounding context.
|
|
219
|
+
|
|
220
|
+
### Principle 7: Consolidation needs a "sleep" phase
|
|
221
|
+
Periodic offline reprocessing that reactivates high-salience memories and lets low-salience ones decay. Compaction guided by salience, not just recency.
|
|
222
|
+
|
|
223
|
+
---
|
|
224
|
+
|
|
225
|
+
## Connection to "Attention Is All You Need"
|
|
226
|
+
|
|
227
|
+
The Transformer's attention mechanism IS salience computation within the context window. Every attention head computes "what stands out given this query." But it only works on tokens currently in context.
|
|
228
|
+
|
|
229
|
+
The memory problem is: **how do you extend attention beyond the context window?** All the architectures above (ACT-R, SYNAPSE, A-Mem) are different answers. They build the salience layer that Transformers can't do alone... deciding what deserves to enter the window before the model ever sees it.
|
|
230
|
+
|
|
231
|
+
Schmidt's Noticing Hypothesis maps here: you can't learn what you don't notice. Transformers can't attend to what isn't in context. Same constraint, different framing.
|
|
232
|
+
|
|
233
|
+
---
|
|
234
|
+
|
|
235
|
+
## All Sources
|
|
236
|
+
|
|
237
|
+
### Core Papers
|
|
238
|
+
- [Stand-out: Salience in SLA (EuroSLA)](https://euroslajournal.org/articles/10.22599/jesla.131)
|
|
239
|
+
- [SYNAPSE: Spreading Activation for LLM Agents (arXiv)](https://arxiv.org/abs/2601.02744)
|
|
240
|
+
- [ACT-R-Inspired Memory Architecture (ACM)](https://dl.acm.org/doi/10.1145/3765766.3765803)
|
|
241
|
+
- [A-Mem: Agentic Memory (arXiv)](https://arxiv.org/html/2502.12110v1)
|
|
242
|
+
- [Agent Cognitive Compressor (arXiv)](https://arxiv.org/html/2601.11653)
|
|
243
|
+
- [Salient experiences enhance mundane memories (Science Advances)](https://www.science.org/doi/10.1126/sciadv.ady1704)
|
|
244
|
+
|
|
245
|
+
### Salience Frameworks
|
|
246
|
+
- [Editorial: Perceptual Linguistic Salience (PMC)](https://pmc.ncbi.nlm.nih.gov/articles/PMC5360726/)
|
|
247
|
+
- [What is Salience? (De Gruyter)](https://www.degruyterbrill.com/document/doi/10.1515/opli-2020-0042/html)
|
|
248
|
+
- [Attention and Salience (Oxford Bibliographies)](https://www.oxfordbibliographies.com/display/document/obo-9780199772810/obo-9780199772810-0324.xml)
|
|
249
|
+
- [Salience Models: Computational Neuroscience Review (PMC)](https://pmc.ncbi.nlm.nih.gov/articles/PMC6969943/)
|
|
250
|
+
|
|
251
|
+
### Associative Learning
|
|
252
|
+
- [Blocking and Learned Attention (Nick Ellis)](https://sites.lsa.umich.edu/nickellis-new/wp-content/uploads/sites/1284/2021/07/pp400-ellis.pdf)
|
|
253
|
+
- [Selective Attention and Transfer in L2 (Oxford Academic)](https://academic.oup.com/applij/article-abstract/27/2/164/185787)
|
|
254
|
+
- [Rescorla-Wagner Model (Wikipedia)](https://en.wikipedia.org/wiki/Rescorla%E2%80%93Wagner_model)
|
|
255
|
+
|
|
256
|
+
### Memory and Retrieval
|
|
257
|
+
- [Contextually Mediated Spontaneous Retrieval (PMC)](https://pmc.ncbi.nlm.nih.gov/articles/PMC5789776/)
|
|
258
|
+
- [Survival of the salient: Emotion rescues memories (bioRxiv)](https://www.biorxiv.org/content/10.1101/2020.07.07.192252v1.full)
|
|
259
|
+
- [Neural dynamics of spontaneous memory recall (Nature Comms)](https://www.nature.com/articles/s41467-025-61807-w)
|
|
260
|
+
|
|
261
|
+
### Agent Memory Surveys
|
|
262
|
+
- [Memory in the Age of AI Agents: Paper List (GitHub)](https://github.com/Shichun-Liu/Agent-Memory-Paper-List)
|
|
263
|
+
- [Memory Mechanisms in LLM Agents (EmergentMind)](https://www.emergentmind.com/topics/memory-mechanisms-in-llm-based-agents)
|
|
264
|
+
- [ICLR 2026 Workshop: MemAgents (OpenReview)](https://openreview.net/pdf?id=U51WxL382H)
|
|
265
|
+
- [CAIM: Cognitive AI Memory Framework (arXiv)](https://arxiv.org/abs/2505.13044)
|
|
266
|
+
|
|
267
|
+
---
|
|
268
|
+
|
|
269
|
+
*Compiled 2026-02-20 by Claude Code. Research initiated by Parker's discovery of the EuroSLA salience framework.*
|
|
Binary file
|
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
# QR Code Pairing for Relay Key Sharing
|
|
2
|
+
|
|
3
|
+
**Date:** 2026-02-27
|
|
4
|
+
**Author:** cc-mini
|
|
5
|
+
**Status:** Spec (ready for review)
|
|
6
|
+
**Priority:** High (Grok flagged key sharing UX as top critique)
|
|
7
|
+
|
|
8
|
+
## Problem
|
|
9
|
+
|
|
10
|
+
Right now, sharing the Relay encryption key between devices requires:
|
|
11
|
+
1. Run `openssl rand -base64 32 > ~/.openclaw/secrets/crystal-relay-key`
|
|
12
|
+
2. Manually copy the file to every other device (AirDrop, 1Password, USB, etc.)
|
|
13
|
+
|
|
14
|
+
This works for Parker. It does not work for anyone else. Grok called it out: "For normies or big teams it'll feel clunky until you add something like QR codes or secure enclave flow."
|
|
15
|
+
|
|
16
|
+
## Solution
|
|
17
|
+
|
|
18
|
+
`crystal pair` ... a QR code pairing flow that transfers the encryption key between devices without touching a server.
|
|
19
|
+
|
|
20
|
+
## User Experience
|
|
21
|
+
|
|
22
|
+
### Device A (has the key, or generates one)
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
$ crystal pair
|
|
26
|
+
|
|
27
|
+
No relay key found. Generating one...
|
|
28
|
+
Key saved to ~/.openclaw/secrets/crystal-relay-key
|
|
29
|
+
|
|
30
|
+
Scan this QR code from your other device:
|
|
31
|
+
|
|
32
|
+
██████████████████████████████
|
|
33
|
+
██████████████████████████████
|
|
34
|
+
██ ██ ████ ██ ██ ██ █
|
|
35
|
+
██████████████████████████████
|
|
36
|
+
...
|
|
37
|
+
|
|
38
|
+
Waiting for scan... (expires in 5 minutes)
|
|
39
|
+
Press Ctrl+C to cancel.
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
If a key already exists:
|
|
43
|
+
```bash
|
|
44
|
+
$ crystal pair
|
|
45
|
+
|
|
46
|
+
Relay key found.
|
|
47
|
+
Scan this QR code from your other device:
|
|
48
|
+
|
|
49
|
+
[QR code]
|
|
50
|
+
|
|
51
|
+
Waiting for scan... (expires in 5 minutes)
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
### Device B (receiving the key)
|
|
55
|
+
|
|
56
|
+
```bash
|
|
57
|
+
$ crystal pair --scan
|
|
58
|
+
|
|
59
|
+
Point your camera at the QR code...
|
|
60
|
+
(or paste the pairing code manually)
|
|
61
|
+
|
|
62
|
+
> mc1:AaBbCc...XxYyZz
|
|
63
|
+
|
|
64
|
+
Key received and saved to ~/.openclaw/secrets/crystal-relay-key
|
|
65
|
+
Relay encryption is now active on this device.
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
### Alternative: no camera (SSH, headless, remote)
|
|
69
|
+
|
|
70
|
+
The QR code encodes a pairing string. The user can also just copy-paste it:
|
|
71
|
+
|
|
72
|
+
```bash
|
|
73
|
+
$ crystal pair
|
|
74
|
+
...
|
|
75
|
+
Or copy this pairing code:
|
|
76
|
+
mc1:SGVsbG9EYXJrbmVzc015T2xkRnJpZW5kMTIzNDU2Nzg=
|
|
77
|
+
|
|
78
|
+
$ crystal pair --code mc1:SGVsbG9EYXJrbmVzc015T2xkRnJpZW5kMTIzNDU2Nzg=
|
|
79
|
+
Key received and saved.
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
## Technical Design
|
|
83
|
+
|
|
84
|
+
### What the QR code contains
|
|
85
|
+
|
|
86
|
+
A pairing string with this format:
|
|
87
|
+
|
|
88
|
+
```
|
|
89
|
+
mc1:<base64-encoded-key>
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
- `mc1` ... protocol version prefix (Memory Crystal v1)
|
|
93
|
+
- The rest is the raw 32-byte encryption key, base64 encoded
|
|
94
|
+
|
|
95
|
+
That's it. No server. No handshake. No session tokens. The QR code IS the key. Scan it, save it, done.
|
|
96
|
+
|
|
97
|
+
### Why this is safe
|
|
98
|
+
|
|
99
|
+
1. **QR codes are physical proximity only.** You have to be in the same room to scan it. That's the security model. Same as AirDrop.
|
|
100
|
+
2. **The key never touches a network.** No HTTP call, no relay, no cloud. Camera to screen. Local.
|
|
101
|
+
3. **Terminal QR codes are short-lived, not truly ephemeral.** They leave the visible screen within minutes, but beware: scrollback buffers, tmux/screen logging, and screen recordings can all retain the rendered code. Users who care should clear scrollback after pairing.
|
|
102
|
+
4. **The pairing string is copy-pasteable** for headless/SSH scenarios. The user is responsible for secure transfer in that case (same as today).
|
|
103
|
+
|
|
104
|
+
### What we are NOT building
|
|
105
|
+
|
|
106
|
+
- **No TOTP/rotating codes.** The key is static. If you pair once, you're paired forever (until you rotate the key).
|
|
107
|
+
- **No relay-assisted pairing.** The whole point is the key never touches a server.
|
|
108
|
+
- **No Bluetooth/NFC.** Terminal-first. QR code covers 90% of cases. The manual code covers the rest.
|
|
109
|
+
- **No key rotation in this spec.** That's a separate feature (`crystal rotate`). This spec is about initial pairing only.
|
|
110
|
+
|
|
111
|
+
## Implementation
|
|
112
|
+
|
|
113
|
+
### New CLI commands
|
|
114
|
+
|
|
115
|
+
Add to `cli.ts`:
|
|
116
|
+
|
|
117
|
+
```
|
|
118
|
+
crystal pair Show QR code with current key (generate if none exists)
|
|
119
|
+
crystal pair --scan Open camera to scan QR code (macOS only, uses AVFoundation)
|
|
120
|
+
crystal pair --code <str> Accept a pairing code directly (no camera needed)
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
### Dependencies
|
|
124
|
+
|
|
125
|
+
- **QR generation:** `qrcode-terminal` (npm, MIT, zero deps, renders QR in terminal using Unicode block chars). Already well-established, 2M+ weekly downloads.
|
|
126
|
+
- **QR scanning (macOS):** Two options:
|
|
127
|
+
- **Option A:** `imagesnap` + `zbar` (brew install, captures frame from camera, decodes QR). Heavy.
|
|
128
|
+
- **Option B:** AppleScript/Swift bridge to AVFoundation camera. Native, no deps, macOS only.
|
|
129
|
+
- **Option C (recommended):** Skip camera scanning entirely for v1. Just support `--code` for manual paste. The showing side (QR display) is the important UX win. Scanning can come later or be done via phone camera (most people will photograph the terminal QR with their phone, then type the code on the other machine).
|
|
130
|
+
|
|
131
|
+
### Recommended v1 scope
|
|
132
|
+
|
|
133
|
+
1. `crystal pair` ... generate key if missing, display QR code + pairing string in terminal
|
|
134
|
+
2. `crystal pair --code <string>` ... accept pairing string, save key
|
|
135
|
+
3. Skip `--scan` for v1 (camera scanning is complex, cross-platform headache, and the manual code covers it)
|
|
136
|
+
|
|
137
|
+
### Files to change
|
|
138
|
+
|
|
139
|
+
| File | Change |
|
|
140
|
+
|------|--------|
|
|
141
|
+
| `src/cli.ts` | Add `pair` subcommand with QR display and `--code` flag |
|
|
142
|
+
| `src/crypto.ts` | Add `generateRelayKey()` function (wraps `randomBytes(32)`) |
|
|
143
|
+
| `src/pair.ts` (new) | Pairing logic: encode/decode pairing string, QR generation, key save |
|
|
144
|
+
| `package.json` | Add `qrcode-terminal` dependency |
|
|
145
|
+
| `RELAY.md` | Update key sharing section with `crystal pair` instructions |
|
|
146
|
+
| `SKILL.md` | Add pairing instructions to the relay setup flow |
|
|
147
|
+
|
|
148
|
+
### Pairing string format
|
|
149
|
+
|
|
150
|
+
```
|
|
151
|
+
mc1:<base64(32-byte-key)>
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
Validation:
|
|
155
|
+
- Must start with `mc1:`
|
|
156
|
+
- Base64 portion must decode to exactly 32 bytes
|
|
157
|
+
- Reject anything else with a clear error
|
|
158
|
+
|
|
159
|
+
Future versions could use `mc2:` for a different key format or include metadata (device name, timestamp). But v1 is just the key.
|
|
160
|
+
|
|
161
|
+
### Key generation function
|
|
162
|
+
|
|
163
|
+
```typescript
|
|
164
|
+
// src/crypto.ts
|
|
165
|
+
export function generateRelayKey(): Buffer {
|
|
166
|
+
return randomBytes(32);
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
export function encodeRelayPairingString(key: Buffer): string {
|
|
170
|
+
if (key.length !== 32) throw new Error('Key must be 32 bytes');
|
|
171
|
+
return `mc1:${key.toString('base64')}`;
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
export function decodeRelayPairingString(str: string): Buffer {
|
|
175
|
+
if (!str.startsWith('mc1:')) throw new Error('Invalid pairing string (expected mc1: prefix)');
|
|
176
|
+
const key = Buffer.from(str.slice(4), 'base64');
|
|
177
|
+
if (key.length !== 32) throw new Error('Invalid key length (expected 32 bytes)');
|
|
178
|
+
return key;
|
|
179
|
+
}
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
## Open Questions
|
|
183
|
+
|
|
184
|
+
1. **Should `crystal pair` also configure the relay URL and auth token?** Right now those are separate env vars (`CRYSTAL_RELAY_URL`, `CRYSTAL_RELAY_TOKEN`). The pairing flow could prompt for these too, making `crystal pair` a full "connect to relay" wizard. Or keep it key-only and add `crystal relay setup` separately.
|
|
185
|
+
|
|
186
|
+
2. **Key rotation.** If you pair a third device later, it gets the current key. But what if you want to rotate the key and re-pair everything? That's `crystal rotate` territory. Not in this spec, but worth noting.
|
|
187
|
+
|
|
188
|
+
3. **Enterprise key provisioning.** For teams, QR pairing is per-device. Enterprise might want a different flow (admin provisions keys via 1Password, vault, or MDM). That's the enterprise spec, not this one.
|
|
189
|
+
|
|
190
|
+
## Timeline
|
|
191
|
+
|
|
192
|
+
This is small. Estimated scope:
|
|
193
|
+
- `pair.ts` + CLI wiring: ~100 lines
|
|
194
|
+
- `crypto.ts` additions: ~20 lines
|
|
195
|
+
- Docs updates: ~30 lines
|
|
196
|
+
- Testing: manual (pair between Mac Mini and MacBook Air)
|
|
197
|
+
|
|
198
|
+
## References
|
|
199
|
+
|
|
200
|
+
- Grok feedback: `ai/notes/2026-02-27--cc-mini--grok-feedback.md`
|
|
201
|
+
- Lēsa feedback: `ai/notes/2026-02-27--cc-mini--lesa-feedback.md`
|
|
202
|
+
- Current crypto: `src/crypto.ts`
|
|
203
|
+
- Current relay docs: `RELAY.md`, `TECHNICAL.md`
|