synaptic-state 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,28 @@
"""
SynapticAI: Verifiable State Plane for Autonomous Agents

Remember less. Stay correct longer.
Neuro-Symbolic Memory That Learns What to Forget.
"""

__version__ = "0.1.0"
__author__ = "SynapticAI Team"
__license__ = "MIT"

# Public API re-exports: core state plane, AGM belief revision, verification
# gate, adaptive forgetting/budgeting, and hybrid retrieval.
from synaptic_state.core.stateplane import StatePlane
from synaptic_state.core.agm import AGMEngine, BeliefState
from synaptic_state.core.verification import VerificationGate, VerificationResult
from synaptic_state.adaptive.forgetting import LearnableForgetting
from synaptic_state.retrieval.hybrid import HybridRetriever
from synaptic_state.adaptive.budget import ContextBudgetRL

# Names exported by `from synaptic_state import *`.
__all__ = [
    "StatePlane",
    "AGMEngine",
    "BeliefState",
    "VerificationGate",
    "VerificationResult",
    "LearnableForgetting",
    "HybridRetriever",
    "ContextBudgetRL",
]
@@ -0,0 +1,4 @@
"""Adaptive layer: Budget Optimization and Learnable Forgetting."""

# Re-export the adaptive-layer public API at the subpackage level.
from synaptic_state.adaptive.budget import ContextBudgetRL
from synaptic_state.adaptive.forgetting import LearnableForgetting

__all__ = ["ContextBudgetRL", "LearnableForgetting"]
@@ -0,0 +1,186 @@
"""
ContextBudget RL: Adaptive Context Budget Allocation

Based on ContextBudget paper (arXiv:2604.01664):
- Formulates context management as a sequential decision problem
- Learns optimal compression strategies under varying budgets

For MVP: Heuristic-based allocation (Phase 2: actual RL)
"""

from __future__ import annotations

import time
import logging
from typing import Any, Dict, List, Optional

logger = logging.getLogger(__name__)


class ContextBudgetRL:
    """
    Adaptive context budget optimizer.

    Allocates token budget across memory types based on:
    1. Task type (coding, analysis, conversation, etc.)
    2. History of what was most useful
    3. Current context constraints

    MVP: Rule-based allocation with learning hooks.
    Phase 2: Actual RL-trained policy network.
    """

    # Default allocation fractions per memory type, keyed by strategy name.
    # Fractions are renormalized at allocation time, so a profile need not
    # sum exactly to 1.0.
    ALLOCATION_PROFILES = {
        "greedy": {
            "episodic": 0.4,
            "procedural": 0.2,
            "semantic": 0.25,
            "symbolic": 0.15,
        },
        "rl_optimized": {
            "episodic": 0.3,
            "procedural": 0.3,
            "semantic": 0.25,
            "symbolic": 0.15,
        },
        "symbolic_heavy": {
            "episodic": 0.15,
            "procedural": 0.2,
            "semantic": 0.3,
            "symbolic": 0.35,
        },
        "conversation": {
            "episodic": 0.5,
            "procedural": 0.1,
            "semantic": 0.3,
            "symbolic": 0.1,
        },
    }

    def __init__(
        self,
        default_budget: int = 2048,
        max_budget: int = 8192,
        learning_rate: float = 0.01,
    ):
        """
        Args:
            default_budget: Token budget used when a caller requests none.
            max_budget: Hard cap applied to any requested budget.
            learning_rate: Reserved for the Phase 2 RL policy; unused in MVP.
        """
        self.default_budget = default_budget
        self.max_budget = max_budget
        self.learning_rate = learning_rate

        # Learning state (inputs for the Phase 2 RL policy).
        # NOTE(review): _task_history is never written in the MVP; kept for
        # interface stability with the planned RL phase.
        self._task_history: List[Dict[str, Any]] = []
        self._allocation_history: List[Dict[str, Any]] = []
        self._feedback_scores: List[Dict[str, Any]] = []  # 0-1 scores post-task

    def allocate_budget(
        self,
        requested_budget: Optional[int] = None,
        strategy: str = "rl_optimized",
        task_type: str = "generic",
        available_types: Optional[Dict[str, int]] = None,
    ) -> Dict[str, int]:
        """
        Allocate token budget across memory types.

        Args:
            requested_budget: Desired total budget; clamped to ``max_budget``.
                Falls back to ``default_budget`` when None (or 0, due to the
                ``or`` fallback).
            strategy: Key into ``ALLOCATION_PROFILES``; unknown strategies
                fall back to "rl_optimized".
            task_type: Task label applied via ``_adjust_for_task_type``.
            available_types: Optional map of memory type -> available entry
                count; types with a count of 0 receive a zero allocation.

        Returns: {"episodic": N, "procedural": N, "semantic": N, "symbolic": N}
        """
        budget = min(
            requested_budget or self.default_budget,
            self.max_budget,
        )

        profile = self.ALLOCATION_PROFILES.get(strategy, self.ALLOCATION_PROFILES["rl_optimized"])

        # Apply task-type adjustments
        adjusted = self._adjust_for_task_type(profile, task_type)

        # Normalize the (possibly non-unit-sum) fractions into integer tokens.
        total = sum(adjusted.values())
        if total > 0:
            allocation = {
                k: int(budget * v / total)
                for k, v in adjusted.items()
            }
        else:
            # Degenerate profile: split the budget evenly.
            allocation = {k: budget // len(adjusted) for k in adjusted}

        # Log for learning
        self._allocation_history.append({
            "timestamp": time.time(),
            "budget": budget,
            "strategy": strategy,
            "task_type": task_type,
            "allocation": allocation,
        })

        # Zero out types with nothing available.  NOTE(review): the freed
        # tokens are not redistributed among the remaining types.
        if available_types:
            for mem_type, available_count in available_types.items():
                if mem_type in allocation and available_count == 0:
                    allocation[mem_type] = 0

        return allocation

    def record_feedback(self, task_type: str, score: float, allocation: Dict[str, int]) -> None:
        """
        Record task feedback for future learning.

        Args:
            task_type: Task label the allocation was used for.
            score: 0.0 (bad allocation) to 1.0 (perfect allocation).
            allocation: The allocation that was used.  If it contains a
                "_strategy" key, ``get_optimal_strategy`` will attribute the
                score to that strategy.
        """
        self._feedback_scores.append({
            "timestamp": time.time(),
            "task_type": task_type,
            "score": score,
            "allocation": allocation,
        })

    def _adjust_for_task_type(self, profile: Dict[str, float], task_type: str) -> Dict[str, float]:
        """Return a copy of *profile* with task-specific deltas applied.

        Each fraction is floored at 0.05 so no memory type is starved.
        Unknown task types leave the profile unchanged.
        """
        adjusted = dict(profile)

        adjustments = {
            "coding": {"procedural": 0.15, "symbolic": 0.1, "episodic": -0.1, "semantic": -0.05},
            "debug": {"episodic": 0.15, "procedural": 0.1, "symbolic": 0.0, "semantic": 0.0},
            "analysis": {"semantic": 0.15, "episodic": 0.05, "procedural": -0.05, "symbolic": -0.05},
            "conversation": {"episodic": 0.2, "semantic": 0.1, "procedural": -0.15, "symbolic": -0.05},
        }

        task_adjust = adjustments.get(task_type, {})
        for mem_type, delta in task_adjust.items():
            if mem_type in adjusted:
                adjusted[mem_type] = max(0.05, adjusted[mem_type] + delta)

        return adjusted

    def get_optimal_strategy(self, task_type: str = "generic") -> str:
        """
        Suggest optimal strategy based on historical feedback.

        For Phase 2: this will use learned RL policy.
        For MVP: returns the strategy with the best mean feedback score for
        ``task_type``, defaulting to "rl_optimized" when no feedback exists.
        """
        if not self._feedback_scores:
            return "rl_optimized"

        # Find highest-scoring strategy for this task type
        task_feedback = [f for f in self._feedback_scores if f["task_type"] == task_type]
        if not task_feedback:
            return "rl_optimized"

        # Group scores by strategy.  NOTE(review): the strategy is read from
        # an optional "_strategy" key embedded in the recorded allocation;
        # feedback without it is attributed to "rl_optimized".
        # Fix: annotation was Dict[str, float] while the values are lists.
        scores_by_strategy: Dict[str, List[float]] = {}
        for fb in task_feedback:
            strategy = fb.get("allocation", {}).get("_strategy", "rl_optimized")
            scores_by_strategy.setdefault(strategy, []).append(fb["score"])

        if not scores_by_strategy:
            return "rl_optimized"

        # Pick the strategy with the highest mean score.
        best_strategy = max(
            scores_by_strategy.items(),
            key=lambda item: sum(item[1]) / len(item[1]),
        )
        return best_strategy[0]
@@ -0,0 +1,225 @@
"""
Learnable Forgetting Engine

Based on Oblivion (arXiv:2604.00131):
- Models biological forgetting curves
- Importance-based retention vs static TTL
- Explainable: "Why was this forgotten?"

Instead of expiring memories by time, this engine:
1. Adjusts decay rates based on importance and access patterns
2. Can strengthen frequently accessed memories
3. Provides explanations for forgetting decisions
"""

from __future__ import annotations

import time
import math
import logging
from typing import Any, Dict, List, Optional, TYPE_CHECKING
from dataclasses import dataclass

if TYPE_CHECKING:
    # MemoryEntry is only referenced in annotations (lazy under
    # `from __future__ import annotations`), so import it for type checking
    # only and avoid a hard runtime dependency on core.models.
    from synaptic_state.core.models import MemoryEntry

logger = logging.getLogger(__name__)


@dataclass
class DecayConfig:
    """Configuration for decay behavior."""
    # Base decay rates per memory type (multiplied by elapsed hours)
    episodic_decay: float = 0.02  # Experiences fade faster
    procedural_decay: float = 0.005  # Skills persist
    semantic_decay: float = 0.01  # Facts fade slowly
    symbolic_decay: float = 0.003  # Rules persist longest

    # Access-based reinforcement
    access_boost_per_use: float = 0.05
    max_access_boost: float = 0.3

    # Verification-based retention (fractional reduction of the decay rate)
    verified_retention_bonus: float = 0.1

    # Ebbinghaus forgetting curve parameters
    ebbinghaus_S: float = 100  # Initial memory strength (%)
    ebbinghaus_k: float = 1.25  # Forgetting constant


class LearnableForgetting:
    """
    Learnable decay and forgetting engine.

    Models how memories fade over time, with:
    - Type-specific decay rates
    - Access-based reinforcement (use it or lose it)
    - Verification-based retention
    - Ebbinghaus forgetting curve
    - Explainable decisions
    """

    def __init__(
        self,
        default_decay_rate: float = 0.01,
        config: Optional[DecayConfig] = None,
        enabled: bool = True,
    ):
        """
        Args:
            default_decay_rate: Fallback rate for unknown memory types.
            config: Decay tuning parameters; defaults to ``DecayConfig()``.
            enabled: When False, ``apply_decay_to_entry`` is a no-op.
        """
        self.default_decay_rate = default_decay_rate
        self.config = config or DecayConfig()
        self.enabled = enabled
        # Per-key event log ("commit" / "access_boost" / "forget") used for
        # explanations and stats.
        self.decay_history: Dict[str, List[Dict[str, Any]]] = {}
        self._forget_count = 0

    def on_commit(self, entry: MemoryEntry) -> None:
        """Record when a memory is committed (resets the key's history)."""
        self.decay_history[entry.key] = [{
            "event": "commit",
            "timestamp": time.time(),
            "strength": entry.current_strength,
            "importance": entry.importance,
            "decay_rate": entry.decay_rate,
        }]

    def on_forget(self, entry: MemoryEntry, reason: str = "expired") -> None:
        """Record when a memory is forgotten, with a human-readable reason."""
        if entry.key not in self.decay_history:
            self.decay_history[entry.key] = []

        self.decay_history[entry.key].append({
            "event": "forget",
            "timestamp": time.time(),
            "reason": reason,
            "final_strength": entry.current_strength,
        })
        self._forget_count += 1

    def on_access(self, entry: MemoryEntry) -> float:
        """
        Record memory access and apply reinforcement.

        Returns the new strength after access boost.
        """
        access_count = entry.access_count
        entry.access_count += 1
        entry.last_accessed = time.time()

        # Diminishing base boost as the entry accumulates accesses.
        boost = self.config.access_boost_per_use / (1 + access_count * 0.1)
        # Cap cumulative reinforcement: each previously recorded boost eats
        # 0.05 of the remaining allowance.  Fix: history events are recorded
        # as "access_boost" (the original counted "boost", which never
        # matched, so the cap was never applied).  Also clamp at zero so an
        # exhausted allowance cannot turn the boost into a penalty.
        prior_boosts = sum(
            h["event"] == "access_boost"
            for h in self.decay_history.get(entry.key, [])
        )
        boost = min(boost, self.config.max_access_boost - prior_boosts * 0.05)
        boost = max(0.0, boost)

        new_strength = min(1.0, entry.current_strength + boost)
        entry.current_strength = new_strength

        if entry.key not in self.decay_history:
            self.decay_history[entry.key] = []
        self.decay_history[entry.key].append({
            "event": "access_boost",
            "timestamp": time.time(),
            "strength_before": new_strength - boost,
            "strength_after": new_strength,
            "boost": boost,
            "access_count": entry.access_count,
        })

        return new_strength

    def apply_decay_to_entry(self, entry: MemoryEntry) -> None:
        """Apply time-based decay to a single entry (no-op when disabled)."""
        if not self.enabled:
            return

        now = time.time()
        elapsed_hours = max(0, (now - entry.updated_at) / 3600.0)

        if elapsed_hours < 0.01:  # Less than 36 seconds: skip the bookkeeping
            return

        # Type-specific decay rate; accepts both MemoryType enums (via
        # .value) and plain strings.
        mem_type = entry.memory_type.value if hasattr(entry.memory_type, 'value') else str(entry.memory_type)
        type_decay = {
            "episodic": self.config.episodic_decay,
            "procedural": self.config.procedural_decay,
            "semantic": self.config.semantic_decay,
            "symbolic": self.config.symbolic_decay,
        }.get(mem_type, self.default_decay_rate)

        # Verified memories decay more slowly.  Fix: the original computed a
        # bonus-adjusted decay value but never used it, so verification had
        # no effect on retention.
        if entry.is_verified:
            type_decay *= (1 - self.config.verified_retention_bonus)

        # Reference Ebbinghaus retention (%) for diagnostics only; the actual
        # strength update is delegated to entry.apply_decay.
        ebbinghaus_strength = (
            self.config.ebbinghaus_S *
            math.exp(-self.config.ebbinghaus_k * elapsed_hours)
        )
        logger.debug(
            "decay %s: elapsed=%.2fh rate=%.5f ebbinghaus=%.2f%%",
            entry.key, elapsed_hours, type_decay, ebbinghaus_strength,
        )

        entry.decay_rate = type_decay
        entry.apply_decay(elapsed_hours)

    def apply_global_decay(self) -> None:
        """
        Apply decay to all tracked entries.

        This should be called periodically (e.g., at the start of each recall).
        """
        if not self.enabled:
            return

        # Decay is applied per-entry during commit/recall in StatePlane
        pass

    def generate_explanation(self, key: str) -> str:
        """
        Explain why a memory was forgotten or has low strength.

        Returns a human-readable explanation.
        """
        history = self.decay_history.get(key, [])

        if not history:
            return f"No history found for '{key}'."

        explanations = []

        for event in history:
            if event["event"] == "commit":
                explanations.append(
                    f"Committed with importance {event['importance']:.2f} "
                    f"and initial strength {event['strength']:.2f}."
                )
            elif event["event"] == "forget":
                explanations.append(
                    f"Forgotten because: {event['reason']}. "
                    f"Final strength: {event.get('final_strength', 'N/A')}."
                )
            elif event["event"] == "access_boost":
                explanations.append(
                    f"Accessed {event['access_count']} times, "
                    f"strength boosted to {event['strength_after']:.2f}."
                )

        return " ".join(explanations)

    def get_decay_stats(self) -> Dict[str, Any]:
        """Get overall forgetting statistics (counts derived from history)."""
        total_committed = sum(
            1 for history in self.decay_history.values()
            if any(e["event"] == "commit" for e in history)
        )
        total_forgotten = sum(
            1 for history in self.decay_history.values()
            if any(e["event"] == "forget" for e in history)
        )

        return {
            "enabled": self.enabled,
            "tracked_keys": len(self.decay_history),
            "total_committed": total_committed,
            "total_forgotten": total_forgotten,
            "forget_rate": total_forgotten / max(total_committed, 1),
        }
@@ -0,0 +1 @@
1
+ """CLI for SynapticAI."""
@@ -0,0 +1,104 @@
#!/usr/bin/env python3
"""
SynapticAI CLI

Usage:
    synaptic commit --key USER_PREF --value '{"tabs": 2}'
    synaptic recall --intent "editor settings"
    synaptic forget --key USER_PREF
    synaptic explain --key USER_PREF
    synaptic status
"""

import argparse
import json
import sys

from synaptic_state.core.stateplane import StatePlane
import logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s")


def _parse_value(raw: str):
    """Parse --value as JSON when possible; fall back to the raw string.

    Fix: the original only attempted JSON when the value started with '{'
    or '[', so JSON numbers, booleans, and quoted strings were stored as
    raw text.
    """
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return raw


def main():
    """Entry point: parse CLI arguments and dispatch to the StatePlane API."""
    parser = argparse.ArgumentParser(description="SynapticAI CLI")
    sub = parser.add_subparsers(dest="command", help="Available commands")

    # Commit
    p_commit = sub.add_parser("commit", help="Commit a memory entry")
    p_commit.add_argument("--key", required=True, help="Memory key")
    p_commit.add_argument("--value", required=True, help="Memory value (JSON or string)")
    p_commit.add_argument("--type", default="episodic", choices=["episodic", "procedural", "semantic", "symbolic"])
    # Fix: the original used action="store_true" with default=True, which made
    # verification impossible to disable.  BooleanOptionalAction keeps
    # --verify working and adds --no-verify.
    p_commit.add_argument("--verify", action=argparse.BooleanOptionalAction, default=True)
    p_commit.add_argument("--ttl", type=float, default=None)
    p_commit.add_argument("--source", default="cli")
    p_commit.add_argument("--session", default="")

    # Recall
    p_recall = sub.add_parser("recall", help="Recall memories")
    p_recall.add_argument("--intent", default="", help="What you're looking for")
    p_recall.add_argument("--budget", type=int, default=2048)
    p_recall.add_argument("--strategy", default="rl_optimized")
    p_recall.add_argument("--min-confidence", type=float, default=0.3)

    # Forget
    p_forget = sub.add_parser("forget", help="Remove a memory entry")
    p_forget.add_argument("--key", required=True)

    # Explain
    p_explain = sub.add_parser("explain", help="Explain why a memory was forgotten")
    p_explain.add_argument("--key", required=True)

    # Status
    sub.add_parser("status", help="Show operational statistics")

    args = parser.parse_args()
    plane = StatePlane()

    if args.command == "commit":
        result = plane.commit(
            key=args.key,
            value=_parse_value(args.value),
            memory_type=args.type,
            verify=args.verify,
            ttl=args.ttl,
            source=args.source,
            session_id=args.session,
        )
        print(json.dumps(result, indent=2))

    elif args.command == "recall":
        result = plane.recall(
            intent=args.intent,
            budget_tokens=args.budget,
            strategy=args.strategy,
            min_confidence=args.min_confidence,
        )
        output = {
            "memories": [{"key": m.key, "type": m.memory_type.value, "strength": m.current_strength} for m in result.memories],
            "total_tokens": result.total_tokens_estimated,
            "budget_used": f"{result.total_tokens_estimated}/{result.budget_requested}",
            "stale_count": result.stale_count,
            "conflict_count": result.conflict_count,
        }
        print(json.dumps(output, indent=2))

    elif args.command == "forget":
        entry = plane.forget(args.key)
        if entry:
            print(f"Forgotten: {args.key}")
        else:
            print(f"Key not found: {args.key}")

    elif args.command == "explain":
        explanation = plane.explain_forgetting(args.key)
        print(json.dumps(explanation, indent=2))

    elif args.command == "status":
        stats = plane.get_stats()
        drift = plane.get_drift_report()
        print(json.dumps({"stats": stats, "drift": drift}, indent=2))

    else:
        # No (or unknown) subcommand: show usage.
        parser.print_help()


if __name__ == "__main__":
    main()
@@ -0,0 +1,6 @@
"""Core components: StatePlane, AGM Belief Revision, Verification Gate."""

# Re-export the core public API at the subpackage level.
from synaptic_state.core.stateplane import StatePlane
from synaptic_state.core.agm import AGMEngine, BeliefState
from synaptic_state.core.verification import VerificationGate, VerificationResult

__all__ = ["StatePlane", "AGMEngine", "BeliefState", "VerificationGate", "VerificationResult"]