synaptic-state 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. synaptic_state-0.1.0/LICENSE +21 -0
  2. synaptic_state-0.1.0/PKG-INFO +140 -0
  3. synaptic_state-0.1.0/README.md +108 -0
  4. synaptic_state-0.1.0/pyproject.toml +65 -0
  5. synaptic_state-0.1.0/setup.cfg +4 -0
  6. synaptic_state-0.1.0/src/synaptic_state/__init__.py +28 -0
  7. synaptic_state-0.1.0/src/synaptic_state/adaptive/__init__.py +4 -0
  8. synaptic_state-0.1.0/src/synaptic_state/adaptive/budget.py +186 -0
  9. synaptic_state-0.1.0/src/synaptic_state/adaptive/forgetting.py +225 -0
  10. synaptic_state-0.1.0/src/synaptic_state/cli/__init__.py +1 -0
  11. synaptic_state-0.1.0/src/synaptic_state/cli/main.py +104 -0
  12. synaptic_state-0.1.0/src/synaptic_state/core/__init__.py +6 -0
  13. synaptic_state-0.1.0/src/synaptic_state/core/agm.py +393 -0
  14. synaptic_state-0.1.0/src/synaptic_state/core/models.py +237 -0
  15. synaptic_state-0.1.0/src/synaptic_state/core/stateplane.py +537 -0
  16. synaptic_state-0.1.0/src/synaptic_state/core/verification.py +292 -0
  17. synaptic_state-0.1.0/src/synaptic_state/integrations/__init__.py +1 -0
  18. synaptic_state-0.1.0/src/synaptic_state/integrations/langgraph.py +235 -0
  19. synaptic_state-0.1.0/src/synaptic_state/mcp_server/__init__.py +1 -0
  20. synaptic_state-0.1.0/src/synaptic_state/mcp_server/server.py +172 -0
  21. synaptic_state-0.1.0/src/synaptic_state/retrieval/__init__.py +3 -0
  22. synaptic_state-0.1.0/src/synaptic_state/retrieval/hybrid.py +233 -0
  23. synaptic_state-0.1.0/src/synaptic_state/storage/__init__.py +10 -0
  24. synaptic_state-0.1.0/src/synaptic_state/storage/backend.py +17 -0
  25. synaptic_state-0.1.0/src/synaptic_state/storage/memory.py +20 -0
  26. synaptic_state-0.1.0/src/synaptic_state/storage/postgres.py +230 -0
  27. synaptic_state-0.1.0/src/synaptic_state.egg-info/PKG-INFO +140 -0
  28. synaptic_state-0.1.0/src/synaptic_state.egg-info/SOURCES.txt +31 -0
  29. synaptic_state-0.1.0/src/synaptic_state.egg-info/dependency_links.txt +1 -0
  30. synaptic_state-0.1.0/src/synaptic_state.egg-info/entry_points.txt +2 -0
  31. synaptic_state-0.1.0/src/synaptic_state.egg-info/requires.txt +13 -0
  32. synaptic_state-0.1.0/src/synaptic_state.egg-info/top_level.txt +1 -0
  33. synaptic_state-0.1.0/tests/test_core.py +135 -0
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Atakan Elik
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,140 @@
1
+ Metadata-Version: 2.4
2
+ Name: synaptic-state
3
+ Version: 0.1.0
4
+ Summary: Verifiable State Plane for Autonomous Agents
5
+ Author: SynapticAI Team
6
+ License: MIT
7
+ Keywords: ai-agent-memory,context-management,belief-revision,state-plane,agm,mcp-server,verification,context-budget,neuro-symbolic
8
+ Classifier: Development Status :: 3 - Alpha
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.9
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
17
+ Requires-Python: >=3.9
18
+ Description-Content-Type: text/markdown
19
+ License-File: LICENSE
20
+ Requires-Dist: mcp>=1.0.0
21
+ Requires-Dist: pydantic>=2.0
22
+ Provides-Extra: dev
23
+ Requires-Dist: pytest>=7.0; extra == "dev"
24
+ Requires-Dist: pytest-cov>=4.0; extra == "dev"
25
+ Requires-Dist: pytest-asyncio>=0.21; extra == "dev"
26
+ Requires-Dist: mypy>=1.0; extra == "dev"
27
+ Requires-Dist: ruff>=0.1; extra == "dev"
28
+ Provides-Extra: server
29
+ Requires-Dist: fastapi>=0.110; extra == "server"
30
+ Requires-Dist: uvicorn>=0.27; extra == "server"
31
+ Dynamic: license-file
32
+
33
+ # 🧠⚡ SynapticAI
34
+
35
+ **Semantic Multi-Layer Memory System for AI Agents**
36
+
37
+ SynapticAI gives AI agents a unified memory architecture with semantic search, cross-layer bridging, and master index patterns — so your agent can actually *remember* and *connect the dots*.
38
+
39
+ ## The Problem
40
+
41
+ AI agents typically have 4-5 separate memory systems:
42
+ - Short-term injected memory (system prompt)
43
+ - Vector database entries (semantic search)
44
+ - Procedural skills (installation guides, workflows)
45
+ - Session transcripts (conversation history)
46
+ - Config files (tokens, env vars, auth)
47
+
48
+ **None of them talk to each other.** You have to query each one separately.
49
+
50
+ ## The Solution
51
+
52
+ SynapticAI connects them all with:
53
+
54
+ 1. **Master Index Pattern** — One authoritative entry per topic
55
+ 2. **Semantic Search First** — `fabric_recall` finds relevant info across all layers
56
+ 3. **Cross-Layer References** — Entries link to each other (`tags`, `session_id`, source)
57
+ 4. **Search Flow Priority** — Single query → ranked results from all layers
58
+
59
+ ## Architecture
60
+
61
+ ```
62
+ ┌─────────────────────────────────────────────┐
63
+ │ fabric_recall "query" │
64
+ │ (semantic search — first stop) │
65
+ ├─────────────┬───────────┬─────────┬─────────┤
66
+ │ Memory │ Fabric │ Session │ Skills │
67
+ │ (prefs) │ (notes, │ (history│ (procs) │
68
+ │ injected │ decisions│ + LLM │ + guide │
69
+ │ per-turn) │ + outcomes) │ sum) │ lines │
70
+ └─────────────┴───────────┴─────────┴─────────┘
71
+ │ │ │
72
+ └───────────┼───────────┘
73
+
74
+ Cross-Layer Index
75
+ (tags, session_id, refs)
76
+ ```
77
+
78
+ ## Search Flow
79
+
80
+ ```
81
+ 1. fabric_recall "query" → semantic search (priority 1)
82
+ 2. fabric_search "keyword" → exact keyword match
83
+ 3. session_search "query" → historical conversations
84
+ 4. skills → procedural how-tos
85
+ 5. memory → user preferences (always injected)
86
+ ```
87
+
88
+ ## Master Index Structure
89
+
90
+ Each master index is a single `fabric_write` entry tagged `master-index`:
91
+
92
+ ```markdown
93
+ # Master Index — Topic Name
94
+
95
+ ## Current Status
96
+ ## Components
97
+ ## Auth & Config
98
+ ## Related Skills
99
+ ## Decisions Made
100
+ ## Cross-References → other master entries
101
+ ```
102
+
103
+ ## Installation
104
+
105
+ ```bash
106
+ # For Hermes Agent users:
107
+ # 1. Clone this repo
108
+ # 2. Run the setup script
109
+ ./scripts/setup.sh
110
+
111
+ # The script creates:
112
+ # - Master index templates in ~/fabric/
113
+ # - unified-search skill in ~/.hermes/skills/
114
+ # - Memory bridge entries
115
+ ```
116
+
117
+ ## Usage
118
+
119
+ Once installed, any agent can retrieve cross-layer memory with:
120
+
121
+ ```
122
+ fabric_recall "any topic" → ranked semantic results
123
+ fabric_remember "decision" → log important decisions
124
+ fabric_link "entry_id" → cross-reference entries
125
+ ```
126
+
127
+ ## Why It Works
128
+
129
+ - **Lightweight** — No new infrastructure, just patterns on existing tools
130
+ - **Agent-native** — Designed for AI agent memory, not human note-taking
131
+ - **Extensible** — Works with fabric, session_search, skills, any config store
132
+ - **Search-first** — Single query replaces 5 separate lookups
133
+
134
+ ## License
135
+
136
+ MIT
137
+
138
+ ## Author
139
+
140
+ Atakan Elik (@atakanelik34)
@@ -0,0 +1,108 @@
1
+ # 🧠⚡ SynapticAI
2
+
3
+ **Semantic Multi-Layer Memory System for AI Agents**
4
+
5
+ SynapticAI gives AI agents a unified memory architecture with semantic search, cross-layer bridging, and master index patterns — so your agent can actually *remember* and *connect the dots*.
6
+
7
+ ## The Problem
8
+
9
+ AI agents typically have 4-5 separate memory systems:
10
+ - Short-term injected memory (system prompt)
11
+ - Vector database entries (semantic search)
12
+ - Procedural skills (installation guides, workflows)
13
+ - Session transcripts (conversation history)
14
+ - Config files (tokens, env vars, auth)
15
+
16
+ **None of them talk to each other.** You have to query each one separately.
17
+
18
+ ## The Solution
19
+
20
+ SynapticAI connects them all with:
21
+
22
+ 1. **Master Index Pattern** — One authoritative entry per topic
23
+ 2. **Semantic Search First** — `fabric_recall` finds relevant info across all layers
24
+ 3. **Cross-Layer References** — Entries link to each other (`tags`, `session_id`, source)
25
+ 4. **Search Flow Priority** — Single query → ranked results from all layers
26
+
27
+ ## Architecture
28
+
29
+ ```
30
+ ┌─────────────────────────────────────────────┐
31
+ │ fabric_recall "query" │
32
+ │ (semantic search — first stop) │
33
+ ├─────────────┬───────────┬─────────┬─────────┤
34
+ │ Memory │ Fabric │ Session │ Skills │
35
+ │ (prefs) │ (notes, │ (history│ (procs) │
36
+ │ injected │ decisions│ + LLM │ + guide │
37
+ │ per-turn) │ + outcomes) │ sum) │ lines │
38
+ └─────────────┴───────────┴─────────┴─────────┘
39
+ │ │ │
40
+ └───────────┼───────────┘
41
+
42
+ Cross-Layer Index
43
+ (tags, session_id, refs)
44
+ ```
45
+
46
+ ## Search Flow
47
+
48
+ ```
49
+ 1. fabric_recall "query" → semantic search (priority 1)
50
+ 2. fabric_search "keyword" → exact keyword match
51
+ 3. session_search "query" → historical conversations
52
+ 4. skills → procedural how-tos
53
+ 5. memory → user preferences (always injected)
54
+ ```
55
+
56
+ ## Master Index Structure
57
+
58
+ Each master index is a single `fabric_write` entry tagged `master-index`:
59
+
60
+ ```markdown
61
+ # Master Index — Topic Name
62
+
63
+ ## Current Status
64
+ ## Components
65
+ ## Auth & Config
66
+ ## Related Skills
67
+ ## Decisions Made
68
+ ## Cross-References → other master entries
69
+ ```
70
+
71
+ ## Installation
72
+
73
+ ```bash
74
+ # For Hermes Agent users:
75
+ # 1. Clone this repo
76
+ # 2. Run the setup script
77
+ ./scripts/setup.sh
78
+
79
+ # The script creates:
80
+ # - Master index templates in ~/fabric/
81
+ # - unified-search skill in ~/.hermes/skills/
82
+ # - Memory bridge entries
83
+ ```
84
+
85
+ ## Usage
86
+
87
+ Once installed, any agent can retrieve cross-layer memory with:
88
+
89
+ ```
90
+ fabric_recall "any topic" → ranked semantic results
91
+ fabric_remember "decision" → log important decisions
92
+ fabric_link "entry_id" → cross-reference entries
93
+ ```
94
+
95
+ ## Why It Works
96
+
97
+ - **Lightweight** — No new infrastructure, just patterns on existing tools
98
+ - **Agent-native** — Designed for AI agent memory, not human note-taking
99
+ - **Extensible** — Works with fabric, session_search, skills, any config store
100
+ - **Search-first** — Single query replaces 5 separate lookups
101
+
102
+ ## License
103
+
104
+ MIT
105
+
106
+ ## Author
107
+
108
+ Atakan Elik (@atakanelik34)
@@ -0,0 +1,65 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "synaptic-state"
7
+ version = "0.1.0"
8
+ description = "Verifiable State Plane for Autonomous Agents"
9
+ readme = "README.md"
10
+ license = {text = "MIT"}
11
+ authors = [{name = "SynapticAI Team"}]
12
+ requires-python = ">=3.9"
13
+ keywords = [
14
+ "ai-agent-memory",
15
+ "context-management",
16
+ "belief-revision",
17
+ "state-plane",
18
+ "agm",
19
+ "mcp-server",
20
+ "verification",
21
+ "context-budget",
22
+ "neuro-symbolic",
23
+ ]
24
+ classifiers = [
25
+ "Development Status :: 3 - Alpha",
26
+ "Intended Audience :: Developers",
27
+ "License :: OSI Approved :: MIT License",
28
+ "Programming Language :: Python :: 3",
29
+ "Programming Language :: Python :: 3.9",
30
+ "Programming Language :: Python :: 3.10",
31
+ "Programming Language :: Python :: 3.11",
32
+ "Programming Language :: Python :: 3.12",
33
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
34
+ ]
35
+
36
+ dependencies = [
37
+ "mcp>=1.0.0",
38
+ "pydantic>=2.0",
39
+ ]
40
+
41
+ [project.optional-dependencies]
42
+ dev = [
43
+ "pytest>=7.0",
44
+ "pytest-cov>=4.0",
45
+ "pytest-asyncio>=0.21",
46
+ "mypy>=1.0",
47
+ "ruff>=0.1",
48
+ ]
49
+ server = [
50
+ "fastapi>=0.110",
51
+ "uvicorn>=0.27",
52
+ ]
53
+
54
+ [project.scripts]
55
+ synaptic = "synaptic_state.cli.main:main"
56
+
57
+ [tool.setuptools.packages.find]
58
+ where = ["src"]
59
+
60
+ [tool.pytest.ini_options]
61
+ testpaths = ["tests"]
62
+
63
+ [tool.ruff]
64
+ target-version = "py39"
65
+ line-length = 100
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,28 @@
1
+ """
2
+ SynapticAI: Verifiable State Plane for Autonomous Agents
3
+
4
+ Remember less. Stay correct longer.
5
+ Neuro-Symbolic Memory That Learns What to Forget.
6
+ """
7
+
8
+ __version__ = "0.1.0"
9
+ __author__ = "SynapticAI Team"
10
+ __license__ = "MIT"
11
+
12
+ from synaptic_state.core.stateplane import StatePlane
13
+ from synaptic_state.core.agm import AGMEngine, BeliefState
14
+ from synaptic_state.core.verification import VerificationGate, VerificationResult
15
+ from synaptic_state.adaptive.forgetting import LearnableForgetting
16
+ from synaptic_state.retrieval.hybrid import HybridRetriever
17
+ from synaptic_state.adaptive.budget import ContextBudgetRL
18
+
19
+ __all__ = [
20
+ "StatePlane",
21
+ "AGMEngine",
22
+ "BeliefState",
23
+ "VerificationGate",
24
+ "VerificationResult",
25
+ "LearnableForgetting",
26
+ "HybridRetriever",
27
+ "ContextBudgetRL",
28
+ ]
@@ -0,0 +1,4 @@
1
+ """Adaptive layer: Budget Optimization and Learnable Forgetting."""
2
+ from synaptic_state.adaptive.budget import ContextBudgetRL
3
+ from synaptic_state.adaptive.forgetting import LearnableForgetting
4
+ __all__ = ["ContextBudgetRL", "LearnableForgetting"]
@@ -0,0 +1,186 @@
1
+ """
2
+ ContextBudget RL: Adaptive Context Budget Allocation
3
+
4
+ Based on the ContextBudget paper (arXiv:2604.01664 — NOTE(review): citation ID looks implausible, verify):
5
+ - Formulates context management as a sequential decision problem
6
+ - Learns optimal compression strategies under varying budgets
7
+
8
+ For MVP: Heuristic-based allocation (Phase 2: actual RL)
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ import time
14
+ import logging
15
+ from typing import Any, Dict, Optional
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
class ContextBudgetRL:
    """
    Adaptive context budget optimizer.

    Allocates a token budget across memory types ("episodic", "procedural",
    "semantic", "symbolic") based on:

    1. Task type (coding, analysis, conversation, etc.)
    2. History of what was most useful (recorded via :meth:`record_feedback`)
    3. Current context constraints (``available_types``)

    MVP: rule-based allocation with learning hooks.
    Phase 2: actual RL-trained policy network.
    """

    # Default allocation fractions (each profile sums to ~1.0) by strategy name.
    ALLOCATION_PROFILES = {
        "greedy": {
            "episodic": 0.4,
            "procedural": 0.2,
            "semantic": 0.25,
            "symbolic": 0.15,
        },
        "rl_optimized": {
            "episodic": 0.3,
            "procedural": 0.3,
            "semantic": 0.25,
            "symbolic": 0.15,
        },
        "symbolic_heavy": {
            "episodic": 0.15,
            "procedural": 0.2,
            "semantic": 0.3,
            "symbolic": 0.35,
        },
        "conversation": {
            "episodic": 0.5,
            "procedural": 0.1,
            "semantic": 0.3,
            "symbolic": 0.1,
        },
    }

    def __init__(
        self,
        default_budget: int = 2048,
        max_budget: int = 8192,
        learning_rate: float = 0.01,
    ):
        """
        Args:
            default_budget: token budget used when the caller does not request one.
            max_budget: hard cap applied to any requested budget.
            learning_rate: reserved for the Phase 2 RL policy update; unused in MVP.
        """
        self.default_budget = default_budget
        self.max_budget = max_budget
        self.learning_rate = learning_rate

        # Learning state (for Phase 2 RL). Each entry is a dict record;
        # _feedback_scores holds feedback records whose "score" field is 0-1.
        self._task_history: List[Dict[str, Any]] = []
        self._allocation_history: List[Dict[str, Any]] = []
        self._feedback_scores: List[Dict[str, Any]] = []

    def allocate_budget(
        self,
        requested_budget: Optional[int] = None,
        strategy: str = "rl_optimized",
        task_type: str = "generic",
        available_types: Optional[Dict[str, int]] = None,
    ) -> Dict[str, int]:
        """
        Allocate token budget across memory types.

        Args:
            requested_budget: desired total tokens; falls back to
                ``default_budget`` when None (or 0), capped at ``max_budget``.
            strategy: key into ``ALLOCATION_PROFILES``; unknown strategies
                fall back to "rl_optimized".
            task_type: adjusts the profile (see ``_adjust_for_task_type``).
            available_types: map of memory type -> count of available items;
                types with a count of 0 get a zero allocation.

        Returns:
            ``{"episodic": N, "procedural": N, "semantic": N, "symbolic": N}``
        """
        budget = min(
            requested_budget or self.default_budget,
            self.max_budget,
        )

        profile = self.ALLOCATION_PROFILES.get(strategy, self.ALLOCATION_PROFILES["rl_optimized"])

        # Apply task-type adjustments
        adjusted = self._adjust_for_task_type(profile, task_type)

        # Normalize the (possibly adjusted) fractions into integer token counts.
        total = sum(adjusted.values())
        if total > 0:
            allocation = {
                k: int(budget * v / total)
                for k, v in adjusted.items()
            }
        else:
            allocation = {k: budget // len(adjusted) for k in adjusted}

        # Apply availability constraints BEFORE logging, so the learning
        # history reflects the allocation actually returned to the caller
        # (previously history recorded the pre-constraint allocation).
        if available_types:
            for mem_type, available_count in available_types.items():
                if mem_type in allocation and available_count == 0:
                    allocation[mem_type] = 0

        # Log for learning
        self._allocation_history.append({
            "timestamp": time.time(),
            "budget": budget,
            "strategy": strategy,
            "task_type": task_type,
            "allocation": allocation,
        })

        return allocation

    def record_feedback(
        self,
        task_type: str,
        score: float,
        allocation: Dict[str, int],
        strategy: Optional[str] = None,
    ) -> None:
        """
        Record task feedback for future learning.

        Args:
            task_type: the task the allocation was used for.
            score: 0.0 (bad allocation) to 1.0 (perfect allocation).
            allocation: the allocation dict that was used.
            strategy: the strategy that produced the allocation. Optional for
                backward compatibility; without it, ``get_optimal_strategy``
                cannot attribute the score to a specific strategy.
        """
        self._feedback_scores.append({
            "timestamp": time.time(),
            "task_type": task_type,
            "score": score,
            "allocation": allocation,
            "strategy": strategy,
        })

    def _adjust_for_task_type(self, profile: Dict[str, float], task_type: str) -> Dict[str, float]:
        """Adjust allocation ratios based on task type (floor of 0.05 per type)."""
        adjusted = dict(profile)

        adjustments = {
            "coding": {"procedural": 0.15, "symbolic": 0.1, "episodic": -0.1, "semantic": -0.05},
            "debug": {"episodic": 0.15, "procedural": 0.1, "symbolic": 0.0, "semantic": 0.0},
            "analysis": {"semantic": 0.15, "episodic": 0.05, "procedural": -0.05, "symbolic": -0.05},
            "conversation": {"episodic": 0.2, "semantic": 0.1, "procedural": -0.15, "symbolic": -0.05},
        }

        task_adjust = adjustments.get(task_type, {})
        for mem_type, delta in task_adjust.items():
            if mem_type in adjusted:
                adjusted[mem_type] = max(0.05, adjusted[mem_type] + delta)

        return adjusted

    def get_optimal_strategy(self, task_type: str = "generic") -> str:
        """
        Suggest optimal strategy based on historical feedback.

        For Phase 2: this will use a learned RL policy.
        For MVP: return the strategy with the highest mean feedback score
        for this task type, defaulting to "rl_optimized".
        """
        if not self._feedback_scores:
            return "rl_optimized"

        task_feedback = [f for f in self._feedback_scores if f["task_type"] == task_type]
        if not task_feedback:
            return "rl_optimized"

        # Group scores by the strategy that produced them. Fix: read the
        # "strategy" field recorded by record_feedback (the old code looked
        # up allocation["_strategy"], a key nothing ever wrote, so every
        # score collapsed onto the default bucket). The old lookup is kept
        # as a fallback for any pre-existing feedback records.
        score_lists: Dict[str, List[float]] = {}
        for fb in task_feedback:
            strategy = (
                fb.get("strategy")
                or fb.get("allocation", {}).get("_strategy")
                or "rl_optimized"
            )
            score_lists.setdefault(strategy, []).append(fb["score"])

        if not score_lists:
            return "rl_optimized"

        best_strategy = max(
            score_lists.items(),
            key=lambda item: sum(item[1]) / len(item[1]),
        )
        return best_strategy[0]