superlocalmemory 2.6.0 → 2.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/CHANGELOG.md +167 -1803
  2. package/README.md +212 -397
  3. package/bin/slm +179 -3
  4. package/bin/superlocalmemoryv2:learning +4 -0
  5. package/bin/superlocalmemoryv2:patterns +4 -0
  6. package/docs/ACCESSIBILITY.md +291 -0
  7. package/docs/ARCHITECTURE.md +12 -6
  8. package/docs/FRAMEWORK-INTEGRATIONS.md +300 -0
  9. package/docs/MCP-MANUAL-SETUP.md +14 -4
  10. package/install.sh +99 -3
  11. package/mcp_server.py +291 -1
  12. package/package.json +2 -1
  13. package/requirements-learning.txt +12 -0
  14. package/scripts/verify-v27.sh +233 -0
  15. package/skills/slm-show-patterns/SKILL.md +224 -0
  16. package/src/learning/__init__.py +201 -0
  17. package/src/learning/adaptive_ranker.py +826 -0
  18. package/src/learning/cross_project_aggregator.py +866 -0
  19. package/src/learning/engagement_tracker.py +638 -0
  20. package/src/learning/feature_extractor.py +461 -0
  21. package/src/learning/feedback_collector.py +690 -0
  22. package/src/learning/learning_db.py +842 -0
  23. package/src/learning/project_context_manager.py +582 -0
  24. package/src/learning/source_quality_scorer.py +685 -0
  25. package/src/learning/synthetic_bootstrap.py +1047 -0
  26. package/src/learning/tests/__init__.py +0 -0
  27. package/src/learning/tests/test_adaptive_ranker.py +328 -0
  28. package/src/learning/tests/test_aggregator.py +309 -0
  29. package/src/learning/tests/test_feedback_collector.py +295 -0
  30. package/src/learning/tests/test_learning_db.py +606 -0
  31. package/src/learning/tests/test_project_context.py +296 -0
  32. package/src/learning/tests/test_source_quality.py +355 -0
  33. package/src/learning/tests/test_synthetic_bootstrap.py +433 -0
  34. package/src/learning/tests/test_workflow_miner.py +322 -0
  35. package/src/learning/workflow_pattern_miner.py +665 -0
  36. package/ui/index.html +346 -13
  37. package/ui/js/clusters.js +90 -1
  38. package/ui/js/graph-core.js +445 -0
  39. package/ui/js/graph-cytoscape-monolithic-backup.js +1168 -0
  40. package/ui/js/graph-cytoscape.js +1168 -0
  41. package/ui/js/graph-d3-backup.js +32 -0
  42. package/ui/js/graph-filters.js +220 -0
  43. package/ui/js/graph-interactions.js +354 -0
  44. package/ui/js/graph-ui.js +214 -0
  45. package/ui/js/memories.js +52 -0
  46. package/ui/js/modal.js +104 -1
@@ -0,0 +1,582 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ SuperLocalMemory V2 - Project Context Manager (v2.7)
4
+ Copyright (c) 2026 Varun Pratap Bhardwaj
5
+ Licensed under MIT License
6
+
7
+ Repository: https://github.com/varun369/SuperLocalMemoryV2
8
+ Author: Varun Pratap Bhardwaj (Solution Architect)
9
+
10
+ NOTICE: This software is protected by MIT License.
11
+ Attribution must be preserved in all copies or derivatives.
12
+ """
13
+
14
+ """
15
+ ProjectContextManager — Layer 2: Multi-signal project detection.
16
+
17
+ Detects the current active project using 4 weighted signals, not just
18
+ the explicit project_name tag. This improves recall by boosting memories
19
+ from the currently active project context.
20
+
21
+ Signal architecture:
22
+ 1. project_tag (weight 3) — Explicit project_name field in memories
23
+ 2. project_path (weight 2) — File path analysis (extract project dir)
24
+ 3. active_profile (weight 1) — Profile name (weak signal)
25
+ 4. content_cluster (weight 1) — Cluster co-occurrence in recent memories
26
+
27
+ Winner-take-all with 40% threshold: the candidate project must accumulate
28
+ more than 40% of the total weighted signal to be declared the current
29
+ project. If no candidate clears the threshold, returns None (ambiguous).
30
+
31
+ Design principles:
32
+ - Reads memory.db in READ-ONLY mode (never writes to memory.db)
33
+ - Handles missing columns gracefully (older DBs lack project_name)
34
+ - Thread-safe: each method opens/closes its own connection
35
+ - Zero external dependencies (pure stdlib)
36
+ """
37
+
38
+ import json
39
+ import logging
40
+ import sqlite3
41
+ import threading
42
+ from collections import Counter
43
+ from pathlib import Path
44
+ from typing import Optional, List, Dict, Any
45
+
46
# Module-level logger for the learning subsystem's project-context component.
logger = logging.getLogger("superlocalmemory.learning.project_context")

# Default on-disk locations used by SuperLocalMemory.
# memory.db is only ever opened read-only by this module.
MEMORY_DIR = Path.home() / ".claude-memory"
MEMORY_DB_PATH = MEMORY_DIR / "memory.db"
PROFILES_JSON = MEMORY_DIR / "profiles.json"

# Directories commonly found as parents of project roots.
# Used by _extract_project_from_path to identify where the
# project directory begins in a file path.
_PROJECT_PARENT_DIRS = frozenset({
    "projects", "repos", "repositories", "workspace", "workspaces",
    "code", "development", "github", "gitlab",
    "bitbucket", "Documents", "sites", "apps", "services",
    "AGENTIC_Official",  # Varun's workspace convention
})

# Directories that are NOT project names (too generic / too deep).
# Path extraction skips these when walking for a project candidate.
_SKIP_DIRS = frozenset({
    "src", "lib", "bin", "node_modules", "venv", ".venv", "env",
    ".git", "__pycache__", "dist", "build", "target", "out",
    ".cache", ".config", "tmp", "temp", "logs", "test", "tests",
    "vendor", "packages", "deps",
})
69
+
70
+
71
class ProjectContextManager:
    """
    Detects the currently active project using multi-signal analysis.

    Four weighted signals vote for candidate projects:

        1. project_tag     (weight 3) — explicit ``project_name`` field
        2. project_path    (weight 2) — project dir extracted from file paths
        3. active_profile  (weight 1) — profile name (weak signal)
        4. content_cluster (weight 1) — cluster co-occurrence in recent memories

    Winner-take-all with a 40% threshold: the top candidate must accumulate
    more than 40% of the total weighted vote, otherwise detection is
    ambiguous and ``detect_current_project`` returns None.

    Usage:
        pcm = ProjectContextManager()
        project = pcm.detect_current_project()
        if project:
            boost = pcm.get_project_boost(memory, project)

    Thread-safe: each query opens and closes its own read-only SQLite
    connection, and the schema-column cache is guarded by a lock.
    """

    # Relative importance of each detection signal (higher = stronger).
    SIGNAL_WEIGHTS: Dict[str, int] = {
        'project_tag': 3,      # Explicit project_name field
        'project_path': 2,     # File path analysis
        'active_profile': 1,   # Profile name (weak signal)
        'content_cluster': 1,  # Cluster co-occurrence
    }

    def __init__(self, memory_db_path: Optional[Path] = None):
        """
        Initialize ProjectContextManager.

        Args:
            memory_db_path: Path to memory.db. Defaults to
                ~/.claude-memory/memory.db. Opened read-only.
        """
        self._memory_db_path = Path(memory_db_path) if memory_db_path else MEMORY_DB_PATH
        # Fix: the lock was previously created but never used, so concurrent
        # callers could race on the column cache. It now guards all reads,
        # writes, and invalidations of _available_columns.
        self._lock = threading.Lock()
        # Cache of memories-table column names; avoids repeated PRAGMA calls.
        self._available_columns: Optional[set] = None
        logger.info(
            "ProjectContextManager initialized: db=%s",
            self._memory_db_path,
        )

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def detect_current_project(
        self,
        recent_memories: Optional[List[Dict[str, Any]]] = None,
    ) -> Optional[str]:
        """
        Detect the currently active project from recent memory activity.

        Applies 4 weighted signals. The winner must accumulate >40% of the
        total weighted signal to be declared current. Returns None when
        ambiguous or insufficient data.

        Args:
            recent_memories: Pre-fetched list of memory dicts.
                If None, the last 20 memories are fetched from memory.db.

        Returns:
            Project name string or None if undetermined.
        """
        if recent_memories is None:
            recent_memories = self._get_recent_memories(limit=20)

        if not recent_memories:
            logger.debug("No recent memories — cannot detect project")
            return None

        # Accumulate weighted votes per candidate project
        votes: Counter = Counter()

        # --- Signal 1: project_tag (weight 3) ---
        for mem in recent_memories:
            pname = self._safe_get(mem, 'project_name')
            if pname:
                votes[pname] += self.SIGNAL_WEIGHTS['project_tag']

        # --- Signal 2: project_path (weight 2) ---
        for mem in recent_memories:
            ppath = self._safe_get(mem, 'project_path')
            if ppath:
                extracted = self._extract_project_from_path(ppath)
                if extracted:
                    votes[extracted] += self.SIGNAL_WEIGHTS['project_path']

        # --- Signal 3: active_profile (weight 1) ---
        # Profile is a weak signal. If the profile name coincides with an
        # existing candidate it boosts that candidate; otherwise it enters
        # as a weak standalone candidate (votes[] handles both cases).
        active_profile = self._get_active_profile()
        if active_profile and active_profile != 'default':
            votes[active_profile] += self.SIGNAL_WEIGHTS['active_profile']

        # --- Signal 4: content_cluster (weight 1) ---
        cluster_project = self._match_content_to_clusters(recent_memories)
        if cluster_project:
            votes[cluster_project] += self.SIGNAL_WEIGHTS['content_cluster']

        if not votes:
            logger.debug("No project signals detected in recent memories")
            return None

        # Winner-take-all with 40% threshold
        total_weight = sum(votes.values())
        winner, winner_weight = votes.most_common(1)[0]
        winner_ratio = winner_weight / total_weight if total_weight > 0 else 0.0

        if winner_ratio > 0.4:
            logger.debug(
                "Project detected: '%s' (%.0f%% of signal, %d total weight)",
                winner, winner_ratio * 100, total_weight,
            )
            return winner

        logger.debug(
            "No clear project winner: top='%s' at %.0f%% (threshold 40%%)",
            winner, winner_ratio * 100,
        )
        return None

    def get_project_boost(
        self,
        memory: Dict[str, Any],
        current_project: Optional[str] = None,
    ) -> float:
        """
        Return a boost factor for ranking based on project match.

        Args:
            memory: A memory dict with at least 'project_name' or
                'project_path' fields.
            current_project: The detected current project (from
                detect_current_project). If None, returns neutral.

        Returns:
            1.0 — memory matches current project (boost)
            0.6 — project unknown or memory has no project info (neutral)
            0.3 — memory belongs to a different project (penalty)
        """
        if current_project is None:
            return 0.6  # Unknown project context — neutral

        # Check explicit project_name (case-insensitive compare)
        mem_project = self._safe_get(memory, 'project_name')
        if mem_project:
            if mem_project.lower() == current_project.lower():
                return 1.0
            return 0.3  # Definite mismatch

        # Check project_path as a fallback signal
        mem_path = self._safe_get(memory, 'project_path')
        if mem_path:
            extracted = self._extract_project_from_path(mem_path)
            if extracted:
                if extracted.lower() == current_project.lower():
                    return 1.0
                return 0.3  # Definite mismatch

        # Memory has no project info — neutral
        return 0.6

    # ------------------------------------------------------------------
    # Data fetching (memory.db — read-only)
    # ------------------------------------------------------------------

    def _get_recent_memories(self, limit: int = 20) -> List[Dict[str, Any]]:
        """
        Fetch the most recent memories from memory.db.

        Returns a list of dicts with available columns. Handles missing
        columns gracefully (older databases may lack project_name, etc.).
        """
        if not self._memory_db_path.exists():
            logger.debug("memory.db not found at %s", self._memory_db_path)
            return []

        available = self._get_available_columns()

        # Build SELECT with only available columns
        desired_cols = [
            'id', 'project_name', 'project_path', 'profile',
            'content', 'cluster_id', 'created_at',
        ]
        select_cols = [c for c in desired_cols if c in available]

        if not select_cols:
            logger.warning("memories table has none of the expected columns")
            return []

        # Always need at least 'id' — if missing, bail
        if 'id' not in available:
            return []

        # Safe to interpolate: both col_list and order_col come from the
        # whitelist above, never from user input.
        col_list = ", ".join(select_cols)
        order_col = 'created_at' if 'created_at' in available else 'id'

        try:
            conn = self._open_memory_db()
            try:
                cursor = conn.cursor()
                cursor.execute(
                    f"SELECT {col_list} FROM memories "
                    f"ORDER BY {order_col} DESC LIMIT ?",
                    (limit,),
                )
                rows = cursor.fetchall()
                # Pair each row with the selected column names
                return [dict(zip(select_cols, row)) for row in rows]
            finally:
                conn.close()
        except sqlite3.Error as e:
            logger.warning("Failed to read recent memories: %s", e)
            return []

    def _get_available_columns(self) -> set:
        """
        Get the set of column names in the memories table.

        Cached after first call to avoid repeated PRAGMA queries. The cache
        is guarded by self._lock so concurrent callers see a consistent set.
        """
        with self._lock:
            if self._available_columns is not None:
                return self._available_columns

        if not self._memory_db_path.exists():
            return set()

        try:
            conn = self._open_memory_db()
            try:
                cursor = conn.cursor()
                cursor.execute("PRAGMA table_info(memories)")
                # PRAGMA table_info rows: (cid, name, type, notnull, dflt, pk)
                cols = {row[1] for row in cursor.fetchall()}
                with self._lock:
                    self._available_columns = cols
                return cols
            finally:
                conn.close()
        except sqlite3.Error as e:
            logger.warning("Failed to read table schema: %s", e)
            return set()

    def _open_memory_db(self) -> sqlite3.Connection:
        """
        Open a read-only connection to memory.db.

        Uses uri=True with mode=ro to enforce read-only access.
        Falls back to a regular connection if URI mode fails
        (some older Python builds do not support it); that path still
        performs only reads.
        """
        db_str = str(self._memory_db_path)
        try:
            # Prefer URI-based read-only mode
            uri = f"file:{db_str}?mode=ro"
            conn = sqlite3.connect(uri, uri=True, timeout=5)
        except (sqlite3.OperationalError, sqlite3.NotSupportedError):
            # Fallback: regular connection (still only reads)
            conn = sqlite3.connect(db_str, timeout=5)
        # Wait up to 3s on a locked database instead of failing immediately
        conn.execute("PRAGMA busy_timeout=3000")
        return conn

    # ------------------------------------------------------------------
    # Signal extraction helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _extract_project_from_path(path: str) -> Optional[str]:
        """
        Extract a project name from a file path.

        Strategy:
            1. Walk path parts looking for a directory that follows a
               known parent directory (projects/, repos/, Documents/, etc.).
            2. If found, the directory immediately after the parent is the
               project name.
            3. Fallback: use the last non-skip directory component.

        Examples:
            /Users/x/projects/MY_PROJECT/src/main.py -> "MY_PROJECT"
            /home/x/repos/my-app/lib/util.js -> "my-app"
            /workspace/services/auth-service/index.ts -> "auth-service"

        Returns:
            Project name string or None if extraction fails.
        """
        if not path:
            return None

        try:
            parts = Path(path).parts
        except (ValueError, TypeError):
            return None

        if len(parts) < 2:
            return None

        # Strategy 1: find part after a known parent directory.
        # Skip consecutive parent dirs (e.g., workspace/services/ both
        # are parent dirs, so the project is the NEXT non-parent part).
        for i, part in enumerate(parts):
            if part in _PROJECT_PARENT_DIRS:
                # Walk forward past any chained parent dirs
                j = i + 1
                while j < len(parts) and parts[j] in _PROJECT_PARENT_DIRS:
                    j += 1
                if j < len(parts):
                    candidate = parts[j]
                    if (
                        candidate
                        and candidate not in _SKIP_DIRS
                        and not candidate.startswith('.')
                    ):
                        return candidate

        # Strategy 2: walk backwards to find last meaningful directory.
        # Skip leaf (likely a filename) and known non-project dirs.
        for part in reversed(parts[:-1]):  # exclude the last component (filename)
            if (
                part
                and part not in _SKIP_DIRS
                and part not in _PROJECT_PARENT_DIRS
                and not part.startswith('.')
                and not part.startswith('/')
                and len(part) > 1
            ):
                return part

        return None

    @staticmethod
    def _get_active_profile() -> Optional[str]:
        """
        Read the active profile name from profiles.json.

        Returns:
            Profile name string (e.g., "work", "personal") or None.
        """
        if not PROFILES_JSON.exists():
            return None

        try:
            with open(PROFILES_JSON, 'r') as f:
                config = json.load(f)
            # dict.get never raises KeyError, so only decode/IO errors
            # are handled below (dead KeyError handler removed).
            return config.get('active_profile', 'default')
        except (json.JSONDecodeError, OSError) as e:
            logger.debug("Failed to read profiles.json: %s", e)
            return None

    def _match_content_to_clusters(
        self,
        recent_memories: List[Dict[str, Any]],
    ) -> Optional[str]:
        """
        Check if recent memories converge on a single cluster.

        If the most recent 10 memories share a dominant cluster_id, look
        up that cluster's name in graph_clusters and cross-reference with
        the most common project_name within that cluster.

        Returns:
            A project name inferred from cluster dominance, or None.
        """
        # Collect cluster_ids from the most recent 10 memories
        cluster_ids = []
        for mem in recent_memories[:10]:
            cid = self._safe_get(mem, 'cluster_id')
            if cid is not None:
                cluster_ids.append(cid)

        if not cluster_ids:
            return None

        # Find dominant cluster
        cluster_counts = Counter(cluster_ids)
        dominant_id, dominant_count = cluster_counts.most_common(1)[0]

        # Require at least 40% dominance (and an absolute minimum of 2)
        if dominant_count < max(2, len(cluster_ids) * 0.4):
            return None

        # Look up the dominant project_name within that cluster
        return self._get_cluster_dominant_project(dominant_id)

    def _get_cluster_dominant_project(self, cluster_id: int) -> Optional[str]:
        """
        Find the most common project_name among memories in a given cluster.

        Falls back to the cluster name from graph_clusters if no explicit
        project_name is found.
        """
        if not self._memory_db_path.exists():
            return None

        available = self._get_available_columns()

        try:
            conn = self._open_memory_db()
            try:
                cursor = conn.cursor()

                # Try to find the most common project_name in this cluster
                if 'project_name' in available and 'cluster_id' in available:
                    cursor.execute(
                        "SELECT project_name, COUNT(*) as cnt "
                        "FROM memories "
                        "WHERE cluster_id = ? AND project_name IS NOT NULL "
                        "AND project_name != '' "
                        "GROUP BY project_name "
                        "ORDER BY cnt DESC LIMIT 1",
                        (cluster_id,),
                    )
                    row = cursor.fetchone()
                    if row and row[0]:
                        return row[0]

                # Fallback: use the cluster name from graph_clusters
                try:
                    cursor.execute(
                        "SELECT name FROM graph_clusters WHERE id = ?",
                        (cluster_id,),
                    )
                    row = cursor.fetchone()
                    if row and row[0]:
                        return row[0]
                except sqlite3.OperationalError:
                    # graph_clusters table may not exist
                    pass

                return None
            finally:
                conn.close()
        except sqlite3.Error as e:
            logger.debug(
                "Failed to query cluster %d project: %s", cluster_id, e
            )
            return None

    # ------------------------------------------------------------------
    # Utilities
    # ------------------------------------------------------------------

    @staticmethod
    def _safe_get(d: Dict[str, Any], key: str) -> Any:
        """
        Safely get a value from a dict, returning None for missing keys
        or empty/whitespace-only strings.
        """
        val = d.get(key)
        if val is None:
            return None
        if isinstance(val, str) and not val.strip():
            return None
        return val

    def invalidate_cache(self):
        """
        Clear the cached column set.

        Call this if the memory.db schema may have changed at runtime
        (e.g., after a migration adds new columns).
        """
        with self._lock:
            self._available_columns = None
538
+
539
+
540
# ======================================================================
# Standalone testing
# ======================================================================

if __name__ == "__main__":

    def _run_demo() -> None:
        """Exercise path extraction, project detection, and boost scoring."""
        manager = ProjectContextManager()

        # --- Path extraction ---
        sample_paths = [
            "/Users/varun/projects/SuperLocalMemoryV2/src/main.py",
            "/home/dev/repos/my-app/lib/util.js",
            "/workspace/services/auth-service/index.ts",
            "/Users/varun/Documents/AGENTIC_Official/SuperLocalMemoryV2-repo/src/learning/foo.py",
            "",
            None,
        ]
        print("=== Path Extraction Tests ===")
        for sample in sample_paths:
            extracted = ProjectContextManager._extract_project_from_path(sample)
            print(f" {sample!r:60s} -> {extracted!r}")

        # --- Full detection against the live memory.db ---
        print("\n=== Project Detection ===")
        detected = manager.detect_current_project()
        print(f" Detected project: {detected!r}")

        # --- Boost scoring ---
        print("\n=== Boost Tests ===")
        if detected is None:
            print(" No project detected — all boosts return 0.6 (neutral)")
            print(f" Neutral boost: {manager.get_project_boost({}, None)}")
            return

        boost_cases = [
            (" Match boost: ", {'project_name': detected}),
            (" Mismatch boost: ", {'project_name': 'other-project'}),
            (" Unknown boost: ", {'content': 'no project info'}),
        ]
        for label, mem in boost_cases:
            print(f"{label}{manager.get_project_boost(mem, detected)}")

    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
    )
    _run_demo()