spatial-memory-mcp 1.0.3__py3-none-any.whl → 1.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of spatial-memory-mcp might be problematic. Click here for more details.
- spatial_memory/__init__.py +97 -97
- spatial_memory/__main__.py +241 -2
- spatial_memory/adapters/lancedb_repository.py +74 -5
- spatial_memory/config.py +115 -2
- spatial_memory/core/__init__.py +35 -0
- spatial_memory/core/cache.py +317 -0
- spatial_memory/core/circuit_breaker.py +297 -0
- spatial_memory/core/connection_pool.py +41 -3
- spatial_memory/core/consolidation_strategies.py +402 -0
- spatial_memory/core/database.py +791 -769
- spatial_memory/core/db_idempotency.py +242 -0
- spatial_memory/core/db_indexes.py +575 -0
- spatial_memory/core/db_migrations.py +584 -0
- spatial_memory/core/db_search.py +509 -0
- spatial_memory/core/db_versioning.py +177 -0
- spatial_memory/core/embeddings.py +156 -19
- spatial_memory/core/errors.py +75 -3
- spatial_memory/core/filesystem.py +178 -0
- spatial_memory/core/logging.py +194 -103
- spatial_memory/core/models.py +4 -0
- spatial_memory/core/rate_limiter.py +326 -105
- spatial_memory/core/response_types.py +497 -0
- spatial_memory/core/tracing.py +300 -0
- spatial_memory/core/validation.py +403 -319
- spatial_memory/factory.py +407 -0
- spatial_memory/migrations/__init__.py +40 -0
- spatial_memory/ports/repositories.py +52 -2
- spatial_memory/server.py +329 -188
- spatial_memory/services/export_import.py +61 -43
- spatial_memory/services/lifecycle.py +397 -122
- spatial_memory/services/memory.py +81 -4
- spatial_memory/services/spatial.py +129 -46
- spatial_memory/tools/definitions.py +695 -671
- {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/METADATA +83 -3
- spatial_memory_mcp-1.6.0.dist-info/RECORD +54 -0
- spatial_memory_mcp-1.0.3.dist-info/RECORD +0 -41
- {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/WHEEL +0 -0
- {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/entry_points.txt +0 -0
- {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/licenses/LICENSE +0 -0
spatial_memory/config.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
"""Configuration system for Spatial Memory MCP Server."""
|
|
2
2
|
|
|
3
3
|
from pathlib import Path
|
|
4
|
-
from typing import Any
|
|
4
|
+
from typing import Any, Literal
|
|
5
5
|
|
|
6
6
|
from pydantic import Field, SecretStr
|
|
7
7
|
from pydantic_settings import BaseSettings
|
|
@@ -27,6 +27,14 @@ class Settings(BaseSettings):
|
|
|
27
27
|
default=Path("./.spatial-memory"),
|
|
28
28
|
description="Path to LanceDB storage directory",
|
|
29
29
|
)
|
|
30
|
+
acknowledge_network_filesystem_risk: bool = Field(
|
|
31
|
+
default=False,
|
|
32
|
+
description=(
|
|
33
|
+
"Set to True to suppress warnings about network filesystem usage. "
|
|
34
|
+
"File-based locking does not work reliably on NFS/SMB/CIFS. "
|
|
35
|
+
"Only set this if you are certain only one instance will access the storage."
|
|
36
|
+
),
|
|
37
|
+
)
|
|
30
38
|
|
|
31
39
|
# Embedding Model
|
|
32
40
|
embedding_model: str = Field(
|
|
@@ -133,7 +141,7 @@ class Settings(BaseSettings):
|
|
|
133
141
|
ge=1,
|
|
134
142
|
description="Re-rank top (refine_factor * limit) candidates for accuracy",
|
|
135
143
|
)
|
|
136
|
-
index_type:
|
|
144
|
+
index_type: Literal["IVF_PQ", "IVF_FLAT", "HNSW_SQ"] = Field(
|
|
137
145
|
default="IVF_PQ",
|
|
138
146
|
description="Vector index type: IVF_PQ, IVF_FLAT, or HNSW_SQ",
|
|
139
147
|
)
|
|
@@ -480,6 +488,111 @@ class Settings(BaseSettings):
|
|
|
480
488
|
description="Maximum alpha for hybrid search (1.0=pure vector)",
|
|
481
489
|
)
|
|
482
490
|
|
|
491
|
+
# =========================================================================
|
|
492
|
+
# v1.5.3 Phase 1: Observability Settings
|
|
493
|
+
# =========================================================================
|
|
494
|
+
|
|
495
|
+
include_request_meta: bool = Field(
|
|
496
|
+
default=False,
|
|
497
|
+
description="Include _meta object in responses (request_id, timing, etc.)",
|
|
498
|
+
)
|
|
499
|
+
log_include_trace_context: bool = Field(
|
|
500
|
+
default=True,
|
|
501
|
+
description="Add [req=][agent=] trace context to log messages",
|
|
502
|
+
)
|
|
503
|
+
include_timing_breakdown: bool = Field(
|
|
504
|
+
default=False,
|
|
505
|
+
description="Include timing_ms breakdown in _meta (requires include_request_meta)",
|
|
506
|
+
)
|
|
507
|
+
|
|
508
|
+
# =========================================================================
|
|
509
|
+
# v1.5.3 Phase 2: Efficiency Settings
|
|
510
|
+
# =========================================================================
|
|
511
|
+
|
|
512
|
+
warm_up_on_start: bool = Field(
|
|
513
|
+
default=True,
|
|
514
|
+
description="Pre-load embedding model on startup for faster first request",
|
|
515
|
+
)
|
|
516
|
+
response_cache_enabled: bool = Field(
|
|
517
|
+
default=True,
|
|
518
|
+
description="Enable response caching for idempotent operations",
|
|
519
|
+
)
|
|
520
|
+
response_cache_max_size: int = Field(
|
|
521
|
+
default=1000,
|
|
522
|
+
ge=100,
|
|
523
|
+
le=100000,
|
|
524
|
+
description="Maximum number of cached responses (LRU eviction)",
|
|
525
|
+
)
|
|
526
|
+
response_cache_default_ttl: float = Field(
|
|
527
|
+
default=60.0,
|
|
528
|
+
ge=1.0,
|
|
529
|
+
le=3600.0,
|
|
530
|
+
description="Default TTL in seconds for cached responses",
|
|
531
|
+
)
|
|
532
|
+
response_cache_regions_ttl: float = Field(
|
|
533
|
+
default=300.0,
|
|
534
|
+
ge=60.0,
|
|
535
|
+
le=3600.0,
|
|
536
|
+
description="TTL in seconds for regions() responses (expensive operation)",
|
|
537
|
+
)
|
|
538
|
+
idempotency_enabled: bool = Field(
|
|
539
|
+
default=True,
|
|
540
|
+
description="Enable idempotency key support for write operations",
|
|
541
|
+
)
|
|
542
|
+
idempotency_key_ttl_hours: float = Field(
|
|
543
|
+
default=24.0,
|
|
544
|
+
ge=1.0,
|
|
545
|
+
le=168.0,
|
|
546
|
+
description="Hours to remember idempotency keys (max 7 days)",
|
|
547
|
+
)
|
|
548
|
+
|
|
549
|
+
# =========================================================================
|
|
550
|
+
# v1.5.3 Phase 3: Resilience Settings
|
|
551
|
+
# =========================================================================
|
|
552
|
+
|
|
553
|
+
rate_limit_per_agent_enabled: bool = Field(
|
|
554
|
+
default=True,
|
|
555
|
+
description="Enable per-agent rate limiting",
|
|
556
|
+
)
|
|
557
|
+
rate_limit_per_agent_rate: float = Field(
|
|
558
|
+
default=25.0,
|
|
559
|
+
ge=1.0,
|
|
560
|
+
le=1000.0,
|
|
561
|
+
description="Maximum operations per second per agent",
|
|
562
|
+
)
|
|
563
|
+
rate_limit_max_tracked_agents: int = Field(
|
|
564
|
+
default=20,
|
|
565
|
+
ge=1,
|
|
566
|
+
le=1000,
|
|
567
|
+
description="Maximum number of agents to track for rate limiting (LRU eviction)",
|
|
568
|
+
)
|
|
569
|
+
circuit_breaker_enabled: bool = Field(
|
|
570
|
+
default=True,
|
|
571
|
+
description="Enable circuit breaker for external dependencies",
|
|
572
|
+
)
|
|
573
|
+
circuit_breaker_failure_threshold: int = Field(
|
|
574
|
+
default=5,
|
|
575
|
+
ge=1,
|
|
576
|
+
le=100,
|
|
577
|
+
description="Number of consecutive failures before circuit opens",
|
|
578
|
+
)
|
|
579
|
+
circuit_breaker_reset_timeout: float = Field(
|
|
580
|
+
default=60.0,
|
|
581
|
+
ge=5.0,
|
|
582
|
+
le=600.0,
|
|
583
|
+
description="Seconds to wait before attempting half-open state",
|
|
584
|
+
)
|
|
585
|
+
backpressure_queue_enabled: bool = Field(
|
|
586
|
+
default=False,
|
|
587
|
+
description="Enable backpressure queue for overload protection (future)",
|
|
588
|
+
)
|
|
589
|
+
backpressure_queue_max_size: int = Field(
|
|
590
|
+
default=100,
|
|
591
|
+
ge=10,
|
|
592
|
+
le=10000,
|
|
593
|
+
description="Maximum queue depth when backpressure is enabled",
|
|
594
|
+
)
|
|
595
|
+
|
|
483
596
|
model_config = {
|
|
484
597
|
"env_prefix": "SPATIAL_MEMORY_",
|
|
485
598
|
"env_file": ".env",
|
spatial_memory/core/__init__.py
CHANGED
|
@@ -1,6 +1,15 @@
|
|
|
1
1
|
"""Core components for Spatial Memory MCP Server."""
|
|
2
2
|
|
|
3
|
+
from spatial_memory.core.circuit_breaker import (
|
|
4
|
+
CircuitBreaker,
|
|
5
|
+
CircuitOpenError,
|
|
6
|
+
CircuitState,
|
|
7
|
+
)
|
|
3
8
|
from spatial_memory.core.database import Database
|
|
9
|
+
from spatial_memory.core.db_idempotency import IdempotencyManager, IdempotencyRecord
|
|
10
|
+
from spatial_memory.core.db_indexes import IndexManager
|
|
11
|
+
from spatial_memory.core.db_search import SearchManager
|
|
12
|
+
from spatial_memory.core.db_versioning import VersionManager
|
|
4
13
|
from spatial_memory.core.embeddings import EmbeddingService
|
|
5
14
|
from spatial_memory.core.rate_limiter import RateLimiter
|
|
6
15
|
from spatial_memory.core.errors import (
|
|
@@ -37,6 +46,15 @@ from spatial_memory.core.models import (
|
|
|
37
46
|
VisualizationNode,
|
|
38
47
|
)
|
|
39
48
|
from spatial_memory.core.utils import to_aware_utc, to_naive_utc, utc_now, utc_now_naive
|
|
49
|
+
from spatial_memory.core.tracing import (
|
|
50
|
+
RequestContext,
|
|
51
|
+
TimingContext,
|
|
52
|
+
clear_context,
|
|
53
|
+
format_context_prefix,
|
|
54
|
+
get_current_context,
|
|
55
|
+
request_context,
|
|
56
|
+
set_context,
|
|
57
|
+
)
|
|
40
58
|
|
|
41
59
|
__all__ = [
|
|
42
60
|
# Errors - Base
|
|
@@ -73,6 +91,11 @@ __all__ = [
|
|
|
73
91
|
"FilterGroup",
|
|
74
92
|
# Core services
|
|
75
93
|
"Database",
|
|
94
|
+
"VersionManager",
|
|
95
|
+
"IndexManager",
|
|
96
|
+
"SearchManager",
|
|
97
|
+
"IdempotencyManager",
|
|
98
|
+
"IdempotencyRecord",
|
|
76
99
|
"EmbeddingService",
|
|
77
100
|
"RateLimiter",
|
|
78
101
|
# Utilities
|
|
@@ -80,4 +103,16 @@ __all__ = [
|
|
|
80
103
|
"utc_now_naive",
|
|
81
104
|
"to_naive_utc",
|
|
82
105
|
"to_aware_utc",
|
|
106
|
+
# Tracing
|
|
107
|
+
"RequestContext",
|
|
108
|
+
"TimingContext",
|
|
109
|
+
"get_current_context",
|
|
110
|
+
"set_context",
|
|
111
|
+
"clear_context",
|
|
112
|
+
"request_context",
|
|
113
|
+
"format_context_prefix",
|
|
114
|
+
# Circuit Breaker
|
|
115
|
+
"CircuitBreaker",
|
|
116
|
+
"CircuitOpenError",
|
|
117
|
+
"CircuitState",
|
|
83
118
|
]
|
|
@@ -0,0 +1,317 @@
|
|
|
1
|
+
"""Response cache with TTL and LRU eviction for Spatial Memory MCP Server.
|
|
2
|
+
|
|
3
|
+
This module provides a thread-safe response cache using:
|
|
4
|
+
- LRU (Least Recently Used) eviction when at capacity
|
|
5
|
+
- TTL (Time To Live) based expiration checked on get()
|
|
6
|
+
- Namespace-based invalidation for targeted cache clearing
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
from spatial_memory.core.cache import ResponseCache
|
|
10
|
+
|
|
11
|
+
cache = ResponseCache(max_size=1000, default_ttl=60.0)
|
|
12
|
+
|
|
13
|
+
# Basic get/set
|
|
14
|
+
cache.set("recall:default:query:5", results, ttl=30.0)
|
|
15
|
+
cached = cache.get("recall:default:query:5")
|
|
16
|
+
|
|
17
|
+
# Namespace invalidation
|
|
18
|
+
cache.invalidate_namespace("default") # Clears all keys containing "default"
|
|
19
|
+
|
|
20
|
+
# Stats
|
|
21
|
+
stats = cache.stats()
|
|
22
|
+
print(f"Hit rate: {stats.hits / (stats.hits + stats.misses):.2%}")
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
from __future__ import annotations
|
|
26
|
+
|
|
27
|
+
import logging
|
|
28
|
+
import threading
|
|
29
|
+
import time
|
|
30
|
+
from collections import OrderedDict
|
|
31
|
+
from dataclasses import dataclass
|
|
32
|
+
from typing import Any
|
|
33
|
+
|
|
34
|
+
logger = logging.getLogger(__name__)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@dataclass
class CacheEntry:
    """One cached value together with its lifetime bookkeeping.

    Attributes:
        value: The payload stored for this key.
        expires_at: ``time.monotonic()`` timestamp after which the entry is stale.
        created_at: ``time.monotonic()`` timestamp at which the entry was stored.
    """

    value: Any
    expires_at: float
    created_at: float
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
@dataclass
class CacheStats:
    """Point-in-time snapshot of cache counters.

    Attributes:
        hits: Lookups that returned a live (non-expired) entry.
        misses: Lookups that found nothing (key absent or entry expired).
        evictions: Entries dropped to make room when at capacity.
        size: Number of entries currently stored.
        max_size: Capacity limit of the cache.
    """

    hits: int
    misses: int
    evictions: int
    size: int
    max_size: int

    @property
    def hit_rate(self) -> float:
        """Fraction of lookups that were hits.

        Returns:
            A float in [0.0, 1.0]; 0.0 when there has been no traffic at all.
        """
        requests = self.hits + self.misses
        if requests == 0:
            return 0.0
        return self.hits / requests
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class ResponseCache:
    """Thread-safe response cache combining LRU eviction with TTL expiry.

    Intended for MCP tool responses. Keys are strings conventionally shaped
    like ``"tool:namespace:query:limit"`` so that all cached results for one
    namespace can be dropped at once via substring matching.

    Implementation notes:
        - An ``OrderedDict`` gives O(1) LRU bookkeeping (``move_to_end`` /
          ``popitem(last=False)``).
        - ``time.monotonic()`` drives TTL, so wall-clock adjustments cannot
          prematurely expire or resurrect entries.
        - A single ``threading.Lock`` guards every mutation and counter.

    Example:
        cache = ResponseCache(max_size=1000, default_ttl=60.0)
        cache.set("recall:ns:query:10", result)           # default TTL
        cache.set("recall:ns:query:10", result, ttl=30.0) # custom TTL
        result = cache.get("recall:ns:query:10")          # None on miss
        count = cache.invalidate_namespace("ns")
        stats = cache.stats()
    """

    def __init__(self, max_size: int = 1000, default_ttl: float = 60.0) -> None:
        """Create a cache.

        Args:
            max_size: Capacity limit; least-recently-used entries are evicted
                past this. Must be positive.
            default_ttl: Lifetime in seconds applied when ``set`` is called
                without an explicit ``ttl``. Must be positive.

        Raises:
            ValueError: If either argument is not positive.
        """
        if max_size <= 0:
            raise ValueError("max_size must be positive")
        if default_ttl <= 0:
            raise ValueError("default_ttl must be positive")

        self._max_size = max_size
        self._default_ttl = default_ttl
        self._cache: OrderedDict[str, CacheEntry] = OrderedDict()
        self._lock = threading.Lock()
        self._hits = 0
        self._misses = 0
        self._evictions = 0

    def get(self, key: str) -> Any | None:
        """Look up *key*, honoring expiry and refreshing LRU order.

        A hit promotes the entry to most-recently-used. An expired entry is
        deleted on the spot and counted as a miss.

        Args:
            key: The cache key to look up.

        Returns:
            The cached value, or None when absent or expired.
        """
        with self._lock:
            try:
                entry = self._cache[key]
            except KeyError:
                self._misses += 1
                return None

            if entry.expires_at < time.monotonic():
                # Stale: drop it lazily and report a miss.
                del self._cache[key]
                self._misses += 1
                logger.debug("Cache miss (expired): %s", key)
                return None

            # Live entry: promote to most-recently-used.
            self._cache.move_to_end(key)
            self._hits += 1
            logger.debug("Cache hit: %s", key)
            return entry.value

    def set(self, key: str, value: Any, ttl: float | None = None) -> None:
        """Store *value* under *key*.

        Updating an existing key refreshes its TTL and LRU position. Inserting
        a new key while at capacity first evicts least-recently-used entries.

        Args:
            key: The cache key.
            value: The value to cache.
            ttl: Lifetime in seconds; falls back to ``default_ttl`` when None.
                Must be positive when given.

        Raises:
            ValueError: If ttl is not positive.
        """
        if ttl is not None and ttl <= 0:
            raise ValueError("ttl must be positive")

        live_for = self._default_ttl if ttl is None else ttl
        stamp = time.monotonic()
        record = CacheEntry(value=value, expires_at=stamp + live_for, created_at=stamp)

        with self._lock:
            if key in self._cache:
                self._cache[key] = record
                self._cache.move_to_end(key)
                logger.debug("Cache update: %s (ttl=%.1fs)", key, live_for)
                return

            # Make room before inserting a brand-new key.
            while len(self._cache) >= self._max_size:
                oldest, _ = self._cache.popitem(last=False)
                self._evictions += 1
                logger.debug("Cache eviction (LRU): %s", oldest)

            self._cache[key] = record
            logger.debug("Cache set: %s (ttl=%.1fs)", key, live_for)

    def invalidate_namespace(self, namespace: str) -> int:
        """Drop every entry whose key contains *namespace* as a substring.

        NOTE: matching is plain substring containment, so e.g. "work" also
        matches keys for a "workshop" namespace — acceptable over-invalidation
        by design.

        Args:
            namespace: Substring to match against cache keys.

        Returns:
            How many entries were removed.
        """
        with self._lock:
            doomed = [k for k in self._cache if namespace in k]
            for k in doomed:
                del self._cache[k]

            if doomed:
                logger.debug(
                    "Cache invalidate namespace '%s': %d entries",
                    namespace,
                    len(doomed),
                )

            return len(doomed)

    def invalidate_all(self) -> int:
        """Empty the cache entirely.

        Returns:
            How many entries were removed.
        """
        with self._lock:
            removed = len(self._cache)
            self._cache.clear()
            logger.debug("Cache cleared: %d entries", removed)
            return removed

    def stats(self) -> CacheStats:
        """Return a consistent snapshot of counters and occupancy.

        Returns:
            CacheStats with hits, misses, evictions, size, and max_size.
        """
        with self._lock:
            return CacheStats(
                hits=self._hits,
                misses=self._misses,
                evictions=self._evictions,
                size=len(self._cache),
                max_size=self._max_size,
            )

    def reset_stats(self) -> None:
        """Zero the hit/miss/eviction counters without touching stored entries."""
        with self._lock:
            self._hits = 0
            self._misses = 0
            self._evictions = 0
            logger.debug("Cache stats reset")

    @property
    def max_size(self) -> int:
        """Capacity limit of the cache."""
        return self._max_size

    @property
    def default_ttl(self) -> float:
        """TTL in seconds applied when ``set`` receives no explicit ttl."""
        return self._default_ttl
|