superlocalmemory 2.5.0 → 2.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,12 +15,12 @@ Implements GraphRAG with Leiden community detection to:
15
15
  All processing is local - no external APIs.
16
16
 
17
17
  LIMITS:
18
- - MAX_MEMORIES_FOR_GRAPH: 5000 (prevents O(n²) explosion)
18
+ - MAX_MEMORIES_FOR_GRAPH: 10000 (prevents O(n²) explosion)
19
19
  - For larger datasets, use incremental updates
20
20
  """
21
21
 
22
22
  # SECURITY: Graph build limits to prevent resource exhaustion
23
- MAX_MEMORIES_FOR_GRAPH = 5000
23
+ MAX_MEMORIES_FOR_GRAPH = 10000
24
24
 
25
25
  import sqlite3
26
26
  import json
@@ -157,43 +157,82 @@ class EdgeBuilder:
157
157
  logger.warning("Need at least 2 memories to build edges")
158
158
  return 0
159
159
 
160
- # Compute pairwise cosine similarity
161
- similarity_matrix = cosine_similarity(vectors)
160
+ # Try HNSW-accelerated edge building first (O(n log n))
161
+ use_hnsw = False
162
+ try:
163
+ from hnsw_index import HNSWIndex
164
+ if len(memory_ids) >= 50: # HNSW overhead not worth it for small sets
165
+ use_hnsw = True
166
+ except ImportError:
167
+ pass
162
168
 
163
169
  edges_added = 0
164
170
  conn = sqlite3.connect(self.db_path)
165
171
  cursor = conn.cursor()
166
172
 
167
173
  try:
168
- for i in range(len(memory_ids)):
169
- for j in range(i + 1, len(memory_ids)):
170
- sim = similarity_matrix[i, j]
171
-
172
- if sim >= self.min_similarity:
173
- # Find shared entities
174
- entities_i = set(entities_list[i])
175
- entities_j = set(entities_list[j])
176
- shared = list(entities_i & entities_j)
177
-
178
- # Classify relationship type
179
- rel_type = self._classify_relationship(sim, shared)
180
-
181
- # Insert edge (or update if exists)
182
- cursor.execute('''
183
- INSERT OR REPLACE INTO graph_edges
184
- (source_memory_id, target_memory_id, relationship_type,
185
- weight, shared_entities, similarity_score)
186
- VALUES (?, ?, ?, ?, ?, ?)
187
- ''', (
188
- memory_ids[i],
189
- memory_ids[j],
190
- rel_type,
191
- float(sim),
192
- json.dumps(shared),
193
- float(sim)
194
- ))
195
-
196
- edges_added += 1
174
+ if use_hnsw:
175
+ logger.info("Using HNSW-accelerated edge building for %d memories", len(memory_ids))
176
+ try:
177
+ dim = vectors.shape[1]
178
+ hnsw = HNSWIndex(dimension=dim, max_elements=len(memory_ids))
179
+ hnsw.build(vectors, memory_ids)
180
+
181
+ for i in range(len(memory_ids)):
182
+ neighbors = hnsw.search(vectors[i], k=min(20, len(memory_ids) - 1))
183
+ for neighbor_id, similarity in neighbors:
184
+ if neighbor_id == memory_ids[i]:
185
+ continue # Skip self
186
+ # Only process each pair once (lower ID first)
187
+ if memory_ids[i] > neighbor_id:
188
+ continue
189
+ if similarity >= self.min_similarity:
190
+ # Find indices for entity lookup
191
+ j = memory_ids.index(neighbor_id)
192
+ entities_i = set(entities_list[i])
193
+ entities_j = set(entities_list[j])
194
+ shared = list(entities_i & entities_j)
195
+ rel_type = self._classify_relationship(similarity, shared)
196
+
197
+ cursor.execute('''
198
+ INSERT OR REPLACE INTO graph_edges
199
+ (source_memory_id, target_memory_id, relationship_type,
200
+ weight, shared_entities, similarity_score)
201
+ VALUES (?, ?, ?, ?, ?, ?)
202
+ ''', (
203
+ memory_ids[i], neighbor_id, rel_type,
204
+ float(similarity), json.dumps(shared), float(similarity)
205
+ ))
206
+ edges_added += 1
207
+
208
+ except Exception as e:
209
+ logger.warning("HNSW edge building failed, falling back to O(n²): %s", e)
210
+ use_hnsw = False # Fall through to O(n²) below
211
+
212
+ if not use_hnsw:
213
+ # Fallback: O(n²) pairwise cosine similarity
214
+ similarity_matrix = cosine_similarity(vectors)
215
+
216
+ for i in range(len(memory_ids)):
217
+ for j in range(i + 1, len(memory_ids)):
218
+ sim = similarity_matrix[i, j]
219
+
220
+ if sim >= self.min_similarity:
221
+ entities_i = set(entities_list[i])
222
+ entities_j = set(entities_list[j])
223
+ shared = list(entities_i & entities_j)
224
+ rel_type = self._classify_relationship(sim, shared)
225
+
226
+ cursor.execute('''
227
+ INSERT OR REPLACE INTO graph_edges
228
+ (source_memory_id, target_memory_id, relationship_type,
229
+ weight, shared_entities, similarity_score)
230
+ VALUES (?, ?, ?, ?, ?, ?)
231
+ ''', (
232
+ memory_ids[i], memory_ids[j], rel_type,
233
+ float(sim), json.dumps(shared), float(sim)
234
+ ))
235
+ edges_added += 1
197
236
 
198
237
  conn.commit()
199
238
  logger.info(f"Created {edges_added} edges")
@@ -829,7 +868,7 @@ class GraphEngine:
829
868
  Dictionary with build statistics
830
869
 
831
870
  Raises:
832
- ValueError: If too many memories (>5000) for safe processing
871
+ ValueError: If too many memories (>10000) for safe processing
833
872
  """
834
873
  start_time = time.time()
835
874
  logger.info("Starting full graph build...")
@@ -882,17 +921,47 @@ class GraphEngine:
882
921
  'fix': "Add more memories: superlocalmemoryv2:remember 'Your content here'"
883
922
  }
884
923
 
885
- # SECURITY: Prevent O(n²) explosion for large datasets
924
+ # SCALABILITY: Intelligent sampling for large datasets (v2.6)
886
925
  if len(memories) > MAX_MEMORIES_FOR_GRAPH:
887
- logger.error(f"Too many memories for graph build: {len(memories)}")
888
- return {
889
- 'success': False,
890
- 'error': 'too_many_memories',
891
- 'message': f"Graph build limited to {MAX_MEMORIES_FOR_GRAPH} memories for performance.",
892
- 'memories': len(memories),
893
- 'limit': MAX_MEMORIES_FOR_GRAPH,
894
- 'fix': "Use incremental updates or reduce memory count with compression."
895
- }
926
+ logger.warning(
927
+ "Memory count (%d) exceeds graph cap (%d). Using intelligent sampling.",
928
+ len(memories), MAX_MEMORIES_FOR_GRAPH
929
+ )
930
+ # Sample: 60% most recent + 40% highest importance (with overlap dedup)
931
+ recent_count = int(MAX_MEMORIES_FOR_GRAPH * 0.6)
932
+ important_count = int(MAX_MEMORIES_FOR_GRAPH * 0.4)
933
+
934
+ recent_memories = cursor.execute('''
935
+ SELECT id, content, summary FROM memories
936
+ WHERE profile = ?
937
+ ORDER BY created_at DESC
938
+ LIMIT ?
939
+ ''', (active_profile, recent_count)).fetchall()
940
+
941
+ important_memories = cursor.execute('''
942
+ SELECT id, content, summary FROM memories
943
+ WHERE profile = ?
944
+ ORDER BY importance DESC, access_count DESC
945
+ LIMIT ?
946
+ ''', (active_profile, important_count)).fetchall()
947
+
948
+ # Deduplicate by ID, preserving order
949
+ seen_ids = set()
950
+ memories = []
951
+ for m in recent_memories + important_memories:
952
+ if m[0] not in seen_ids:
953
+ seen_ids.add(m[0])
954
+ memories.append(m)
955
+ memories = memories[:MAX_MEMORIES_FOR_GRAPH]
956
+ logger.info("Sampled %d memories for graph build", len(memories))
957
+
958
+ elif len(memories) > MAX_MEMORIES_FOR_GRAPH * 0.8:
959
+ logger.warning(
960
+ "Approaching graph cap: %d/%d memories (%.0f%%). "
961
+ "Consider running memory compression.",
962
+ len(memories), MAX_MEMORIES_FOR_GRAPH,
963
+ len(memories) / MAX_MEMORIES_FOR_GRAPH * 100
964
+ )
896
965
 
897
966
  # Clear existing graph data for this profile's memories
898
967
  profile_memory_ids = [m[0] for m in memories]
@@ -150,7 +150,7 @@ class HybridSearchEngine:
150
150
  try:
151
151
  tags = json.loads(row[3])
152
152
  text_parts.extend(tags)
153
- except:
153
+ except Exception:
154
154
  pass
155
155
 
156
156
  doc_text = ' '.join(text_parts)
@@ -334,7 +334,7 @@ class HybridSearchEngine:
334
334
  new_score = current_score * similarity * (0.7 ** depth)
335
335
  queue.append((rel_id, new_score, depth + 1))
336
336
 
337
- except:
337
+ except Exception:
338
338
  # Graph operation failed - skip
339
339
  continue
340
340
 
@@ -81,9 +81,13 @@ class MemoryReset:
81
81
  'sessions'
82
82
  ]
83
83
 
84
+ VALID_TABLES = frozenset(tables) # Whitelist from hardcoded list above
85
+
84
86
  for table in tables:
85
87
  try:
86
- cursor.execute(f'DELETE FROM {table}')
88
+ if table not in VALID_TABLES:
89
+ raise ValueError(f"Invalid table name: {table}")
90
+ cursor.execute(f'DELETE FROM {table}') # Safe: validated against whitelist
87
91
  count = cursor.rowcount
88
92
  print(f" ✓ Cleared {table}: {count} rows deleted")
89
93
  except sqlite3.OperationalError as e:
@@ -141,12 +145,18 @@ class MemoryReset:
141
145
  'archive': ['memory_archive']
142
146
  }
143
147
 
148
+ VALID_LAYER_TABLES = frozenset(
149
+ t for tables_list in layer_tables.values() for t in tables_list
150
+ ) # Whitelist from hardcoded dict above
151
+
144
152
  for layer in layers:
145
153
  if layer in layer_tables:
146
154
  print(f"\n Clearing Layer: {layer.upper()}")
147
155
  for table in layer_tables[layer]:
148
156
  try:
149
- cursor.execute(f'DELETE FROM {table}')
157
+ if table not in VALID_LAYER_TABLES:
158
+ raise ValueError(f"Invalid table name: {table}")
159
+ cursor.execute(f'DELETE FROM {table}') # Safe: validated against whitelist
150
160
  count = cursor.rowcount
151
161
  print(f" ✓ Cleared {table}: {count} rows")
152
162
  except sqlite3.OperationalError as e:
@@ -378,10 +388,14 @@ class MemoryReset:
378
388
  'Archived Memories': 'memory_archive'
379
389
  }
380
390
 
391
+ VALID_STAT_TABLES = frozenset(tables.values()) # Whitelist from hardcoded dict above
392
+
381
393
  print("\nTable Statistics:")
382
394
  for name, table in tables.items():
383
395
  try:
384
- cursor.execute(f'SELECT COUNT(*) FROM {table}')
396
+ if table not in VALID_STAT_TABLES:
397
+ raise ValueError(f"Invalid table name: {table}")
398
+ cursor.execute(f'SELECT COUNT(*) FROM {table}') # Safe: validated against whitelist
385
399
  count = cursor.fetchone()[0]
386
400
  print(f" {name:20s}: {count:>5} rows")
387
401
  except sqlite3.OperationalError:
@@ -66,6 +66,9 @@ try:
66
66
  except ImportError:
67
67
  SKLEARN_AVAILABLE = False
68
68
 
69
+ import logging
70
+ logger = logging.getLogger(__name__)
71
+
69
72
  MEMORY_DIR = Path.home() / ".claude-memory"
70
73
  DB_PATH = MEMORY_DIR / "memory.db"
71
74
  VECTORS_PATH = MEMORY_DIR / "vectors"
@@ -137,8 +140,22 @@ class MemoryStoreV2:
137
140
  self.vectorizer = None
138
141
  self.vectors = None
139
142
  self.memory_ids = []
143
+ self._last_vector_count = 0
140
144
  self._load_vectors()
141
145
 
146
+ # HNSW index for O(log n) search (v2.6, optional)
147
+ self._hnsw_index = None
148
+ try:
149
+ from hnsw_index import HNSWIndex
150
+ if self.vectors is not None and len(self.memory_ids) > 0:
151
+ dim = self.vectors.shape[1]
152
+ self._hnsw_index = HNSWIndex(dimension=dim, max_elements=max(len(self.memory_ids) * 2, 1000))
153
+ self._hnsw_index.build(self.vectors.toarray() if hasattr(self.vectors, 'toarray') else self.vectors, self.memory_ids)
154
+ logger.info("HNSW index built with %d vectors", len(self.memory_ids))
155
+ except (ImportError, Exception) as e:
156
+ logger.debug("HNSW index not available: %s", e)
157
+ self._hnsw_index = None
158
+
142
159
  # =========================================================================
143
160
  # Connection helpers — abstract ConnectionManager vs direct sqlite3
144
161
  # =========================================================================
@@ -227,6 +244,14 @@ class MemoryStoreV2:
227
244
  def _do_init(conn):
228
245
  cursor = conn.cursor()
229
246
 
247
+ # Database integrity check (v2.6: detect corruption early)
248
+ try:
249
+ result = cursor.execute('PRAGMA quick_check').fetchone()
250
+ if result[0] != 'ok':
251
+ logger.warning("Database integrity issue detected: %s", result[0])
252
+ except Exception:
253
+ logger.warning("Could not run database integrity check")
254
+
230
255
  # Check if we need to add V2 columns to existing table
231
256
  cursor.execute("PRAGMA table_info(memories)")
232
257
  existing_columns = {row[1] for row in cursor.fetchall()}
@@ -520,7 +545,7 @@ class MemoryStoreV2:
520
545
 
521
546
  # Emit event (v2.5 — Event Bus)
522
547
  self._emit_event("memory.created", memory_id=memory_id,
523
- content_preview=content[:100], tags=tags,
548
+ content_preview="[redacted]", tags=tags,
524
549
  project=project_name, importance=importance)
525
550
 
526
551
  # Record provenance (v2.5 — who created this memory)
@@ -600,8 +625,34 @@ class MemoryStoreV2:
600
625
  active_profile = self._get_active_profile()
601
626
 
602
627
  with self._read_connection() as conn:
603
- # Method 1: TF-IDF semantic search
628
+ # Method 0: HNSW accelerated search (O(log n), v2.6)
629
+ _hnsw_used = False
604
630
  if SKLEARN_AVAILABLE and self.vectorizer is not None and self.vectors is not None:
631
+ try:
632
+ from hnsw_index import HNSWIndex
633
+ if hasattr(self, '_hnsw_index') and self._hnsw_index is not None:
634
+ query_vec = self.vectorizer.transform([query]).toarray().flatten()
635
+ hnsw_results = self._hnsw_index.search(query_vec, k=limit * 2)
636
+ cursor = conn.cursor()
637
+ for memory_id, score in hnsw_results:
638
+ if score > 0.05:
639
+ cursor.execute('''
640
+ SELECT id, content, summary, project_path, project_name, tags,
641
+ category, parent_id, tree_path, depth,
642
+ memory_type, importance, created_at, cluster_id,
643
+ last_accessed, access_count
644
+ FROM memories WHERE id = ? AND profile = ?
645
+ ''', (memory_id, active_profile))
646
+ row = cursor.fetchone()
647
+ if row and self._apply_filters(row, project_path, memory_type,
648
+ category, cluster_id, min_importance):
649
+ results.append(self._row_to_dict(row, score, 'hnsw'))
650
+ _hnsw_used = len(results) > 0
651
+ except (ImportError, Exception):
652
+ pass # HNSW not available, fall through to TF-IDF
653
+
654
+ # Method 1: TF-IDF semantic search (fallback if HNSW unavailable or returned no results)
655
+ if not _hnsw_used and SKLEARN_AVAILABLE and self.vectorizer is not None and self.vectors is not None:
605
656
  try:
606
657
  query_vec = self.vectorizer.transform([query])
607
658
  similarities = cosine_similarity(query_vec, self.vectors).flatten()
@@ -865,6 +916,25 @@ class MemoryStoreV2:
865
916
  if not SKLEARN_AVAILABLE:
866
917
  return
867
918
 
919
+ # Incremental optimization: skip rebuild if memory count hasn't changed much (v2.6)
920
+ if hasattr(self, '_last_vector_count') and self._last_vector_count > 0:
921
+ with self._read_connection() as conn:
922
+ cursor = conn.cursor()
923
+ active_profile = self._get_active_profile()
924
+ cursor.execute("PRAGMA table_info(memories)")
925
+ columns = {row[1] for row in cursor.fetchall()}
926
+ if 'profile' in columns:
927
+ cursor.execute('SELECT COUNT(*) FROM memories WHERE profile = ?', (active_profile,))
928
+ else:
929
+ cursor.execute('SELECT COUNT(*) FROM memories')
930
+ current_count = cursor.fetchone()[0]
931
+
932
+ # Only rebuild if count changed by more than 5% or is the first few memories
933
+ if self._last_vector_count > 10:
934
+ change_ratio = abs(current_count - self._last_vector_count) / self._last_vector_count
935
+ if change_ratio < 0.05:
936
+ return # Skip rebuild — vectors are still accurate enough
937
+
868
938
  active_profile = self._get_active_profile()
869
939
 
870
940
  with self._read_connection() as conn:
@@ -903,6 +973,7 @@ class MemoryStoreV2:
903
973
  ngram_range=(1, 2)
904
974
  )
905
975
  self.vectors = self.vectorizer.fit_transform(texts)
976
+ self._last_vector_count = len(self.memory_ids)
906
977
 
907
978
  # Save memory IDs as JSON (safe serialization)
908
979
  self.vectors_path.mkdir(exist_ok=True)
@@ -944,7 +1015,8 @@ class MemoryStoreV2:
944
1015
  return results
945
1016
 
946
1017
  def get_by_id(self, memory_id: int) -> Optional[Dict[str, Any]]:
947
- """Get a specific memory by ID (V1 compatible)."""
1018
+ """Get a specific memory by ID (V1 compatible, profile-aware)."""
1019
+ active_profile = self._get_active_profile()
948
1020
  with self._read_connection() as conn:
949
1021
  cursor = conn.cursor()
950
1022
 
@@ -952,8 +1024,8 @@ class MemoryStoreV2:
952
1024
  SELECT id, content, summary, project_path, project_name, tags,
953
1025
  category, parent_id, tree_path, depth, memory_type, importance,
954
1026
  created_at, cluster_id, last_accessed, access_count
955
- FROM memories WHERE id = ?
956
- ''', (memory_id,))
1027
+ FROM memories WHERE id = ? AND profile = ?
1028
+ ''', (memory_id, active_profile))
957
1029
 
958
1030
  row = cursor.fetchone()
959
1031
 
@@ -966,10 +1038,11 @@ class MemoryStoreV2:
966
1038
  return self._row_to_dict(row, 1.0, 'direct')
967
1039
 
968
1040
  def delete_memory(self, memory_id: int) -> bool:
969
- """Delete a specific memory (V1 compatible)."""
1041
+ """Delete a specific memory (V1 compatible, profile-aware)."""
1042
+ active_profile = self._get_active_profile()
970
1043
  def _do_delete(conn):
971
1044
  cursor = conn.cursor()
972
- cursor.execute('DELETE FROM memories WHERE id = ?', (memory_id,))
1045
+ cursor.execute('DELETE FROM memories WHERE id = ? AND profile = ?', (memory_id, active_profile))
973
1046
  deleted = cursor.rowcount > 0
974
1047
  conn.commit()
975
1048
  return deleted
@@ -0,0 +1,87 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ SuperLocalMemory V2 - Rate Limiter
4
+ Copyright (c) 2026 Varun Pratap Bhardwaj
5
+ Licensed under MIT License
6
+ """
7
+
8
+ """
9
+ Lightweight rate limiter using sliding window algorithm.
10
+ Pure stdlib — no external dependencies.
11
+
12
+ Defaults:
13
+ Write endpoints: 100 req/min per IP
14
+ Read endpoints: 300 req/min per IP
15
+
16
+ Configurable via environment variables:
17
+ SLM_RATE_LIMIT_WRITE=100
18
+ SLM_RATE_LIMIT_READ=300
19
+ SLM_RATE_LIMIT_WINDOW=60
20
+ """
21
+
22
+ import os
23
+ import time
24
+ import threading
25
+ from collections import defaultdict
26
+ from typing import Tuple
27
+
28
+ import logging
29
+ logger = logging.getLogger("superlocalmemory.ratelimit")
30
+
31
+ # Configurable via env vars
32
+ WRITE_LIMIT = int(os.environ.get('SLM_RATE_LIMIT_WRITE', '100'))
33
+ READ_LIMIT = int(os.environ.get('SLM_RATE_LIMIT_READ', '300'))
34
+ WINDOW_SECONDS = int(os.environ.get('SLM_RATE_LIMIT_WINDOW', '60'))
35
+
36
+
37
+ class RateLimiter:
38
+ """Thread-safe sliding window rate limiter."""
39
+
40
+ def __init__(self, max_requests: int = 100, window_seconds: int = 60):
41
+ self.max_requests = max_requests
42
+ self.window = window_seconds
43
+ self._requests: dict = defaultdict(list)
44
+ self._lock = threading.Lock()
45
+
46
+ def is_allowed(self, client_id: str) -> Tuple[bool, int]:
47
+ """
48
+ Check if request is allowed for this client.
49
+
50
+ Returns:
51
+ (allowed: bool, remaining: int) — whether request is allowed
52
+ and how many requests remain in the window
53
+ """
54
+ now = time.time()
55
+ cutoff = now - self.window
56
+
57
+ with self._lock:
58
+ # Remove expired entries
59
+ self._requests[client_id] = [
60
+ t for t in self._requests[client_id] if t > cutoff
61
+ ]
62
+
63
+ current = len(self._requests[client_id])
64
+
65
+ if current >= self.max_requests:
66
+ return False, 0
67
+
68
+ self._requests[client_id].append(now)
69
+ return True, self.max_requests - current - 1
70
+
71
+ def cleanup(self):
72
+ """Remove stale entries for clients that haven't made requests recently."""
73
+ now = time.time()
74
+ cutoff = now - self.window * 2 # Keep 2 windows of data
75
+
76
+ with self._lock:
77
+ stale_keys = [
78
+ k for k, v in self._requests.items()
79
+ if not v or max(v) < cutoff
80
+ ]
81
+ for k in stale_keys:
82
+ del self._requests[k]
83
+
84
+
85
+ # Singleton instances for write and read endpoints
86
+ write_limiter = RateLimiter(max_requests=WRITE_LIMIT, window_seconds=WINDOW_SECONDS)
87
+ read_limiter = RateLimiter(max_requests=READ_LIMIT, window_seconds=WINDOW_SECONDS)
@@ -20,13 +20,14 @@ v2.5 BEHAVIOR (this version):
20
20
  - Trust scores are updated in agent_registry.trust_score
21
21
  - Dashboard shows scores but they don't affect recall ordering yet
22
22
 
23
- v2.6 BEHAVIOR (future):
23
+ v2.6 BEHAVIOR (this version):
24
24
  - Trust scores visible in dashboard
25
- - Recall results ranked by trust (higher trust = higher in results)
25
+ - Active enforcement: agents with trust < 0.3 blocked from write/delete operations
26
+ - Quarantine and admin approval deferred to v3.0
26
27
 
27
28
  v3.0 BEHAVIOR (future):
28
- - Active enforcement: quarantine low-trust memories, rate limiting
29
- - Admin approval for untrusted agents
29
+ - Quarantine low-trust memories for manual review
30
+ - Admin approval workflow for untrusted agents
30
31
 
31
32
  Trust Signals (all silently collected):
32
33
  POSITIVE (increase trust):
@@ -200,7 +201,7 @@ class TrustScorer:
200
201
  agent_id: str,
201
202
  signal_type: str,
202
203
  context: Optional[dict] = None,
203
- ):
204
+ ) -> bool:
204
205
  """
205
206
  Record a trust signal for an agent.
206
207
 
@@ -376,6 +377,37 @@ class TrustScorer:
376
377
  score = self._get_agent_trust(agent_id)
377
378
  return score if score is not None else 1.0
378
379
 
380
+ def check_trust(self, agent_id: str, operation: str = "write") -> bool:
381
+ """
382
+ Check if agent is trusted enough for the given operation.
383
+
384
+ v2.6 enforcement: blocks write/delete for agents with trust < 0.3.
385
+ New agents start at 1.0 — only repeated bad behavior triggers blocking.
386
+
387
+ Args:
388
+ agent_id: The agent identifier
389
+ operation: One of "read", "write", "delete"
390
+
391
+ Returns:
392
+ True if operation is allowed, False if blocked
393
+ """
394
+ if operation == "read":
395
+ return True # Reads are always allowed
396
+
397
+ score = self._get_agent_trust(agent_id)
398
+ if score is None:
399
+ return True # Unknown agent = first-time = allowed (starts at 1.0)
400
+
401
+ threshold = 0.3 # Block write/delete below this
402
+ if score < threshold:
403
+ logger.warning(
404
+ "Trust enforcement: agent '%s' blocked from '%s' (trust=%.2f < %.2f)",
405
+ agent_id, operation, score, threshold
406
+ )
407
+ return False
408
+
409
+ return True
410
+
379
411
  def get_signals(self, agent_id: str, limit: int = 50) -> List[dict]:
380
412
  """Get recent trust signals for an agent."""
381
413
  try:
@@ -448,7 +480,7 @@ class TrustScorer:
448
480
  "by_signal_type": by_type,
449
481
  "by_agent": by_agent,
450
482
  "avg_trust_score": round(avg, 4) if avg else 1.0,
451
- "enforcement": "disabled (v2.5 — silent collection only)",
483
+ "enforcement": "enabled (v2.6 — write/delete blocked below 0.3 trust)",
452
484
  }
453
485
 
454
486
  except Exception as e:
@@ -24,10 +24,13 @@ Security:
24
24
  - No private/internal IP blocking in v2.5 (added in v2.6 with trust enforcement)
25
25
  """
26
26
 
27
+ import ipaddress
27
28
  import json
28
29
  import logging
30
+ import socket
29
31
  import threading
30
32
  import time
33
+ import urllib.parse
31
34
  from queue import Queue, Empty
32
35
  from typing import Optional, Dict
33
36
  from datetime import datetime
@@ -49,6 +52,16 @@ except ImportError:
49
52
  HTTP_AVAILABLE = False
50
53
 
51
54
 
55
+ def _is_private_ip(hostname: str) -> bool:
56
+ """Check if hostname resolves to a private/internal IP address."""
57
+ try:
58
+ ip_str = socket.gethostbyname(hostname)
59
+ ip = ipaddress.ip_address(ip_str)
60
+ return ip.is_private or ip.is_loopback or ip.is_link_local or ip.is_reserved
61
+ except (socket.gaierror, ValueError):
62
+ return False # DNS resolution failed — allow (might be valid external hostname)
63
+
64
+
52
65
  class WebhookDispatcher:
53
66
  """
54
67
  Background webhook delivery with retry logic.
@@ -119,6 +132,10 @@ class WebhookDispatcher:
119
132
  if not webhook_url or not (webhook_url.startswith("http://") or webhook_url.startswith("https://")):
120
133
  raise ValueError(f"Invalid webhook URL: {webhook_url}")
121
134
 
135
+ parsed = urllib.parse.urlparse(webhook_url)
136
+ if parsed.hostname and _is_private_ip(parsed.hostname):
137
+ raise ValueError(f"Webhook URL points to private/internal network: {webhook_url}")
138
+
122
139
  try:
123
140
  self._queue.put_nowait({
124
141
  "event": event,