superlocalmemory 3.0.36 → 3.1.0

This diff shows the changes between publicly released versions of this package as published to a supported registry. It is provided for informational purposes only.
@@ -109,7 +109,7 @@ class LLMBackbone:
109
109
  host = config.api_base or os.environ.get(
110
110
  "OLLAMA_HOST", _OLLAMA_DEFAULT_BASE,
111
111
  )
112
- self._base_url = f"{host.rstrip('/')}/v1/chat/completions"
112
+ self._base_url = f"{host.rstrip('/')}/api/chat"
113
113
  elif self._provider == "openrouter":
114
114
  self._api_key = config.api_key or os.environ.get(
115
115
  _ENV_KEYS.get(self._provider, ""), "",
@@ -129,32 +129,17 @@ class LLMBackbone:
129
129
  def is_available(self) -> bool:
130
130
  """True when the provider is ready for requests.
131
131
 
132
- For Ollama: only returns True if the model is already loaded in
133
- memory. Prevents cold-load memory spikes (5+ GB) during recall.
132
+ For Ollama: always True (no API key needed). The num_ctx and
133
+ keep_alive guards in _build_ollama() protect against memory spikes.
134
+ The recall-path warm-only guard lives in Summarizer, not here —
135
+ store/fact-extraction should always use the LLM in Mode B.
134
136
  """
135
137
  if not self._provider:
136
138
  return False
137
139
  if self._provider == "ollama":
138
- return self._is_ollama_model_warm()
140
+ return True
139
141
  return bool(self._api_key)
140
142
 
141
- def _is_ollama_model_warm(self) -> bool:
142
- """Check if the LLM model is already loaded in Ollama."""
143
- try:
144
- model_base = self._model.split(":")[0]
145
- resp = httpx.get(
146
- f"{_OLLAMA_DEFAULT_BASE}/api/ps",
147
- timeout=httpx.Timeout(2.0),
148
- )
149
- if resp.status_code != 200:
150
- return False
151
- for m in resp.json().get("models", []):
152
- if model_base in m.get("name", ""):
153
- return True
154
- return False
155
- except Exception:
156
- return False
157
-
158
143
  @property
159
144
  def provider(self) -> str:
160
145
  return self._provider
@@ -266,13 +251,19 @@ class LLMBackbone:
266
251
  ) -> tuple[str, dict[str, str], dict]:
267
252
  messages = self._make_messages(system, prompt)
268
253
  headers = {"Content-Type": "application/json"}
254
+ # Native /api/chat format — NOT /v1/chat/completions.
255
+ # The OpenAI-compatible endpoint silently ignores options.num_ctx,
256
+ # causing Ollama to use the model's default (131K for llama3.1 = 30 GB).
269
257
  payload = {
270
258
  "model": self._model,
271
259
  "messages": messages,
272
- "max_tokens": max_tokens,
273
- "temperature": temperature,
260
+ "stream": False,
274
261
  "keep_alive": "30s",
275
- "options": {"num_ctx": 4096},
262
+ "options": {
263
+ "num_predict": max_tokens,
264
+ "temperature": temperature,
265
+ "num_ctx": 4096,
266
+ },
276
267
  }
277
268
  return self._base_url, headers, payload
278
269
 
@@ -323,7 +314,10 @@ class LLMBackbone:
323
314
  """Extract text from provider-specific JSON response."""
324
315
  if self._provider == "anthropic":
325
316
  return data.get("content", [{}])[0].get("text", "").strip()
326
- # OpenAI / Azure / Ollama share response format.
317
+ if self._provider == "ollama":
318
+ # Native /api/chat: {"message": {"content": "..."}}
319
+ return data.get("message", {}).get("content", "").strip()
320
+ # OpenAI / Azure share response format.
327
321
  choices = data.get("choices", [{}])
328
322
  return choices[0].get("message", {}).get("content", "").strip()
329
323
 
@@ -20,7 +20,32 @@ logger = logging.getLogger(__name__)
20
20
 
21
21
 
22
22
  def register_resources(server, get_engine: Callable) -> None:
23
- """Register 6 MCP resources on *server*."""
23
+ """Register 7 MCP resources on *server*."""
24
+
25
+ # ------------------------------------------------------------------
26
+ # 0. slm://context — Active Memory auto-injection
27
+ # ------------------------------------------------------------------
28
+ @server.resource("slm://context")
29
+ async def session_context() -> str:
30
+ """Active session context — auto-injected on MCP connect.
31
+
32
+ Returns the most relevant memories for the current session:
33
+ recent decisions, active patterns, and project context.
34
+ AI tools read this automatically on connection to get instant context.
35
+ """
36
+ try:
37
+ from superlocalmemory.hooks.auto_recall import AutoRecall
38
+ engine = get_engine()
39
+ auto = AutoRecall(
40
+ engine=engine,
41
+ config={"enabled": True, "max_memories_injected": 10, "relevance_threshold": 0.3},
42
+ )
43
+ context = auto.get_session_context(query="recent decisions and important context")
44
+ if not context:
45
+ return "No session context available yet. Use 'remember' to store memories."
46
+ return context
47
+ except Exception as exc:
48
+ return f"Context unavailable: {exc}"
24
49
 
25
50
  # ------------------------------------------------------------------
26
51
  # 1. slm://recent
@@ -58,11 +58,13 @@ def reset_engine():
58
58
  from superlocalmemory.mcp.tools_core import register_core_tools
59
59
  from superlocalmemory.mcp.tools_v28 import register_v28_tools
60
60
  from superlocalmemory.mcp.tools_v3 import register_v3_tools
61
+ from superlocalmemory.mcp.tools_active import register_active_tools
61
62
  from superlocalmemory.mcp.resources import register_resources
62
63
 
63
64
  register_core_tools(server, get_engine)
64
65
  register_v28_tools(server, get_engine)
65
66
  register_v3_tools(server, get_engine)
67
+ register_active_tools(server, get_engine)
66
68
  register_resources(server, get_engine)
67
69
 
68
70
 
@@ -0,0 +1,205 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the MIT License - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """SuperLocalMemory V3.1 — Active Memory MCP Tools.
6
+
7
+ session_init — Auto-recall project context at session start.
8
+ observe — Monitor conversation for auto-capture (decisions/bugs/prefs).
9
+ report_feedback — Record explicit feedback on recall results for learning.
10
+
11
+ These tools transform SLM from a passive database into an active
12
+ intelligence layer that learns and improves over time.
13
+
14
+ Part of Qualixar | Author: Varun Pratap Bhardwaj
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import logging
20
+ from typing import Callable
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
+ def register_active_tools(server, get_engine: Callable) -> None:
26
+ """Register 3 active memory tools on *server*."""
27
+
28
+ # ------------------------------------------------------------------
29
+ # 1. session_init — Auto-recall project context at session start
30
+ # ------------------------------------------------------------------
31
+ @server.tool()
32
+ async def session_init(
33
+ project_path: str = "",
34
+ query: str = "",
35
+ max_results: int = 10,
36
+ ) -> dict:
37
+ """Initialize session with relevant memory context.
38
+
39
+ Call this ONCE at the start of every session. Returns:
40
+ - Recent decisions and patterns for this project
41
+ - Top relevant memories based on project path or query
42
+ - Learning status (signal count, ranking phase)
43
+
44
+ The AI should call this automatically before any other work.
45
+ """
46
+ try:
47
+ from superlocalmemory.hooks.auto_recall import AutoRecall
48
+ from superlocalmemory.hooks.rules_engine import RulesEngine
49
+
50
+ engine = get_engine()
51
+ rules = RulesEngine()
52
+
53
+ if not rules.should_recall("session_start"):
54
+ return {"success": True, "context": "", "memories": [], "message": "Auto-recall disabled"}
55
+
56
+ recall_config = rules.get_recall_config()
57
+ auto = AutoRecall(
58
+ engine=engine,
59
+ config={
60
+ "enabled": True,
61
+ "max_memories_injected": max_results,
62
+ "relevance_threshold": recall_config.get("relevance_threshold", 0.3),
63
+ },
64
+ )
65
+
66
+ # Get formatted context for system prompt injection
67
+ context = auto.get_session_context(project_path=project_path, query=query)
68
+
69
+ # Get structured results for tool response
70
+ search_query = query or f"project context {project_path}" if project_path else "recent important decisions"
71
+ memories = auto.get_query_context(search_query)
72
+
73
+ # Get learning status
74
+ pid = engine.profile_id
75
+ feedback_count = 0
76
+ try:
77
+ feedback_count = engine._adaptive_learner.get_feedback_count(pid)
78
+ except Exception:
79
+ pass
80
+
81
+ return {
82
+ "success": True,
83
+ "context": context,
84
+ "memories": memories[:max_results],
85
+ "memory_count": len(memories),
86
+ "learning": {
87
+ "feedback_signals": feedback_count,
88
+ "phase": 1 if feedback_count < 50 else (2 if feedback_count < 200 else 3),
89
+ "status": "collecting" if feedback_count < 50 else ("learning" if feedback_count < 200 else "trained"),
90
+ },
91
+ }
92
+ except Exception as exc:
93
+ logger.exception("session_init failed")
94
+ return {"success": False, "error": str(exc)}
95
+
96
+ # ------------------------------------------------------------------
97
+ # 2. observe — Auto-capture decisions/bugs/preferences
98
+ # ------------------------------------------------------------------
99
+ @server.tool()
100
+ async def observe(
101
+ content: str,
102
+ agent_id: str = "mcp_client",
103
+ ) -> dict:
104
+ """Observe conversation content for automatic memory capture.
105
+
106
+ Send conversation snippets here. The system evaluates whether
107
+ the content contains decisions, bug fixes, or preferences worth
108
+ storing. If so, it auto-captures them with classification metadata.
109
+
110
+ Call this after making decisions, fixing bugs, or expressing preferences.
111
+ The system will NOT store low-confidence or irrelevant content.
112
+ """
113
+ try:
114
+ from superlocalmemory.hooks.auto_capture import AutoCapture
115
+ from superlocalmemory.hooks.rules_engine import RulesEngine
116
+
117
+ engine = get_engine()
118
+ rules = RulesEngine()
119
+
120
+ auto = AutoCapture(
121
+ engine=engine,
122
+ config=rules.get_capture_config(),
123
+ )
124
+
125
+ decision = auto.evaluate(content)
126
+
127
+ if not decision.capture:
128
+ return {
129
+ "captured": False,
130
+ "reason": decision.reason,
131
+ "category": decision.category,
132
+ "confidence": round(decision.confidence, 3),
133
+ }
134
+
135
+ # Check rules engine for category-level permission
136
+ if not rules.should_capture(decision.category, decision.confidence):
137
+ return {
138
+ "captured": False,
139
+ "reason": f"Category '{decision.category}' disabled in rules",
140
+ "category": decision.category,
141
+ "confidence": round(decision.confidence, 3),
142
+ }
143
+
144
+ # Auto-store via engine
145
+ stored = auto.capture(
146
+ content,
147
+ category=decision.category,
148
+ metadata={"agent_id": agent_id, "source": "auto-observe"},
149
+ )
150
+
151
+ return {
152
+ "captured": stored,
153
+ "category": decision.category,
154
+ "confidence": round(decision.confidence, 3),
155
+ "reason": decision.reason,
156
+ }
157
+ except Exception as exc:
158
+ logger.exception("observe failed")
159
+ return {"captured": False, "error": str(exc)}
160
+
161
+ # ------------------------------------------------------------------
162
+ # 3. report_feedback — Explicit feedback for learning
163
+ # ------------------------------------------------------------------
164
+ @server.tool()
165
+ async def report_feedback(
166
+ fact_id: str,
167
+ feedback: str = "relevant",
168
+ query: str = "",
169
+ ) -> dict:
170
+ """Report whether a recalled memory was useful.
171
+
172
+ feedback: "relevant" (memory was helpful), "irrelevant" (not useful),
173
+ "partial" (somewhat relevant).
174
+
175
+ This feedback trains the adaptive ranker to return better results
176
+ over time. The more feedback, the smarter the system gets.
177
+ """
178
+ try:
179
+ engine = get_engine()
180
+ pid = engine.profile_id
181
+
182
+ if feedback not in ("relevant", "irrelevant", "partial"):
183
+ return {"success": False, "error": f"Invalid feedback: {feedback}. Use relevant/irrelevant/partial"}
184
+
185
+ record = engine._adaptive_learner.record_feedback(
186
+ query=query,
187
+ fact_id=fact_id,
188
+ feedback_type=feedback,
189
+ profile_id=pid,
190
+ )
191
+
192
+ count = engine._adaptive_learner.get_feedback_count(pid)
193
+
194
+ return {
195
+ "success": True,
196
+ "feedback_id": record.feedback_id,
197
+ "total_signals": count,
198
+ "phase": 1 if count < 50 else (2 if count < 200 else 3),
199
+ "message": f"Feedback recorded. {count} total signals."
200
+ + (" Phase 2 unlocked!" if count == 50 else "")
201
+ + (" Phase 3 (ML) unlocked!" if count == 200 else ""),
202
+ }
203
+ except Exception as exc:
204
+ logger.exception("report_feedback failed")
205
+ return {"success": False, "error": str(exc)}
@@ -20,6 +20,52 @@ from typing import Any, Callable
20
20
  logger = logging.getLogger(__name__)
21
21
 
22
22
 
23
+ def _record_recall_hits(get_engine: Callable, query: str, results: list[dict]) -> None:
24
+ """Record implicit feedback + learning signals for each recall.
25
+
26
+ Non-blocking, non-critical — failures silently ignored.
27
+ Feeds: FeedbackCollector + Co-Retrieval + Confidence Boost.
28
+ """
29
+ try:
30
+ from pathlib import Path
31
+ engine = get_engine()
32
+ pid = engine.profile_id
33
+ slm_dir = Path.home() / ".superlocalmemory"
34
+ fact_ids = [r.get("fact_id", "") for r in results[:10] if r.get("fact_id")]
35
+ if not fact_ids:
36
+ return
37
+
38
+ # 1. Implicit feedback (recall_hit signals for adaptive learner)
39
+ try:
40
+ from superlocalmemory.learning.feedback import FeedbackCollector
41
+ collector = FeedbackCollector(slm_dir / "learning.db")
42
+ collector.record_implicit(
43
+ profile_id=pid, query=query,
44
+ fact_ids_returned=fact_ids, fact_ids_available=fact_ids,
45
+ )
46
+ except Exception:
47
+ pass
48
+
49
+ # 2. Co-retrieval signals (strengthen implicit graph edges)
50
+ try:
51
+ from superlocalmemory.learning.signals import LearningSignals
52
+ signals = LearningSignals(slm_dir / "learning.db")
53
+ signals.record_co_retrieval(pid, fact_ids)
54
+ except Exception:
55
+ pass
56
+
57
+ # 3. Confidence boost (accessed facts get +0.02, cap 1.0)
58
+ try:
59
+ from superlocalmemory.learning.signals import LearningSignals
60
+ mem_db = str(slm_dir / "memory.db")
61
+ for fid in fact_ids[:5]:
62
+ LearningSignals.boost_confidence(mem_db, fid)
63
+ except Exception:
64
+ pass
65
+ except Exception:
66
+ pass
67
+
68
+
23
69
  def register_core_tools(server, get_engine: Callable) -> None:
24
70
  """Register the 13 core MCP tools on *server*."""
25
71
 
@@ -57,6 +103,11 @@ def register_core_tools(server, get_engine: Callable) -> None:
57
103
  pool = WorkerPool.shared()
58
104
  result = pool.recall(query, limit=limit)
59
105
  if result.get("ok"):
106
+ # Record implicit feedback: every returned result is a recall_hit
107
+ try:
108
+ _record_recall_hits(get_engine, query, result.get("results", []))
109
+ except Exception:
110
+ pass # Feedback is non-critical, never block recall
60
111
  return {
61
112
  "success": True,
62
113
  "results": result.get("results", []),
@@ -45,9 +45,18 @@ async def behavioral_status():
45
45
  recent_outcomes = []
46
46
  try:
47
47
  tracker = OutcomeTracker(db_path)
48
- total_outcomes = tracker.get_outcome_count(profile=profile)
49
- outcome_breakdown = tracker.get_outcome_breakdown(profile=profile)
50
- recent_outcomes = tracker.get_recent_outcomes(profile=profile, limit=20)
48
+ all_outcomes = tracker.get_outcomes(profile_id=profile, limit=50)
49
+ total_outcomes = len(all_outcomes)
50
+ for o in all_outcomes:
51
+ key = o.outcome if hasattr(o, 'outcome') else str(o)
52
+ if key in outcome_breakdown:
53
+ outcome_breakdown[key] += 1
54
+ recent_outcomes = [
55
+ {"outcome": o.outcome, "action_type": o.action_type,
56
+ "timestamp": o.timestamp}
57
+ for o in all_outcomes[:20]
58
+ if hasattr(o, 'outcome')
59
+ ]
51
60
  except Exception as exc:
52
61
  logger.debug("outcome tracker: %s", exc)
53
62
 
@@ -56,8 +65,8 @@ async def behavioral_status():
56
65
  cross_project_transfers = 0
57
66
  try:
58
67
  store = BehavioralPatternStore(db_path)
59
- patterns = store.get_patterns(profile=profile)
60
- cross_project_transfers = store.get_cross_project_count(profile=profile)
68
+ patterns = store.get_patterns(profile_id=profile)
69
+ cross_project_transfers = 0
61
70
  except Exception as exc:
62
71
  logger.debug("pattern store: %s", exc)
63
72
 
@@ -69,6 +78,12 @@ async def behavioral_status():
69
78
  "patterns": patterns,
70
79
  "cross_project_transfers": cross_project_transfers,
71
80
  "recent_outcomes": recent_outcomes,
81
+ "stats": {
82
+ "success_count": outcome_breakdown.get("success", 0),
83
+ "failure_count": outcome_breakdown.get("failure", 0),
84
+ "partial_count": outcome_breakdown.get("partial", 0),
85
+ "patterns_count": len(patterns),
86
+ },
72
87
  }
73
88
  except Exception as e:
74
89
  logger.error("behavioral_status error: %s", e)
@@ -79,22 +79,45 @@ async def learning_status():
79
79
  active_profile = get_active_profile()
80
80
  result["active_profile"] = active_profile
81
81
 
82
- # Ranking phase
83
- result["ranking_phase"] = "baseline"
82
+ # Real signal count from V3.1 learning_feedback table
83
+ signal_count = 0
84
+ try:
85
+ from superlocalmemory.learning.feedback import FeedbackCollector
86
+ from pathlib import Path
87
+ learning_db = Path.home() / ".superlocalmemory" / "learning.db"
88
+ if learning_db.exists():
89
+ collector = FeedbackCollector(learning_db)
90
+ signal_count = collector.get_feedback_count(active_profile)
91
+ except Exception:
92
+ pass
93
+
94
+ # Ranking phase based on real signal count
95
+ if signal_count >= 200:
96
+ result["ranking_phase"] = "ml_model"
97
+ elif signal_count >= 20:
98
+ result["ranking_phase"] = "rule_based"
99
+ else:
100
+ result["ranking_phase"] = "baseline"
84
101
 
85
- # Feedback stats
102
+ # Feedback stats — merge old system + new V3.1 signals
103
+ stats_dict = {"feedback_count": signal_count, "active_profile": active_profile}
86
104
  feedback = _get_feedback()
87
105
  if feedback:
88
106
  try:
89
- summary = feedback.get_feedback_summary()
90
- result["stats"] = summary
91
- result["profile_feedback"] = {
92
- "profile": active_profile,
93
- "signals": summary.get("total_signals", 0),
94
- }
107
+ old_stats = feedback.get_feedback_summary()
108
+ if isinstance(old_stats, dict):
109
+ old_stats["feedback_count"] = signal_count
110
+ old_stats["active_profile"] = active_profile
111
+ stats_dict = old_stats
95
112
  except Exception as exc:
96
113
  logger.debug("feedback summary: %s", exc)
97
114
 
115
+ result["stats"] = stats_dict
116
+ result["profile_feedback"] = {
117
+ "profile": active_profile,
118
+ "signals": signal_count,
119
+ }
120
+
98
121
  # Engagement
99
122
  engagement = _get_engagement()
100
123
  if engagement:
@@ -105,9 +128,43 @@ async def learning_status():
105
128
  else:
106
129
  result["engagement"] = None
107
130
 
108
- # Tech preferences (stub until learning DB populated)
109
- result["tech_preferences"] = []
110
- result["workflow_patterns"] = []
131
+ # Tech preferences + workflow patterns from V3.1 behavioral store
132
+ try:
133
+ from superlocalmemory.learning.behavioral import BehavioralPatternStore
134
+ from pathlib import Path
135
+ learning_db = Path.home() / ".superlocalmemory" / "learning.db"
136
+ if learning_db.exists():
137
+ store = BehavioralPatternStore(str(learning_db))
138
+ all_patterns = store.get_patterns(profile_id=active_profile)
139
+ tech = [
140
+ {"key": "tech", "value": p.get("metadata", {}).get("value", p.get("pattern_key", "")),
141
+ "confidence": p.get("confidence", 0), "evidence": p.get("evidence_count", 0)}
142
+ for p in all_patterns if p.get("pattern_type") == "tech_preference"
143
+ ]
144
+ workflows = [
145
+ {"type": p.get("pattern_type"), "key": p.get("pattern_key", ""),
146
+ "value": p.get("metadata", {}).get("value", ""),
147
+ "confidence": p.get("confidence", 0)}
148
+ for p in all_patterns if p.get("pattern_type") in ("temporal", "interest")
149
+ ]
150
+ result["tech_preferences"] = tech
151
+ result["workflow_patterns"] = workflows
152
+
153
+ # Privacy stats
154
+ import os
155
+ db_size = os.path.getsize(str(learning_db)) // 1024 if learning_db.exists() else 0
156
+ stats_dict["db_size_kb"] = db_size
157
+ stats_dict["transferable_patterns"] = len(all_patterns)
158
+ stats_dict["models_trained"] = 1 if signal_count >= 200 else 0
159
+ stats_dict["tracked_sources"] = len(set(
160
+ p.get("pattern_type") for p in all_patterns
161
+ ))
162
+ else:
163
+ result["tech_preferences"] = []
164
+ result["workflow_patterns"] = []
165
+ except Exception:
166
+ result["tech_preferences"] = []
167
+ result["workflow_patterns"] = []
111
168
  result["source_scores"] = {}
112
169
 
113
170
  except Exception as e:
@@ -13,7 +13,7 @@ from typing import Optional
13
13
 
14
14
  from fastapi import APIRouter, HTTPException, Query
15
15
 
16
- from .helpers import get_db_connection, dict_factory, get_active_profile, DB_PATH
16
+ from .helpers import get_db_connection, dict_factory, get_active_profile, DB_PATH, MEMORY_DIR
17
17
 
18
18
  logger = logging.getLogger("superlocalmemory.routes.stats")
19
19
  router = APIRouter()
@@ -306,10 +306,38 @@ async def get_patterns():
306
306
 
307
307
  if not table_name:
308
308
  conn.close()
309
- return {
310
- "patterns": {}, "total_patterns": 0, "pattern_types": [],
311
- "message": "Pattern learning not initialized.",
312
- }
309
+ # Fall through to V3.1 behavioral pattern store
310
+ try:
311
+ from superlocalmemory.learning.behavioral import BehavioralPatternStore
312
+ store = BehavioralPatternStore(str(MEMORY_DIR / "learning.db"))
313
+ raw = store.get_patterns(profile_id=active_profile)
314
+ grouped = defaultdict(list)
315
+ for p in raw:
316
+ meta = p.get("metadata", {})
317
+ grouped[p["pattern_type"]].append({
318
+ "pattern_type": p["pattern_type"],
319
+ "key": meta.get("key", p.get("pattern_key", "")),
320
+ "value": meta.get("value", p.get("pattern_key", "")),
321
+ "confidence": p.get("confidence", 0),
322
+ "evidence_count": p.get("evidence_count", 0),
323
+ })
324
+ all_patterns = [p for ps in grouped.values() for p in ps]
325
+ confs = [p["confidence"] for p in all_patterns if p.get("confidence")]
326
+ return {
327
+ "patterns": dict(grouped),
328
+ "total_patterns": len(all_patterns),
329
+ "pattern_types": list(grouped.keys()),
330
+ "confidence_stats": {
331
+ "avg": sum(confs) / len(confs) if confs else 0,
332
+ "min": min(confs) if confs else 0,
333
+ "max": max(confs) if confs else 0,
334
+ },
335
+ }
336
+ except Exception:
337
+ return {
338
+ "patterns": {}, "total_patterns": 0, "pattern_types": [],
339
+ "message": "Pattern learning not initialized.",
340
+ }
313
341
 
314
342
  if table_name == 'identity_patterns':
315
343
  cursor.execute("""