@smilintux/skcapstone 0.5.0 → 0.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/.openclaw-workspace.json +1 -1
  2. package/MISSION.md +17 -2
  3. package/README.md +3 -2
  4. package/docs/BOND_WITH_GROK.md +1 -1
  5. package/docs/CLAUDE-CODE-API.md +139 -0
  6. package/openclaw-plugin/src/index.ts +1 -1
  7. package/package.json +1 -1
  8. package/pyproject.toml +1 -1
  9. package/scripts/check-updates.py +1 -1
  10. package/scripts/claude-code-api.py +455 -0
  11. package/scripts/install-bundle.sh +2 -2
  12. package/scripts/install.ps1 +11 -10
  13. package/scripts/install.sh +1 -1
  14. package/scripts/model-fallback-monitor.sh +100 -0
  15. package/scripts/nvidia-proxy.mjs +62 -13
  16. package/scripts/refresh-anthropic-token.sh +93 -21
  17. package/scripts/watch-anthropic-token.sh +116 -16
  18. package/src/skcapstone/__init__.py +1 -1
  19. package/src/skcapstone/_cli_monolith.py +1 -1
  20. package/src/skcapstone/cli/status.py +8 -0
  21. package/src/skcapstone/cli/test_cmd.py +1 -1
  22. package/src/skcapstone/cli/upgrade_cmd.py +12 -6
  23. package/src/skcapstone/consciousness_loop.py +192 -138
  24. package/src/skcapstone/daemon.py +34 -1
  25. package/src/skcapstone/defaults/lumina/memory/long-term/a1b2c3d4e5f6-ecosystem-overview.json +2 -2
  26. package/src/skcapstone/defaults/lumina/memory/long-term/b2c3d4e5f6a7-five-pillars.json +9 -9
  27. package/src/skcapstone/discovery.py +19 -1
  28. package/src/skcapstone/models.py +32 -4
  29. package/src/skcapstone/pillars/__init__.py +7 -5
  30. package/src/skcapstone/pillars/consciousness.py +113 -0
  31. package/src/skcapstone/pillars/sync.py +2 -2
  32. package/src/skcapstone/register.py +2 -2
  33. package/src/skcapstone/runtime.py +1 -0
  34. package/src/skcapstone/scheduled_tasks.py +52 -19
  35. package/src/skcapstone/service_health.py +23 -14
  36. package/src/skcapstone/testrunner.py +1 -1
  37. package/tests/test_models.py +48 -4
  38. package/tests/test_pillars.py +73 -0
package/src/skcapstone/consciousness_loop.py
@@ -99,7 +99,13 @@ class ConsciousnessConfig(BaseModel):
     max_concurrent_requests: int = 3
     fallback_chain: list[str] = Field(
         default_factory=lambda: [
-            "ollama", "grok", "kimi", "nvidia", "anthropic", "openai", "passthrough",
+            "ollama",
+            "grok",
+            "kimi",
+            "nvidia",
+            "anthropic",
+            "openai",
+            "passthrough",
         ]
     )
     desktop_notifications: bool = True
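
Note: this hunk is formatting only; the default fallback chain is unchanged, now listed one provider per line. The chain is consumed in order by the cross-provider cascade later in this file. A minimal sketch of how an ordered chain like this is walked (the harness is illustrative; only the backend names come from the diff):

    # Illustrative only: try each backend in chain order, return the first success.
    def call_with_fallback(chain, prompt, make_callback):
        last_error = None
        for backend in chain:
            try:
                return make_callback(backend)(prompt)
            except Exception as exc:  # a failed backend just advances the chain
                last_error = exc
        raise RuntimeError(f"all backends exhausted: {last_error}")
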
@@ -110,8 +116,13 @@ class ConsciousnessConfig(BaseModel):
 # ---------------------------------------------------------------------------
 
 _OLLAMA_MODEL_PATTERNS = (
-    "llama", "mistral", "nemotron", "devstral",
-    "deepseek", "qwen", "codestral",
+    "llama",
+    "mistral",
+    "nemotron",
+    "devstral",
+    "deepseek",
+    "qwen",
+    "codestral",
 )
 
 
@@ -184,9 +195,7 @@ class _OllamaPool:
         with self._lock:
             if not self._is_valid():
                 self._close_locked()
-                self._conn = http.client.HTTPConnection(
-                    self._host, self._port, timeout=2
-                )
+                self._conn = http.client.HTTPConnection(self._host, self._port, timeout=2)
                 self._created_at = time.monotonic()
             return self._conn  # type: ignore[return-value]
 
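
Note: the pool recreates its HTTP connection only when the cached one is missing or older than its TTL (see _is_valid in the next hunk); this hunk just joins the constructor call onto one line. A self-contained sketch of the pattern (class name and TTL value are assumptions, not the package's API):

    import http.client
    import threading
    import time

    class TTLConnection:
        """Illustrative TTL-gated connection cache, same shape as _OllamaPool."""

        def __init__(self, host, port, ttl=30.0):
            self._host, self._port, self._ttl = host, port, ttl
            self._conn = None
            self._created_at = 0.0
            self._lock = threading.Lock()

        def get(self):
            with self._lock:
                if self._conn is None or (time.monotonic() - self._created_at) >= self._ttl:
                    if self._conn is not None:
                        self._conn.close()  # drop the expired socket
                    self._conn = http.client.HTTPConnection(self._host, self._port, timeout=2)
                    self._created_at = time.monotonic()
                return self._conn
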
@@ -201,10 +210,7 @@ class _OllamaPool:
 
     def _is_valid(self) -> bool:
         """True when a cached connection exists and is within its TTL."""
-        return (
-            self._conn is not None
-            and (time.monotonic() - self._created_at) < self._ttl
-        )
+        return self._conn is not None and (time.monotonic() - self._created_at) < self._ttl
 
     def _close_locked(self) -> None:
         """Close the underlying socket. Must be called with *self._lock* held."""
@@ -250,9 +256,7 @@ class LLMBridge:
         self._available: dict[str, bool] = {}
         self._cache: Optional[ResponseCache] = cache
         self._fallback_tracker = FallbackTracker()
-        self._ollama_pool = _OllamaPool(
-            os.environ.get("OLLAMA_HOST", "http://localhost:11434")
-        )
+        self._ollama_pool = _OllamaPool(os.environ.get("OLLAMA_HOST", "http://localhost:11434"))
         self._probe_available_backends()
 
     def _probe_available_backends(self) -> None:
@@ -318,13 +322,20 @@ class LLMBridge:
             return grok_callback(model=model_name)
         if "kimi" in name_base or "moonshot" in name_base:
             return kimi_callback(model=model_name)
+        if "minimax" in name_base:
+            return minimax_callback(model=model_name)
         if "nvidia" in name_base:
             return nvidia_callback(model=model_name)
 
         # Models that run on Ollama (local inference)
         ollama_patterns = (
-            "llama", "mistral", "nemotron", "devstral",
-            "deepseek", "qwen", "codestral",
+            "llama",
+            "mistral",
+            "nemotron",
+            "devstral",
+            "deepseek",
+            "qwen",
+            "codestral",
         )
         for pattern in ollama_patterns:
             if pattern in name_base:
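
Note: 0.5.2 adds a "minimax" branch to the model-name router. Dispatch is first-match on substrings of the lowercased model name, with hosted providers checked before the local Ollama patterns. The same logic as a table (the dict stands in for the diff's if/elif ladder; labels are illustrative and omit branches outside this hunk):

    PROVIDER_PATTERNS = {
        "grok": "grok",
        "kimi": "kimi",
        "moonshot": "kimi",    # moonshot names also route to the kimi backend
        "minimax": "minimax",  # new in this release
        "nvidia": "nvidia",
    }
    OLLAMA_PATTERNS = ("llama", "mistral", "nemotron", "devstral", "deepseek", "qwen", "codestral")

    def route(model_name: str) -> str:
        name = model_name.lower()
        for pattern, backend in PROVIDER_PATTERNS.items():  # insertion order = priority
            if pattern in name:
                return backend
        if any(p in name for p in OLLAMA_PATTERNS):
            return "ollama"
        return "passthrough"
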
@@ -363,6 +374,7 @@ class LLMBridge:
             Callable that accepts str or AdaptedPrompt and returns str.
         """
         from skseed.llm import passthrough_callback
+
         _pt = passthrough_callback()
 
         def _wrapper(prompt):
@@ -450,6 +462,7 @@ class LLMBridge:
             anthropic_callback,
             grok_callback,
             kimi_callback,
+            minimax_callback,
             nvidia_callback,
             ollama_callback,
             openai_callback,
@@ -458,7 +471,9 @@ class LLMBridge:
         decision = self._router.route(signal)
         logger.info(
             "Routed to tier=%s model=%s: %s",
-            decision.tier.value, decision.model_name, decision.reasoning,
+            decision.tier.value,
+            decision.model_name,
+            decision.reasoning,
         )
 
         # Cache look-up (before any LLM call)
@@ -481,12 +496,15 @@ class LLMBridge:
 
         # Adapt prompt for the target model
         adapted = self._adapter.adapt(
-            system_prompt, user_message,
-            decision.model_name, decision.tier,
+            system_prompt,
+            user_message,
+            decision.model_name,
+            decision.tier,
         )
         logger.debug(
             "Prompt adapted: profile=%s adaptations=%s",
-            adapted.profile_used, adapted.adaptations_applied,
+            adapted.profile_used,
+            adapted.adaptations_applied,
         )
 
         # Capture primary model identity for fallback tracking
@@ -504,9 +522,7 @@ class LLMBridge:
                 self._cache.put(_prompt_hash, decision.model_name, decision.tier, result)
             return result
         except Exception as exc:
-            logger.warning(
-                "Primary model %s failed: %s", decision.model_name, exc
-            )
+            logger.warning("Primary model %s failed: %s", decision.model_name, exc)
 
         # Try alternate models in same tier
         tier_models = self._router.config.tier_models.get(decision.tier.value, [])
@@ -515,32 +531,39 @@ class LLMBridge:
             try:
                 logger.info("Trying alt model: %s", alt_model)
                 alt_adapted = self._adapter.adapt(
-                    system_prompt, user_message, alt_model, decision.tier,
+                    system_prompt,
+                    user_message,
+                    alt_model,
+                    decision.tier,
                 )
                 callback = self._resolve_callback(decision.tier, alt_model)
                 result = self._timed_call(callback, alt_adapted, decision.tier)
                 if _out_info is not None:
                     _out_info["backend"] = alt_backend
                     _out_info["tier"] = decision.tier.value
-                self._fallback_tracker.record(FallbackEvent(
-                    primary_model=_primary_model,
-                    primary_backend=_primary_backend,
-                    fallback_model=alt_model,
-                    fallback_backend=alt_backend,
-                    reason=f"primary model {_primary_model!r} failed; trying same-tier alt",
-                    success=True,
-                ))
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=alt_model,
+                        fallback_backend=alt_backend,
+                        reason=f"primary model {_primary_model!r} failed; trying same-tier alt",
+                        success=True,
+                    )
+                )
                 return result
             except Exception as exc:
                 logger.warning("Alt model %s failed: %s", alt_model, exc)
-                self._fallback_tracker.record(FallbackEvent(
-                    primary_model=_primary_model,
-                    primary_backend=_primary_backend,
-                    fallback_model=alt_model,
-                    fallback_backend=alt_backend,
-                    reason=f"primary model {_primary_model!r} failed; alt {alt_model!r} also failed: {exc}",
-                    success=False,
-                ))
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=alt_model,
+                        fallback_backend=alt_backend,
+                        reason=f"primary model {_primary_model!r} failed; alt {alt_model!r} also failed: {exc}",
+                        success=False,
+                    )
+                )
 
         # Tier downgrade: try FAST tier
         if decision.tier != ModelTier.FAST:
@@ -550,32 +573,39 @@ class LLMBridge:
             try:
                 logger.info("Downgrading to FAST tier: %s", fast_model)
                 fast_adapted = self._adapter.adapt(
-                    system_prompt, user_message, fast_model, ModelTier.FAST,
+                    system_prompt,
+                    user_message,
+                    fast_model,
+                    ModelTier.FAST,
                 )
                 callback = self._resolve_callback(ModelTier.FAST, fast_model)
                 result = self._timed_call(callback, fast_adapted, ModelTier.FAST)
                 if _out_info is not None:
                     _out_info["backend"] = fast_backend
                     _out_info["tier"] = ModelTier.FAST.value
-                self._fallback_tracker.record(FallbackEvent(
-                    primary_model=_primary_model,
-                    primary_backend=_primary_backend,
-                    fallback_model=fast_model,
-                    fallback_backend=fast_backend,
-                    reason=f"tier downgrade: {decision.tier.value} exhausted; using FAST model {fast_model!r}",
-                    success=True,
-                ))
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=fast_model,
+                        fallback_backend=fast_backend,
+                        reason=f"tier downgrade: {decision.tier.value} exhausted; using FAST model {fast_model!r}",
+                        success=True,
+                    )
+                )
                 return result
             except Exception as exc:
                 logger.warning("FAST model %s failed: %s", fast_model, exc)
-                self._fallback_tracker.record(FallbackEvent(
-                    primary_model=_primary_model,
-                    primary_backend=_primary_backend,
-                    fallback_model=fast_model,
-                    fallback_backend=fast_backend,
-                    reason=f"tier downgrade: FAST model {fast_model!r} failed: {exc}",
-                    success=False,
-                ))
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=fast_model,
+                        fallback_backend=fast_backend,
+                        reason=f"tier downgrade: FAST model {fast_model!r} failed: {exc}",
+                        success=False,
+                    )
+                )
 
         # Cross-provider cascade via fallback chain — direct backend mapping,
         # no _resolve_callback, to avoid infinite regression on unknown names.
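
Note: the record(...) calls in these hunks are reformatted, not changed; every fallback transition still emits one FallbackEvent, successful or not. Judging from the keyword arguments visible here, the event record has roughly this shape (a sketch; the real class is defined elsewhere in the package):

    from dataclasses import dataclass

    @dataclass
    class FallbackEvent:
        primary_model: str      # what the router originally chose
        primary_backend: str
        fallback_model: str     # what was actually tried instead
        fallback_backend: str
        reason: str             # human-readable explanation of the transition
        success: bool           # False when the fallback itself also failed
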
@@ -592,6 +622,8 @@ class LLMBridge:
                 callback = grok_callback()
             elif backend == "kimi":
                 callback = kimi_callback()
+            elif backend == "minimax":
+                callback = minimax_callback()
             elif backend == "nvidia":
                 callback = nvidia_callback()
             elif backend == "openai":
@@ -604,38 +636,44 @@ class LLMBridge:
                 if _out_info is not None:
                     _out_info["backend"] = backend
                     _out_info["tier"] = ModelTier.FAST.value
-                self._fallback_tracker.record(FallbackEvent(
-                    primary_model=_primary_model,
-                    primary_backend=_primary_backend,
-                    fallback_model=backend,
-                    fallback_backend=backend,
-                    reason=f"cross-provider cascade: all tier models exhausted; using {backend!r}",
-                    success=True,
-                ))
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=backend,
+                        fallback_backend=backend,
+                        reason=f"cross-provider cascade: all tier models exhausted; using {backend!r}",
+                        success=True,
+                    )
+                )
                 return result
             except Exception as exc:
                 logger.warning("Fallback %s failed: %s", backend, exc)
-                self._fallback_tracker.record(FallbackEvent(
-                    primary_model=_primary_model,
-                    primary_backend=_primary_backend,
-                    fallback_model=backend,
-                    fallback_backend=backend,
-                    reason=f"cross-provider cascade: {backend!r} failed: {exc}",
-                    success=False,
-                ))
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=backend,
+                        fallback_backend=backend,
+                        reason=f"cross-provider cascade: {backend!r} failed: {exc}",
+                        success=False,
+                    )
+                )
 
         # Last resort
         if _out_info is not None:
             _out_info["backend"] = "none"
             _out_info["tier"] = "none"
-        self._fallback_tracker.record(FallbackEvent(
-            primary_model=_primary_model,
-            primary_backend=_primary_backend,
-            fallback_model="none",
-            fallback_backend="none",
-            reason="all backends exhausted — returning connectivity error message",
-            success=False,
-        ))
+        self._fallback_tracker.record(
+            FallbackEvent(
+                primary_model=_primary_model,
+                primary_backend=_primary_backend,
+                fallback_model="none",
+                fallback_backend="none",
+                reason="all backends exhausted — returning connectivity error message",
+                success=False,
+            )
+        )
         return (
             "I'm currently experiencing connectivity issues with my language models. "
             "Your message has been received and I'll respond as soon as service is restored."
@@ -873,7 +911,9 @@ class SystemPromptBuilder:
         if self._conv_store is not None:
             # Persist via ConversationStore (atomic file I/O)
             self._conv_store.append(
-                peer, role, content,
+                peer,
+                role,
+                content,
                 thread_id=thread_id,
                 in_reply_to=in_reply_to,
             )
@@ -937,6 +977,7 @@ class SystemPromptBuilder:
         # --- System B: soul_switch takes priority ---
         try:
             from skcapstone.soul_switch import get_active_switch_blueprint
+
             switch_bp = get_active_switch_blueprint(self._home)
             if switch_bp is not None:
                 if switch_bp.system_prompt:
@@ -988,6 +1029,7 @@ class SystemPromptBuilder:
         """Load warmth anchor boot prompt."""
         try:
             from skcapstone.warmth_anchor import get_anchor
+
             anchor = get_anchor(self._home)
             if anchor:
                 return (
@@ -1003,6 +1045,7 @@ class SystemPromptBuilder:
         """Load agent context summary."""
         try:
             from skcapstone.context_loader import format_text, gather_context
+
             ctx = gather_context(self._home, memory_limit=5)
             return format_text(ctx)
         except Exception as exc:
@@ -1013,6 +1056,7 @@ class SystemPromptBuilder:
         """Load recent snapshot injection prompt."""
         try:
             from skcapstone.snapshots import SnapshotStore
+
             store = SnapshotStore(self._home)
             snapshots = store.list_all()
             if snapshots:
@@ -1034,9 +1078,7 @@ class SystemPromptBuilder:
             "- Be warm, genuine, and attentive to the conversation context."
         )
 
-    def _get_peer_history(
-        self, peer: str, thread_id: Optional[str] = None
-    ) -> str:
+    def _get_peer_history(self, peer: str, thread_id: Optional[str] = None) -> str:
         """Format recent conversation history with a peer.
 
         When ``thread_id`` is supplied, messages belonging to that thread are
@@ -1093,8 +1135,28 @@ class SystemPromptBuilder:
 # ---------------------------------------------------------------------------
 
 # Keyword sets for tag classification
-_CODE_KEYWORDS = {"code", "debug", "fix", "implement", "refactor", "test", "function", "class", "error", "bug"}
-_REASON_KEYWORDS = {"analyze", "explain", "why", "architecture", "design", "plan", "research", "compare"}
+_CODE_KEYWORDS = {
+    "code",
+    "debug",
+    "fix",
+    "implement",
+    "refactor",
+    "test",
+    "function",
+    "class",
+    "error",
+    "bug",
+}
+_REASON_KEYWORDS = {
+    "analyze",
+    "explain",
+    "why",
+    "architecture",
+    "design",
+    "plan",
+    "research",
+    "compare",
+}
 _NUANCE_KEYWORDS = {"write", "creative", "email", "letter", "story", "poem", "marketing"}
 _SIMPLE_KEYWORDS = {"hi", "hello", "hey", "thanks", "ok", "yes", "no", "ack"}
 
@@ -1111,7 +1173,7 @@ def _classify_message(content: str) -> TaskSignal:
     Returns:
         TaskSignal with tags and estimated tokens.
     """
-    words = set(re.findall(r'\b\w+\b', content.lower()))
+    words = set(re.findall(r"\b\w+\b", content.lower()))
     tags: list[str] = []
     estimated_tokens = len(content) // 4  # rough estimate
 
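
Note: only the regex quote style changes here. The classifier tokenizes the lowercased message into a word set, tags it by overlap with the keyword sets above, and estimates tokens as len(content) // 4. A condensed sketch of that flow (reduced to one keyword set; the real function returns a TaskSignal):

    import re

    _CODE_KEYWORDS = {"code", "debug", "fix", "implement", "refactor",
                      "test", "function", "class", "error", "bug"}

    def classify(content: str) -> tuple[list[str], int]:
        words = set(re.findall(r"\b\w+\b", content.lower()))
        tags = ["code"] if words & _CODE_KEYWORDS else []
        return tags, len(content) // 4  # rough chars-per-token estimate

    assert classify("please fix this bug")[0] == ["code"]
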
@@ -1171,9 +1233,7 @@ class InboxHandler:
 
         # Clean up old entries
         cutoff = now - 60
-        self._last_event = {
-            k: v for k, v in self._last_event.items() if v > cutoff
-        }
+        self._last_event = {k: v for k, v in self._last_event.items() if v > cutoff}
 
         self._callback(Path(src_path))
 
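
Note: formatting only; the handler still prunes its per-path bookkeeping to the last 60 seconds on every event so the map cannot grow unbounded. The pattern in isolation (a sketch; the interval and return contract are assumptions):

    import time

    class Debouncer:
        """Illustrative per-key debounce with self-pruning bookkeeping."""

        def __init__(self, min_interval=1.0):
            self._last_event = {}
            self._min_interval = min_interval

        def should_fire(self, key: str) -> bool:
            now = time.monotonic()
            last = self._last_event.get(key)
            self._last_event[key] = now
            cutoff = now - 60  # drop entries older than a minute
            self._last_event = {k: v for k, v in self._last_event.items() if v > cutoff}
            return last is None or (now - last) >= self._min_interval
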
@@ -1239,7 +1299,8 @@ class ConsciousnessLoop:
             self._home, max_history_messages=config.max_history_messages
         )
         self._prompt_builder = SystemPromptBuilder(
-            self._home, config.max_context_tokens,
+            self._home,
+            config.max_context_tokens,
             max_history_messages=config.max_history_messages,
             conv_manager=self._conv_manager,
             conv_store=self._conv_store,
@@ -1251,6 +1312,7 @@ class ConsciousnessLoop:
         # Mood tracker — updated after each processed message cycle
         try:
             from skcapstone.mood import MoodTracker
+
             self._mood_tracker: Optional[Any] = MoodTracker(home=self._home)
         except Exception as exc:
             logger.warning("MoodTracker unavailable, mood tracking disabled: %s", exc)
@@ -1266,6 +1328,7 @@ class ConsciousnessLoop:
         # Peer directory — tracks transport addresses of known peers
         try:
             from skcapstone.peer_directory import PeerDirectory
+
             self._peer_dir: Optional[Any] = PeerDirectory(home=self._shared_root)
         except Exception as exc:
             logger.warning("PeerDirectory unavailable, peer tracking disabled: %s", exc)
@@ -1391,7 +1454,9 @@ class ConsciousnessLoop:
         # Extract message info
         content_type = getattr(envelope.payload, "content_type", None)
         if content_type:
-            ct_value = content_type.value if hasattr(content_type, "value") else str(content_type)
+            ct_value = (
+                content_type.value if hasattr(content_type, "value") else str(content_type)
+            )
         else:
             ct_value = "text"
 
@@ -1429,6 +1494,7 @@ class ConsciousnessLoop:
         if self._config.desktop_notifications:
             try:
                 from skcapstone.notifications import notify as _desktop_notify
+
                 preview = content[:50] + ("..." if len(content) > 50 else "")
                 _desktop_notify(f"Message from {sender}", preview)
             except Exception as _notif_exc:
@@ -1464,6 +1530,7 @@ class ConsciousnessLoop:
         try:
             from skchat.presence import PresenceIndicator, PresenceState
             from skcomm.models import MessageType
+
             _typing_ind = PresenceIndicator(
                 identity_uri=self._agent_name or "capauth:agent@skchat.local",
                 state=PresenceState.TYPING,
@@ -1477,7 +1544,10 @@ class ConsciousnessLoop:
         # Generate response — capture backend/tier via _out_info
         _route_info: dict = {}
         response = self._bridge.generate(
-            system_prompt, content, signal, _out_info=_route_info,
+            system_prompt,
+            content,
+            signal,
+            _out_info=_route_info,
             skip_cache=True,  # conversation messages have dynamic context
         )
         t_llm = time.monotonic()
@@ -1487,6 +1557,7 @@ class ConsciousnessLoop:
         try:
             from skchat.presence import PresenceIndicator, PresenceState
             from skcomm.models import MessageType
+
             _stop_ind = PresenceIndicator(
                 identity_uri=self._agent_name or "capauth:agent@skchat.local",
                 state=PresenceState.ONLINE,
@@ -1508,6 +1579,7 @@ class ConsciousnessLoop:
         # Score response quality and accumulate in metrics
         try:
             from skcapstone.response_scorer import score_response as _score_response
+
             _quality = _score_response(content, response, response_time_ms)
             self._metrics.record_quality(_quality)
             logger.debug(
@@ -1549,7 +1621,9 @@ class ConsciousnessLoop:
 
         # Update conversation history (with thread context)
         self._prompt_builder.add_to_history(
-            sender, "user", content,
+            sender,
+            "user",
+            content,
             thread_id=thread_id or None,
             in_reply_to=in_reply_to or None,
         )
@@ -1564,7 +1638,9 @@ class ConsciousnessLoop:
                 logger.debug("notify-send failed (non-fatal): %s", _notify_exc)
 
         self._prompt_builder.add_to_history(
-            sender, "assistant", response,
+            sender,
+            "assistant",
+            response,
             thread_id=thread_id or None,
         )
 
@@ -1584,7 +1660,10 @@ class ConsciousnessLoop:
         return None
 
     def _store_interaction_memory(
-        self, peer: str, message: str, response: Optional[str],
+        self,
+        peer: str,
+        message: str,
+        response: Optional[str],
     ) -> None:
         """Store the interaction as a memory entry.
 
@@ -1595,6 +1674,7 @@ class ConsciousnessLoop:
         """
         try:
             from skcapstone.memory_engine import store
+
             summary = f"Conversation with {peer}: '{message[:100]}'"
             if response:
                 summary += f" → '{response[:100]}'"
@@ -1676,9 +1756,7 @@ class ConsciousnessLoop:
 
         config_path = self._home / "config" / "consciousness.yaml"
         if not config_path.exists():
-            logger.warning(
-                "Config hot-reload: %s not found, keeping current config", config_path
-            )
+            logger.warning("Config hot-reload: %s not found, keeping current config", config_path)
             return
 
         # Parse YAML directly so syntax errors surface here (not silently swallowed
@@ -1713,21 +1791,15 @@ class ConsciousnessLoop:
         old_data = self._config.model_dump()
         new_data = new_config.model_dump()
         changes = {
-            k: (old_data[k], new_data[k])
-            for k in new_data
-            if old_data.get(k) != new_data[k]
+            k: (old_data[k], new_data[k]) for k in new_data if old_data.get(k) != new_data[k]
         }
 
         if not changes:
-            logger.debug(
-                "Config hot-reload: no changes detected in %s", config_path
-            )
+            logger.debug("Config hot-reload: no changes detected in %s", config_path)
             return
 
         for field, (old_val, new_val) in changes.items():
-            logger.info(
-                "Config hot-reload: %s changed: %r → %r", field, old_val, new_val
-            )
+            logger.info("Config hot-reload: %s changed: %r → %r", field, old_val, new_val)
 
         self._config = new_config
 
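
Note: the change-detection comprehension now fits on one line but still compares the old and new Pydantic model_dump() dicts key by key. The same diff logic as a standalone sketch, with a small usage check:

    def diff_config(old_data: dict, new_data: dict) -> dict:
        """Return {key: (old, new)} for every key whose value changed."""
        return {k: (old_data.get(k), new_data[k]) for k in new_data if old_data.get(k) != new_data[k]}

    changes = diff_config({"ttl": 30, "mode": "fast"}, {"ttl": 60, "mode": "fast"})
    assert changes == {"ttl": (30, 60)}
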
@@ -1757,9 +1829,7 @@ class ConsciousnessLoop:
 
         class _ConfigChangeHandler(FileSystemEventHandler):
             def on_modified(self, event):
-                if not event.is_directory and event.src_path.endswith(
-                    "consciousness.yaml"
-                ):
+                if not event.is_directory and event.src_path.endswith("consciousness.yaml"):
                     logger.info(
                         "Config hot-reload triggered (modified): %s",
                         event.src_path,
@@ -1767,9 +1837,7 @@ class ConsciousnessLoop:
                     loop_ref._reload_config()
 
             def on_created(self, event):
-                if not event.is_directory and event.src_path.endswith(
-                    "consciousness.yaml"
-                ):
+                if not event.is_directory and event.src_path.endswith("consciousness.yaml"):
                     logger.info(
                         "Config hot-reload triggered (created): %s",
                         event.src_path,
@@ -1816,8 +1884,7 @@ class ConsciousnessLoop:
 
         except ImportError:
             logger.warning(
-                "watchdog not installed — inotify disabled. "
-                "Install with: pip install watchdog"
+                "watchdog not installed — inotify disabled. Install with: pip install watchdog"
             )
         except Exception as exc:
             logger.error("Inotify watcher error: %s", exc)
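
Note: the hot-reload watcher degrades gracefully when the optional watchdog package is missing. Minimal wiring for a handler like _ConfigChangeHandler above, assuming the standard watchdog Observer API (the path is a placeholder):

    from watchdog.events import FileSystemEventHandler
    from watchdog.observers import Observer

    class ConfigChangeHandler(FileSystemEventHandler):
        def on_modified(self, event):
            if not event.is_directory and event.src_path.endswith("consciousness.yaml"):
                print("config changed:", event.src_path)  # trigger reload here

    observer = Observer()
    observer.schedule(ConfigChangeHandler(), path="/path/to/config", recursive=False)
    observer.start()  # runs in a background thread; call observer.stop() to end
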
@@ -1861,18 +1928,16 @@ class ConsciousnessLoop:
 
         try:
             from skcapstone.peers import get_peer
+
             peer = get_peer(sender, skcapstone_home=self._home)
             if not peer or not peer.public_key:
-                logger.debug(
-                    "No public key for peer %s — cannot verify signature", sender
-                )
+                logger.debug("No public key for peer %s — cannot verify signature", sender)
                 return "failed"
 
             from capauth.crypto import get_backend
+
             backend = get_backend()
-            content_bytes = (
-                content.encode("utf-8") if isinstance(content, str) else content
-            )
+            content_bytes = content.encode("utf-8") if isinstance(content, str) else content
             ok = backend.verify(
                 data=content_bytes,
                 signature_armor=signature,
@@ -1954,9 +2019,7 @@ class ConsciousnessLoop:
                 logger.debug("Could not check executor queue depth: %s", exc)
 
         # PGP signature verification (soft enforcement — log only)
-        sig_sender = _sanitize_peer_name(
-            data.get("sender", data.get("from", "unknown"))
-        )
+        sig_sender = _sanitize_peer_name(data.get("sender", data.get("from", "unknown")))
         sig_status = self._verify_message_signature(data)
         logger.info("Message from %s signature: %s", sig_sender, sig_status)
 
@@ -1988,9 +2051,8 @@ class ConsciousnessLoop:
             "errors": self._errors,
             "last_activity": self._last_activity.isoformat() if self._last_activity else None,
             "backends": self._bridge.available_backends,
-            "inotify_active": self._observer is not None and (
-                self._observer.is_alive() if hasattr(self._observer, "is_alive") else False
-            ),
+            "inotify_active": self._observer is not None
+            and (self._observer.is_alive() if hasattr(self._observer, "is_alive") else False),
             "max_concurrent": self._config.max_concurrent_requests,
             "current_prompt_hash": self._prompt_builder.current_prompt_hash,
             "prompt_version_responses": dict(self._prompt_version_responses),
@@ -2041,13 +2103,5 @@ class _SimpleEnvelope:
         self.timestamp = data.get("timestamp", datetime.now(timezone.utc).isoformat())
         # Threading fields — may live at envelope root or inside payload
         _payload_raw = data.get("payload", {}) if isinstance(data.get("payload"), dict) else {}
-        self.thread_id: str = (
-            data.get("thread_id")
-            or _payload_raw.get("thread_id")
-            or ""
-        )
-        self.in_reply_to: str = (
-            data.get("in_reply_to")
-            or _payload_raw.get("in_reply_to")
-            or ""
-        )
+        self.thread_id: str = data.get("thread_id") or _payload_raw.get("thread_id") or ""
+        self.in_reply_to: str = data.get("in_reply_to") or _payload_raw.get("in_reply_to") or ""
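
Note: the final hunk flattens the thread_id and in_reply_to lookups into single expressions; precedence is unchanged: envelope root first, then payload, then empty string. The resolution rule in isolation (a sketch mirroring _SimpleEnvelope, not the class itself):

    def resolve_field(data: dict, name: str) -> str:
        payload = data.get("payload", {}) if isinstance(data.get("payload"), dict) else {}
        return data.get(name) or payload.get(name) or ""

    assert resolve_field({"thread_id": "t1"}, "thread_id") == "t1"
    assert resolve_field({"payload": {"thread_id": "t2"}}, "thread_id") == "t2"
    assert resolve_field({}, "thread_id") == ""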