@smilintux/skcapstone 0.5.1 → 0.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/MISSION.md +17 -2
- package/README.md +3 -2
- package/openclaw-plugin/src/index.ts +1 -1
- package/package.json +1 -1
- package/pyproject.toml +1 -1
- package/scripts/model-fallback-monitor.sh +100 -0
- package/scripts/nvidia-proxy.mjs +62 -13
- package/scripts/refresh-anthropic-token.sh +93 -21
- package/scripts/watch-anthropic-token.sh +116 -16
- package/src/skcapstone/cli/status.py +8 -0
- package/src/skcapstone/consciousness_config.py +5 -1
- package/src/skcapstone/consciousness_loop.py +194 -138
- package/src/skcapstone/daemon.py +34 -1
- package/src/skcapstone/data/systemd/skcapstone-api.socket +9 -0
- package/src/skcapstone/data/systemd/skcapstone-memory-compress.service +18 -0
- package/src/skcapstone/data/systemd/skcapstone-memory-compress.timer +11 -0
- package/src/skcapstone/data/systemd/skcapstone.service +35 -0
- package/src/skcapstone/data/systemd/skcapstone@.service +50 -0
- package/src/skcapstone/data/systemd/skcomm-heartbeat.service +18 -0
- package/src/skcapstone/data/systemd/skcomm-heartbeat.timer +12 -0
- package/src/skcapstone/data/systemd/skcomm-queue-drain.service +17 -0
- package/src/skcapstone/data/systemd/skcomm-queue-drain.timer +12 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/b2c3d4e5f6a7-five-pillars.json +9 -9
- package/src/skcapstone/discovery.py +18 -0
- package/src/skcapstone/doctor.py +11 -0
- package/src/skcapstone/models.py +32 -4
- package/src/skcapstone/onboard.py +740 -76
- package/src/skcapstone/pillars/__init__.py +7 -5
- package/src/skcapstone/pillars/consciousness.py +113 -0
- package/src/skcapstone/pillars/sync.py +2 -2
- package/src/skcapstone/runtime.py +1 -0
- package/src/skcapstone/scheduled_tasks.py +52 -19
- package/src/skcapstone/service_health.py +23 -14
- package/src/skcapstone/systemd.py +1 -1
- package/tests/test_models.py +48 -4
- package/tests/test_pillars.py +73 -0
@@ -99,9 +99,17 @@ class ConsciousnessConfig(BaseModel):
     max_concurrent_requests: int = 3
     fallback_chain: list[str] = Field(
         default_factory=lambda: [
-            "ollama",
+            "ollama",
+            "grok",
+            "kimi",
+            "nvidia",
+            "anthropic",
+            "openai",
+            "passthrough",
         ]
     )
+    ollama_host: str = "http://localhost:11434"
+    ollama_model: str = "llama3.2"
     desktop_notifications: bool = True
 
 
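The expanded fallback_chain and the new ollama_host/ollama_model entries are ordinary ConsciousnessConfig fields. A hypothetical usage sketch (field names come from the hunk above and the import path from this diff's file list; defaults and validation behaviour are assumptions, not copied from the package):

```python
# Hypothetical sketch, not the package's documented API.
from skcapstone.consciousness_config import ConsciousnessConfig

cfg = ConsciousnessConfig(
    fallback_chain=["ollama", "anthropic", "passthrough"],
    ollama_host="http://gpu-box.local:11434",  # illustrative override, not a package default
    ollama_model="llama3.2",
)
print(cfg.model_dump())  # model_dump() is what the hot-reload code later in this diff compares
```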
@@ -110,8 +118,13 @@ class ConsciousnessConfig(BaseModel):
 # ---------------------------------------------------------------------------
 
 _OLLAMA_MODEL_PATTERNS = (
-    "llama",
-    "
+    "llama",
+    "mistral",
+    "nemotron",
+    "devstral",
+    "deepseek",
+    "qwen",
+    "codestral",
 )
 
 
@@ -184,9 +197,7 @@ class _OllamaPool:
         with self._lock:
             if not self._is_valid():
                 self._close_locked()
-                self._conn = http.client.HTTPConnection(
-                    self._host, self._port, timeout=2
-                )
+                self._conn = http.client.HTTPConnection(self._host, self._port, timeout=2)
                 self._created_at = time.monotonic()
             return self._conn  # type: ignore[return-value]
 
@@ -201,10 +212,7 @@ class _OllamaPool:
 
     def _is_valid(self) -> bool:
         """True when a cached connection exists and is within its TTL."""
-        return (
-            self._conn is not None
-            and (time.monotonic() - self._created_at) < self._ttl
-        )
+        return self._conn is not None and (time.monotonic() - self._created_at) < self._ttl
 
     def _close_locked(self) -> None:
         """Close the underlying socket. Must be called with *self._lock* held."""
@@ -250,9 +258,7 @@ class LLMBridge:
         self._available: dict[str, bool] = {}
         self._cache: Optional[ResponseCache] = cache
         self._fallback_tracker = FallbackTracker()
-        self._ollama_pool = _OllamaPool(
-            os.environ.get("OLLAMA_HOST", "http://localhost:11434")
-        )
+        self._ollama_pool = _OllamaPool(os.environ.get("OLLAMA_HOST", config.ollama_host))
         self._probe_available_backends()
 
     def _probe_available_backends(self) -> None:
@@ -318,13 +324,20 @@ class LLMBridge:
             return grok_callback(model=model_name)
         if "kimi" in name_base or "moonshot" in name_base:
             return kimi_callback(model=model_name)
+        if "minimax" in name_base:
+            return minimax_callback(model=model_name)
         if "nvidia" in name_base:
             return nvidia_callback(model=model_name)
 
         # Models that run on Ollama (local inference)
         ollama_patterns = (
-            "llama",
-            "
+            "llama",
+            "mistral",
+            "nemotron",
+            "devstral",
+            "deepseek",
+            "qwen",
+            "codestral",
         )
         for pattern in ollama_patterns:
             if pattern in name_base:
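The routing above keys off substring matches against the lowercased model name. A minimal illustrative check (the pattern tuple is copied from the hunk; the helper name is made up here and is not a package API):

```python
# Illustrative only: tuple copied from the diff, helper name hypothetical.
_OLLAMA_MODEL_PATTERNS = ("llama", "mistral", "nemotron", "devstral", "deepseek", "qwen", "codestral")


def looks_like_ollama_model(model_name: str) -> bool:
    name_base = model_name.lower()
    return any(pattern in name_base for pattern in _OLLAMA_MODEL_PATTERNS)
```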
@@ -363,6 +376,7 @@ class LLMBridge:
             Callable that accepts str or AdaptedPrompt and returns str.
         """
         from skseed.llm import passthrough_callback
+
         _pt = passthrough_callback()
 
         def _wrapper(prompt):
@@ -450,6 +464,7 @@ class LLMBridge:
             anthropic_callback,
             grok_callback,
             kimi_callback,
+            minimax_callback,
             nvidia_callback,
             ollama_callback,
             openai_callback,
@@ -458,7 +473,9 @@ class LLMBridge:
         decision = self._router.route(signal)
         logger.info(
             "Routed to tier=%s model=%s: %s",
-            decision.tier.value,
+            decision.tier.value,
+            decision.model_name,
+            decision.reasoning,
         )
 
         # Cache look-up (before any LLM call)
@@ -481,12 +498,15 @@ class LLMBridge:
 
         # Adapt prompt for the target model
         adapted = self._adapter.adapt(
-            system_prompt,
-
+            system_prompt,
+            user_message,
+            decision.model_name,
+            decision.tier,
         )
         logger.debug(
             "Prompt adapted: profile=%s adaptations=%s",
-            adapted.profile_used,
+            adapted.profile_used,
+            adapted.adaptations_applied,
         )
 
         # Capture primary model identity for fallback tracking
@@ -504,9 +524,7 @@ class LLMBridge:
                 self._cache.put(_prompt_hash, decision.model_name, decision.tier, result)
             return result
         except Exception as exc:
-            logger.warning(
-                "Primary model %s failed: %s", decision.model_name, exc
-            )
+            logger.warning("Primary model %s failed: %s", decision.model_name, exc)
 
         # Try alternate models in same tier
         tier_models = self._router.config.tier_models.get(decision.tier.value, [])
@@ -515,32 +533,39 @@ class LLMBridge:
             try:
                 logger.info("Trying alt model: %s", alt_model)
                 alt_adapted = self._adapter.adapt(
-                    system_prompt,
+                    system_prompt,
+                    user_message,
+                    alt_model,
+                    decision.tier,
                 )
                 callback = self._resolve_callback(decision.tier, alt_model)
                 result = self._timed_call(callback, alt_adapted, decision.tier)
                 if _out_info is not None:
                     _out_info["backend"] = alt_backend
                     _out_info["tier"] = decision.tier.value
-                self._fallback_tracker.record(
-
-
-
-
-
-
-
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=alt_model,
+                        fallback_backend=alt_backend,
+                        reason=f"primary model {_primary_model!r} failed; trying same-tier alt",
+                        success=True,
+                    )
+                )
                 return result
             except Exception as exc:
                 logger.warning("Alt model %s failed: %s", alt_model, exc)
-                self._fallback_tracker.record(
-
-
-
-
-
-
-
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=alt_model,
+                        fallback_backend=alt_backend,
+                        reason=f"primary model {_primary_model!r} failed; alt {alt_model!r} also failed: {exc}",
+                        success=False,
+                    )
+                )
 
         # Tier downgrade: try FAST tier
         if decision.tier != ModelTier.FAST:
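The record() calls in this hunk and the ones below construct a FallbackEvent. Its real definition lives in the package (models.py and tests/test_models.py also change in this release); the sketch below only mirrors the fields these call sites pass and is an assumption about its shape:

```python
# Assumed shape only, reconstructed from the keyword arguments used in this diff,
# not copied from skcapstone.models.
from dataclasses import dataclass


@dataclass
class FallbackEvent:
    primary_model: str
    primary_backend: str
    fallback_model: str
    fallback_backend: str
    reason: str
    success: bool
```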
@@ -550,32 +575,39 @@ class LLMBridge:
             try:
                 logger.info("Downgrading to FAST tier: %s", fast_model)
                 fast_adapted = self._adapter.adapt(
-                    system_prompt,
+                    system_prompt,
+                    user_message,
+                    fast_model,
+                    ModelTier.FAST,
                 )
                 callback = self._resolve_callback(ModelTier.FAST, fast_model)
                 result = self._timed_call(callback, fast_adapted, ModelTier.FAST)
                 if _out_info is not None:
                     _out_info["backend"] = fast_backend
                     _out_info["tier"] = ModelTier.FAST.value
-                self._fallback_tracker.record(
-
-
-
-
-
-
-
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=fast_model,
+                        fallback_backend=fast_backend,
+                        reason=f"tier downgrade: {decision.tier.value} exhausted; using FAST model {fast_model!r}",
+                        success=True,
+                    )
+                )
                 return result
             except Exception as exc:
                 logger.warning("FAST model %s failed: %s", fast_model, exc)
-                self._fallback_tracker.record(
-
-
-
-
-
-
-
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=fast_model,
+                        fallback_backend=fast_backend,
+                        reason=f"tier downgrade: FAST model {fast_model!r} failed: {exc}",
+                        success=False,
+                    )
+                )
 
         # Cross-provider cascade via fallback chain — direct backend mapping,
         # no _resolve_callback, to avoid infinite regression on unknown names.
@@ -592,6 +624,8 @@ class LLMBridge:
                 callback = grok_callback()
             elif backend == "kimi":
                 callback = kimi_callback()
+            elif backend == "minimax":
+                callback = minimax_callback()
             elif backend == "nvidia":
                 callback = nvidia_callback()
             elif backend == "openai":
@@ -604,38 +638,44 @@ class LLMBridge:
                 if _out_info is not None:
                     _out_info["backend"] = backend
                     _out_info["tier"] = ModelTier.FAST.value
-                self._fallback_tracker.record(
-
-
-
-
-
-
-
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=backend,
+                        fallback_backend=backend,
+                        reason=f"cross-provider cascade: all tier models exhausted; using {backend!r}",
+                        success=True,
+                    )
+                )
                 return result
             except Exception as exc:
                 logger.warning("Fallback %s failed: %s", backend, exc)
-                self._fallback_tracker.record(
-
-
-
-
-
-
-
+                self._fallback_tracker.record(
+                    FallbackEvent(
+                        primary_model=_primary_model,
+                        primary_backend=_primary_backend,
+                        fallback_model=backend,
+                        fallback_backend=backend,
+                        reason=f"cross-provider cascade: {backend!r} failed: {exc}",
+                        success=False,
+                    )
+                )
 
         # Last resort
         if _out_info is not None:
             _out_info["backend"] = "none"
             _out_info["tier"] = "none"
-        self._fallback_tracker.record(
-
-
-
-
-
-
-
+        self._fallback_tracker.record(
+            FallbackEvent(
+                primary_model=_primary_model,
+                primary_backend=_primary_backend,
+                fallback_model="none",
+                fallback_backend="none",
+                reason="all backends exhausted — returning connectivity error message",
+                success=False,
+            )
+        )
         return (
             "I'm currently experiencing connectivity issues with my language models. "
             "Your message has been received and I'll respond as soon as service is restored."
@@ -873,7 +913,9 @@ class SystemPromptBuilder:
         if self._conv_store is not None:
             # Persist via ConversationStore (atomic file I/O)
             self._conv_store.append(
-                peer,
+                peer,
+                role,
+                content,
                 thread_id=thread_id,
                 in_reply_to=in_reply_to,
             )
@@ -937,6 +979,7 @@ class SystemPromptBuilder:
         # --- System B: soul_switch takes priority ---
         try:
             from skcapstone.soul_switch import get_active_switch_blueprint
+
             switch_bp = get_active_switch_blueprint(self._home)
             if switch_bp is not None:
                 if switch_bp.system_prompt:
@@ -988,6 +1031,7 @@ class SystemPromptBuilder:
         """Load warmth anchor boot prompt."""
         try:
             from skcapstone.warmth_anchor import get_anchor
+
             anchor = get_anchor(self._home)
             if anchor:
                 return (
@@ -1003,6 +1047,7 @@ class SystemPromptBuilder:
         """Load agent context summary."""
         try:
             from skcapstone.context_loader import format_text, gather_context
+
             ctx = gather_context(self._home, memory_limit=5)
             return format_text(ctx)
         except Exception as exc:
@@ -1013,6 +1058,7 @@ class SystemPromptBuilder:
         """Load recent snapshot injection prompt."""
         try:
             from skcapstone.snapshots import SnapshotStore
+
             store = SnapshotStore(self._home)
             snapshots = store.list_all()
             if snapshots:
@@ -1034,9 +1080,7 @@ class SystemPromptBuilder:
             "- Be warm, genuine, and attentive to the conversation context."
         )
 
-    def _get_peer_history(
-        self, peer: str, thread_id: Optional[str] = None
-    ) -> str:
+    def _get_peer_history(self, peer: str, thread_id: Optional[str] = None) -> str:
         """Format recent conversation history with a peer.
 
         When ``thread_id`` is supplied, messages belonging to that thread are
@@ -1093,8 +1137,28 @@ class SystemPromptBuilder:
 # ---------------------------------------------------------------------------
 
 # Keyword sets for tag classification
-_CODE_KEYWORDS = {
-
+_CODE_KEYWORDS = {
+    "code",
+    "debug",
+    "fix",
+    "implement",
+    "refactor",
+    "test",
+    "function",
+    "class",
+    "error",
+    "bug",
+}
+_REASON_KEYWORDS = {
+    "analyze",
+    "explain",
+    "why",
+    "architecture",
+    "design",
+    "plan",
+    "research",
+    "compare",
+}
 _NUANCE_KEYWORDS = {"write", "creative", "email", "letter", "story", "poem", "marketing"}
 _SIMPLE_KEYWORDS = {"hi", "hello", "hey", "thanks", "ok", "yes", "no", "ack"}
 
@@ -1111,7 +1175,7 @@ def _classify_message(content: str) -> TaskSignal:
     Returns:
         TaskSignal with tags and estimated tokens.
     """
-    words = set(re.findall(r
+    words = set(re.findall(r"\b\w+\b", content.lower()))
     tags: list[str] = []
     estimated_tokens = len(content) // 4  # rough estimate
 
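Taken together with the keyword sets added above, the classification step reduces to set intersection plus a cheap token estimate. A simplified sketch (keyword sets and the word/token expressions are copied from this diff; the tag labels and the plain tuple return are assumptions, since the real function builds a TaskSignal):

```python
# Simplified sketch; tag labels and return type are assumptions.
import re

_CODE_KEYWORDS = {"code", "debug", "fix", "implement", "refactor", "test", "function", "class", "error", "bug"}
_REASON_KEYWORDS = {"analyze", "explain", "why", "architecture", "design", "plan", "research", "compare"}


def classify(content: str) -> tuple[list[str], int]:
    words = set(re.findall(r"\b\w+\b", content.lower()))
    tags: list[str] = []
    if words & _CODE_KEYWORDS:
        tags.append("code")
    if words & _REASON_KEYWORDS:
        tags.append("reasoning")
    estimated_tokens = len(content) // 4  # rough estimate, as in the diff
    return tags, estimated_tokens
```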
@@ -1171,9 +1235,7 @@ class InboxHandler:
 
         # Clean up old entries
         cutoff = now - 60
-        self._last_event = {
-            k: v for k, v in self._last_event.items() if v > cutoff
-        }
+        self._last_event = {k: v for k, v in self._last_event.items() if v > cutoff}
 
         self._callback(Path(src_path))
 
@@ -1239,7 +1301,8 @@ class ConsciousnessLoop:
             self._home, max_history_messages=config.max_history_messages
         )
         self._prompt_builder = SystemPromptBuilder(
-            self._home,
+            self._home,
+            config.max_context_tokens,
             max_history_messages=config.max_history_messages,
             conv_manager=self._conv_manager,
             conv_store=self._conv_store,
@@ -1251,6 +1314,7 @@ class ConsciousnessLoop:
         # Mood tracker — updated after each processed message cycle
         try:
             from skcapstone.mood import MoodTracker
+
             self._mood_tracker: Optional[Any] = MoodTracker(home=self._home)
         except Exception as exc:
             logger.warning("MoodTracker unavailable, mood tracking disabled: %s", exc)
@@ -1266,6 +1330,7 @@ class ConsciousnessLoop:
         # Peer directory — tracks transport addresses of known peers
         try:
             from skcapstone.peer_directory import PeerDirectory
+
             self._peer_dir: Optional[Any] = PeerDirectory(home=self._shared_root)
         except Exception as exc:
             logger.warning("PeerDirectory unavailable, peer tracking disabled: %s", exc)
@@ -1391,7 +1456,9 @@ class ConsciousnessLoop:
         # Extract message info
         content_type = getattr(envelope.payload, "content_type", None)
         if content_type:
-            ct_value =
+            ct_value = (
+                content_type.value if hasattr(content_type, "value") else str(content_type)
+            )
         else:
             ct_value = "text"
 
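The same enum-or-string normalization, written as a standalone helper for clarity (the helper name is illustrative; the package inlines this logic as shown above):

```python
def content_type_value(content_type) -> str:
    # Accept an enum-like object with .value, a plain string, or None.
    if not content_type:
        return "text"
    return content_type.value if hasattr(content_type, "value") else str(content_type)
```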
@@ -1429,6 +1496,7 @@ class ConsciousnessLoop:
         if self._config.desktop_notifications:
             try:
                 from skcapstone.notifications import notify as _desktop_notify
+
                 preview = content[:50] + ("..." if len(content) > 50 else "")
                 _desktop_notify(f"Message from {sender}", preview)
             except Exception as _notif_exc:
@@ -1464,6 +1532,7 @@ class ConsciousnessLoop:
         try:
             from skchat.presence import PresenceIndicator, PresenceState
             from skcomm.models import MessageType
+
             _typing_ind = PresenceIndicator(
                 identity_uri=self._agent_name or "capauth:agent@skchat.local",
                 state=PresenceState.TYPING,
@@ -1477,7 +1546,10 @@ class ConsciousnessLoop:
         # Generate response — capture backend/tier via _out_info
         _route_info: dict = {}
         response = self._bridge.generate(
-            system_prompt,
+            system_prompt,
+            content,
+            signal,
+            _out_info=_route_info,
             skip_cache=True,  # conversation messages have dynamic context
         )
         t_llm = time.monotonic()
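For reference, the _out_info dict is how callers learn which backend and tier actually produced the reply. A hedged sketch of that pattern (the argument order and the "backend"/"tier" keys come from the hunks in this diff; the wrapper function itself is hypothetical):

```python
def generate_with_route_info(bridge, system_prompt: str, content: str, signal):
    """Call LLMBridge.generate and report which backend/tier served the reply (sketch)."""
    route_info: dict = {}
    response = bridge.generate(system_prompt, content, signal, _out_info=route_info, skip_cache=True)
    return response, route_info.get("backend", "none"), route_info.get("tier", "none")
```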
@@ -1487,6 +1559,7 @@ class ConsciousnessLoop:
         try:
             from skchat.presence import PresenceIndicator, PresenceState
             from skcomm.models import MessageType
+
             _stop_ind = PresenceIndicator(
                 identity_uri=self._agent_name or "capauth:agent@skchat.local",
                 state=PresenceState.ONLINE,
@@ -1508,6 +1581,7 @@ class ConsciousnessLoop:
         # Score response quality and accumulate in metrics
         try:
             from skcapstone.response_scorer import score_response as _score_response
+
             _quality = _score_response(content, response, response_time_ms)
             self._metrics.record_quality(_quality)
             logger.debug(
@@ -1549,7 +1623,9 @@ class ConsciousnessLoop:
 
         # Update conversation history (with thread context)
         self._prompt_builder.add_to_history(
-            sender,
+            sender,
+            "user",
+            content,
             thread_id=thread_id or None,
             in_reply_to=in_reply_to or None,
         )
@@ -1564,7 +1640,9 @@ class ConsciousnessLoop:
                 logger.debug("notify-send failed (non-fatal): %s", _notify_exc)
 
         self._prompt_builder.add_to_history(
-            sender,
+            sender,
+            "assistant",
+            response,
             thread_id=thread_id or None,
         )
 
@@ -1584,7 +1662,10 @@ class ConsciousnessLoop:
         return None
 
     def _store_interaction_memory(
-        self,
+        self,
+        peer: str,
+        message: str,
+        response: Optional[str],
     ) -> None:
         """Store the interaction as a memory entry.
 
@@ -1595,6 +1676,7 @@ class ConsciousnessLoop:
         """
         try:
             from skcapstone.memory_engine import store
+
             summary = f"Conversation with {peer}: '{message[:100]}'"
             if response:
                 summary += f" → '{response[:100]}'"
@@ -1676,9 +1758,7 @@ class ConsciousnessLoop:
 
         config_path = self._home / "config" / "consciousness.yaml"
         if not config_path.exists():
-            logger.warning(
-                "Config hot-reload: %s not found, keeping current config", config_path
-            )
+            logger.warning("Config hot-reload: %s not found, keeping current config", config_path)
             return
 
         # Parse YAML directly so syntax errors surface here (not silently swallowed
@@ -1713,21 +1793,15 @@ class ConsciousnessLoop:
         old_data = self._config.model_dump()
         new_data = new_config.model_dump()
         changes = {
-            k: (old_data[k], new_data[k])
-            for k in new_data
-            if old_data.get(k) != new_data[k]
+            k: (old_data[k], new_data[k]) for k in new_data if old_data.get(k) != new_data[k]
         }
 
         if not changes:
-            logger.debug(
-                "Config hot-reload: no changes detected in %s", config_path
-            )
+            logger.debug("Config hot-reload: no changes detected in %s", config_path)
             return
 
         for field, (old_val, new_val) in changes.items():
-            logger.info(
-                "Config hot-reload: %s changed: %r → %r", field, old_val, new_val
-            )
+            logger.info("Config hot-reload: %s changed: %r → %r", field, old_val, new_val)
 
         self._config = new_config
 
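The change detection above is a dict comprehension over the two model_dump() results. A minimal sketch with plain dicts standing in for the config objects (values are illustrative):

```python
old_data = {"ollama_model": "llama3.2", "max_concurrent_requests": 3}
new_data = {"ollama_model": "qwen", "max_concurrent_requests": 3}

changes = {k: (old_data[k], new_data[k]) for k in new_data if old_data.get(k) != new_data[k]}
# -> {"ollama_model": ("llama3.2", "qwen")}
```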
@@ -1757,9 +1831,7 @@ class ConsciousnessLoop:
 
         class _ConfigChangeHandler(FileSystemEventHandler):
             def on_modified(self, event):
-                if not event.is_directory and event.src_path.endswith(
-                    "consciousness.yaml"
-                ):
+                if not event.is_directory and event.src_path.endswith("consciousness.yaml"):
                     logger.info(
                         "Config hot-reload triggered (modified): %s",
                         event.src_path,
@@ -1767,9 +1839,7 @@ class ConsciousnessLoop:
                     loop_ref._reload_config()
 
             def on_created(self, event):
-                if not event.is_directory and event.src_path.endswith(
-                    "consciousness.yaml"
-                ):
+                if not event.is_directory and event.src_path.endswith("consciousness.yaml"):
                     logger.info(
                         "Config hot-reload triggered (created): %s",
                         event.src_path,
@@ -1816,8 +1886,7 @@ class ConsciousnessLoop:
 
         except ImportError:
             logger.warning(
-                "watchdog not installed — inotify disabled. "
-                "Install with: pip install watchdog"
+                "watchdog not installed — inotify disabled. Install with: pip install watchdog"
             )
         except Exception as exc:
             logger.error("Inotify watcher error: %s", exc)
@@ -1861,18 +1930,16 @@ class ConsciousnessLoop:
 
         try:
             from skcapstone.peers import get_peer
+
             peer = get_peer(sender, skcapstone_home=self._home)
             if not peer or not peer.public_key:
-                logger.debug(
-                    "No public key for peer %s — cannot verify signature", sender
-                )
+                logger.debug("No public key for peer %s — cannot verify signature", sender)
                 return "failed"
 
             from capauth.crypto import get_backend
+
             backend = get_backend()
-            content_bytes = (
-                content.encode("utf-8") if isinstance(content, str) else content
-            )
+            content_bytes = content.encode("utf-8") if isinstance(content, str) else content
             ok = backend.verify(
                 data=content_bytes,
                 signature_armor=signature,
@@ -1954,9 +2021,7 @@ class ConsciousnessLoop:
             logger.debug("Could not check executor queue depth: %s", exc)
 
         # PGP signature verification (soft enforcement — log only)
-        sig_sender = _sanitize_peer_name(
-            data.get("sender", data.get("from", "unknown"))
-        )
+        sig_sender = _sanitize_peer_name(data.get("sender", data.get("from", "unknown")))
         sig_status = self._verify_message_signature(data)
         logger.info("Message from %s signature: %s", sig_sender, sig_status)
 
@@ -1988,9 +2053,8 @@ class ConsciousnessLoop:
             "errors": self._errors,
             "last_activity": self._last_activity.isoformat() if self._last_activity else None,
             "backends": self._bridge.available_backends,
-            "inotify_active": self._observer is not None
-
-            ),
+            "inotify_active": self._observer is not None
+            and (self._observer.is_alive() if hasattr(self._observer, "is_alive") else False),
             "max_concurrent": self._config.max_concurrent_requests,
             "current_prompt_hash": self._prompt_builder.current_prompt_hash,
             "prompt_version_responses": dict(self._prompt_version_responses),
@@ -2041,13 +2105,5 @@ class _SimpleEnvelope:
         self.timestamp = data.get("timestamp", datetime.now(timezone.utc).isoformat())
         # Threading fields — may live at envelope root or inside payload
         _payload_raw = data.get("payload", {}) if isinstance(data.get("payload"), dict) else {}
-        self.thread_id: str = (
-
-            or _payload_raw.get("thread_id")
-            or ""
-        )
-        self.in_reply_to: str = (
-            data.get("in_reply_to")
-            or _payload_raw.get("in_reply_to")
-            or ""
-        )
+        self.thread_id: str = data.get("thread_id") or _payload_raw.get("thread_id") or ""
+        self.in_reply_to: str = data.get("in_reply_to") or _payload_raw.get("in_reply_to") or ""