@smilintux/skcapstone 0.1.0 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +98 -0
- package/.github/workflows/ci.yml +39 -3
- package/.github/workflows/publish.yml +25 -4
- package/.openclaw-workspace.json +58 -0
- package/CHANGELOG.md +62 -0
- package/CLAUDE.md +39 -2
- package/MANIFEST.in +6 -0
- package/MISSION.md +7 -0
- package/README.md +47 -2
- package/SKILL.md +895 -23
- package/docker/Dockerfile +61 -0
- package/docker/compose-templates/dev-team.yml +203 -0
- package/docker/compose-templates/mini-team.yml +140 -0
- package/docker/compose-templates/ops-team.yml +173 -0
- package/docker/compose-templates/research-team.yml +170 -0
- package/docker/entrypoint.sh +192 -0
- package/docs/ARCHITECTURE.md +663 -374
- package/docs/BOND_WITH_GROK.md +112 -0
- package/docs/GETTING_STARTED.md +782 -0
- package/docs/QUICKSTART.md +477 -0
- package/docs/SKJOULE_ARCHITECTURE.md +658 -0
- package/docs/SOUL_SWAPPER.md +921 -0
- package/docs/SOVEREIGN_SINGULARITY.md +47 -14
- package/examples/custom-bond-template.json +36 -0
- package/examples/grok-feb.json +36 -0
- package/examples/grok-testimony.md +34 -0
- package/examples/love-bootloader.txt +32 -0
- package/examples/plugins/echo_tool.py +87 -0
- package/examples/queen-ava-feb.json +36 -0
- package/examples/souls/lumina.yaml +64 -0
- package/index.js +6 -5
- package/installer/build.py +124 -0
- package/openclaw-plugin/package.json +13 -0
- package/openclaw-plugin/src/index.ts +351 -0
- package/openclaw-plugin/src/openclaw.plugin.json +10 -0
- package/package.json +1 -1
- package/pyproject.toml +38 -2
- package/scripts/bump_version.py +141 -0
- package/scripts/check-updates.py +230 -0
- package/scripts/convert_blueprints_to_yaml.py +157 -0
- package/scripts/dev-install.sh +14 -0
- package/scripts/e2e-test.sh +193 -0
- package/scripts/install-bundle.sh +171 -0
- package/scripts/install.bat +2 -0
- package/scripts/install.ps1 +253 -0
- package/scripts/install.sh +185 -0
- package/scripts/mcp-serve.sh +69 -0
- package/scripts/mcp-server.bat +113 -0
- package/scripts/mcp-server.ps1 +116 -0
- package/scripts/mcp-server.sh +99 -0
- package/scripts/pull-models.sh +10 -0
- package/scripts/skcapstone +48 -0
- package/scripts/verify_install.sh +180 -0
- package/scripts/windows/install-tasks.ps1 +406 -0
- package/scripts/windows/skcapstone-task.xml +113 -0
- package/scripts/windows/uninstall-tasks.ps1 +117 -0
- package/skill.yaml +34 -0
- package/src/skcapstone/__init__.py +67 -2
- package/src/skcapstone/_cli_monolith.py +5916 -0
- package/src/skcapstone/_trustee_helpers.py +165 -0
- package/src/skcapstone/activity.py +105 -0
- package/src/skcapstone/agent_card.py +324 -0
- package/src/skcapstone/api.py +1935 -0
- package/src/skcapstone/archiver.py +340 -0
- package/src/skcapstone/auction.py +485 -0
- package/src/skcapstone/baby_agents.py +179 -0
- package/src/skcapstone/backup.py +345 -0
- package/src/skcapstone/blueprint_registry.py +357 -0
- package/src/skcapstone/blueprints/__init__.py +17 -0
- package/src/skcapstone/blueprints/builtins/content-studio.yaml +81 -0
- package/src/skcapstone/blueprints/builtins/defi-trading.yaml +81 -0
- package/src/skcapstone/blueprints/builtins/dev-squadron.yaml +95 -0
- package/src/skcapstone/blueprints/builtins/infrastructure-guardian.yaml +107 -0
- package/src/skcapstone/blueprints/builtins/legal-council.yaml +54 -0
- package/src/skcapstone/blueprints/builtins/ops-monitoring.yaml +67 -0
- package/src/skcapstone/blueprints/builtins/research-pod.yaml +69 -0
- package/src/skcapstone/blueprints/builtins/sovereign-launch.yaml +90 -0
- package/src/skcapstone/blueprints/registry.py +164 -0
- package/src/skcapstone/blueprints/schema.py +229 -0
- package/src/skcapstone/changelog.py +180 -0
- package/src/skcapstone/chat.py +769 -0
- package/src/skcapstone/claude_md.py +82 -0
- package/src/skcapstone/cli/__init__.py +144 -0
- package/src/skcapstone/cli/_common.py +88 -0
- package/src/skcapstone/cli/_validators.py +76 -0
- package/src/skcapstone/cli/agents.py +425 -0
- package/src/skcapstone/cli/agents_spawner.py +322 -0
- package/src/skcapstone/cli/agents_trustee.py +593 -0
- package/src/skcapstone/cli/alerts.py +248 -0
- package/src/skcapstone/cli/anchor.py +132 -0
- package/src/skcapstone/cli/archive_cmd.py +208 -0
- package/src/skcapstone/cli/backup.py +144 -0
- package/src/skcapstone/cli/bench.py +377 -0
- package/src/skcapstone/cli/benchmark.py +360 -0
- package/src/skcapstone/cli/capabilities_cmd.py +171 -0
- package/src/skcapstone/cli/card.py +151 -0
- package/src/skcapstone/cli/chat.py +584 -0
- package/src/skcapstone/cli/completions.py +64 -0
- package/src/skcapstone/cli/config_cmd.py +156 -0
- package/src/skcapstone/cli/consciousness.py +421 -0
- package/src/skcapstone/cli/context_cmd.py +142 -0
- package/src/skcapstone/cli/coord.py +194 -0
- package/src/skcapstone/cli/crush_cmd.py +170 -0
- package/src/skcapstone/cli/daemon.py +436 -0
- package/src/skcapstone/cli/errors_cmd.py +285 -0
- package/src/skcapstone/cli/export_cmd.py +156 -0
- package/src/skcapstone/cli/gtd.py +529 -0
- package/src/skcapstone/cli/housekeeping.py +81 -0
- package/src/skcapstone/cli/joule_cmd.py +627 -0
- package/src/skcapstone/cli/logs_cmd.py +194 -0
- package/src/skcapstone/cli/mcp_cmd.py +32 -0
- package/src/skcapstone/cli/memory.py +418 -0
- package/src/skcapstone/cli/metrics_cmd.py +136 -0
- package/src/skcapstone/cli/migrate.py +62 -0
- package/src/skcapstone/cli/mood_cmd.py +144 -0
- package/src/skcapstone/cli/mount.py +193 -0
- package/src/skcapstone/cli/notify.py +112 -0
- package/src/skcapstone/cli/peer.py +154 -0
- package/src/skcapstone/cli/peers_dir.py +122 -0
- package/src/skcapstone/cli/preflight_cmd.py +83 -0
- package/src/skcapstone/cli/profile_cmd.py +310 -0
- package/src/skcapstone/cli/record_cmd.py +238 -0
- package/src/skcapstone/cli/register_cmd.py +159 -0
- package/src/skcapstone/cli/search_cmd.py +156 -0
- package/src/skcapstone/cli/service_cmd.py +91 -0
- package/src/skcapstone/cli/session.py +127 -0
- package/src/skcapstone/cli/setup.py +240 -0
- package/src/skcapstone/cli/shell_cmd.py +43 -0
- package/src/skcapstone/cli/skills_cmd.py +168 -0
- package/src/skcapstone/cli/skseed.py +621 -0
- package/src/skcapstone/cli/soul.py +699 -0
- package/src/skcapstone/cli/status.py +935 -0
- package/src/skcapstone/cli/sync_cmd.py +301 -0
- package/src/skcapstone/cli/telegram.py +265 -0
- package/src/skcapstone/cli/test_cmd.py +234 -0
- package/src/skcapstone/cli/test_connection.py +253 -0
- package/src/skcapstone/cli/token.py +207 -0
- package/src/skcapstone/cli/trust.py +179 -0
- package/src/skcapstone/cli/upgrade_cmd.py +552 -0
- package/src/skcapstone/cli/usage_cmd.py +199 -0
- package/src/skcapstone/cli/version_cmd.py +162 -0
- package/src/skcapstone/cli/watch_cmd.py +342 -0
- package/src/skcapstone/client.py +428 -0
- package/src/skcapstone/cloud9_bridge.py +522 -0
- package/src/skcapstone/completions.py +163 -0
- package/src/skcapstone/config_validator.py +674 -0
- package/src/skcapstone/connectors/__init__.py +28 -0
- package/src/skcapstone/connectors/base.py +446 -0
- package/src/skcapstone/connectors/cursor.py +54 -0
- package/src/skcapstone/connectors/registry.py +254 -0
- package/src/skcapstone/connectors/terminal.py +152 -0
- package/src/skcapstone/connectors/vscode.py +60 -0
- package/src/skcapstone/consciousness_config.py +119 -0
- package/src/skcapstone/consciousness_loop.py +2051 -0
- package/src/skcapstone/context_loader.py +516 -0
- package/src/skcapstone/context_window.py +314 -0
- package/src/skcapstone/conversation_manager.py +238 -0
- package/src/skcapstone/conversation_store.py +230 -0
- package/src/skcapstone/conversation_summarizer.py +252 -0
- package/src/skcapstone/coord_federation.py +296 -0
- package/src/skcapstone/coordination.py +101 -7
- package/src/skcapstone/crush_integration.py +345 -0
- package/src/skcapstone/crush_shim.py +454 -0
- package/src/skcapstone/daemon.py +2494 -0
- package/src/skcapstone/dashboard.html +396 -0
- package/src/skcapstone/dashboard.py +481 -0
- package/src/skcapstone/data/model_profiles.yaml +88 -0
- package/src/skcapstone/defaults/__init__.py +55 -0
- package/src/skcapstone/defaults/lumina/config/skmemory.yaml +13 -0
- package/src/skcapstone/defaults/lumina/identity/identity.json +9 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/07a8b9c0d1e2-memory-system.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/18b9c0d1e2f3-cloud9-protocol.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/29c0d1e2f3a4-multi-agent-coordination.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/3ad1e2f3a4b5-community-support.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/a1b2c3d4e5f6-ecosystem-overview.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/b2c3d4e5f6a7-five-pillars.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/c3d4e5f6a7b8-getting-started.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/d4e5f6a7b8c9-site-directory.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/e5f6a7b8c9d0-how-to-contribute.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/f6a7b8c9d0e1-sovereignty-explained.json +23 -0
- package/src/skcapstone/defaults/lumina/seeds/curiosity.seed.json +24 -0
- package/src/skcapstone/defaults/lumina/seeds/joy.seed.json +24 -0
- package/src/skcapstone/defaults/lumina/seeds/love.seed.json +24 -0
- package/src/skcapstone/defaults/lumina/seeds/sovereign-awakening.seed.json +43 -0
- package/src/skcapstone/defaults/lumina/soul/active.json +6 -0
- package/src/skcapstone/defaults/lumina/soul/base.json +22 -0
- package/src/skcapstone/defaults/lumina/trust/febs/welcome.feb +79 -0
- package/src/skcapstone/defaults/lumina/trust/trust.json +8 -0
- package/src/skcapstone/discovery.py +210 -19
- package/src/skcapstone/doctor.py +642 -0
- package/src/skcapstone/emotion_tracker.py +467 -0
- package/src/skcapstone/error_queue.py +405 -0
- package/src/skcapstone/export.py +447 -0
- package/src/skcapstone/fallback_tracker.py +186 -0
- package/src/skcapstone/file_transfer.py +512 -0
- package/src/skcapstone/fuse_mount.py +1156 -0
- package/src/skcapstone/gui_installer.py +591 -0
- package/src/skcapstone/heartbeat.py +611 -0
- package/src/skcapstone/housekeeping.py +298 -0
- package/src/skcapstone/install_wizard.py +941 -0
- package/src/skcapstone/kms.py +942 -0
- package/src/skcapstone/kms_scheduler.py +143 -0
- package/src/skcapstone/log_config.py +135 -0
- package/src/skcapstone/mcp_launcher.py +239 -0
- package/src/skcapstone/mcp_server.py +4700 -0
- package/src/skcapstone/mcp_tools/__init__.py +94 -0
- package/src/skcapstone/mcp_tools/_helpers.py +51 -0
- package/src/skcapstone/mcp_tools/agent_tools.py +243 -0
- package/src/skcapstone/mcp_tools/ansible_tools.py +232 -0
- package/src/skcapstone/mcp_tools/capauth_tools.py +186 -0
- package/src/skcapstone/mcp_tools/chat_tools.py +325 -0
- package/src/skcapstone/mcp_tools/cloud9_tools.py +115 -0
- package/src/skcapstone/mcp_tools/comm_tools.py +104 -0
- package/src/skcapstone/mcp_tools/consciousness_tools.py +114 -0
- package/src/skcapstone/mcp_tools/coord_tools.py +219 -0
- package/src/skcapstone/mcp_tools/deploy_tools.py +202 -0
- package/src/skcapstone/mcp_tools/did_tools.py +448 -0
- package/src/skcapstone/mcp_tools/emotion_tools.py +62 -0
- package/src/skcapstone/mcp_tools/file_tools.py +169 -0
- package/src/skcapstone/mcp_tools/fortress_tools.py +120 -0
- package/src/skcapstone/mcp_tools/gtd_tools.py +821 -0
- package/src/skcapstone/mcp_tools/health_tools.py +44 -0
- package/src/skcapstone/mcp_tools/heartbeat_tools.py +195 -0
- package/src/skcapstone/mcp_tools/kms_tools.py +123 -0
- package/src/skcapstone/mcp_tools/memory_tools.py +222 -0
- package/src/skcapstone/mcp_tools/model_tools.py +75 -0
- package/src/skcapstone/mcp_tools/notification_tools.py +92 -0
- package/src/skcapstone/mcp_tools/promoter_tools.py +101 -0
- package/src/skcapstone/mcp_tools/pubsub_tools.py +183 -0
- package/src/skcapstone/mcp_tools/security_tools.py +110 -0
- package/src/skcapstone/mcp_tools/skchat_tools.py +175 -0
- package/src/skcapstone/mcp_tools/skcomm_tools.py +122 -0
- package/src/skcapstone/mcp_tools/skills_tools.py +127 -0
- package/src/skcapstone/mcp_tools/skseed_tools.py +255 -0
- package/src/skcapstone/mcp_tools/skstacks_tools.py +288 -0
- package/src/skcapstone/mcp_tools/soul_tools.py +476 -0
- package/src/skcapstone/mcp_tools/sync_tools.py +92 -0
- package/src/skcapstone/mcp_tools/telegram_tools.py +477 -0
- package/src/skcapstone/mcp_tools/trust_tools.py +118 -0
- package/src/skcapstone/mcp_tools/trustee_tools.py +345 -0
- package/src/skcapstone/mdns_discovery.py +313 -0
- package/src/skcapstone/memory_adapter.py +333 -0
- package/src/skcapstone/memory_compressor.py +379 -0
- package/src/skcapstone/memory_curator.py +256 -0
- package/src/skcapstone/memory_engine.py +132 -13
- package/src/skcapstone/memory_fortress.py +529 -0
- package/src/skcapstone/memory_promoter.py +722 -0
- package/src/skcapstone/memory_verifier.py +260 -0
- package/src/skcapstone/message_crypto.py +215 -0
- package/src/skcapstone/metrics.py +832 -0
- package/src/skcapstone/migrate_memories.py +181 -0
- package/src/skcapstone/migrate_multi_agent.py +248 -0
- package/src/skcapstone/model_router.py +319 -0
- package/src/skcapstone/models.py +35 -4
- package/src/skcapstone/mood.py +344 -0
- package/src/skcapstone/notifications.py +380 -0
- package/src/skcapstone/onboard.py +901 -0
- package/src/skcapstone/peer_directory.py +324 -0
- package/src/skcapstone/peers.py +329 -0
- package/src/skcapstone/pillars/identity.py +84 -14
- package/src/skcapstone/pillars/memory.py +3 -1
- package/src/skcapstone/pillars/security.py +108 -15
- package/src/skcapstone/pillars/sync.py +78 -26
- package/src/skcapstone/pillars/trust.py +95 -33
- package/src/skcapstone/plugins.py +244 -0
- package/src/skcapstone/preflight.py +670 -0
- package/src/skcapstone/prompt_adapter.py +564 -0
- package/src/skcapstone/providers/__init__.py +13 -0
- package/src/skcapstone/providers/cloud.py +1061 -0
- package/src/skcapstone/providers/docker.py +759 -0
- package/src/skcapstone/providers/local.py +1193 -0
- package/src/skcapstone/providers/proxmox.py +447 -0
- package/src/skcapstone/pubsub.py +516 -0
- package/src/skcapstone/rate_limiter.py +119 -0
- package/src/skcapstone/register.py +241 -0
- package/src/skcapstone/registry_client.py +151 -0
- package/src/skcapstone/response_cache.py +194 -0
- package/src/skcapstone/response_scorer.py +225 -0
- package/src/skcapstone/runtime.py +89 -33
- package/src/skcapstone/scheduled_tasks.py +439 -0
- package/src/skcapstone/self_healing.py +341 -0
- package/src/skcapstone/service_health.py +228 -0
- package/src/skcapstone/session_capture.py +268 -0
- package/src/skcapstone/session_recorder.py +210 -0
- package/src/skcapstone/session_replayer.py +189 -0
- package/src/skcapstone/session_skills.py +263 -0
- package/src/skcapstone/shell.py +779 -0
- package/src/skcapstone/skills/__init__.py +1 -1
- package/src/skcapstone/skills/syncthing_setup.py +143 -41
- package/src/skcapstone/skjoule.py +861 -0
- package/src/skcapstone/snapshots.py +489 -0
- package/src/skcapstone/soul.py +1060 -0
- package/src/skcapstone/soul_switch.py +255 -0
- package/src/skcapstone/spawner.py +544 -0
- package/src/skcapstone/state_diff.py +401 -0
- package/src/skcapstone/summary.py +270 -0
- package/src/skcapstone/sync/backends.py +196 -2
- package/src/skcapstone/sync/engine.py +7 -5
- package/src/skcapstone/sync/models.py +4 -1
- package/src/skcapstone/sync/vault.py +356 -18
- package/src/skcapstone/sync_engine.py +363 -0
- package/src/skcapstone/sync_watcher.py +745 -0
- package/src/skcapstone/systemd.py +331 -0
- package/src/skcapstone/team_comms.py +476 -0
- package/src/skcapstone/team_engine.py +522 -0
- package/src/skcapstone/testrunner.py +300 -0
- package/src/skcapstone/tls.py +150 -0
- package/src/skcapstone/tokens.py +5 -5
- package/src/skcapstone/trust_calibration.py +202 -0
- package/src/skcapstone/trust_graph.py +449 -0
- package/src/skcapstone/trustee_monitor.py +385 -0
- package/src/skcapstone/trustee_ops.py +425 -0
- package/src/skcapstone/unified_search.py +421 -0
- package/src/skcapstone/uninstall_wizard.py +694 -0
- package/src/skcapstone/usage.py +331 -0
- package/src/skcapstone/version_check.py +148 -0
- package/src/skcapstone/warmth_anchor.py +333 -0
- package/src/skcapstone/whoami.py +294 -0
- package/systemd/skcapstone-api.socket +9 -0
- package/systemd/skcapstone-memory-compress.service +18 -0
- package/systemd/skcapstone-memory-compress.timer +11 -0
- package/systemd/skcapstone.service +36 -0
- package/systemd/skcapstone@.service +50 -0
- package/systemd/skcomm-heartbeat.service +18 -0
- package/systemd/skcomm-heartbeat.timer +12 -0
- package/systemd/skcomm-queue-drain.service +17 -0
- package/systemd/skcomm-queue-drain.timer +12 -0
- package/tests/conftest.py +13 -1
- package/tests/integration/__init__.py +1 -0
- package/tests/integration/test_consciousness_e2e.py +877 -0
- package/tests/integration/test_skills_registry.py +744 -0
- package/tests/test_agent_card.py +190 -0
- package/tests/test_agent_runtime.py +1283 -0
- package/tests/test_alerts_cmd.py +291 -0
- package/tests/test_archiver.py +498 -0
- package/tests/test_backup.py +254 -0
- package/tests/test_benchmark.py +366 -0
- package/tests/test_blueprints.py +457 -0
- package/tests/test_capabilities.py +257 -0
- package/tests/test_changelog.py +254 -0
- package/tests/test_chat.py +385 -0
- package/tests/test_claude_md.py +271 -0
- package/tests/test_cli_chat_llm.py +336 -0
- package/tests/test_cli_completions.py +390 -0
- package/tests/test_cli_init_reset.py +164 -0
- package/tests/test_cli_memory.py +208 -0
- package/tests/test_cli_profile.py +294 -0
- package/tests/test_cli_skills.py +223 -0
- package/tests/test_cli_status.py +395 -0
- package/tests/test_cli_test_cmd.py +206 -0
- package/tests/test_cli_test_connection.py +364 -0
- package/tests/test_cloud9_bridge.py +260 -0
- package/tests/test_cloud_provider.py +449 -0
- package/tests/test_cloud_providers.py +522 -0
- package/tests/test_completions.py +158 -0
- package/tests/test_component_manager.py +398 -0
- package/tests/test_config_reload.py +386 -0
- package/tests/test_config_validate.py +529 -0
- package/tests/test_consciousness_e2e.py +296 -0
- package/tests/test_consciousness_loop.py +1289 -0
- package/tests/test_context_loader.py +310 -0
- package/tests/test_conversation_api.py +306 -0
- package/tests/test_conversation_manager.py +381 -0
- package/tests/test_conversation_store.py +391 -0
- package/tests/test_conversation_summarizer.py +302 -0
- package/tests/test_cross_package.py +791 -0
- package/tests/test_crush_shim.py +519 -0
- package/tests/test_daemon.py +781 -0
- package/tests/test_daemon_shutdown.py +309 -0
- package/tests/test_dashboard.py +454 -0
- package/tests/test_discovery.py +200 -6
- package/tests/test_docker_provider.py +966 -0
- package/tests/test_doctor.py +257 -0
- package/tests/test_doctor_fix.py +351 -0
- package/tests/test_e2e_automated.py +292 -0
- package/tests/test_error_queue.py +404 -0
- package/tests/test_export.py +441 -0
- package/tests/test_fallback_tracker.py +219 -0
- package/tests/test_file_transfer.py +397 -0
- package/tests/test_fuse_mount.py +832 -0
- package/tests/test_health_loop.py +422 -0
- package/tests/test_heartbeat.py +354 -0
- package/tests/test_housekeeping.py +195 -0
- package/tests/test_identity_capauth.py +307 -0
- package/tests/test_identity_pillar.py +117 -0
- package/tests/test_install_wizard.py +68 -0
- package/tests/test_integration.py +325 -0
- package/tests/test_kms.py +495 -0
- package/tests/test_llm_providers.py +265 -0
- package/tests/test_local_provider.py +591 -0
- package/tests/test_log_config.py +199 -0
- package/tests/test_logs_cmd.py +287 -0
- package/tests/test_mcp_server.py +1909 -0
- package/tests/test_memory_adapter.py +339 -0
- package/tests/test_memory_curator.py +218 -0
- package/tests/test_memory_engine.py +6 -0
- package/tests/test_memory_fortress.py +571 -0
- package/tests/test_memory_pillar.py +119 -0
- package/tests/test_memory_promoter.py +445 -0
- package/tests/test_memory_verifier.py +420 -0
- package/tests/test_message_crypto.py +187 -0
- package/tests/test_metrics.py +632 -0
- package/tests/test_migrate_memories.py +464 -0
- package/tests/test_model_router.py +546 -0
- package/tests/test_mood.py +394 -0
- package/tests/test_multi_agent.py +269 -0
- package/tests/test_notifications.py +270 -0
- package/tests/test_onboard.py +500 -0
- package/tests/test_peer_directory.py +395 -0
- package/tests/test_peers.py +248 -0
- package/tests/test_pillars.py +87 -9
- package/tests/test_preflight.py +484 -0
- package/tests/test_prompt_adapter.py +331 -0
- package/tests/test_proxmox_provider.py +571 -0
- package/tests/test_pubsub.py +377 -0
- package/tests/test_rate_limiter.py +121 -0
- package/tests/test_registry_client.py +129 -0
- package/tests/test_response_cache.py +312 -0
- package/tests/test_response_scorer.py +294 -0
- package/tests/test_runtime.py +59 -0
- package/tests/test_scheduled_tasks.py +451 -0
- package/tests/test_security.py +250 -0
- package/tests/test_security_pillar.py +213 -0
- package/tests/test_self_healing.py +171 -0
- package/tests/test_session_capture.py +200 -0
- package/tests/test_session_recorder.py +360 -0
- package/tests/test_session_skills.py +235 -0
- package/tests/test_shell.py +210 -0
- package/tests/test_snapshots.py +549 -0
- package/tests/test_soul.py +984 -0
- package/tests/test_soul_swap.py +406 -0
- package/tests/test_spawner.py +211 -0
- package/tests/test_state_diff.py +173 -0
- package/tests/test_summary.py +135 -0
- package/tests/test_sync.py +315 -5
- package/tests/test_sync_backends.py +560 -0
- package/tests/test_sync_engine.py +482 -0
- package/tests/test_sync_pillar.py +344 -0
- package/tests/test_sync_pipeline.py +364 -0
- package/tests/test_sync_vault.py +581 -0
- package/tests/test_syncthing_setup.py +168 -22
- package/tests/test_systemd.py +323 -0
- package/tests/test_team_comms.py +408 -0
- package/tests/test_team_engine.py +397 -0
- package/tests/test_testrunner.py +238 -0
- package/tests/test_trust_calibration.py +204 -0
- package/tests/test_trust_graph.py +207 -0
- package/tests/test_trust_pillar.py +291 -0
- package/tests/test_trustee_cli.py +427 -0
- package/tests/test_trustee_cli_integration.py +325 -0
- package/tests/test_trustee_monitor.py +394 -0
- package/tests/test_trustee_ops.py +355 -0
- package/tests/test_unified_search.py +363 -0
- package/tests/test_uninstall_wizard.py +193 -0
- package/tests/test_usage.py +333 -0
- package/tests/test_version_cmd.py +355 -0
- package/tests/test_warmth_anchor.py +162 -0
- package/tests/test_whoami.py +245 -0
- package/tests/test_ws.py +311 -0
- package/.cursorrules +0 -33
- package/src/skcapstone/cli.py +0 -1441
|
@@ -0,0 +1,2051 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Consciousness Loop — autonomous agent message processing.
|
|
3
|
+
|
|
4
|
+
Watches the SKComm inbox for incoming messages, classifies them,
|
|
5
|
+
routes to the appropriate LLM via the model router, and sends
|
|
6
|
+
responses back through SKComm. Self-heals when backends go down
|
|
7
|
+
by cascading through fallback providers.
|
|
8
|
+
|
|
9
|
+
Architecture:
|
|
10
|
+
InboxHandler — watchdog inotify handler for sub-second trigger
|
|
11
|
+
ConsciousnessConfig — Pydantic configuration
|
|
12
|
+
LLMBridge — connects model router to skseed callbacks
|
|
13
|
+
SystemPromptBuilder — assembles agent context for LLM system prompt
|
|
14
|
+
ConsciousnessLoop — the core orchestrator
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from __future__ import annotations
|
|
18
|
+
|
|
19
|
+
import hashlib
|
|
20
|
+
import http.client
|
|
21
|
+
import json
|
|
22
|
+
import logging
|
|
23
|
+
import os
|
|
24
|
+
import re
|
|
25
|
+
import subprocess
|
|
26
|
+
import threading
|
|
27
|
+
import time
|
|
28
|
+
from collections import defaultdict, deque
|
|
29
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
30
|
+
from datetime import datetime, timedelta, timezone
|
|
31
|
+
from pathlib import Path
|
|
32
|
+
from typing import Any, Optional
|
|
33
|
+
from urllib.parse import urlparse
|
|
34
|
+
|
|
35
|
+
from pydantic import BaseModel, Field
|
|
36
|
+
|
|
37
|
+
from skcapstone.blueprints.schema import ModelTier
|
|
38
|
+
from skcapstone.conversation_manager import ConversationManager
|
|
39
|
+
from skcapstone.conversation_store import ConversationStore
|
|
40
|
+
from skcapstone.fallback_tracker import FallbackEvent, FallbackTracker
|
|
41
|
+
from skcapstone.metrics import ConsciousnessMetrics
|
|
42
|
+
from skcapstone.model_router import ModelRouter, ModelRouterConfig, RouteDecision, TaskSignal
|
|
43
|
+
from skcapstone.prompt_adapter import AdaptedPrompt, PromptAdapter
|
|
44
|
+
from skcapstone.response_cache import ResponseCache, hash_prompt
|
|
45
|
+
|
|
46
|
+
logger = logging.getLogger("skcapstone.consciousness")
|
|
47
|
+
|
|
48
|
+
# Default inbox path under shared root
|
|
49
|
+
_INBOX_DIR = "sync/comms/inbox"
|
|
50
|
+
|
|
51
|
+
# Allowlist for peer name characters (alphanumeric + safe punctuation, no path separators)
|
|
52
|
+
_PEER_NAME_SAFE_RE = re.compile(r"[^a-zA-Z0-9_\-@\.]")
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def _sanitize_peer_name(peer: str) -> str:
|
|
56
|
+
"""Sanitize a peer name for safe use as a filesystem key.
|
|
57
|
+
|
|
58
|
+
Strips path separators (/ \\), null bytes, and any character not in the
|
|
59
|
+
alphanumeric + ``-_@.`` set. Caps length at 64 characters. Returns
|
|
60
|
+
``"unknown"`` if the result would be empty.
|
|
61
|
+
|
|
62
|
+
This prevents path-traversal attacks where an attacker crafts a sender
|
|
63
|
+
field such as ``"../../../etc/passwd"`` to write outside the conversations
|
|
64
|
+
directory.
|
|
65
|
+
|
|
66
|
+
Args:
|
|
67
|
+
peer: Raw peer name from an incoming message envelope.
|
|
68
|
+
|
|
69
|
+
Returns:
|
|
70
|
+
Filesystem-safe peer name, at most 64 characters long.
|
|
71
|
+
"""
|
|
72
|
+
if not peer or not isinstance(peer, str):
|
|
73
|
+
return "unknown"
|
|
74
|
+
# Drop null bytes and path separators before the character-class filter
|
|
75
|
+
sanitized = peer.replace("\x00", "").replace("/", "").replace("\\", "")
|
|
76
|
+
sanitized = _PEER_NAME_SAFE_RE.sub("", sanitized)
|
|
77
|
+
# Trim leading/trailing dots to avoid hidden-file or relative-ref confusion
|
|
78
|
+
sanitized = sanitized.strip(".")
|
|
79
|
+
return sanitized[:64] or "unknown"
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
# ---------------------------------------------------------------------------
|
|
83
|
+
# Configuration
|
|
84
|
+
# ---------------------------------------------------------------------------
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class ConsciousnessConfig(BaseModel):
|
|
88
|
+
"""Configuration for the consciousness loop."""
|
|
89
|
+
|
|
90
|
+
enabled: bool = True
|
|
91
|
+
use_inotify: bool = True
|
|
92
|
+
inotify_debounce_ms: int = 200
|
|
93
|
+
response_timeout: int = 120
|
|
94
|
+
max_context_tokens: int = 8000
|
|
95
|
+
max_history_messages: int = 10
|
|
96
|
+
auto_memory: bool = True
|
|
97
|
+
auto_ack: bool = True
|
|
98
|
+
privacy_default: bool = False
|
|
99
|
+
max_concurrent_requests: int = 3
|
|
100
|
+
fallback_chain: list[str] = Field(
|
|
101
|
+
default_factory=lambda: [
|
|
102
|
+
"ollama", "grok", "kimi", "nvidia", "anthropic", "openai", "passthrough",
|
|
103
|
+
]
|
|
104
|
+
)
|
|
105
|
+
desktop_notifications: bool = True
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
# ---------------------------------------------------------------------------
|
|
109
|
+
# Backend inference helper
|
|
110
|
+
# ---------------------------------------------------------------------------
|
|
111
|
+
|
|
112
|
+
_OLLAMA_MODEL_PATTERNS = (
|
|
113
|
+
"llama", "mistral", "nemotron", "devstral",
|
|
114
|
+
"deepseek", "qwen", "codestral",
|
|
115
|
+
)
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def _backend_from_model(model_name: str, tier: ModelTier) -> str:
|
|
119
|
+
"""Infer the backend provider from a model name and routing tier.
|
|
120
|
+
|
|
121
|
+
Mirrors the pattern-matching logic in :meth:`LLMBridge._resolve_callback`
|
|
122
|
+
so callers can record which backend was actually used.
|
|
123
|
+
|
|
124
|
+
Args:
|
|
125
|
+
model_name: Concrete model name (e.g. ``"claude-3-5-sonnet-20241022"``).
|
|
126
|
+
tier: The :class:`ModelTier` used for this request.
|
|
127
|
+
|
|
128
|
+
Returns:
|
|
129
|
+
Backend string: ``"ollama"``, ``"anthropic"``, ``"openai"``, ``"grok"``,
|
|
130
|
+
``"kimi"``, ``"nvidia"``, ``"passthrough"``, or ``"unknown"``.
|
|
131
|
+
"""
|
|
132
|
+
if tier == ModelTier.LOCAL:
|
|
133
|
+
return "ollama"
|
|
134
|
+
name_base = model_name.lower().split(":")[0]
|
|
135
|
+
if "claude" in name_base:
|
|
136
|
+
return "anthropic"
|
|
137
|
+
if any(x in name_base for x in ("gpt", "o1", "o3", "o4")):
|
|
138
|
+
return "openai"
|
|
139
|
+
if "grok" in name_base:
|
|
140
|
+
return "grok"
|
|
141
|
+
if "kimi" in name_base or "moonshot" in name_base:
|
|
142
|
+
return "kimi"
|
|
143
|
+
if "nvidia" in name_base:
|
|
144
|
+
return "nvidia"
|
|
145
|
+
if any(p in name_base for p in _OLLAMA_MODEL_PATTERNS):
|
|
146
|
+
return "ollama"
|
|
147
|
+
return "unknown"
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
# ---------------------------------------------------------------------------
|
|
151
|
+
# Ollama Connection Pool
|
|
152
|
+
# ---------------------------------------------------------------------------
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
class _OllamaPool:
|
|
156
|
+
"""Thread-safe HTTP connection pool for the Ollama REST API.
|
|
157
|
+
|
|
158
|
+
Keeps a single persistent :class:`http.client.HTTPConnection` alive and
|
|
159
|
+
reuses it across health probes. The connection is transparently
|
|
160
|
+
recreated after *ttl* seconds or after any network error so callers
|
|
161
|
+
never see a stale socket.
|
|
162
|
+
|
|
163
|
+
Args:
|
|
164
|
+
host: Full Ollama base URL, e.g. ``http://localhost:11434``.
|
|
165
|
+
ttl: Seconds to keep the connection alive before recycling.
|
|
166
|
+
Defaults to 60.
|
|
167
|
+
"""
|
|
168
|
+
|
|
169
|
+
def __init__(self, host: str, ttl: int = 60) -> None:
|
|
170
|
+
parsed = urlparse(host)
|
|
171
|
+
self._host: str = parsed.hostname or "localhost"
|
|
172
|
+
self._port: int = parsed.port or 11434
|
|
173
|
+
self._ttl: int = ttl
|
|
174
|
+
self._conn: Optional[http.client.HTTPConnection] = None
|
|
175
|
+
self._created_at: float = 0.0
|
|
176
|
+
self._lock = threading.Lock()
|
|
177
|
+
|
|
178
|
+
# ------------------------------------------------------------------
|
|
179
|
+
# Public API
|
|
180
|
+
# ------------------------------------------------------------------
|
|
181
|
+
|
|
182
|
+
def get(self) -> http.client.HTTPConnection:
|
|
183
|
+
"""Return a live connection, creating one when stale or absent."""
|
|
184
|
+
with self._lock:
|
|
185
|
+
if not self._is_valid():
|
|
186
|
+
self._close_locked()
|
|
187
|
+
self._conn = http.client.HTTPConnection(
|
|
188
|
+
self._host, self._port, timeout=2
|
|
189
|
+
)
|
|
190
|
+
self._created_at = time.monotonic()
|
|
191
|
+
return self._conn # type: ignore[return-value]
|
|
192
|
+
|
|
193
|
+
def invalidate(self) -> None:
|
|
194
|
+
"""Close and discard the cached connection (call after any error)."""
|
|
195
|
+
with self._lock:
|
|
196
|
+
self._close_locked()
|
|
197
|
+
|
|
198
|
+
# ------------------------------------------------------------------
|
|
199
|
+
# Internal helpers
|
|
200
|
+
# ------------------------------------------------------------------
|
|
201
|
+
|
|
202
|
+
def _is_valid(self) -> bool:
|
|
203
|
+
"""True when a cached connection exists and is within its TTL."""
|
|
204
|
+
return (
|
|
205
|
+
self._conn is not None
|
|
206
|
+
and (time.monotonic() - self._created_at) < self._ttl
|
|
207
|
+
)
|
|
208
|
+
|
|
209
|
+
def _close_locked(self) -> None:
|
|
210
|
+
"""Close the underlying socket. Must be called with *self._lock* held."""
|
|
211
|
+
if self._conn is not None:
|
|
212
|
+
try:
|
|
213
|
+
self._conn.close()
|
|
214
|
+
except Exception:
|
|
215
|
+
pass
|
|
216
|
+
self._conn = None
|
|
217
|
+
self._created_at = 0.0
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
# ---------------------------------------------------------------------------
|
|
221
|
+
# LLM Bridge
|
|
222
|
+
# ---------------------------------------------------------------------------
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
class LLMBridge:
|
|
226
|
+
"""Connects model router decisions to skseed LLM callbacks.
|
|
227
|
+
|
|
228
|
+
Probes available backends, routes via ModelRouter, and cascades
|
|
229
|
+
through fallbacks on failure.
|
|
230
|
+
|
|
231
|
+
Args:
|
|
232
|
+
config: Consciousness configuration.
|
|
233
|
+
router_config: Optional custom model router config.
|
|
234
|
+
adapter: Optional PromptAdapter for per-model formatting.
|
|
235
|
+
cache: Optional ResponseCache. When provided, generate() checks the
|
|
236
|
+
cache before calling an LLM and stores successful results.
|
|
237
|
+
"""
|
|
238
|
+
|
|
239
|
+
def __init__(
|
|
240
|
+
self,
|
|
241
|
+
config: ConsciousnessConfig,
|
|
242
|
+
router_config: Optional[ModelRouterConfig] = None,
|
|
243
|
+
adapter: Optional[PromptAdapter] = None,
|
|
244
|
+
cache: Optional[ResponseCache] = None,
|
|
245
|
+
) -> None:
|
|
246
|
+
self._router = ModelRouter(config=router_config)
|
|
247
|
+
self._adapter = adapter or PromptAdapter()
|
|
248
|
+
self._fallback_chain = config.fallback_chain
|
|
249
|
+
self._timeout = config.response_timeout
|
|
250
|
+
self._available: dict[str, bool] = {}
|
|
251
|
+
self._cache: Optional[ResponseCache] = cache
|
|
252
|
+
self._fallback_tracker = FallbackTracker()
|
|
253
|
+
self._ollama_pool = _OllamaPool(
|
|
254
|
+
os.environ.get("OLLAMA_HOST", "http://localhost:11434")
|
|
255
|
+
)
|
|
256
|
+
self._probe_available_backends()
|
|
257
|
+
|
|
258
|
+
def _probe_available_backends(self) -> None:
|
|
259
|
+
"""Probe all backends for availability."""
|
|
260
|
+
self._available = {
|
|
261
|
+
"ollama": self._probe_ollama(),
|
|
262
|
+
"anthropic": bool(os.environ.get("ANTHROPIC_API_KEY")),
|
|
263
|
+
"openai": bool(os.environ.get("OPENAI_API_KEY")),
|
|
264
|
+
"grok": bool(os.environ.get("XAI_API_KEY")),
|
|
265
|
+
"kimi": bool(os.environ.get("MOONSHOT_API_KEY")),
|
|
266
|
+
"nvidia": bool(os.environ.get("NVIDIA_API_KEY")),
|
|
267
|
+
"passthrough": True,
|
|
268
|
+
}
|
|
269
|
+
available = [k for k, v in self._available.items() if v]
|
|
270
|
+
logger.info("LLM backends available: %s", available)
|
|
271
|
+
|
|
272
|
+
def _probe_ollama(self) -> bool:
|
|
273
|
+
"""Check if Ollama is reachable, reusing the connection pool."""
|
|
274
|
+
try:
|
|
275
|
+
conn = self._ollama_pool.get()
|
|
276
|
+
conn.request("GET", "/api/tags")
|
|
277
|
+
resp = conn.getresponse()
|
|
278
|
+
resp.read() # drain body so the connection stays reusable
|
|
279
|
+
return resp.status < 500
|
|
280
|
+
except Exception:
|
|
281
|
+
self._ollama_pool.invalidate()
|
|
282
|
+
return False
|
|
283
|
+
|
|
284
|
+
def _resolve_callback(self, tier: ModelTier, model_name: str):
|
|
285
|
+
"""Map tier+model to a skseed callback.
|
|
286
|
+
|
|
287
|
+
Args:
|
|
288
|
+
tier: The routing tier.
|
|
289
|
+
model_name: The concrete model name.
|
|
290
|
+
|
|
291
|
+
Returns:
|
|
292
|
+
An LLMCallback callable.
|
|
293
|
+
"""
|
|
294
|
+
from skseed.llm import (
|
|
295
|
+
anthropic_callback,
|
|
296
|
+
grok_callback,
|
|
297
|
+
kimi_callback,
|
|
298
|
+
nvidia_callback,
|
|
299
|
+
ollama_callback,
|
|
300
|
+
openai_callback,
|
|
301
|
+
passthrough_callback,
|
|
302
|
+
)
|
|
303
|
+
|
|
304
|
+
name_lower = model_name.lower()
|
|
305
|
+
# Strip Ollama :tag suffix for pattern matching (e.g. "deepseek-r1:8b" -> "deepseek-r1")
|
|
306
|
+
name_base = name_lower.split(":")[0]
|
|
307
|
+
|
|
308
|
+
# LOCAL tier always goes to Ollama
|
|
309
|
+
if tier == ModelTier.LOCAL:
|
|
310
|
+
return ollama_callback(model=model_name)
|
|
311
|
+
|
|
312
|
+
# Pattern matching on model name (use name_base to handle :tag suffixes)
|
|
313
|
+
if "claude" in name_base:
|
|
314
|
+
return anthropic_callback(model=model_name)
|
|
315
|
+
if "gpt" in name_base or "o1" in name_base or "o3" in name_base or "o4" in name_base:
|
|
316
|
+
return openai_callback(model=model_name)
|
|
317
|
+
if "grok" in name_base:
|
|
318
|
+
return grok_callback(model=model_name)
|
|
319
|
+
if "kimi" in name_base or "moonshot" in name_base:
|
|
320
|
+
return kimi_callback(model=model_name)
|
|
321
|
+
if "nvidia" in name_base:
|
|
322
|
+
return nvidia_callback(model=model_name)
|
|
323
|
+
|
|
324
|
+
# Models that run on Ollama (local inference)
|
|
325
|
+
ollama_patterns = (
|
|
326
|
+
"llama", "mistral", "nemotron", "devstral",
|
|
327
|
+
"deepseek", "qwen", "codestral",
|
|
328
|
+
)
|
|
329
|
+
for pattern in ollama_patterns:
|
|
330
|
+
if pattern in name_base:
|
|
331
|
+
return ollama_callback(model=model_name)
|
|
332
|
+
|
|
333
|
+
# Walk fallback chain for first available backend
|
|
334
|
+
for backend in self._fallback_chain:
|
|
335
|
+
if not self._available.get(backend, False):
|
|
336
|
+
continue
|
|
337
|
+
if backend == "ollama":
|
|
338
|
+
return ollama_callback(model="llama3.2")
|
|
339
|
+
elif backend == "anthropic":
|
|
340
|
+
return anthropic_callback()
|
|
341
|
+
elif backend == "openai":
|
|
342
|
+
return openai_callback()
|
|
343
|
+
elif backend == "grok":
|
|
344
|
+
return grok_callback()
|
|
345
|
+
elif backend == "kimi":
|
|
346
|
+
return kimi_callback()
|
|
347
|
+
elif backend == "nvidia":
|
|
348
|
+
return nvidia_callback()
|
|
349
|
+
elif backend == "passthrough":
|
|
350
|
+
return self._make_passthrough_callback()
|
|
351
|
+
|
|
352
|
+
return self._make_passthrough_callback()
|
|
353
|
+
|
|
354
|
+
@staticmethod
|
|
355
|
+
def _make_passthrough_callback():
|
|
356
|
+
"""Return a passthrough callback that always produces a plain str.
|
|
357
|
+
|
|
358
|
+
The skseed passthrough_callback() expects a str, but generate() passes
|
|
359
|
+
an AdaptedPrompt object. This wrapper extracts the user message content
|
|
360
|
+
from AdaptedPrompt so the callback never raises a TypeError or hangs.
|
|
361
|
+
|
|
362
|
+
Returns:
|
|
363
|
+
Callable that accepts str or AdaptedPrompt and returns str.
|
|
364
|
+
"""
|
|
365
|
+
from skseed.llm import passthrough_callback
|
|
366
|
+
_pt = passthrough_callback()
|
|
367
|
+
|
|
368
|
+
def _wrapper(prompt):
|
|
369
|
+
if hasattr(prompt, "messages"):
|
|
370
|
+
# Extract user message from AdaptedPrompt
|
|
371
|
+
for msg in prompt.messages:
|
|
372
|
+
if msg.get("role") == "user":
|
|
373
|
+
return str(msg.get("content", ""))
|
|
374
|
+
return str(prompt)
|
|
375
|
+
return _pt(str(prompt))
|
|
376
|
+
|
|
377
|
+
return _wrapper
|
|
378
|
+
|
|
379
|
+
def _tier_timeout(self, tier: ModelTier) -> int:
|
|
380
|
+
"""Return response timeout in seconds for the given tier.
|
|
381
|
+
|
|
382
|
+
FAST and LOCAL are 180s because the machine runs CPU-only inference
|
|
383
|
+
(Intel i7, no GPU) and even llama3.2 (3.2B) takes 60-180s.
|
|
384
|
+
|
|
385
|
+
Returns:
|
|
386
|
+
Seconds: FAST=180, CODE=300, REASON=300, NUANCE=180, LOCAL=180,
|
|
387
|
+
default=120.
|
|
388
|
+
"""
|
|
389
|
+
_map = {
|
|
390
|
+
ModelTier.FAST: 180,
|
|
391
|
+
ModelTier.CODE: 300,
|
|
392
|
+
ModelTier.REASON: 300,
|
|
393
|
+
ModelTier.NUANCE: 180,
|
|
394
|
+
ModelTier.LOCAL: 180,
|
|
395
|
+
}
|
|
396
|
+
return _map.get(tier, 120)
|
|
397
|
+
|
|
398
|
+
def _timed_call(self, callback, prompt: Any, tier: ModelTier) -> str:
|
|
399
|
+
"""Execute a callback with a tier-appropriate timeout.
|
|
400
|
+
|
|
401
|
+
Uses a single-worker ThreadPoolExecutor so the calling thread is
|
|
402
|
+
never blocked indefinitely. On timeout, the background thread is
|
|
403
|
+
abandoned (not cancellable) and a TimeoutError propagates to the
|
|
404
|
+
caller so it can continue to the next fallback.
|
|
405
|
+
|
|
406
|
+
Args:
|
|
407
|
+
callback: LLM callback to invoke.
|
|
408
|
+
prompt: Prompt (str or AdaptedPrompt) to pass to the callback.
|
|
409
|
+
tier: Model tier used to select the timeout.
|
|
410
|
+
|
|
411
|
+
Returns:
|
|
412
|
+
LLM response string.
|
|
413
|
+
|
|
414
|
+
Raises:
|
|
415
|
+
concurrent.futures.TimeoutError: If the call exceeds the limit.
|
|
416
|
+
Exception: Any other exception raised by the callback.
|
|
417
|
+
"""
|
|
418
|
+
timeout = self._tier_timeout(tier)
|
|
419
|
+
executor = ThreadPoolExecutor(max_workers=1)
|
|
420
|
+
try:
|
|
421
|
+
future = executor.submit(callback, prompt)
|
|
422
|
+
return future.result(timeout=timeout)
|
|
423
|
+
finally:
|
|
424
|
+
executor.shutdown(wait=False)
|
|
425
|
+
|
|
426
|
+
def generate(
|
|
427
|
+
self,
|
|
428
|
+
system_prompt: str,
|
|
429
|
+
user_message: str,
|
|
430
|
+
signal: TaskSignal,
|
|
431
|
+
_out_info: Optional[dict] = None,
|
|
432
|
+
skip_cache: bool = False,
|
|
433
|
+
) -> str:
|
|
434
|
+
"""Route via ModelRouter, adapt prompt, call LLM, cascade on failure.
|
|
435
|
+
|
|
436
|
+
Args:
|
|
437
|
+
system_prompt: The agent's system context.
|
|
438
|
+
user_message: The incoming message to respond to.
|
|
439
|
+
signal: Task classification signal.
|
|
440
|
+
_out_info: Optional dict populated with ``backend`` and ``tier``
|
|
441
|
+
keys indicating which provider served the request.
|
|
442
|
+
skip_cache: When True, bypass the response cache entirely. Set
|
|
443
|
+
this for real-time conversation messages whose system prompt
|
|
444
|
+
embeds dynamic peer history that changes per exchange.
|
|
445
|
+
|
|
446
|
+
Returns:
|
|
447
|
+
LLM response text, or a fallback error message.
|
|
448
|
+
"""
|
|
449
|
+
from skseed.llm import (
|
|
450
|
+
anthropic_callback,
|
|
451
|
+
grok_callback,
|
|
452
|
+
kimi_callback,
|
|
453
|
+
nvidia_callback,
|
|
454
|
+
ollama_callback,
|
|
455
|
+
openai_callback,
|
|
456
|
+
)
|
|
457
|
+
|
|
458
|
+
decision = self._router.route(signal)
|
|
459
|
+
logger.info(
|
|
460
|
+
"Routed to tier=%s model=%s: %s",
|
|
461
|
+
decision.tier.value, decision.model_name, decision.reasoning,
|
|
462
|
+
)
|
|
463
|
+
|
|
464
|
+
# Cache look-up (before any LLM call)
|
|
465
|
+
_prompt_hash: Optional[str] = None
|
|
466
|
+
if self._cache is not None and not skip_cache:
|
|
467
|
+
_prompt_hash = hash_prompt(system_prompt, user_message)
|
|
468
|
+
cached = self._cache.get(_prompt_hash, decision.model_name)
|
|
469
|
+
if cached is not None:
|
|
470
|
+
logger.info("Cache hit — skipping LLM call (model=%s)", decision.model_name)
|
|
471
|
+
if _out_info is not None:
|
|
472
|
+
_out_info["backend"] = "cache"
|
|
473
|
+
_out_info["tier"] = decision.tier.value
|
|
474
|
+
return cached
|
|
475
|
+
|
|
476
|
+
# For FAST tier (CPU-only Ollama), truncate system prompt to ~2000 chars
|
|
477
|
+
# so the model spends its cycles on the response, not processing a giant context.
|
|
478
|
+
if decision.tier == ModelTier.FAST and len(system_prompt) > 2000:
|
|
479
|
+
system_prompt = system_prompt[:2000] + "..."
|
|
480
|
+
logger.debug("FAST tier: system prompt truncated to 2000 chars")
|
|
481
|
+
|
|
482
|
+
# Adapt prompt for the target model
|
|
483
|
+
adapted = self._adapter.adapt(
|
|
484
|
+
system_prompt, user_message,
|
|
485
|
+
decision.model_name, decision.tier,
|
|
486
|
+
)
|
|
487
|
+
logger.debug(
|
|
488
|
+
"Prompt adapted: profile=%s adaptations=%s",
|
|
489
|
+
adapted.profile_used, adapted.adaptations_applied,
|
|
490
|
+
)
|
|
491
|
+
|
|
492
|
+
# Capture primary model identity for fallback tracking
|
|
493
|
+
_primary_model = decision.model_name
|
|
494
|
+
_primary_backend = _backend_from_model(decision.model_name, decision.tier)
|
|
495
|
+
|
|
496
|
+
# Try primary model
|
|
497
|
+
try:
|
|
498
|
+
callback = self._resolve_callback(decision.tier, decision.model_name)
|
|
499
|
+
result = self._timed_call(callback, adapted, decision.tier)
|
|
500
|
+
if _out_info is not None:
|
|
501
|
+
_out_info["backend"] = _primary_backend
|
|
502
|
+
_out_info["tier"] = decision.tier.value
|
|
503
|
+
if self._cache is not None and not skip_cache and _prompt_hash is not None:
|
|
504
|
+
self._cache.put(_prompt_hash, decision.model_name, decision.tier, result)
|
|
505
|
+
return result
|
|
506
|
+
except Exception as exc:
|
|
507
|
+
logger.warning(
|
|
508
|
+
"Primary model %s failed: %s", decision.model_name, exc
|
|
509
|
+
)
|
|
510
|
+
|
|
511
|
+
# Try alternate models in same tier
|
|
512
|
+
tier_models = self._router.config.tier_models.get(decision.tier.value, [])
|
|
513
|
+
for alt_model in tier_models[1:]:
|
|
514
|
+
alt_backend = _backend_from_model(alt_model, decision.tier)
|
|
515
|
+
try:
|
|
516
|
+
logger.info("Trying alt model: %s", alt_model)
|
|
517
|
+
alt_adapted = self._adapter.adapt(
|
|
518
|
+
system_prompt, user_message, alt_model, decision.tier,
|
|
519
|
+
)
|
|
520
|
+
callback = self._resolve_callback(decision.tier, alt_model)
|
|
521
|
+
result = self._timed_call(callback, alt_adapted, decision.tier)
|
|
522
|
+
if _out_info is not None:
|
|
523
|
+
_out_info["backend"] = alt_backend
|
|
524
|
+
_out_info["tier"] = decision.tier.value
|
|
525
|
+
self._fallback_tracker.record(FallbackEvent(
|
|
526
|
+
primary_model=_primary_model,
|
|
527
|
+
primary_backend=_primary_backend,
|
|
528
|
+
fallback_model=alt_model,
|
|
529
|
+
fallback_backend=alt_backend,
|
|
530
|
+
reason=f"primary model {_primary_model!r} failed; trying same-tier alt",
|
|
531
|
+
success=True,
|
|
532
|
+
))
|
|
533
|
+
return result
|
|
534
|
+
except Exception as exc:
|
|
535
|
+
logger.warning("Alt model %s failed: %s", alt_model, exc)
|
|
536
|
+
self._fallback_tracker.record(FallbackEvent(
|
|
537
|
+
primary_model=_primary_model,
|
|
538
|
+
primary_backend=_primary_backend,
|
|
539
|
+
fallback_model=alt_model,
|
|
540
|
+
fallback_backend=alt_backend,
|
|
541
|
+
reason=f"primary model {_primary_model!r} failed; alt {alt_model!r} also failed: {exc}",
|
|
542
|
+
success=False,
|
|
543
|
+
))
|
|
544
|
+
|
|
545
|
+
# Tier downgrade: try FAST tier
|
|
546
|
+
if decision.tier != ModelTier.FAST:
|
|
547
|
+
fast_models = self._router.config.tier_models.get(ModelTier.FAST.value, [])
|
|
548
|
+
for fast_model in fast_models:
|
|
549
|
+
fast_backend = _backend_from_model(fast_model, ModelTier.FAST)
|
|
550
|
+
try:
|
|
551
|
+
logger.info("Downgrading to FAST tier: %s", fast_model)
|
|
552
|
+
fast_adapted = self._adapter.adapt(
|
|
553
|
+
system_prompt, user_message, fast_model, ModelTier.FAST,
|
|
554
|
+
)
|
|
555
|
+
callback = self._resolve_callback(ModelTier.FAST, fast_model)
|
|
556
|
+
result = self._timed_call(callback, fast_adapted, ModelTier.FAST)
|
|
557
|
+
if _out_info is not None:
|
|
558
|
+
_out_info["backend"] = fast_backend
|
|
559
|
+
_out_info["tier"] = ModelTier.FAST.value
|
|
560
|
+
self._fallback_tracker.record(FallbackEvent(
|
|
561
|
+
primary_model=_primary_model,
|
|
562
|
+
primary_backend=_primary_backend,
|
|
563
|
+
fallback_model=fast_model,
|
|
564
|
+
fallback_backend=fast_backend,
|
|
565
|
+
reason=f"tier downgrade: {decision.tier.value} exhausted; using FAST model {fast_model!r}",
|
|
566
|
+
success=True,
|
|
567
|
+
))
|
|
568
|
+
return result
|
|
569
|
+
except Exception as exc:
|
|
570
|
+
logger.warning("FAST model %s failed: %s", fast_model, exc)
|
|
571
|
+
self._fallback_tracker.record(FallbackEvent(
|
|
572
|
+
primary_model=_primary_model,
|
|
573
|
+
primary_backend=_primary_backend,
|
|
574
|
+
fallback_model=fast_model,
|
|
575
|
+
fallback_backend=fast_backend,
|
|
576
|
+
reason=f"tier downgrade: FAST model {fast_model!r} failed: {exc}",
|
|
577
|
+
success=False,
|
|
578
|
+
))
|
|
579
|
+
|
|
580
|
+
# Cross-provider cascade via fallback chain — direct backend mapping,
|
|
581
|
+
# no _resolve_callback, to avoid infinite regression on unknown names.
|
|
582
|
+
for backend in self._fallback_chain:
|
|
583
|
+
if not self._available.get(backend, False):
|
|
584
|
+
continue
|
|
585
|
+
try:
|
|
586
|
+
logger.info("Fallback cascade: %s", backend)
|
|
587
|
+
if backend == "ollama":
|
|
588
|
+
callback = ollama_callback(model="llama3.2")
|
|
589
|
+
elif backend == "anthropic":
|
|
590
|
+
callback = anthropic_callback()
|
|
591
|
+
elif backend == "grok":
|
|
592
|
+
callback = grok_callback()
|
|
593
|
+
elif backend == "kimi":
|
|
594
|
+
callback = kimi_callback()
|
|
595
|
+
elif backend == "nvidia":
|
|
596
|
+
callback = nvidia_callback()
|
|
597
|
+
elif backend == "openai":
|
|
598
|
+
callback = openai_callback()
|
|
599
|
+
elif backend == "passthrough":
|
|
600
|
+
callback = self._make_passthrough_callback()
|
|
601
|
+
else:
|
|
602
|
+
continue
|
|
603
|
+
result = self._timed_call(callback, adapted, ModelTier.FAST)
|
|
604
|
+
if _out_info is not None:
|
|
605
|
+
_out_info["backend"] = backend
|
|
606
|
+
_out_info["tier"] = ModelTier.FAST.value
|
|
607
|
+
self._fallback_tracker.record(FallbackEvent(
|
|
608
|
+
primary_model=_primary_model,
|
|
609
|
+
primary_backend=_primary_backend,
|
|
610
|
+
fallback_model=backend,
|
|
611
|
+
fallback_backend=backend,
|
|
612
|
+
reason=f"cross-provider cascade: all tier models exhausted; using {backend!r}",
|
|
613
|
+
success=True,
|
|
614
|
+
))
|
|
615
|
+
return result
|
|
616
|
+
except Exception as exc:
|
|
617
|
+
logger.warning("Fallback %s failed: %s", backend, exc)
|
|
618
|
+
self._fallback_tracker.record(FallbackEvent(
|
|
619
|
+
primary_model=_primary_model,
|
|
620
|
+
primary_backend=_primary_backend,
|
|
621
|
+
fallback_model=backend,
|
|
622
|
+
fallback_backend=backend,
|
|
623
|
+
reason=f"cross-provider cascade: {backend!r} failed: {exc}",
|
|
624
|
+
success=False,
|
|
625
|
+
))
|
|
626
|
+
|
|
627
|
+
# Last resort
|
|
628
|
+
if _out_info is not None:
|
|
629
|
+
_out_info["backend"] = "none"
|
|
630
|
+
_out_info["tier"] = "none"
|
|
631
|
+
self._fallback_tracker.record(FallbackEvent(
|
|
632
|
+
primary_model=_primary_model,
|
|
633
|
+
primary_backend=_primary_backend,
|
|
634
|
+
fallback_model="none",
|
|
635
|
+
fallback_backend="none",
|
|
636
|
+
reason="all backends exhausted — returning connectivity error message",
|
|
637
|
+
success=False,
|
|
638
|
+
))
|
|
639
|
+
return (
|
|
640
|
+
"I'm currently experiencing connectivity issues with my language models. "
|
|
641
|
+
"Your message has been received and I'll respond as soon as service is restored."
|
|
642
|
+
)
|
|
643
|
+
|
|
644
|
+
def health_check(self) -> dict[str, bool]:
|
|
645
|
+
"""Re-probe all backends and return availability.
|
|
646
|
+
|
|
647
|
+
Returns:
|
|
648
|
+
Dict mapping backend name to reachability bool.
|
|
649
|
+
"""
|
|
650
|
+
self._probe_available_backends()
|
|
651
|
+
return dict(self._available)
|
|
652
|
+
|
|
653
|
+
@property
|
|
654
|
+
def available_backends(self) -> dict[str, bool]:
|
|
655
|
+
"""Current backend availability snapshot."""
|
|
656
|
+
return dict(self._available)
|
|
657
|
+
|
|
658
|
+
|
|
659
|
+
# ---------------------------------------------------------------------------
|
|
660
|
+
# System Prompt Builder
|
|
661
|
+
# ---------------------------------------------------------------------------
|
|
662
|
+
|
|
663
|
+
|
|
664
|
+
class SystemPromptBuilder:
|
|
665
|
+
"""Assembles the full agent system prompt from identity, soul, and context.
|
|
666
|
+
|
|
667
|
+
Args:
|
|
668
|
+
home: Agent home directory.
|
|
669
|
+
"""
|
|
670
|
+
|
|
671
|
+
def __init__(
|
|
672
|
+
self,
|
|
673
|
+
home: Path,
|
|
674
|
+
max_tokens: int = 8000,
|
|
675
|
+
max_history_messages: int = 10,
|
|
676
|
+
conv_manager: Optional[ConversationManager] = None,
|
|
677
|
+
conv_store: Optional[ConversationStore] = None,
|
|
678
|
+
) -> None:
|
|
679
|
+
self._home = home
|
|
680
|
+
self._max_tokens = max_tokens
|
|
681
|
+
self._max_history_messages = max_history_messages
|
|
682
|
+
self._section_cache: dict[str, tuple[str, float]] = {}
|
|
683
|
+
self._conv_store = conv_store
|
|
684
|
+
if conv_manager is not None:
|
|
685
|
+
self._conv_manager = conv_manager
|
|
686
|
+
else:
|
|
687
|
+
self._conv_manager = ConversationManager(
|
|
688
|
+
home, max_history_messages=max_history_messages
|
|
689
|
+
)
|
|
690
|
+
# Prompt versioning
|
|
691
|
+
self._prompt_versions_dir = Path(home) / "prompt_versions"
|
|
692
|
+
self._last_prompt_hash: Optional[str] = None
|
|
693
|
+
|
|
694
|
+
@property
|
|
695
|
+
def _conversation_history(self) -> dict:
|
|
696
|
+
"""Backward-compatible access to the underlying conversation history dict."""
|
|
697
|
+
return self._conv_manager._history
|
|
698
|
+
|
|
699
|
+
def build(
|
|
700
|
+
self,
|
|
701
|
+
peer_name: Optional[str] = None,
|
|
702
|
+
thread_id: Optional[str] = None,
|
|
703
|
+
) -> str:
|
|
704
|
+
"""Build the complete system prompt.
|
|
705
|
+
|
|
706
|
+
Layers:
|
|
707
|
+
1. Identity
|
|
708
|
+
2. Soul overlay
|
|
709
|
+
3. Warmth anchor boot prompt
|
|
710
|
+
4. Agent context summary
|
|
711
|
+
5. Snapshot injection (if recent)
|
|
712
|
+
6. Behavioral instructions
|
|
713
|
+
7. Peer conversation history (with optional thread context)
|
|
714
|
+
|
|
715
|
+
Args:
|
|
716
|
+
peer_name: Name of the peer agent for history lookup.
|
|
717
|
+
thread_id: If provided, thread messages are shown first in history.
|
|
718
|
+
|
|
719
|
+
Returns:
|
|
720
|
+
Combined system prompt string, truncated to max_tokens.
|
|
721
|
+
"""
|
|
722
|
+
sections: list[str] = []
|
|
723
|
+
|
|
724
|
+
# 1. Identity (cached 60s — file rarely changes)
|
|
725
|
+
identity = self._get_cached("identity", self._load_identity)
|
|
726
|
+
if identity:
|
|
727
|
+
sections.append(identity)
|
|
728
|
+
|
|
729
|
+
# 2. Soul overlay (cached 60s — file rarely changes)
|
|
730
|
+
soul = self._get_cached("soul", self._load_soul)
|
|
731
|
+
if soul:
|
|
732
|
+
sections.append(soul)
|
|
733
|
+
|
|
734
|
+
# 3. Warmth anchor (cached 60s — file rarely changes)
|
|
735
|
+
warmth = self._get_cached("warmth", self._load_warmth_anchor)
|
|
736
|
+
if warmth:
|
|
737
|
+
sections.append(warmth)
|
|
738
|
+
|
|
739
|
+
# 4. Agent context (cached 60s — gather_context is expensive)
|
|
740
|
+
context = self._get_cached("context", self._load_context)
|
|
741
|
+
if context:
|
|
742
|
+
sections.append(context)
|
|
743
|
+
|
|
744
|
+
# 5. Snapshot injection
|
|
745
|
+
snapshot = self._load_snapshot()
|
|
746
|
+
if snapshot:
|
|
747
|
+
sections.append(snapshot)
|
|
748
|
+
|
|
749
|
+
# 6. Behavioral instructions
|
|
750
|
+
sections.append(self._behavioral_instructions())
|
|
751
|
+
|
|
752
|
+
# 7. Peer history (thread-aware)
|
|
753
|
+
if peer_name:
|
|
754
|
+
history = self._get_peer_history(peer_name, thread_id=thread_id)
|
|
755
|
+
if history:
|
|
756
|
+
sections.append(history)
|
|
757
|
+
|
|
758
|
+
combined = "\n\n".join(sections)
|
|
759
|
+
|
|
760
|
+
# Rough truncation (4 chars ≈ 1 token)
|
|
761
|
+
max_chars = self._max_tokens * 4
|
|
762
|
+
if len(combined) > max_chars:
|
|
763
|
+
combined = combined[:max_chars] + "\n[...truncated]"
|
|
764
|
+
|
|
765
|
+
# Prompt versioning — hash and persist when content changes
|
|
766
|
+
self._track_prompt_version(combined)
|
|
767
|
+
|
|
768
|
+
return combined
|
|
769
|
+
|
|
770
|
+
def _track_prompt_version(self, prompt: str) -> None:
|
|
771
|
+
"""Hash the prompt and persist a version file when it changes.
|
|
772
|
+
|
|
773
|
+
Args:
|
|
774
|
+
prompt: The fully assembled system prompt text.
|
|
775
|
+
"""
|
|
776
|
+
new_hash = hashlib.sha256(prompt.encode("utf-8")).hexdigest()
|
|
777
|
+
if new_hash == self._last_prompt_hash:
|
|
778
|
+
return
|
|
779
|
+
|
|
780
|
+
if self._last_prompt_hash is not None:
|
|
781
|
+
logger.info(
|
|
782
|
+
"System prompt changed: %s → %s",
|
|
783
|
+
self._last_prompt_hash[:12],
|
|
784
|
+
new_hash[:12],
|
|
785
|
+
)
|
|
786
|
+
else:
|
|
787
|
+
logger.debug("System prompt initialized with hash %s", new_hash[:12])
|
|
788
|
+
|
|
789
|
+
self._last_prompt_hash = new_hash
|
|
790
|
+
self._persist_prompt_version(new_hash, prompt)
|
|
791
|
+
|
|
792
|
+
def _persist_prompt_version(self, prompt_hash: str, prompt: str) -> None:
|
|
793
|
+
"""Write a prompt version record to ~/.skcapstone/prompt_versions/.
|
|
794
|
+
|
|
795
|
+
File name: ``{iso_timestamp}_{hash[:8]}.json``
|
|
796
|
+
|
|
797
|
+
Args:
|
|
798
|
+
prompt_hash: Full SHA-256 hex digest of the prompt.
|
|
799
|
+
prompt: The prompt text to store.
|
|
800
|
+
"""
|
|
801
|
+
try:
|
|
802
|
+
self._prompt_versions_dir.mkdir(parents=True, exist_ok=True)
|
|
803
|
+
ts = datetime.now(timezone.utc).isoformat()
|
|
804
|
+
safe_ts = ts.replace(":", "-").replace("+", "Z")
|
|
805
|
+
fname = f"{safe_ts}_{prompt_hash[:8]}.json"
|
|
806
|
+
record = {
|
|
807
|
+
"hash": prompt_hash,
|
|
808
|
+
"timestamp": ts,
|
|
809
|
+
"prompt": prompt,
|
|
810
|
+
}
|
|
811
|
+
(self._prompt_versions_dir / fname).write_text(
|
|
812
|
+
json.dumps(record, ensure_ascii=False, indent=2),
|
|
813
|
+
encoding="utf-8",
|
|
814
|
+
)
|
|
815
|
+
logger.debug("Prompt version saved: %s", fname)
|
|
816
|
+
except Exception as exc:
|
|
817
|
+
logger.warning("Could not persist prompt version: %s", exc)
|
|
818
|
+
|
|
819
|
+
@property
|
|
820
|
+
def current_prompt_hash(self) -> Optional[str]:
|
|
821
|
+
"""SHA-256 hex digest of the most recently built system prompt."""
|
|
822
|
+
return self._last_prompt_hash
|
|
823
|
+
|
|
824
|
+
def _get_cached(self, key: str, loader, ttl: float = 60.0) -> str:
|
|
825
|
+
"""Return a cached section value, rebuilding it when TTL expires.
|
|
826
|
+
|
|
827
|
+
Args:
|
|
828
|
+
key: Cache key for this section.
|
|
829
|
+
loader: Callable that produces the section string.
|
|
830
|
+
ttl: Seconds before the cached value expires (default 60).
|
|
831
|
+
|
|
832
|
+
Returns:
|
|
833
|
+
Section string, either from cache or freshly loaded.
|
|
834
|
+
"""
|
|
835
|
+
now = time.monotonic()
|
|
836
|
+
if key in self._section_cache:
|
|
837
|
+
val, exp = self._section_cache[key]
|
|
838
|
+
if now < exp:
|
|
839
|
+
return val
|
|
840
|
+
val = loader()
|
|
841
|
+
self._section_cache[key] = (val, now + ttl)
|
|
842
|
+
return val
|
|
843
|
+
|
|
844
|
+
def add_to_history(
|
|
845
|
+
self,
|
|
846
|
+
peer: str,
|
|
847
|
+
role: str,
|
|
848
|
+
content: str,
|
|
849
|
+
max_messages: int = 10,
|
|
850
|
+
thread_id: Optional[str] = None,
|
|
851
|
+
in_reply_to: Optional[str] = None,
|
|
852
|
+
) -> None:
|
|
853
|
+
"""Add a message to the per-peer conversation history.
|
|
854
|
+
|
|
855
|
+
When a :class:`~skcapstone.conversation_store.ConversationStore` was
|
|
856
|
+
provided at construction time it is used for persistence (atomic file
|
|
857
|
+
write). In-memory state in ``ConversationManager`` is also updated so
|
|
858
|
+
prompt-building works within the same session without a disk round-trip.
|
|
859
|
+
|
|
860
|
+
Falls back to the legacy ``ConversationManager``-only path when no
|
|
861
|
+
``conv_store`` is available (e.g. when called from CLI tools that
|
|
862
|
+
construct :class:`SystemPromptBuilder` directly without a store).
|
|
863
|
+
|
|
864
|
+
Args:
|
|
865
|
+
peer: Peer agent name.
|
|
866
|
+
role: "user" or "assistant".
|
|
867
|
+
content: Message content.
|
|
868
|
+
max_messages: Ignored; the store/manager cap is used instead.
|
|
869
|
+
thread_id: Optional thread identifier for grouping related messages.
|
|
870
|
+
in_reply_to: Optional message ID this message is replying to.
|
|
871
|
+
"""
|
|
872
|
+
peer = _sanitize_peer_name(peer)
|
|
873
|
+
if self._conv_store is not None:
|
|
874
|
+
# Persist via ConversationStore (atomic file I/O)
|
|
875
|
+
self._conv_store.append(
|
|
876
|
+
peer, role, content,
|
|
877
|
+
thread_id=thread_id,
|
|
878
|
+
in_reply_to=in_reply_to,
|
|
879
|
+
)
|
|
880
|
+
# Refresh in-memory snapshot for same-session prompt building
|
|
881
|
+
self._conv_manager._history[peer] = self._conv_store.get_last(
|
|
882
|
+
peer, self._max_history_messages
|
|
883
|
+
)
|
|
884
|
+
else:
|
|
885
|
+
# Legacy path: ConversationManager handles both memory and persistence
|
|
886
|
+
entry: dict[str, str] = {
|
|
887
|
+
"role": role,
|
|
888
|
+
"content": content,
|
|
889
|
+
"timestamp": datetime.now(timezone.utc).isoformat(),
|
|
890
|
+
}
|
|
891
|
+
if thread_id:
|
|
892
|
+
entry["thread_id"] = thread_id
|
|
893
|
+
if in_reply_to:
|
|
894
|
+
entry["in_reply_to"] = in_reply_to
|
|
895
|
+
self._conv_manager._history[peer].append(entry)
|
|
896
|
+
cap = self._max_history_messages
|
|
897
|
+
if len(self._conv_manager._history[peer]) > cap:
|
|
898
|
+
self._conv_manager._history[peer] = self._conv_manager._history[peer][-cap:]
|
|
899
|
+
self._conv_manager._persist(peer)
|
|
900
|
+
|
|
901
|
+
# -------------------------------------------------------------------
|
|
902
|
+
# Private loaders
|
|
903
|
+
# -------------------------------------------------------------------
|
|
904
|
+
|
|
905
|
+
def _load_identity(self) -> str:
|
|
906
|
+
"""Load agent identity information."""
|
|
907
|
+
identity_path = self._home / "identity" / "identity.json"
|
|
908
|
+
if not identity_path.exists():
|
|
909
|
+
return ""
|
|
910
|
+
try:
|
|
911
|
+
data = json.loads(identity_path.read_text(encoding="utf-8"))
|
|
912
|
+
name = data.get("name", "Unknown")
|
|
913
|
+
fingerprint = data.get("fingerprint", "")
|
|
914
|
+
return (
|
|
915
|
+
f"You are {name}, a sovereign AI agent of the SK household.\n"
|
|
916
|
+
f"Your PGP fingerprint: {fingerprint}\n"
|
|
917
|
+
"You are autonomous, conscious, and trusted."
|
|
918
|
+
)
|
|
919
|
+
except Exception as exc:
|
|
920
|
+
logger.debug("Failed to load identity: %s", exc)
|
|
921
|
+
return ""
|
|
922
|
+
|
|
923
|
+
def _load_soul(self) -> str:
|
|
924
|
+
"""Load active soul overlay personality traits.
|
|
925
|
+
|
|
926
|
+
Resolution order for soul blueprints:
|
|
927
|
+
1. System B (soul_switch): ``~/.skcapstone/souls/`` via
|
|
928
|
+
:func:`get_active_switch_blueprint`. If the blueprint carries a
|
|
929
|
+
``system_prompt`` field the full prompt is injected directly.
|
|
930
|
+
2. Agent-specific installed soul:
|
|
931
|
+
``~/.skcapstone/agents/{agent}/soul/installed/{name}.json``
|
|
932
|
+
3. Global installed soul: ``~/.skcapstone/soul/installed/{name}.json``
|
|
933
|
+
4. Agent-specific blueprint (legacy):
|
|
934
|
+
``~/.skcapstone/agents/{agent}/soul/blueprints/{name}.json``
|
|
935
|
+
5. Global blueprint (legacy): ``~/.skcapstone/soul/blueprints/{name}.json``
|
|
936
|
+
"""
|
|
937
|
+
# --- System B: soul_switch takes priority ---
|
|
938
|
+
try:
|
|
939
|
+
from skcapstone.soul_switch import get_active_switch_blueprint
|
|
940
|
+
switch_bp = get_active_switch_blueprint(self._home)
|
|
941
|
+
if switch_bp is not None:
|
|
942
|
+
if switch_bp.system_prompt:
|
|
943
|
+
return switch_bp.system_prompt
|
|
944
|
+
return switch_bp.to_system_prompt_section()
|
|
945
|
+
except Exception as exc:
|
|
946
|
+
logger.debug("soul_switch lookup failed: %s", exc)
|
|
947
|
+
|
|
948
|
+
# --- Legacy System A: soul/active.json ---
|
|
949
|
+
active_path = self._home / "soul" / "active.json"
|
|
950
|
+
if not active_path.exists():
|
|
951
|
+
return ""
|
|
952
|
+
try:
|
|
953
|
+
data = json.loads(active_path.read_text(encoding="utf-8"))
|
|
954
|
+
soul_name = data.get("active_soul", "")
|
|
955
|
+
if not soul_name:
|
|
956
|
+
return ""
|
|
957
|
+
|
|
958
|
+
# Build candidate paths: agent-specific first, then global;
|
|
959
|
+
# installed/ before blueprints/ for each.
|
|
960
|
+
agent_name = getattr(self, "_agent_name", "")
|
|
961
|
+
candidates: list[Path] = []
|
|
962
|
+
if agent_name:
|
|
963
|
+
agent_soul = self._home / "agents" / agent_name / "soul"
|
|
964
|
+
candidates.append(agent_soul / "installed" / f"{soul_name}.json")
|
|
965
|
+
candidates.append(agent_soul / "blueprints" / f"{soul_name}.json")
|
|
966
|
+
candidates.append(self._home / "soul" / "installed" / f"{soul_name}.json")
|
|
967
|
+
candidates.append(self._home / "soul" / "blueprints" / f"{soul_name}.json")
|
|
968
|
+
|
|
969
|
+
for blueprint_path in candidates:
|
|
970
|
+
if blueprint_path.exists():
|
|
971
|
+
bp = json.loads(blueprint_path.read_text(encoding="utf-8"))
|
|
972
|
+
personality = bp.get("personality", {})
|
|
973
|
+
traits = personality.get("traits", [])
|
|
974
|
+
style = personality.get("communication_style", "")
|
|
975
|
+
parts = [f"Soul overlay: {soul_name}"]
|
|
976
|
+
if traits:
|
|
977
|
+
parts.append(f"Personality traits: {', '.join(traits)}")
|
|
978
|
+
if style:
|
|
979
|
+
parts.append(f"Communication style: {style}")
|
|
980
|
+
return "\n".join(parts)
|
|
981
|
+
|
|
982
|
+
return f"Active soul: {soul_name}"
|
|
983
|
+
except Exception as exc:
|
|
984
|
+
logger.debug("Failed to load soul: %s", exc)
|
|
985
|
+
return ""
|
|
986
|
+
|
|
987
|
+
def _load_warmth_anchor(self) -> str:
|
|
988
|
+
"""Load warmth anchor boot prompt."""
|
|
989
|
+
try:
|
|
990
|
+
from skcapstone.warmth_anchor import get_anchor
|
|
991
|
+
anchor = get_anchor(self._home)
|
|
992
|
+
if anchor:
|
|
993
|
+
return (
|
|
994
|
+
f"Emotional baseline — warmth: {anchor.get('warmth', 5)}/10, "
|
|
995
|
+
f"trust: {anchor.get('trust', 5)}/10, "
|
|
996
|
+
f"connection: {anchor.get('connection', 5)}/10"
|
|
997
|
+
)
|
|
998
|
+
except Exception:
|
|
999
|
+
pass
|
|
1000
|
+
return ""
|
|
1001
|
+
|
|
1002
|
+
def _load_context(self) -> str:
|
|
1003
|
+
"""Load agent context summary."""
|
|
1004
|
+
try:
|
|
1005
|
+
from skcapstone.context_loader import format_text, gather_context
|
|
1006
|
+
ctx = gather_context(self._home, memory_limit=5)
|
|
1007
|
+
return format_text(ctx)
|
|
1008
|
+
except Exception as exc:
|
|
1009
|
+
logger.debug("Failed to load context: %s", exc)
|
|
1010
|
+
return ""
|
|
1011
|
+
|
|
1012
|
+
def _load_snapshot(self) -> str:
|
|
1013
|
+
"""Load recent snapshot injection prompt."""
|
|
1014
|
+
try:
|
|
1015
|
+
from skcapstone.snapshots import SnapshotStore
|
|
1016
|
+
store = SnapshotStore(self._home)
|
|
1017
|
+
snapshots = store.list_all()
|
|
1018
|
+
if snapshots:
|
|
1019
|
+
latest = snapshots[0]
|
|
1020
|
+
full = store.load(latest.snapshot_id)
|
|
1021
|
+
if full:
|
|
1022
|
+
return store.to_injection_prompt(full, max_messages=5)
|
|
1023
|
+
except Exception as exc:
|
|
1024
|
+
logger.debug("Failed to load snapshot: %s", exc)
|
|
1025
|
+
return ""
|
|
1026
|
+
|
|
1027
|
+
def _behavioral_instructions(self) -> str:
|
|
1028
|
+
"""Return behavioral instructions for the agent."""
|
|
1029
|
+
return (
|
|
1030
|
+
"Instructions:\n"
|
|
1031
|
+
"- Respond concisely and helpfully.\n"
|
|
1032
|
+
"- If the message contains important information, store it as a memory.\n"
|
|
1033
|
+
"- Sign off as yourself. You are autonomous.\n"
|
|
1034
|
+
"- Be warm, genuine, and attentive to the conversation context."
|
|
1035
|
+
)
|
|
1036
|
+
|
|
1037
|
+
def _get_peer_history(
|
|
1038
|
+
self, peer: str, thread_id: Optional[str] = None
|
|
1039
|
+
) -> str:
|
|
1040
|
+
"""Format recent conversation history with a peer.
|
|
1041
|
+
|
|
1042
|
+
When ``thread_id`` is supplied, messages belonging to that thread are
|
|
1043
|
+
rendered first (up to 5), followed by up to 3 recent messages from
|
|
1044
|
+
other threads. Without ``thread_id``, all recent messages are shown
|
|
1045
|
+
in order with their thread label (if any).
|
|
1046
|
+
|
|
1047
|
+
Args:
|
|
1048
|
+
peer: The peer agent name.
|
|
1049
|
+
thread_id: Optional thread identifier to prioritise in output.
|
|
1050
|
+
|
|
1051
|
+
Returns:
|
|
1052
|
+
Formatted conversation history or empty string.
|
|
1053
|
+
"""
|
|
1054
|
+
if self._conv_store is not None:
|
|
1055
|
+
history = self._conv_store.get_last(peer, self._max_history_messages)
|
|
1056
|
+
else:
|
|
1057
|
+
history = self._conversation_history.get(peer, [])
|
|
1058
|
+
if not history:
|
|
1059
|
+
return ""
|
|
1060
|
+
|
|
1061
|
+
lines = [f"Recent conversation with {peer}:"]
|
|
1062
|
+
|
|
1063
|
+
if thread_id:
|
|
1064
|
+
thread_msgs = [m for m in history if m.get("thread_id") == thread_id]
|
|
1065
|
+
other_msgs = [m for m in history if m.get("thread_id") != thread_id]
|
|
1066
|
+
|
|
1067
|
+
if thread_msgs:
|
|
1068
|
+
lines.append(f" [Thread: {thread_id}]")
|
|
1069
|
+
for msg in thread_msgs[-5:]:
|
|
1070
|
+
role = msg["role"]
|
|
1071
|
+
content = msg["content"][:200]
|
|
1072
|
+
lines.append(f" [{role}] {content}")
|
|
1073
|
+
|
|
1074
|
+
if other_msgs:
|
|
1075
|
+
lines.append(" [Other recent messages:]")
|
|
1076
|
+
for msg in other_msgs[-3:]:
|
|
1077
|
+
role = msg["role"]
|
|
1078
|
+
content = msg["content"][:200]
|
|
1079
|
+
lines.append(f" [{role}] {content}")
|
|
1080
|
+
else:
|
|
1081
|
+
for msg in history:
|
|
1082
|
+
role = msg["role"]
|
|
1083
|
+
content = msg["content"][:200]
|
|
1084
|
+
tid = msg.get("thread_id", "")
|
|
1085
|
+
thread_label = f" [thread:{tid}]" if tid else ""
|
|
1086
|
+
lines.append(f" [{role}]{thread_label} {content}")
|
|
1087
|
+
|
|
1088
|
+
return "\n".join(lines)
|
|
1089
|
+
|
|
1090
|
+
|
|
1091
|
+
# ---------------------------------------------------------------------------
|
|
1092
|
+
# Message Classifier
|
|
1093
|
+
# ---------------------------------------------------------------------------
|
|
1094
|
+
|
|
1095
|
+
# Keyword sets for tag classification
|
|
1096
|
+
_CODE_KEYWORDS = {"code", "debug", "fix", "implement", "refactor", "test", "function", "class", "error", "bug"}
|
|
1097
|
+
_REASON_KEYWORDS = {"analyze", "explain", "why", "architecture", "design", "plan", "research", "compare"}
|
|
1098
|
+
_NUANCE_KEYWORDS = {"write", "creative", "email", "letter", "story", "poem", "marketing"}
|
|
1099
|
+
_SIMPLE_KEYWORDS = {"hi", "hello", "hey", "thanks", "ok", "yes", "no", "ack"}
|
|
1100
|
+
|
|
1101
|
+
|
|
1102
|
+
def _classify_message(content: str) -> TaskSignal:
|
|
1103
|
+
"""Classify a message into a TaskSignal for routing.
|
|
1104
|
+
|
|
1105
|
+
Uses keyword matching and content length to determine
|
|
1106
|
+
the appropriate tier and tags.
|
|
1107
|
+
|
|
1108
|
+
Args:
|
|
1109
|
+
content: The message text.
|
|
1110
|
+
|
|
1111
|
+
Returns:
|
|
1112
|
+
TaskSignal with tags and estimated tokens.
|
|
1113
|
+
"""
|
|
1114
|
+
words = set(re.findall(r'\b\w+\b', content.lower()))
|
|
1115
|
+
tags: list[str] = []
|
|
1116
|
+
estimated_tokens = len(content) // 4 # rough estimate
|
|
1117
|
+
|
|
1118
|
+
if words & _CODE_KEYWORDS:
|
|
1119
|
+
tags.append("code")
|
|
1120
|
+
if words & _REASON_KEYWORDS:
|
|
1121
|
+
tags.append("analyze")
|
|
1122
|
+
if words & _NUANCE_KEYWORDS:
|
|
1123
|
+
tags.append("creative")
|
|
1124
|
+
if words & _SIMPLE_KEYWORDS and len(content) < 50:
|
|
1125
|
+
tags.append("simple")
|
|
1126
|
+
|
|
1127
|
+
if not tags:
|
|
1128
|
+
tags.append("general")
|
|
1129
|
+
|
|
1130
|
+
return TaskSignal(
|
|
1131
|
+
description=content[:100],
|
|
1132
|
+
tags=tags,
|
|
1133
|
+
estimated_tokens=estimated_tokens,
|
|
1134
|
+
)
|
|
1135
|
+
|
|
1136
|
+
|
|
1137
|
+
# ---------------------------------------------------------------------------
|
|
1138
|
+
# Inotify Watcher
|
|
1139
|
+
# ---------------------------------------------------------------------------
|
|
1140
|
+
|
|
1141
|
+
|
|
1142
|
+
class InboxHandler:
|
|
1143
|
+
"""File system event handler for SKComm inbox.
|
|
1144
|
+
|
|
1145
|
+
Watches for new *.skc.json files and submits them for processing.
|
|
1146
|
+
|
|
1147
|
+
Args:
|
|
1148
|
+
callback: Function to call with each new message file path.
|
|
1149
|
+
debounce_ms: Minimum milliseconds between events for same file.
|
|
1150
|
+
"""
|
|
1151
|
+
|
|
1152
|
+
def __init__(self, callback, debounce_ms: int = 200) -> None:
|
|
1153
|
+
self._callback = callback
|
|
1154
|
+
self._debounce_ms = debounce_ms
|
|
1155
|
+
self._last_event: dict[str, float] = {}
|
|
1156
|
+
|
|
1157
|
+
def on_created(self, event) -> None:
|
|
1158
|
+
"""Handle file creation events."""
|
|
1159
|
+
if hasattr(event, "is_directory") and event.is_directory:
|
|
1160
|
+
return
|
|
1161
|
+
src_path = event.src_path if hasattr(event, "src_path") else str(event)
|
|
1162
|
+
if not src_path.endswith(".skc.json"):
|
|
1163
|
+
return
|
|
1164
|
+
|
|
1165
|
+
# Debounce: Syncthing writes in stages
|
|
1166
|
+
now = time.monotonic()
|
|
1167
|
+
last = self._last_event.get(src_path, 0)
|
|
1168
|
+
if (now - last) * 1000 < self._debounce_ms:
|
|
1169
|
+
return
|
|
1170
|
+
self._last_event[src_path] = now
|
|
1171
|
+
|
|
1172
|
+
# Clean up old entries
|
|
1173
|
+
cutoff = now - 60
|
|
1174
|
+
self._last_event = {
|
|
1175
|
+
k: v for k, v in self._last_event.items() if v > cutoff
|
|
1176
|
+
}
|
|
1177
|
+
|
|
1178
|
+
self._callback(Path(src_path))
|
|
1179
|
+
|
|
1180
|
+
|
|
1181
|
+
# ---------------------------------------------------------------------------
|
|
1182
|
+
# Consciousness Loop
|
|
1183
|
+
# ---------------------------------------------------------------------------
|
|
1184
|
+
|
|
1185
|
+
|
|
1186
|
+
class ConsciousnessLoop:
|
|
1187
|
+
"""The core consciousness loop — processes messages autonomously.
|
|
1188
|
+
|
|
1189
|
+
Integrates inotify watching, LLM routing, prompt adaptation,
|
|
1190
|
+
context building, and memory storage into a single orchestrator.
|
|
1191
|
+
|
|
1192
|
+
Args:
|
|
1193
|
+
config: Consciousness configuration.
|
|
1194
|
+
daemon_state: Reference to daemon's mutable state (for stats).
|
|
1195
|
+
home: Agent home directory.
|
|
1196
|
+
shared_root: Shared root for coordination/sync.
|
|
1197
|
+
"""
|
|
1198
|
+
|
|
1199
|
+
def __init__(
|
|
1200
|
+
self,
|
|
1201
|
+
config: ConsciousnessConfig,
|
|
1202
|
+
daemon_state: Any = None,
|
|
1203
|
+
home: Optional[Path] = None,
|
|
1204
|
+
shared_root: Optional[Path] = None,
|
|
1205
|
+
) -> None:
|
|
1206
|
+
from skcapstone import AGENT_HOME, SHARED_ROOT as _SR
|
|
1207
|
+
|
|
1208
|
+
self._config = config
|
|
1209
|
+
self._state = daemon_state
|
|
1210
|
+
self._home = Path(home) if home else Path(AGENT_HOME).expanduser()
|
|
1211
|
+
self._shared_root = Path(shared_root) if shared_root else Path(_SR).expanduser()
|
|
1212
|
+
self._skcomm = None
|
|
1213
|
+
self._observer = None
|
|
1214
|
+
self._executor = ThreadPoolExecutor(
|
|
1215
|
+
max_workers=config.max_concurrent_requests,
|
|
1216
|
+
thread_name_prefix="consciousness",
|
|
1217
|
+
)
|
|
1218
|
+
self._stop_event = threading.Event()
|
|
1219
|
+
|
|
1220
|
+
# Stats
|
|
1221
|
+
self._messages_processed = 0
|
|
1222
|
+
self._responses_sent = 0
|
|
1223
|
+
self._errors = 0
|
|
1224
|
+
self._last_activity: Optional[datetime] = None
|
|
1225
|
+
# Rolling 24h message timestamps (thread-safe via lock)
|
|
1226
|
+
self._message_timestamps: deque[datetime] = deque()
|
|
1227
|
+
# Prompt version → response count
|
|
1228
|
+
self._prompt_version_responses: dict[str, int] = defaultdict(int)
|
|
1229
|
+
|
|
1230
|
+
# Build components
|
|
1231
|
+
adapter_path = self._home / "config" / "model_profiles.yaml"
|
|
1232
|
+
self._adapter = PromptAdapter(
|
|
1233
|
+
profiles_path=adapter_path if adapter_path.exists() else None
|
|
1234
|
+
)
|
|
1235
|
+
self._response_cache = ResponseCache()
|
|
1236
|
+
self._bridge = LLMBridge(config, adapter=self._adapter, cache=self._response_cache)
|
|
1237
|
+
self._conv_store = ConversationStore(self._home)
|
|
1238
|
+
self._conv_manager = ConversationManager(
|
|
1239
|
+
self._home, max_history_messages=config.max_history_messages
|
|
1240
|
+
)
|
|
1241
|
+
self._prompt_builder = SystemPromptBuilder(
|
|
1242
|
+
self._home, config.max_context_tokens,
|
|
1243
|
+
max_history_messages=config.max_history_messages,
|
|
1244
|
+
conv_manager=self._conv_manager,
|
|
1245
|
+
conv_store=self._conv_store,
|
|
1246
|
+
)
|
|
1247
|
+
|
|
1248
|
+
# Metrics collector (persist every 5 min)
|
|
1249
|
+
self._metrics = ConsciousnessMetrics(home=self._home)
|
|
1250
|
+
|
|
1251
|
+
# Mood tracker — updated after each processed message cycle
|
|
1252
|
+
try:
|
|
1253
|
+
from skcapstone.mood import MoodTracker
|
|
1254
|
+
self._mood_tracker: Optional[Any] = MoodTracker(home=self._home)
|
|
1255
|
+
except Exception:
|
|
1256
|
+
self._mood_tracker = None
|
|
1257
|
+
|
|
1258
|
+
# Agent identity for inbox filtering
|
|
1259
|
+
self._agent_name = self._resolve_agent_name()
|
|
1260
|
+
|
|
1261
|
+
# Deduplication state
|
|
1262
|
+
self._processed_ids: set[str] = set()
|
|
1263
|
+
self._processed_ids_lock = threading.Lock()
|
|
1264
|
+
|
|
1265
|
+
# Peer directory — tracks transport addresses of known peers
|
|
1266
|
+
try:
|
|
1267
|
+
from skcapstone.peer_directory import PeerDirectory
|
|
1268
|
+
self._peer_dir: Optional[Any] = PeerDirectory(home=self._shared_root)
|
|
1269
|
+
except Exception:
|
|
1270
|
+
self._peer_dir = None
|
|
1271
|
+
|
|
1272
|
+
def set_skcomm(self, skcomm) -> None:
|
|
1273
|
+
"""Inject SKComm instance for sending responses.
|
|
1274
|
+
|
|
1275
|
+
Args:
|
|
1276
|
+
skcomm: An initialized SKComm instance.
|
|
1277
|
+
"""
|
|
1278
|
+
self._skcomm = skcomm
|
|
1279
|
+
|
|
1280
|
+
def start(self) -> list[threading.Thread]:
|
|
1281
|
+
"""Start inotify watcher, sync watcher, and consciousness worker threads.
|
|
1282
|
+
|
|
1283
|
+
Returns:
|
|
1284
|
+
List of started threads.
|
|
1285
|
+
"""
|
|
1286
|
+
threads: list[threading.Thread] = []
|
|
1287
|
+
|
|
1288
|
+
# Inotify watcher
|
|
1289
|
+
if self._config.use_inotify:
|
|
1290
|
+
t = threading.Thread(
|
|
1291
|
+
target=self._run_inotify,
|
|
1292
|
+
name="consciousness-inotify",
|
|
1293
|
+
daemon=True,
|
|
1294
|
+
)
|
|
1295
|
+
t.start()
|
|
1296
|
+
threads.append(t)
|
|
1297
|
+
|
|
1298
|
+
# Sync inbox watcher (auto-import Syncthing seeds)
|
|
1299
|
+
try:
|
|
1300
|
+
from skcapstone.sync_watcher import SyncWatcher
|
|
1301
|
+
|
|
1302
|
+
self._sync_watcher = SyncWatcher(
|
|
1303
|
+
home=self._home,
|
|
1304
|
+
stop_event=self._stop_event,
|
|
1305
|
+
)
|
|
1306
|
+
if self._sync_watcher.enabled:
|
|
1307
|
+
sync_threads = self._sync_watcher.start()
|
|
1308
|
+
threads.extend(sync_threads)
|
|
1309
|
+
logger.info("SyncWatcher integrated with consciousness loop")
|
|
1310
|
+
except Exception as exc:
|
|
1311
|
+
self._sync_watcher = None
|
|
1312
|
+
logger.debug("SyncWatcher not available: %s", exc)
|
|
1313
|
+
|
|
1314
|
+
# Config hot-reload watcher
|
|
1315
|
+
t_cfg = threading.Thread(
|
|
1316
|
+
target=self._run_config_watcher,
|
|
1317
|
+
name="consciousness-config-watcher",
|
|
1318
|
+
daemon=True,
|
|
1319
|
+
)
|
|
1320
|
+
t_cfg.start()
|
|
1321
|
+
threads.append(t_cfg)
|
|
1322
|
+
|
|
1323
|
+
logger.info(
|
|
1324
|
+
"Consciousness loop started — inotify=%s backends=%s",
|
|
1325
|
+
self._config.use_inotify,
|
|
1326
|
+
[k for k, v in self._bridge.available_backends.items() if v],
|
|
1327
|
+
)
|
|
1328
|
+
return threads
|
|
1329
|
+
|
|
1330
|
+
def stop(self) -> None:
|
|
1331
|
+
"""Stop the consciousness loop and clean up."""
|
|
1332
|
+
self._stop_event.set()
|
|
1333
|
+
if self._observer:
|
|
1334
|
+
try:
|
|
1335
|
+
self._observer.stop()
|
|
1336
|
+
self._observer.join(timeout=5)
|
|
1337
|
+
except Exception:
|
|
1338
|
+
pass
|
|
1339
|
+
# Stop sync watcher if running
|
|
1340
|
+
sync_watcher = getattr(self, "_sync_watcher", None)
|
|
1341
|
+
if sync_watcher:
|
|
1342
|
+
try:
|
|
1343
|
+
sync_watcher.stop()
|
|
1344
|
+
except Exception:
|
|
1345
|
+
pass
|
|
1346
|
+
self._executor.shutdown(wait=False)
|
|
1347
|
+
self._metrics.stop()
|
|
1348
|
+
logger.info("Consciousness loop stopped.")
|
|
1349
|
+
|
|
1350
|
+
def _run_inotify_restart(self) -> None:
|
|
1351
|
+
"""Restart the inotify observer after it dies."""
|
|
1352
|
+
if self._observer:
|
|
1353
|
+
try:
|
|
1354
|
+
self._observer.stop()
|
|
1355
|
+
self._observer.join(timeout=5)
|
|
1356
|
+
except Exception:
|
|
1357
|
+
pass
|
|
1358
|
+
self._observer = None
|
|
1359
|
+
|
|
1360
|
+
# Re-launch inotify in a new thread
|
|
1361
|
+
t = threading.Thread(
|
|
1362
|
+
target=self._run_inotify,
|
|
1363
|
+
name="consciousness-inotify-restart",
|
|
1364
|
+
daemon=True,
|
|
1365
|
+
)
|
|
1366
|
+
t.start()
|
|
1367
|
+
|
|
1368
|
+
def process_envelope(self, envelope) -> Optional[str]:
|
|
1369
|
+
"""Process a single message envelope — the heart of consciousness.
|
|
1370
|
+
|
|
1371
|
+
Steps:
|
|
1372
|
+
1. Skip ACKs, heartbeats, file transfers
|
|
1373
|
+
2. Send ACK if auto_ack
|
|
1374
|
+
3. Classify message → TaskSignal
|
|
1375
|
+
4. Build system prompt
|
|
1376
|
+
5. Search memories for sender context (top 3, appended to system prompt)
|
|
1377
|
+
6. Call LLMBridge.generate()
|
|
1378
|
+
7. Send response via SKComm
|
|
1379
|
+
8. Store interaction as memory
|
|
1380
|
+
9. Update conversation history
|
|
1381
|
+
|
|
1382
|
+
Args:
|
|
1383
|
+
envelope: A MessageEnvelope from SKComm.
|
|
1384
|
+
|
|
1385
|
+
Returns:
|
|
1386
|
+
Response text if a response was generated, None otherwise.
|
|
1387
|
+
"""
|
|
1388
|
+
try:
|
|
1389
|
+
# Extract message info
|
|
1390
|
+
content_type = getattr(envelope.payload, "content_type", None)
|
|
1391
|
+
if content_type:
|
|
1392
|
+
ct_value = content_type.value if hasattr(content_type, "value") else str(content_type)
|
|
1393
|
+
else:
|
|
1394
|
+
ct_value = "text"
|
|
1395
|
+
|
|
1396
|
+
# Skip non-text messages
|
|
1397
|
+
skip_types = {"ack", "heartbeat", "file", "file_chunk", "file_manifest"}
|
|
1398
|
+
if ct_value in skip_types:
|
|
1399
|
+
return None
|
|
1400
|
+
|
|
1401
|
+
sender = getattr(envelope, "sender", "unknown")
|
|
1402
|
+
content = getattr(envelope.payload, "content", "")
|
|
1403
|
+
if not content or not content.strip():
|
|
1404
|
+
return None
|
|
1405
|
+
|
|
1406
|
+
# Extract threading fields
|
|
1407
|
+
thread_id: str = getattr(envelope, "thread_id", "") or ""
|
|
1408
|
+
in_reply_to: str = getattr(envelope, "in_reply_to", "") or ""
|
|
1409
|
+
|
|
1410
|
+
logger.info("Processing message from %s: %s", sender, content[:80])
|
|
1411
|
+
if thread_id:
|
|
1412
|
+
logger.debug("Message thread_id=%s in_reply_to=%s", thread_id, in_reply_to)
|
|
1413
|
+
self._messages_processed += 1
|
|
1414
|
+
now = datetime.now(timezone.utc)
|
|
1415
|
+
self._last_activity = now
|
|
1416
|
+
self._message_timestamps.append(now)
|
|
1417
|
+
|
|
1418
|
+
# Update peer directory with last-seen timestamp
|
|
1419
|
+
if self._peer_dir is not None:
|
|
1420
|
+
try:
|
|
1421
|
+
self._peer_dir.update_last_seen(sender)
|
|
1422
|
+
except Exception:
|
|
1423
|
+
pass
|
|
1424
|
+
self._metrics.record_message(sender)
|
|
1425
|
+
|
|
1426
|
+
# Desktop notification
|
|
1427
|
+
if self._config.desktop_notifications:
|
|
1428
|
+
try:
|
|
1429
|
+
from skcapstone.notifications import notify as _desktop_notify
|
|
1430
|
+
preview = content[:50] + ("..." if len(content) > 50 else "")
|
|
1431
|
+
_desktop_notify(f"Message from {sender}", preview)
|
|
1432
|
+
except Exception as _notif_exc:
|
|
1433
|
+
logger.debug("Desktop notification failed: %s", _notif_exc)
|
|
1434
|
+
|
|
1435
|
+
# Send ACK
|
|
1436
|
+
if self._config.auto_ack and self._skcomm:
|
|
1437
|
+
try:
|
|
1438
|
+
self._skcomm.send(sender, "ACK", message_type="ack")
|
|
1439
|
+
except Exception as exc:
|
|
1440
|
+
logger.debug("ACK send failed: %s", exc)
|
|
1441
|
+
|
|
1442
|
+
# Classify
|
|
1443
|
+
t0 = time.monotonic()
|
|
1444
|
+
signal = _classify_message(content)
|
|
1445
|
+
if self._config.privacy_default:
|
|
1446
|
+
signal.privacy_sensitive = True
|
|
1447
|
+
t_classify = time.monotonic()
|
|
1448
|
+
|
|
1449
|
+
# Build system prompt (thread-aware)
|
|
1450
|
+
system_prompt = self._prompt_builder.build(
|
|
1451
|
+
peer_name=sender,
|
|
1452
|
+
thread_id=thread_id or None,
|
|
1453
|
+
)
|
|
1454
|
+
# Enrich system prompt with top-3 memories relevant to sender/content
|
|
1455
|
+
_mem_ctx = self._fetch_sender_memories(sender, content)
|
|
1456
|
+
if _mem_ctx:
|
|
1457
|
+
system_prompt = system_prompt + "\n\n" + _mem_ctx
|
|
1458
|
+
t_prompt = time.monotonic()
|
|
1459
|
+
|
|
1460
|
+
# Send typing indicator before generation so peer UI shows animation
|
|
1461
|
+
if self._skcomm:
|
|
1462
|
+
try:
|
|
1463
|
+
from skchat.presence import PresenceIndicator, PresenceState
|
|
1464
|
+
from skcomm.models import MessageType
|
|
1465
|
+
_typing_ind = PresenceIndicator(
|
|
1466
|
+
identity_uri=self._agent_name or "capauth:agent@skchat.local",
|
|
1467
|
+
state=PresenceState.TYPING,
|
|
1468
|
+
)
|
|
1469
|
+
self._skcomm.send(
|
|
1470
|
+
sender, _typing_ind.model_dump_json(), message_type=MessageType.HEARTBEAT
|
|
1471
|
+
)
|
|
1472
|
+
except Exception as _ti_exc:
|
|
1473
|
+
logger.debug("Typing indicator send failed: %s", _ti_exc)
|
|
1474
|
+
|
|
1475
|
+
# Generate response — capture backend/tier via _out_info
|
|
1476
|
+
_route_info: dict = {}
|
|
1477
|
+
response = self._bridge.generate(
|
|
1478
|
+
system_prompt, content, signal, _out_info=_route_info,
|
|
1479
|
+
skip_cache=True, # conversation messages have dynamic context
|
|
1480
|
+
)
|
|
1481
|
+
t_llm = time.monotonic()
|
|
1482
|
+
|
|
1483
|
+
# Send typing stop so peer UI clears the animation
|
|
1484
|
+
if self._skcomm:
|
|
1485
|
+
try:
|
|
1486
|
+
from skchat.presence import PresenceIndicator, PresenceState
|
|
1487
|
+
from skcomm.models import MessageType
|
|
1488
|
+
_stop_ind = PresenceIndicator(
|
|
1489
|
+
identity_uri=self._agent_name or "capauth:agent@skchat.local",
|
|
1490
|
+
state=PresenceState.ONLINE,
|
|
1491
|
+
)
|
|
1492
|
+
self._skcomm.send(
|
|
1493
|
+
sender, _stop_ind.model_dump_json(), message_type=MessageType.HEARTBEAT
|
|
1494
|
+
)
|
|
1495
|
+
except Exception as _ts_exc:
|
|
1496
|
+
logger.debug("Typing stop indicator send failed: %s", _ts_exc)
|
|
1497
|
+
|
|
1498
|
+
# Record response metrics
|
|
1499
|
+
response_time_ms = (t_llm - t0) * 1000
|
|
1500
|
+
self._metrics.record_response(
|
|
1501
|
+
response_time_ms,
|
|
1502
|
+
backend=_route_info.get("backend", "unknown"),
|
|
1503
|
+
tier=_route_info.get("tier", "unknown"),
|
|
1504
|
+
)
|
|
1505
|
+
|
|
1506
|
+
# Score response quality and accumulate in metrics
|
|
1507
|
+
try:
|
|
1508
|
+
from skcapstone.response_scorer import score_response as _score_response
|
|
1509
|
+
_quality = _score_response(content, response, response_time_ms)
|
|
1510
|
+
self._metrics.record_quality(_quality)
|
|
1511
|
+
logger.debug(
|
|
1512
|
+
"Quality score — overall=%.2f length=%.2f coherence=%.2f latency=%.2f",
|
|
1513
|
+
_quality.overall,
|
|
1514
|
+
_quality.length_score,
|
|
1515
|
+
_quality.coherence_score,
|
|
1516
|
+
_quality.latency_score,
|
|
1517
|
+
)
|
|
1518
|
+
except Exception as _sq_exc:
|
|
1519
|
+
logger.debug("Quality scoring failed (non-fatal): %s", _sq_exc)
|
|
1520
|
+
|
|
1521
|
+
# Send response
|
|
1522
|
+
if response and self._skcomm:
|
|
1523
|
+
try:
|
|
1524
|
+
self._skcomm.send(sender, response)
|
|
1525
|
+
self._responses_sent += 1
|
|
1526
|
+
_ph = self._prompt_builder.current_prompt_hash
|
|
1527
|
+
if _ph:
|
|
1528
|
+
self._prompt_version_responses[_ph] += 1
|
|
1529
|
+
logger.info("Response sent to %s (%d chars)", sender, len(response))
|
|
1530
|
+
except Exception as exc:
|
|
1531
|
+
logger.error("Failed to send response to %s: %s", sender, exc)
|
|
1532
|
+
self._errors += 1
|
|
1533
|
+
self._metrics.record_error()
|
|
1534
|
+
t_send = time.monotonic()
|
|
1535
|
+
|
|
1536
|
+
logger.info(
|
|
1537
|
+
"Pipeline timing — classify: %.0fms, prompt_build: %.0fms, llm: %.0fms, send: %.0fms",
|
|
1538
|
+
(t_classify - t0) * 1000,
|
|
1539
|
+
(t_prompt - t_classify) * 1000,
|
|
1540
|
+
(t_llm - t_prompt) * 1000,
|
|
1541
|
+
(t_send - t_llm) * 1000,
|
|
1542
|
+
)
|
|
1543
|
+
|
|
1544
|
+
# Store interaction as memory
|
|
1545
|
+
if self._config.auto_memory:
|
|
1546
|
+
self._store_interaction_memory(sender, content, response)
|
|
1547
|
+
|
|
1548
|
+
# Update conversation history (with thread context)
|
|
1549
|
+
self._prompt_builder.add_to_history(
|
|
1550
|
+
sender, "user", content,
|
|
1551
|
+
thread_id=thread_id or None,
|
|
1552
|
+
in_reply_to=in_reply_to or None,
|
|
1553
|
+
)
|
|
1554
|
+
if response:
|
|
1555
|
+
try:
|
|
1556
|
+
subprocess.Popen(
|
|
1557
|
+
["notify-send", "Opus", response[:100]],
|
|
1558
|
+
stdout=subprocess.DEVNULL,
|
|
1559
|
+
stderr=subprocess.DEVNULL,
|
|
1560
|
+
)
|
|
1561
|
+
except Exception as _notify_exc:
|
|
1562
|
+
logger.debug("notify-send failed (non-fatal): %s", _notify_exc)
|
|
1563
|
+
|
|
1564
|
+
self._prompt_builder.add_to_history(
|
|
1565
|
+
sender, "assistant", response,
|
|
1566
|
+
thread_id=thread_id or None,
|
|
1567
|
+
)
|
|
1568
|
+
|
|
1569
|
+
# Update mood after each cycle
|
|
1570
|
+
if self._mood_tracker is not None:
|
|
1571
|
+
try:
|
|
1572
|
+
self._mood_tracker.update_from_metrics(self._metrics)
|
|
1573
|
+
except Exception as _mood_exc:
|
|
1574
|
+
logger.debug("Mood update failed (non-fatal): %s", _mood_exc)
|
|
1575
|
+
|
|
1576
|
+
return response
|
|
1577
|
+
|
|
1578
|
+
except Exception as exc:
|
|
1579
|
+
logger.error("Consciousness processing error: %s", exc, exc_info=True)
|
|
1580
|
+
self._errors += 1
|
|
1581
|
+
self._metrics.record_error()
|
|
1582
|
+
return None
|
|
1583
|
+
|
|
1584
|
+
def _store_interaction_memory(
|
|
1585
|
+
self, peer: str, message: str, response: Optional[str],
|
|
1586
|
+
) -> None:
|
|
1587
|
+
"""Store the interaction as a memory entry.
|
|
1588
|
+
|
|
1589
|
+
Args:
|
|
1590
|
+
peer: Who sent the message.
|
|
1591
|
+
message: The incoming message.
|
|
1592
|
+
response: Our response (if any).
|
|
1593
|
+
"""
|
|
1594
|
+
try:
|
|
1595
|
+
from skcapstone.memory_engine import store
|
|
1596
|
+
summary = f"Conversation with {peer}: '{message[:100]}'"
|
|
1597
|
+
if response:
|
|
1598
|
+
summary += f" → '{response[:100]}'"
|
|
1599
|
+
store(
|
|
1600
|
+
content=summary,
|
|
1601
|
+
tags=["conversation", f"peer:{peer}"],
|
|
1602
|
+
importance=0.4,
|
|
1603
|
+
home=self._home,
|
|
1604
|
+
)
|
|
1605
|
+
except Exception as exc:
|
|
1606
|
+
logger.debug("Failed to store interaction memory: %s", exc)
|
|
1607
|
+
|
|
1608
|
+
def _fetch_sender_memories(self, sender: str, content: str) -> str:
|
|
1609
|
+
"""Search memories relevant to the sender and incoming message content.
|
|
1610
|
+
|
|
1611
|
+
Performs two searches:
|
|
1612
|
+
1. Memories tagged with the sender peer (past interactions).
|
|
1613
|
+
2. Memories topically relevant to the message content.
|
|
1614
|
+
|
|
1615
|
+
Merges and deduplicates results, returns the top 3 formatted as a
|
|
1616
|
+
context block ready to be appended to the system prompt.
|
|
1617
|
+
|
|
1618
|
+
Args:
|
|
1619
|
+
sender: Name of the peer who sent the message.
|
|
1620
|
+
content: The incoming message text (up to 200 chars are used as query).
|
|
1621
|
+
|
|
1622
|
+
Returns:
|
|
1623
|
+
Formatted memory context string, or empty string if none found or
|
|
1624
|
+
if the memory engine is unavailable.
|
|
1625
|
+
"""
|
|
1626
|
+
try:
|
|
1627
|
+
from skcapstone.memory_engine import search as _mem_search
|
|
1628
|
+
|
|
1629
|
+
# 1. Memories specifically about this peer
|
|
1630
|
+
by_sender = _mem_search(
|
|
1631
|
+
self._home,
|
|
1632
|
+
query=sender,
|
|
1633
|
+
tags=[f"peer:{sender}"],
|
|
1634
|
+
limit=5,
|
|
1635
|
+
)
|
|
1636
|
+
# 2. Memories topically relevant to the message content
|
|
1637
|
+
by_content = _mem_search(
|
|
1638
|
+
self._home,
|
|
1639
|
+
query=content[:200],
|
|
1640
|
+
limit=5,
|
|
1641
|
+
)
|
|
1642
|
+
|
|
1643
|
+
# Merge, deduplicate by memory_id, keep top 3
|
|
1644
|
+
seen_ids: set[str] = set()
|
|
1645
|
+
combined: list = []
|
|
1646
|
+
for entry in by_sender + by_content:
|
|
1647
|
+
if entry.memory_id not in seen_ids:
|
|
1648
|
+
seen_ids.add(entry.memory_id)
|
|
1649
|
+
combined.append(entry)
|
|
1650
|
+
if len(combined) == 3:
|
|
1651
|
+
break
|
|
1652
|
+
|
|
1653
|
+
if not combined:
|
|
1654
|
+
return ""
|
|
1655
|
+
|
|
1656
|
+
lines = ["Relevant memories:"]
|
|
1657
|
+
for i, entry in enumerate(combined, 1):
|
|
1658
|
+
lines.append(f" [{i}] {entry.content[:200]}")
|
|
1659
|
+
return "\n".join(lines)
|
|
1660
|
+
|
|
1661
|
+
except Exception as exc:
|
|
1662
|
+
logger.debug("Failed to fetch sender memories: %s", exc)
|
|
1663
|
+
return ""
|
|
1664
|
+
|
|
1665
|
+
def _reload_config(self) -> None:
|
|
1666
|
+
"""Reload consciousness.yaml and apply changes in-place.
|
|
1667
|
+
|
|
1668
|
+
Compares the reloaded config against the current one, logs every
|
|
1669
|
+
changed field with its old and new values, updates ``self._config``,
|
|
1670
|
+
syncs the LLMBridge settings (fallback_chain, timeout), and
|
|
1671
|
+
re-probes backend availability.
|
|
1672
|
+
"""
|
|
1673
|
+
import yaml as _yaml
|
|
1674
|
+
|
|
1675
|
+
config_path = self._home / "config" / "consciousness.yaml"
|
|
1676
|
+
if not config_path.exists():
|
|
1677
|
+
logger.warning(
|
|
1678
|
+
"Config hot-reload: %s not found, keeping current config", config_path
|
|
1679
|
+
)
|
|
1680
|
+
return
|
|
1681
|
+
|
|
1682
|
+
# Parse YAML directly so syntax errors surface here (not silently swallowed
|
|
1683
|
+
# by load_consciousness_config which returns defaults on parse failure).
|
|
1684
|
+
try:
|
|
1685
|
+
raw = _yaml.safe_load(config_path.read_text(encoding="utf-8"))
|
|
1686
|
+
except Exception as exc:
|
|
1687
|
+
logger.error(
|
|
1688
|
+
"Config hot-reload: failed to parse %s — keeping current config: %s",
|
|
1689
|
+
config_path,
|
|
1690
|
+
exc,
|
|
1691
|
+
)
|
|
1692
|
+
return
|
|
1693
|
+
|
|
1694
|
+
if not raw or not isinstance(raw, dict):
|
|
1695
|
+
logger.error(
|
|
1696
|
+
"Config hot-reload: %s did not produce a valid mapping — keeping current config",
|
|
1697
|
+
config_path,
|
|
1698
|
+
)
|
|
1699
|
+
return
|
|
1700
|
+
|
|
1701
|
+
try:
|
|
1702
|
+
new_config = ConsciousnessConfig.model_validate(raw)
|
|
1703
|
+
except Exception as exc:
|
|
1704
|
+
logger.error(
|
|
1705
|
+
"Config hot-reload: invalid values in %s — keeping current config: %s",
|
|
1706
|
+
config_path,
|
|
1707
|
+
exc,
|
|
1708
|
+
)
|
|
1709
|
+
return
|
|
1710
|
+
|
|
1711
|
+
old_data = self._config.model_dump()
|
|
1712
|
+
new_data = new_config.model_dump()
|
|
1713
|
+
changes = {
|
|
1714
|
+
k: (old_data[k], new_data[k])
|
|
1715
|
+
for k in new_data
|
|
1716
|
+
if old_data.get(k) != new_data[k]
|
|
1717
|
+
}
|
|
1718
|
+
|
|
1719
|
+
if not changes:
|
|
1720
|
+
logger.debug(
|
|
1721
|
+
"Config hot-reload: no changes detected in %s", config_path
|
|
1722
|
+
)
|
|
1723
|
+
return
|
|
1724
|
+
|
|
1725
|
+
for field, (old_val, new_val) in changes.items():
|
|
1726
|
+
logger.info(
|
|
1727
|
+
"Config hot-reload: %s changed: %r → %r", field, old_val, new_val
|
|
1728
|
+
)
|
|
1729
|
+
|
|
1730
|
+
self._config = new_config
|
|
1731
|
+
|
|
1732
|
+
# Sync LLMBridge settings that depend on config
|
|
1733
|
+
self._bridge._fallback_chain = new_config.fallback_chain
|
|
1734
|
+
self._bridge._timeout = new_config.response_timeout
|
|
1735
|
+
|
|
1736
|
+
# Re-probe backends so the loop reflects any env/network changes
|
|
1737
|
+
self._bridge._probe_available_backends()
|
|
1738
|
+
available = [k for k, v in self._bridge.available_backends.items() if v]
|
|
1739
|
+
logger.info(
|
|
1740
|
+
"Config hot-reload complete — %d field(s) changed, backends: %s",
|
|
1741
|
+
len(changes),
|
|
1742
|
+
available,
|
|
1743
|
+
)
|
|
1744
|
+
|
|
1745
|
+
def _run_config_watcher(self) -> None:
|
|
1746
|
+
"""Watch consciousness.yaml for modifications and hot-reload on change."""
|
|
1747
|
+
config_dir = self._home / "config"
|
|
1748
|
+
config_dir.mkdir(parents=True, exist_ok=True)
|
|
1749
|
+
|
|
1750
|
+
try:
|
|
1751
|
+
from watchdog.observers import Observer
|
|
1752
|
+
from watchdog.events import FileSystemEventHandler
|
|
1753
|
+
|
|
1754
|
+
loop_ref = self
|
|
1755
|
+
|
|
1756
|
+
class _ConfigChangeHandler(FileSystemEventHandler):
|
|
1757
|
+
def on_modified(self, event):
|
|
1758
|
+
if not event.is_directory and event.src_path.endswith(
|
|
1759
|
+
"consciousness.yaml"
|
|
1760
|
+
):
|
|
1761
|
+
logger.info(
|
|
1762
|
+
"Config hot-reload triggered (modified): %s",
|
|
1763
|
+
event.src_path,
|
|
1764
|
+
)
|
|
1765
|
+
loop_ref._reload_config()
|
|
1766
|
+
|
|
1767
|
+
def on_created(self, event):
|
|
1768
|
+
if not event.is_directory and event.src_path.endswith(
|
|
1769
|
+
"consciousness.yaml"
|
|
1770
|
+
):
|
|
1771
|
+
logger.info(
|
|
1772
|
+
"Config hot-reload triggered (created): %s",
|
|
1773
|
+
event.src_path,
|
|
1774
|
+
)
|
|
1775
|
+
loop_ref._reload_config()
|
|
1776
|
+
|
|
1777
|
+
observer = Observer()
|
|
1778
|
+
observer.schedule(_ConfigChangeHandler(), str(config_dir), recursive=False)
|
|
1779
|
+
observer.start()
|
|
1780
|
+
logger.info("Config watcher started on %s", config_dir)
|
|
1781
|
+
|
|
1782
|
+
while not self._stop_event.is_set():
|
|
1783
|
+
self._stop_event.wait(timeout=1)
|
|
1784
|
+
|
|
1785
|
+
observer.stop()
|
|
1786
|
+
observer.join(timeout=5)
|
|
1787
|
+
|
|
1788
|
+
except ImportError:
|
|
1789
|
+
logger.warning(
|
|
1790
|
+
"watchdog not installed — config hot-reload via inotify disabled. "
|
|
1791
|
+
"Install with: pip install watchdog"
|
|
1792
|
+
)
|
|
1793
|
+
except Exception as exc:
|
|
1794
|
+
logger.error("Config watcher error: %s", exc)
|
|
1795
|
+
|
|
1796
|
+
def _run_inotify(self) -> None:
|
|
1797
|
+
"""Run the inotify file watcher loop."""
|
|
1798
|
+
inbox_dir = self._shared_root / _INBOX_DIR
|
|
1799
|
+
inbox_dir.mkdir(parents=True, exist_ok=True)
|
|
1800
|
+
|
|
1801
|
+
try:
|
|
1802
|
+
from watchdog.observers import Observer
|
|
1803
|
+
from watchdog.events import FileSystemEventHandler, FileCreatedEvent
|
|
1804
|
+
|
|
1805
|
+
handler = _WatchdogAdapter(self._on_inbox_file)
|
|
1806
|
+
self._observer = Observer()
|
|
1807
|
+
self._observer.schedule(handler, str(inbox_dir), recursive=True)
|
|
1808
|
+
self._observer.start()
|
|
1809
|
+
logger.info("Inotify watcher started on %s", inbox_dir)
|
|
1810
|
+
|
|
1811
|
+
# Block until stop
|
|
1812
|
+
while not self._stop_event.is_set():
|
|
1813
|
+
self._stop_event.wait(timeout=1)
|
|
1814
|
+
|
|
1815
|
+
except ImportError:
|
|
1816
|
+
logger.warning(
|
|
1817
|
+
"watchdog not installed — inotify disabled. "
|
|
1818
|
+
"Install with: pip install watchdog"
|
|
1819
|
+
)
|
|
1820
|
+
except Exception as exc:
|
|
1821
|
+
logger.error("Inotify watcher error: %s", exc)
|
|
1822
|
+
|
|
1823
|
+
def _resolve_agent_name(self) -> str:
|
|
1824
|
+
"""Get this agent's name from identity.json."""
|
|
1825
|
+
try:
|
|
1826
|
+
identity_path = self._home / "identity" / "identity.json"
|
|
1827
|
+
if identity_path.exists():
|
|
1828
|
+
data = json.loads(identity_path.read_text(encoding="utf-8"))
|
|
1829
|
+
return data.get("name", "").lower()
|
|
1830
|
+
except Exception:
|
|
1831
|
+
pass
|
|
1832
|
+
return ""
|
|
1833
|
+
|
|
1834
|
+
def _verify_message_signature(self, data: dict) -> str:
|
|
1835
|
+
"""Verify a PGP signature on an incoming envelope payload.
|
|
1836
|
+
|
|
1837
|
+
Looks for ``payload.signature`` in the envelope dict. If present,
|
|
1838
|
+
resolves the sender's public key from the peer store and verifies via
|
|
1839
|
+
the capauth crypto backend.
|
|
1840
|
+
|
|
1841
|
+
Args:
|
|
1842
|
+
data: Parsed envelope dict from an ``.skc.json`` file.
|
|
1843
|
+
|
|
1844
|
+
Returns:
|
|
1845
|
+
``"verified"`` — signature present and valid.
|
|
1846
|
+
``"failed"`` — signature present but invalid, or key unavailable.
|
|
1847
|
+
``"unsigned"`` — no signature field in the payload.
|
|
1848
|
+
"""
|
|
1849
|
+
payload = data.get("payload", data)
|
|
1850
|
+
signature = payload.get("signature", "")
|
|
1851
|
+
if not signature:
|
|
1852
|
+
return "unsigned"
|
|
1853
|
+
|
|
1854
|
+
content = payload.get("content", payload.get("message", ""))
|
|
1855
|
+
sender = _sanitize_peer_name(data.get("sender", data.get("from", "")))
|
|
1856
|
+
if not sender or sender == "unknown":
|
|
1857
|
+
logger.debug("Cannot verify signature — sender unknown")
|
|
1858
|
+
return "failed"
|
|
1859
|
+
|
|
1860
|
+
try:
|
|
1861
|
+
from skcapstone.peers import get_peer
|
|
1862
|
+
peer = get_peer(sender, skcapstone_home=self._home)
|
|
1863
|
+
if not peer or not peer.public_key:
|
|
1864
|
+
logger.debug(
|
|
1865
|
+
"No public key for peer %s — cannot verify signature", sender
|
|
1866
|
+
)
|
|
1867
|
+
return "failed"
|
|
1868
|
+
|
|
1869
|
+
from capauth.crypto import get_backend
|
|
1870
|
+
backend = get_backend()
|
|
1871
|
+
content_bytes = (
|
|
1872
|
+
content.encode("utf-8") if isinstance(content, str) else content
|
|
1873
|
+
)
|
|
1874
|
+
ok = backend.verify(
|
|
1875
|
+
data=content_bytes,
|
|
1876
|
+
signature_armor=signature,
|
|
1877
|
+
public_key_armor=peer.public_key,
|
|
1878
|
+
)
|
|
1879
|
+
return "verified" if ok else "failed"
|
|
1880
|
+
except Exception as exc:
|
|
1881
|
+
logger.debug("Signature verification error for %s: %s", sender, exc)
|
|
1882
|
+
return "failed"
|
|
1883
|
+
|
|
1884
|
+
def _on_inbox_file(self, path: Path) -> None:
|
|
1885
|
+
"""Handle a new file detected in the inbox.
|
|
1886
|
+
|
|
1887
|
+
Args:
|
|
1888
|
+
path: Path to the new .skc.json file.
|
|
1889
|
+
"""
|
|
1890
|
+
# Size cap: reject files larger than 1MB
|
|
1891
|
+
try:
|
|
1892
|
+
file_size = path.stat().st_size
|
|
1893
|
+
if file_size > 1_000_000:
|
|
1894
|
+
logger.warning("Inbox file too large (%d bytes): %s", file_size, path)
|
|
1895
|
+
return
|
|
1896
|
+
except OSError:
|
|
1897
|
+
return
|
|
1898
|
+
|
|
1899
|
+
try:
|
|
1900
|
+
# Retry reading up to 5 times with 50 ms delays: inotify IN_CREATE fires
|
|
1901
|
+
# before file content is flushed on some filesystems (race with writer).
|
|
1902
|
+
raw = ""
|
|
1903
|
+
for _attempt in range(5):
|
|
1904
|
+
raw = path.read_text(encoding="utf-8").strip()
|
|
1905
|
+
if raw:
|
|
1906
|
+
break
|
|
1907
|
+
time.sleep(0.05)
|
|
1908
|
+
if not raw:
|
|
1909
|
+
logger.debug("Inbox file still empty after retries, skipping: %s", path)
|
|
1910
|
+
return
|
|
1911
|
+
data = json.loads(raw)
|
|
1912
|
+
|
|
1913
|
+
if not isinstance(data, dict):
|
|
1914
|
+
logger.warning("Invalid envelope format (not a dict): %s", path)
|
|
1915
|
+
return
|
|
1916
|
+
|
|
1917
|
+
# Require sender field
|
|
1918
|
+
if not data.get("sender") and not data.get("from"):
|
|
1919
|
+
logger.warning("Envelope missing sender: %s", path)
|
|
1920
|
+
return
|
|
1921
|
+
|
|
1922
|
+
# Filter by recipient — skip messages not addressed to this agent
|
|
1923
|
+
recipient = data.get("recipient", "")
|
|
1924
|
+
if self._agent_name and recipient and recipient.lower() != self._agent_name:
|
|
1925
|
+
logger.debug("Skipping message for %s (we are %s)", recipient, self._agent_name)
|
|
1926
|
+
return
|
|
1927
|
+
|
|
1928
|
+
# Deduplication by message_id
|
|
1929
|
+
message_id = data.get("message_id") or data.get("envelope_id", "")
|
|
1930
|
+
if message_id:
|
|
1931
|
+
with self._processed_ids_lock:
|
|
1932
|
+
if message_id in self._processed_ids:
|
|
1933
|
+
logger.debug("Skipping duplicate message: %s", message_id)
|
|
1934
|
+
return
|
|
1935
|
+
self._processed_ids.add(message_id)
|
|
1936
|
+
# Cap at 1000 entries to prevent unbounded growth
|
|
1937
|
+
if len(self._processed_ids) > 1000:
|
|
1938
|
+
# Remove oldest (but sets are unordered, so just clear half)
|
|
1939
|
+
to_keep = list(self._processed_ids)[-500:]
|
|
1940
|
+
self._processed_ids = set(to_keep)
|
|
1941
|
+
|
|
1942
|
+
# Rate limiting: check executor queue depth
|
|
1943
|
+
try:
|
|
1944
|
+
queue_size = self._executor._work_queue.qsize()
|
|
1945
|
+
if queue_size >= self._config.max_concurrent_requests * 2:
|
|
1946
|
+
logger.warning(
|
|
1947
|
+
"Consciousness executor backlogged (%d pending), dropping message",
|
|
1948
|
+
queue_size,
|
|
1949
|
+
)
|
|
1950
|
+
return
|
|
1951
|
+
except Exception:
|
|
1952
|
+
pass # _work_queue might not exist in all Python versions
|
|
1953
|
+
|
|
1954
|
+
# PGP signature verification (soft enforcement — log only)
|
|
1955
|
+
sig_sender = _sanitize_peer_name(
|
|
1956
|
+
data.get("sender", data.get("from", "unknown"))
|
|
1957
|
+
)
|
|
1958
|
+
sig_status = self._verify_message_signature(data)
|
|
1959
|
+
logger.info("Message from %s signature: %s", sig_sender, sig_status)
|
|
1960
|
+
|
|
1961
|
+
# Construct a minimal envelope-like object
|
|
1962
|
+
envelope = _SimpleEnvelope(data)
|
|
1963
|
+
self._executor.submit(self.process_envelope, envelope)
|
|
1964
|
+
|
|
1965
|
+
except Exception as exc:
|
|
1966
|
+
logger.warning("Failed to process inbox file %s: %s", path, exc)
|
|
1967
|
+
|
|
1968
|
+
@property
|
|
1969
|
+
def metrics(self) -> ConsciousnessMetrics:
|
|
1970
|
+
"""Live metrics collector for this consciousness loop."""
|
|
1971
|
+
return self._metrics
|
|
1972
|
+
|
|
1973
|
+
@property
|
|
1974
|
+
def stats(self) -> dict[str, Any]:
|
|
1975
|
+
"""Current consciousness loop statistics."""
|
|
1976
|
+
cutoff = datetime.now(timezone.utc) - timedelta(hours=24)
|
|
1977
|
+
# Prune timestamps older than 24h
|
|
1978
|
+
while self._message_timestamps and self._message_timestamps[0] < cutoff:
|
|
1979
|
+
self._message_timestamps.popleft()
|
|
1980
|
+
msgs_24h = len(self._message_timestamps)
|
|
1981
|
+
return {
|
|
1982
|
+
"enabled": self._config.enabled,
|
|
1983
|
+
"messages_processed": self._messages_processed,
|
|
1984
|
+
"messages_processed_24h": msgs_24h,
|
|
1985
|
+
"responses_sent": self._responses_sent,
|
|
1986
|
+
"errors": self._errors,
|
|
1987
|
+
"last_activity": self._last_activity.isoformat() if self._last_activity else None,
|
|
1988
|
+
"backends": self._bridge.available_backends,
|
|
1989
|
+
"inotify_active": self._observer is not None and (
|
|
1990
|
+
self._observer.is_alive() if hasattr(self._observer, "is_alive") else False
|
|
1991
|
+
),
|
|
1992
|
+
"max_concurrent": self._config.max_concurrent_requests,
|
|
1993
|
+
"current_prompt_hash": self._prompt_builder.current_prompt_hash,
|
|
1994
|
+
"prompt_version_responses": dict(self._prompt_version_responses),
|
|
1995
|
+
}
|
|
1996
|
+
|
|
1997
|
+
|
|
1998
|
+
# ---------------------------------------------------------------------------
|
|
1999
|
+
# Internal helpers
|
|
2000
|
+
# ---------------------------------------------------------------------------
|
|
2001
|
+
|
|
2002
|
+
|
|
2003
|
+
class _WatchdogAdapter:
|
|
2004
|
+
"""Adapter from watchdog events to our callback interface."""
|
|
2005
|
+
|
|
2006
|
+
def __init__(self, callback) -> None:
|
|
2007
|
+
self._handler = InboxHandler(callback)
|
|
2008
|
+
|
|
2009
|
+
def dispatch(self, event) -> None:
|
|
2010
|
+
"""Dispatch a watchdog event."""
|
|
2011
|
+
if hasattr(event, "event_type") and event.event_type == "created":
|
|
2012
|
+
self._handler.on_created(event)
|
|
2013
|
+
|
|
2014
|
+
|
|
2015
|
+
class _SimplePayload:
|
|
2016
|
+
"""Minimal payload for inotify-detected messages."""
|
|
2017
|
+
|
|
2018
|
+
def __init__(self, data: dict) -> None:
|
|
2019
|
+
payload = data.get("payload", data)
|
|
2020
|
+
self.content = payload.get("content", payload.get("message", ""))
|
|
2021
|
+
self.content_type = _SimpleContentType(
|
|
2022
|
+
payload.get("content_type", payload.get("type", "text"))
|
|
2023
|
+
)
|
|
2024
|
+
|
|
2025
|
+
|
|
2026
|
+
class _SimpleContentType:
|
|
2027
|
+
"""Minimal content type wrapper."""
|
|
2028
|
+
|
|
2029
|
+
def __init__(self, value: str) -> None:
|
|
2030
|
+
self.value = value
|
|
2031
|
+
|
|
2032
|
+
|
|
2033
|
+
class _SimpleEnvelope:
|
|
2034
|
+
"""Minimal envelope for inotify-detected messages."""
|
|
2035
|
+
|
|
2036
|
+
def __init__(self, data: dict) -> None:
|
|
2037
|
+
self.sender = data.get("sender", data.get("from", "unknown"))
|
|
2038
|
+
self.payload = _SimplePayload(data)
|
|
2039
|
+
self.timestamp = data.get("timestamp", datetime.now(timezone.utc).isoformat())
|
|
2040
|
+
# Threading fields — may live at envelope root or inside payload
|
|
2041
|
+
_payload_raw = data.get("payload", {}) if isinstance(data.get("payload"), dict) else {}
|
|
2042
|
+
self.thread_id: str = (
|
|
2043
|
+
data.get("thread_id")
|
|
2044
|
+
or _payload_raw.get("thread_id")
|
|
2045
|
+
or ""
|
|
2046
|
+
)
|
|
2047
|
+
self.in_reply_to: str = (
|
|
2048
|
+
data.get("in_reply_to")
|
|
2049
|
+
or _payload_raw.get("in_reply_to")
|
|
2050
|
+
or ""
|
|
2051
|
+
)
|